Merge "DrmUtils: refactor IDrm/ICrypto creation"
diff --git a/apex/ld.config.txt b/apex/ld.config.txt
index 8f60bb2..ec8f049 100644
--- a/apex/ld.config.txt
+++ b/apex/ld.config.txt
@@ -49,7 +49,7 @@
 namespace.platform.asan.search.paths += /apex/com.android.runtime/${LIB}
 
 # /system/lib/libc.so, etc are symlinks to /apex/com.android.runtime/lib/bionic/libc.so, etc.
-# Add /apex/... pat to the permitted paths because linker uses realpath(3)
+# Add /apex/... path to the permitted paths because linker uses realpath(3)
 # to check the accessibility of the lib. We could add this to search.paths
 # instead but that makes the resolution of bionic libs be dependent on
 # the order of /system/lib and /apex/... in search.paths. If /apex/...
@@ -136,3 +136,9 @@
 
 # Add a link for libz.so which is llndk on devices where VNDK is not enforced.
 namespace.sphal.link.platform.shared_libs += libz.so
+
+# With VNDK APEX, /system/${LIB}/vndk-sp${VNDK_VER} is a symlink to the following.
+# Add /apex/... path to the permitted paths because linker uses realpath(3)
+# to check the accessibility of the lib.
+namespace.sphal.permitted.paths += /apex/com.android.vndk.${VNDK_APEX_VER}/${LIB}
+namespace.sphal.asan.permitted.paths += /apex/com.android.vndk.${VNDK_APEX_VER}/${LIB}
diff --git a/camera/ndk/impl/ACameraMetadata.cpp b/camera/ndk/impl/ACameraMetadata.cpp
index 77dcd48..7c41b5e 100644
--- a/camera/ndk/impl/ACameraMetadata.cpp
+++ b/camera/ndk/impl/ACameraMetadata.cpp
@@ -479,6 +479,7 @@
         case ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE:
         case ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST:
         case ACAMERA_CONTROL_ENABLE_ZSL:
+        case ACAMERA_CONTROL_BOKEH_MODE:
         case ACAMERA_EDGE_MODE:
         case ACAMERA_FLASH_MODE:
         case ACAMERA_HOT_PIXEL_MODE:
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 68fe045..825f308 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -140,7 +140,7 @@
      * application controls how the color mapping is performed.</p>
      * <p>We define the expected processing pipeline below. For consistency
      * across devices, this is always the case with TRANSFORM_MATRIX.</p>
-     * <p>When either FULL or HIGH_QUALITY is used, the camera device may
+     * <p>When either FAST or HIGH_QUALITY is used, the camera device may
      * do additional processing but ACAMERA_COLOR_CORRECTION_GAINS and
      * ACAMERA_COLOR_CORRECTION_TRANSFORM will still be provided by the
      * camera device (in the results) and be roughly correct.</p>
@@ -1734,6 +1734,77 @@
      */
     ACAMERA_CONTROL_AF_SCENE_CHANGE =                           // byte (acamera_metadata_enum_android_control_af_scene_change_t)
             ACAMERA_CONTROL_START + 42,
+    /**
+     * <p>The list of bokeh modes that are supported by this camera device, and each bokeh mode's
+     * maximum streaming (non-stall) size with bokeh effect.</p>
+     *
+     * <p>Type: int32[3*n]</p>
+     *
+     * <p>This tag may appear in:
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul></p>
+     *
+     * <p>For OFF mode, the camera behaves normally with no bokeh effect.</p>
+     * <p>For STILL_CAPTURE mode, the maximum streaming dimension specifies the limit under which
+     * bokeh is effective when capture intent is PREVIEW. Note that when capture intent is
+     * PREVIEW, the bokeh effect may not be of as high quality as with STILL_CAPTURE intent,
+     * in order to maintain a reasonable frame rate. The maximum streaming dimension must be one
+     * of the YUV_420_888 or PRIVATE resolutions in availableStreamConfigurations, or (0, 0)
+     * if preview bokeh is not supported. If the application configures a stream larger than
+     * the maximum streaming dimension, bokeh effect may not be applied for this stream for
+     * PREVIEW intent.</p>
+     * <p>For CONTINUOUS mode, the maximum streaming dimension specifies the limit under which
+     * bokeh is effective. This dimension must be one of the YUV_420_888 or PRIVATE resolutions
+     * in availableStreamConfigurations, and if the sensor maximum resolution is larger than or
+     * equal to 1080p, the maximum streaming dimension must be at least 1080p. If the
+     * application configures a stream with larger dimension, the stream may not have bokeh
+     * effect applied.</p>
+     */
+    ACAMERA_CONTROL_AVAILABLE_BOKEH_CAPABILITIES =              // int32[3*n]
+            ACAMERA_CONTROL_START + 43,
+    /**
+     * <p>Whether bokeh mode is enabled for a particular capture request.</p>
+     *
+     * <p>Type: byte (acamera_metadata_enum_android_control_bokeh_mode_t)</p>
+     *
+     * <p>This tag may appear in:
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul></p>
+     *
+     * <p>With bokeh mode, the camera device may blur out the parts of scene that are not in
+     * focus, creating a bokeh (or shallow depth of field) effect for people or objects.</p>
+     * <p>When set to STILL_CAPTURE bokeh mode with STILL_CAPTURE capture intent, due to the extra
+     * processing needed for a high quality bokeh effect, the stall may be longer than when
+     * capture intent is not STILL_CAPTURE.</p>
+     * <p>When set to STILL_CAPTURE bokeh mode with PREVIEW capture intent,</p>
+     * <ul>
+     * <li>If the camera device has BURST_CAPTURE capability, the frame rate requirement of
+     * BURST_CAPTURE must still be met.</li>
+     * <li>All streams not larger than the maximum streaming dimension for STILL_CAPTURE mode
+     * (queried via {@link ACAMERA_CONTROL_AVAILABLE_BOKEH_CAPABILITIES })
+     * will have preview bokeh effect applied.</li>
+     * </ul>
+     * <p>When set to CONTINUOUS mode, the configured streams' dimensions should not exceed this mode's
+     * maximum streaming dimension in order to have bokeh effect applied. Bokeh effect may not
+     * be available for streams larger than the maximum streaming dimension.</p>
+     * <p>Switching between different bokeh modes may involve reconfiguration of the camera
+     * pipeline, resulting in long latency. The application should check this key against the
+     * available session keys queried via
+     * {@link ACameraManager_getCameraCharacteristics }.</p>
+     * <p>When bokeh mode is on, the camera device may override certain control parameters, such as
+     * reducing frame rate or using face priority scene mode, to achieve the best power and quality
+     * tradeoffs. When turned on, AE, AWB, and AF run in auto modes, and only the mandatory
+     * stream combinations of LIMITED hardware level are guaranteed.</p>
+     * <p>For a logical multi-camera, bokeh may be implemented by stereo vision from sub-cameras
+     * with different fields of view. As a result, when bokeh mode is enabled, the camera device
+     * may override android.scaler.CropRegion, and the field of view will be smaller than when
+     * bokeh mode is off.</p>
+     */
+    ACAMERA_CONTROL_BOKEH_MODE =                                // byte (acamera_metadata_enum_android_control_bokeh_mode_t)
+            ACAMERA_CONTROL_START + 44,
     ACAMERA_CONTROL_END,
 
     /**
@@ -7002,6 +7073,31 @@
 
 } acamera_metadata_enum_android_control_af_scene_change_t;
 
+// ACAMERA_CONTROL_BOKEH_MODE
+typedef enum acamera_metadata_enum_acamera_control_bokeh_mode {
+    /**
+     * <p>Bokeh mode is disabled.</p>
+     */
+    ACAMERA_CONTROL_BOKEH_MODE_OFF                                   = 0,
+
+    /**
+     * <p>High quality bokeh mode is enabled for all non-raw streams (including YUV,
+     * JPEG, and IMPLEMENTATION_DEFINED) when capture intent is STILL_CAPTURE. Due to the
+     * extra image processing, this mode may introduce additional stall to non-raw streams.
+     * This mode should be used for high quality still capture use cases.</p>
+     */
+    ACAMERA_CONTROL_BOKEH_MODE_STILL_CAPTURE                         = 1,
+
+    /**
+     * <p>Bokeh effect must not slow down capture rate relative to sensor raw output,
+     * and the effect is applied to all processed streams no larger than the maximum
+     * streaming dimension. This mode should be used if performance and power are a
+     * priority, such as for video recording.</p>
+     */
+    ACAMERA_CONTROL_BOKEH_MODE_CONTINUOUS                            = 2,
+
+} acamera_metadata_enum_android_control_bokeh_mode_t;
+
 
 
 // ACAMERA_EDGE_MODE
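
For orientation, a minimal application-side sketch (assumed usage, not part of this change) of how the two new tags fit together: query ACAMERA_CONTROL_AVAILABLE_BOKEH_CAPABILITIES from the static characteristics, then request a supported mode via ACAMERA_CONTROL_BOKEH_MODE. Here `chars` and `request` are assumed to come from ACameraManager_getCameraCharacteristics() and ACameraDevice_createCaptureRequest() respectively.

#include <camera/NdkCameraMetadata.h>
#include <camera/NdkCaptureRequest.h>

// Returns true if STILL_CAPTURE bokeh was advertised and set on the request.
static bool requestStillCaptureBokeh(const ACameraMetadata* chars,
                                     ACaptureRequest* request) {
    ACameraMetadata_const_entry entry = {};
    if (ACameraMetadata_getConstEntry(chars,
            ACAMERA_CONTROL_AVAILABLE_BOKEH_CAPABILITIES, &entry) != ACAMERA_OK) {
        return false;  // tag absent: device does not report bokeh capabilities
    }
    // The capability list is int32[3*n]: (mode, maxStreamingWidth, maxStreamingHeight).
    for (uint32_t i = 0; i + 2 < entry.count; i += 3) {
        if (entry.data.i32[i] == ACAMERA_CONTROL_BOKEH_MODE_STILL_CAPTURE) {
            const uint8_t mode = ACAMERA_CONTROL_BOKEH_MODE_STILL_CAPTURE;
            return ACaptureRequest_setEntry_u8(request,
                    ACAMERA_CONTROL_BOKEH_MODE, 1, &mode) == ACAMERA_OK;
        }
    }
    return false;
}
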
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index 9ae87d8..e8cfece 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -304,7 +304,7 @@
             seekTimeUs = -1;
 
             if (shouldSeek) {
-                seekTimeUs = (rand() * (float)durationUs) / RAND_MAX;
+                seekTimeUs = (rand() * (float)durationUs) / (float)RAND_MAX;
                 options.setSeekTo(seekTimeUs);
 
                 printf("seeking to %" PRId64 " us (%.2f secs)\n",
diff --git a/drm/libmediadrm/CryptoHal.cpp b/drm/libmediadrm/CryptoHal.cpp
index 58110d4..6a0e75e 100644
--- a/drm/libmediadrm/CryptoHal.cpp
+++ b/drm/libmediadrm/CryptoHal.cpp
@@ -118,7 +118,6 @@
 CryptoHal::CryptoHal()
     : mFactories(makeCryptoFactories()),
       mInitCheck((mFactories.size() == 0) ? ERROR_UNSUPPORTED : NO_INIT),
-      mNextBufferId(0),
       mHeapSeqNum(0) {
 }
 
@@ -260,17 +259,18 @@
     using ::android::hardware::fromHeap;
     using ::android::hardware::HidlMemory;
 
-    if (heap == NULL) {
-        ALOGE("setHeapBase(): heap is NULL");
+    if (heap == NULL || mHeapSeqNum < 0) {
+        ALOGE("setHeapBase(): heap %p mHeapSeqNum %d", heap.get(), mHeapSeqNum);
         return -1;
     }
 
     Mutex::Autolock autoLock(mLock);
 
     int32_t seqNum = mHeapSeqNum++;
+    uint32_t bufferId = static_cast<uint32_t>(seqNum);
     sp<HidlMemory> hidlMemory = fromHeap(heap);
-    mHeapBases.add(seqNum, HeapBase(mNextBufferId, heap->getSize()));
-    Return<void> hResult = mPlugin->setSharedBufferBase(*hidlMemory, mNextBufferId++);
+    mHeapBases.add(seqNum, HeapBase(bufferId, heap->getSize()));
+    Return<void> hResult = mPlugin->setSharedBufferBase(*hidlMemory, bufferId);
     ALOGE_IF(!hResult.isOk(), "setSharedBufferBase(): remote call failed");
     return seqNum;
 }
diff --git a/drm/libmediadrm/include/mediadrm/CryptoHal.h b/drm/libmediadrm/include/mediadrm/CryptoHal.h
index 73c029f..9e61777 100644
--- a/drm/libmediadrm/include/mediadrm/CryptoHal.h
+++ b/drm/libmediadrm/include/mediadrm/CryptoHal.h
@@ -97,7 +97,6 @@
     };
 
     KeyedVector<int32_t, HeapBase> mHeapBases;
-    uint32_t mNextBufferId;
     int32_t mHeapSeqNum;
 
     Vector<sp<ICryptoFactory>> makeCryptoFactories();
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
index bf35224..af7c367 100644
--- a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
+++ b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
@@ -97,7 +97,8 @@
 ///////////////////////////////////////////////////////////////////////////////
 ClearKeyCasPlugin::ClearKeyCasPlugin(
         void *appData, CasPluginCallback callback)
-    : mCallback(callback), mCallbackExt(NULL), mAppData(appData) {
+    : mCallback(callback), mCallbackExt(NULL), mStatusCallback(NULL),
+    mAppData(appData) {
     ALOGV("CTOR");
 }
 
@@ -112,6 +113,13 @@
     ClearKeySessionLibrary::get()->destroyPlugin(this);
 }
 
+status_t ClearKeyCasPlugin::setStatusCallback(
+    CasPluginStatusCallback callback) {
+    ALOGV("setStatusCallback");
+    mStatusCallback = callback;
+    return OK;
+}
+
 status_t ClearKeyCasPlugin::setPrivateData(const CasData &/*data*/) {
     ALOGV("setPrivateData");
 
@@ -135,6 +143,19 @@
     return ClearKeySessionLibrary::get()->addSession(this, sessionId);
 }
 
+status_t ClearKeyCasPlugin::openSession(uint32_t intent, uint32_t mode,
+    CasSessionId* sessionId) {
+    ALOGV("openSession with intent=%d, mode=%d", intent, mode);
+    // Echo the received information to the callback.
+    // The clear key plugin doesn't use any events; echoing here is for testing only.
+    if (mStatusCallback != NULL) {
+        mStatusCallback((void*)mAppData, intent, mode);
+    }
+
+    // Clear key plugin doesn't use intent and mode.
+    return ClearKeySessionLibrary::get()->addSession(this, sessionId);
+}
+
 status_t ClearKeyCasPlugin::closeSession(const CasSessionId &sessionId) {
     ALOGV("closeSession: sessionId=%s", sessionIdToString(sessionId).string());
     std::shared_ptr<ClearKeyCasSession> session =
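
A hedged host-side sketch of a status callback compatible with the call site above (the plugin passes appData, intent, and mode); the authoritative CasPluginStatusCallback typedef is defined in the CAS plugin API headers, so the parameter types below are assumptions inferred from the call site.

// Assumed callback shape; the clear key plugin echoes openSession's intent and
// mode as the event/arg pair for testing.
static void onCasPluginStatus(void *appData, int32_t event, int32_t arg) {
    ALOGI("CAS plugin status: appData=%p event=%d arg=%d", appData, event, arg);
}

// Assumed registration and use after creating the plugin:
//     plugin->setStatusCallback(&onCasPluginStatus);
//     plugin->openSession(intent, mode, &sessionId);
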
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h
index f48d5b1..c6938e6 100644
--- a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h
+++ b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h
@@ -71,11 +71,17 @@
     ClearKeyCasPlugin(void *appData, CasPluginCallbackExt callback);
     virtual ~ClearKeyCasPlugin();
 
+    virtual status_t setStatusCallback(
+            CasPluginStatusCallback callback) override;
+
     virtual status_t setPrivateData(
             const CasData &data) override;
 
     virtual status_t openSession(CasSessionId *sessionId) override;
 
+    virtual status_t openSession(uint32_t intent, uint32_t mode,
+                                     CasSessionId *sessionId) override;
+
     virtual status_t closeSession(
             const CasSessionId &sessionId) override;
 
@@ -105,6 +111,7 @@
     std::unique_ptr<KeyFetcher> mKeyFetcher;
     CasPluginCallback mCallback;
     CasPluginCallbackExt mCallbackExt;
+    CasPluginStatusCallback mStatusCallback;
     void* mAppData;
 };
 
diff --git a/drm/mediacas/plugins/mock/MockCasPlugin.cpp b/drm/mediacas/plugins/mock/MockCasPlugin.cpp
index 2964791..f8bab0a 100644
--- a/drm/mediacas/plugins/mock/MockCasPlugin.cpp
+++ b/drm/mediacas/plugins/mock/MockCasPlugin.cpp
@@ -111,6 +111,12 @@
     MockSessionLibrary::get()->destroyPlugin(this);
 }
 
+status_t MockCasPlugin::setStatusCallback(
+    CasPluginStatusCallback /*callback*/) {
+    ALOGV("setStatusCallback");
+    return OK;
+}
+
 status_t MockCasPlugin::setPrivateData(const CasData& /*data*/) {
     ALOGV("setPrivateData");
     return OK;
@@ -121,6 +127,13 @@
     return MockSessionLibrary::get()->addSession(this, sessionId);
 }
 
+status_t MockCasPlugin::openSession(uint32_t intent, uint32_t mode,
+    CasSessionId* sessionId) {
+    ALOGV("openSession with intent=%d, mode=%d", intent, mode);
+    // Clear key plugin doesn't use intent and mode.
+    return MockSessionLibrary::get()->addSession(this, sessionId);
+}
+
 status_t MockCasPlugin::closeSession(const CasSessionId &sessionId) {
     ALOGV("closeSession: sessionId=%s", arrayToString(sessionId).string());
     Mutex::Autolock lock(mLock);
diff --git a/drm/mediacas/plugins/mock/MockCasPlugin.h b/drm/mediacas/plugins/mock/MockCasPlugin.h
index 74b540c..660fd44 100644
--- a/drm/mediacas/plugins/mock/MockCasPlugin.h
+++ b/drm/mediacas/plugins/mock/MockCasPlugin.h
@@ -65,11 +65,17 @@
     MockCasPlugin();
     virtual ~MockCasPlugin();
 
+    virtual status_t setStatusCallback(
+            CasPluginStatusCallback callback) override;
+
     virtual status_t setPrivateData(
             const CasData &data) override;
 
     virtual status_t openSession(CasSessionId *sessionId) override;
 
+    virtual status_t openSession(uint32_t intent, uint32_t mode,
+                                     CasSessionId *sessionId) override;
+
     virtual status_t closeSession(
             const CasSessionId &sessionId) override;
 
diff --git a/media/bufferpool/2.0/AccessorImpl.cpp b/media/bufferpool/2.0/AccessorImpl.cpp
index 84ce172..1947656 100644
--- a/media/bufferpool/2.0/AccessorImpl.cpp
+++ b/media/bufferpool/2.0/AccessorImpl.cpp
@@ -38,7 +38,7 @@
     static constexpr int64_t kLogDurationUs = 5000000; // 5 secs
 
     static constexpr size_t kMinAllocBytesForEviction = 1024*1024*15;
-    static constexpr size_t kMinBufferCountForEviction = 40;
+    static constexpr size_t kMinBufferCountForEviction = 25;
 }
 
 // Buffer structure in bufferpool process
@@ -723,8 +723,8 @@
                   mStats.mTotalFetches, mStats.mTotalTransfers);
         }
         for (auto freeIt = mFreeBuffers.begin(); freeIt != mFreeBuffers.end();) {
-            if (!clearCache && mStats.mSizeCached < kMinAllocBytesForEviction
-                    && mBuffers.size() < kMinBufferCountForEviction) {
+            if (!clearCache && (mStats.mSizeCached < kMinAllocBytesForEviction
+                    || mBuffers.size() < kMinBufferCountForEviction)) {
                 break;
             }
             auto it = mBuffers.find(*freeIt);
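
Restated as a standalone predicate (a sketch for clarity only; the constant names mirror the values in the anonymous namespace above), the relaxed condition means a free buffer is evicted only when cache clearing is forced or both thresholds are exceeded, whereas the previous `&&` kept evicting once either threshold alone was crossed:

static bool shouldEvictFreeBuffer(bool clearCache, size_t sizeCached, size_t bufferCount) {
    if (clearCache) {
        return true;                                      // forced eviction
    }
    return sizeCached >= kMinAllocBytesForEviction        // >= 15 MiB cached
            && bufferCount >= kMinBufferCountForEviction; // >= 25 buffers alive
}
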
diff --git a/media/codec2/components/aac/C2SoftAacEnc.cpp b/media/codec2/components/aac/C2SoftAacEnc.cpp
index be52a1d..4db94f5 100644
--- a/media/codec2/components/aac/C2SoftAacEnc.cpp
+++ b/media/codec2/components/aac/C2SoftAacEnc.cpp
@@ -155,9 +155,7 @@
       mNumBytesPerInputFrame(0u),
       mOutBufferSize(0u),
       mSentCodecSpecificData(false),
-      mInputTimeSet(false),
       mInputSize(0),
-      mNextFrameTimestampUs(0),
       mSignalledError(false),
       mOutIndex(0u),
       mRemainderLen(0u) {
@@ -182,9 +180,9 @@
 
 c2_status_t C2SoftAacEnc::onStop() {
     mSentCodecSpecificData = false;
-    mInputTimeSet = false;
     mInputSize = 0u;
-    mNextFrameTimestampUs = 0;
+    mNextFrameTimestampUs.reset();
+    mLastFrameEndTimestampUs.reset();
     mSignalledError = false;
     mRemainderLen = 0;
     return C2_OK;
@@ -201,9 +199,9 @@
 
 c2_status_t C2SoftAacEnc::onFlush_sm() {
     mSentCodecSpecificData = false;
-    mInputTimeSet = false;
     mInputSize = 0u;
-    mNextFrameTimestampUs = 0;
+    mNextFrameTimestampUs.reset();
+    mLastFrameEndTimestampUs.reset();
     return C2_OK;
 }
 
@@ -366,9 +364,19 @@
         data = view.data();
         capacity = view.capacity();
     }
-    if (!mInputTimeSet && capacity > 0) {
-        mNextFrameTimestampUs = work->input.ordinal.timestamp;
-        mInputTimeSet = true;
+    c2_cntr64_t inputTimestampUs = work->input.ordinal.timestamp;
+    if (inputTimestampUs < mLastFrameEndTimestampUs.value_or(inputTimestampUs)) {
+        ALOGW("Correcting overlapping timestamp: last frame ended at %lldus but "
+              "current frame is starting at %lldus. Using the last frame's end timestamp",
+              mLastFrameEndTimestampUs->peekll(), inputTimestampUs.peekll());
+        inputTimestampUs = *mLastFrameEndTimestampUs;
+    }
+    if (capacity > 0) {
+        if (!mNextFrameTimestampUs) {
+            mNextFrameTimestampUs = work->input.ordinal.timestamp;
+        }
+        mLastFrameEndTimestampUs = inputTimestampUs
+                + (capacity / sizeof(int16_t) * 1000000ll / channelCount / sampleRate);
     }
 
     size_t numFrames =
@@ -376,8 +384,7 @@
         / mNumBytesPerInputFrame;
     ALOGV("capacity = %zu; mInputSize = %zu; numFrames = %zu "
           "mNumBytesPerInputFrame = %u inputTS = %lld remaining = %zu",
-          capacity, mInputSize, numFrames,
-          mNumBytesPerInputFrame, work->input.ordinal.timestamp.peekll(),
+          capacity, mInputSize, numFrames, mNumBytesPerInputFrame, inputTimestampUs.peekll(),
           mRemainderLen);
 
     std::shared_ptr<C2LinearBlock> block;
@@ -505,8 +512,10 @@
                 mInputSize = 0;
                 int consumed = (capacity / sizeof(int16_t)) - inargs.numInSamples
                         + outargs.numInSamples;
-                c2_cntr64_t currentFrameTimestampUs = mNextFrameTimestampUs;
-                mNextFrameTimestampUs = work->input.ordinal.timestamp
+                ALOGV("consumed = %d, capacity = %zu, inSamples = %d, outSamples = %d",
+                      consumed, capacity, inargs.numInSamples, outargs.numInSamples);
+                c2_cntr64_t currentFrameTimestampUs = *mNextFrameTimestampUs;
+                mNextFrameTimestampUs = inputTimestampUs
                         + (consumed * 1000000ll / channelCount / sampleRate);
                 std::shared_ptr<C2Buffer> buffer = createLinearBuffer(block, 0, outargs.numOutBytes);
 #if 0
@@ -533,7 +542,7 @@
         }
         ALOGV("encoderErr = %d mInputSize = %zu "
               "inargs.numInSamples = %d, mNextFrameTimestampUs = %lld",
-              encoderErr, mInputSize, inargs.numInSamples, mNextFrameTimestampUs.peekll());
+              encoderErr, mInputSize, inargs.numInSamples, mNextFrameTimestampUs->peekll());
     }
     if (eos && inBufferSize[0] > 0) {
         if (numFrames && !block) {
@@ -617,9 +626,9 @@
 
     (void)pool;
     mSentCodecSpecificData = false;
-    mInputTimeSet = false;
     mInputSize = 0u;
-    mNextFrameTimestampUs = 0;
+    mNextFrameTimestampUs.reset();
+    mLastFrameEndTimestampUs.reset();
 
     // TODO: we don't have any pending work at this time to drain.
     return C2_OK;
diff --git a/media/codec2/components/aac/C2SoftAacEnc.h b/media/codec2/components/aac/C2SoftAacEnc.h
index 6ecfbdd..9a28280 100644
--- a/media/codec2/components/aac/C2SoftAacEnc.h
+++ b/media/codec2/components/aac/C2SoftAacEnc.h
@@ -18,6 +18,7 @@
 #define ANDROID_C2_SOFT_AAC_ENC_H_
 
 #include <atomic>
+#include <optional>
 
 #include <SimpleC2Component.h>
 
@@ -54,9 +55,9 @@
     UINT mOutBufferSize;
 
     bool mSentCodecSpecificData;
-    bool mInputTimeSet;
     size_t mInputSize;
-    c2_cntr64_t mNextFrameTimestampUs;
+    std::optional<c2_cntr64_t> mNextFrameTimestampUs;
+    std::optional<c2_cntr64_t> mLastFrameEndTimestampUs;
 
     bool mSignalledError;
     std::atomic_uint64_t mOutIndex;
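
A worked example of the end-of-frame timestamp arithmetic introduced in C2SoftAacEnc::process() (illustrative numbers; 16-bit PCM assumed):

//   capacity     = 4096 bytes of input -> 4096 / sizeof(int16_t) = 2048 samples
//   channelCount = 2, sampleRate = 48000
//   frame length = 2048 * 1000000 / 2 / 48000 = 21333 us
// so mLastFrameEndTimestampUs = inputTimestampUs + 21333, and a later work item
// whose ordinal timestamp starts before that value is clamped up to it by the
// overlapping-timestamp warning branch.
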
diff --git a/media/codec2/components/g711/C2SoftG711Dec.cpp b/media/codec2/components/g711/C2SoftG711Dec.cpp
index b6cc32e..4ff0793 100644
--- a/media/codec2/components/g711/C2SoftG711Dec.cpp
+++ b/media/codec2/components/g711/C2SoftG711Dec.cpp
@@ -73,8 +73,8 @@
 
         addParameter(
                 DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
-                .withDefault(new C2StreamChannelCountInfo::output(0u, 6))
-                .withFields({C2F(mChannelCount, value).equalTo(1)})
+                .withDefault(new C2StreamChannelCountInfo::output(0u, 1))
+                .withFields({C2F(mChannelCount, value).inRange(1, 6)})
                 .withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
                 .build());
 
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.cpp b/media/codec2/components/vpx/C2SoftVpxDec.cpp
index 62076f8..c7d73f4 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxDec.cpp
@@ -594,12 +594,10 @@
         }
     }
 
-    int64_t frameIndex = work->input.ordinal.frameIndex.peekll();
-
     if (inSize) {
         uint8_t *bitstream = const_cast<uint8_t *>(rView.data() + inOffset);
         vpx_codec_err_t err = vpx_codec_decode(
-                mCodecCtx, bitstream, inSize, &frameIndex, 0);
+                mCodecCtx, bitstream, inSize, &work->input.ordinal.frameIndex, 0);
         if (err != VPX_CODEC_OK) {
             ALOGE("on2 decoder failed to decode frame. err: %d", err);
             mSignalledError = true;
@@ -609,7 +607,20 @@
         }
     }
 
-    (void)outputBuffer(pool, work);
+    status_t err = outputBuffer(pool, work);
+    if (err == NOT_ENOUGH_DATA) {
+        if (inSize > 0) {
+            ALOGV("Maybe non-display frame at %lld.",
+                  work->input.ordinal.frameIndex.peekll());
+            // send the work back with empty buffer.
+            inSize = 0;
+        }
+    } else if (err != OK) {
+        ALOGD("Error while getting the output frame out");
+        // work->result would be already filled; do fillEmptyWork() below to
+        // send the work back.
+        inSize = 0;
+    }
 
     if (eos) {
         drainInternal(DRAIN_COMPONENT_WITH_EOS, pool, work);
@@ -743,16 +754,16 @@
     }
     return;
 }
-bool C2SoftVpxDec::outputBuffer(
+status_t C2SoftVpxDec::outputBuffer(
         const std::shared_ptr<C2BlockPool> &pool,
         const std::unique_ptr<C2Work> &work)
 {
-    if (!(work && pool)) return false;
+    if (!(work && pool)) return BAD_VALUE;
 
     vpx_codec_iter_t iter = nullptr;
     vpx_image_t *img = vpx_codec_get_frame(mCodecCtx, &iter);
 
-    if (!img) return false;
+    if (!img) return NOT_ENOUGH_DATA;
 
     if (img->d_w != mWidth || img->d_h != mHeight) {
         mWidth = img->d_w;
@@ -769,7 +780,7 @@
             mSignalledError = true;
             work->workletsProcessed = 1u;
             work->result = C2_CORRUPTED;
-            return false;
+            return UNKNOWN_ERROR;
         }
 
     }
@@ -792,18 +803,19 @@
     if (err != C2_OK) {
         ALOGE("fetchGraphicBlock for Output failed with status %d", err);
         work->result = err;
-        return false;
+        return UNKNOWN_ERROR;
     }
 
     C2GraphicView wView = block->map().get();
     if (wView.error()) {
         ALOGE("graphic view map failed %d", wView.error());
         work->result = C2_CORRUPTED;
-        return false;
+        return UNKNOWN_ERROR;
     }
 
-    ALOGV("provided (%dx%d) required (%dx%d), out frameindex %d",
-           block->width(), block->height(), mWidth, mHeight, (int)*(int64_t *)img->user_priv);
+    ALOGV("provided (%dx%d) required (%dx%d), out frameindex %lld",
+           block->width(), block->height(), mWidth, mHeight,
+           ((c2_cntr64_t *)img->user_priv)->peekll());
 
     uint8_t *dst = const_cast<uint8_t *>(wView.data()[C2PlanarLayout::PLANE_Y]);
     size_t srcYStride = img->stride[VPX_PLANE_Y];
@@ -859,8 +871,8 @@
                 dstYStride, dstUVStride,
                 mWidth, mHeight);
     }
-    finishWork(*(int64_t *)img->user_priv, work, std::move(block));
-    return true;
+    finishWork(((c2_cntr64_t *)img->user_priv)->peekull(), work, std::move(block));
+    return OK;
 }
 
 c2_status_t C2SoftVpxDec::drainInternal(
@@ -876,7 +888,7 @@
         return C2_OMITTED;
     }
 
-    while ((outputBuffer(pool, work))) {
+    while (outputBuffer(pool, work) == OK) {
     }
 
     if (drainMode == DRAIN_COMPONENT_WITH_EOS &&
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.h b/media/codec2/components/vpx/C2SoftVpxDec.h
index e51bcee..2065165 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.h
+++ b/media/codec2/components/vpx/C2SoftVpxDec.h
@@ -85,7 +85,7 @@
     status_t destroyDecoder();
     void finishWork(uint64_t index, const std::unique_ptr<C2Work> &work,
                     const std::shared_ptr<C2GraphicBlock> &block);
-    bool outputBuffer(
+    status_t outputBuffer(
             const std::shared_ptr<C2BlockPool> &pool,
             const std::unique_ptr<C2Work> &work);
     c2_status_t drainInternal(
diff --git a/media/codec2/hidl/1.0/utils/Android.bp b/media/codec2/hidl/1.0/utils/Android.bp
index a2930a6..4a9dc55 100644
--- a/media/codec2/hidl/1.0/utils/Android.bp
+++ b/media/codec2/hidl/1.0/utils/Android.bp
@@ -6,7 +6,7 @@
     defaults: ["hidl_defaults"],
 
     srcs: [
-        "ClientBlockHelper.cpp",
+        "OutputBufferQueue.cpp",
         "types.cpp",
     ],
 
diff --git a/media/codec2/hidl/1.0/utils/ClientBlockHelper.cpp b/media/codec2/hidl/1.0/utils/ClientBlockHelper.cpp
deleted file mode 100644
index 50790bc..0000000
--- a/media/codec2/hidl/1.0/utils/ClientBlockHelper.cpp
+++ /dev/null
@@ -1,371 +0,0 @@
-/*
- * Copyright 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "Codec2-block_helper"
-#include <android-base/logging.h>
-
-#include <android/hardware/graphics/bufferqueue/2.0/IGraphicBufferProducer.h>
-#include <codec2/hidl/1.0/ClientBlockHelper.h>
-#include <gui/bufferqueue/2.0/B2HGraphicBufferProducer.h>
-
-#include <C2AllocatorGralloc.h>
-#include <C2BlockInternal.h>
-#include <C2Buffer.h>
-#include <C2PlatformSupport.h>
-
-#include <iomanip>
-
-namespace android {
-namespace hardware {
-namespace media {
-namespace c2 {
-namespace V1_0 {
-namespace utils {
-
-using HGraphicBufferProducer = ::android::hardware::graphics::bufferqueue::
-        V2_0::IGraphicBufferProducer;
-using B2HGraphicBufferProducer = ::android::hardware::graphics::bufferqueue::
-        V2_0::utils::B2HGraphicBufferProducer;
-
-namespace /* unnamed */ {
-
-// Create a GraphicBuffer object from a graphic block.
-sp<GraphicBuffer> createGraphicBuffer(const C2ConstGraphicBlock& block) {
-    uint32_t width;
-    uint32_t height;
-    uint32_t format;
-    uint64_t usage;
-    uint32_t stride;
-    uint32_t generation;
-    uint64_t bqId;
-    int32_t bqSlot;
-    _UnwrapNativeCodec2GrallocMetadata(
-            block.handle(), &width, &height, &format, &usage,
-            &stride, &generation, &bqId, reinterpret_cast<uint32_t*>(&bqSlot));
-    native_handle_t *grallocHandle =
-            UnwrapNativeCodec2GrallocHandle(block.handle());
-    sp<GraphicBuffer> graphicBuffer =
-            new GraphicBuffer(grallocHandle,
-                              GraphicBuffer::CLONE_HANDLE,
-                              width, height, format,
-                              1, usage, stride);
-    native_handle_delete(grallocHandle);
-    return graphicBuffer;
-}
-
-template <typename BlockProcessor>
-void forEachBlock(C2FrameData& frameData,
-                  BlockProcessor process) {
-    for (const std::shared_ptr<C2Buffer>& buffer : frameData.buffers) {
-        if (buffer) {
-            for (const C2ConstGraphicBlock& block :
-                    buffer->data().graphicBlocks()) {
-                process(block);
-            }
-        }
-    }
-}
-
-template <typename BlockProcessor>
-void forEachBlock(const std::list<std::unique_ptr<C2Work>>& workList,
-                  BlockProcessor process) {
-    for (const std::unique_ptr<C2Work>& work : workList) {
-        if (!work) {
-            continue;
-        }
-        for (const std::unique_ptr<C2Worklet>& worklet : work->worklets) {
-            if (worklet) {
-                forEachBlock(worklet->output, process);
-            }
-        }
-    }
-}
-
-sp<HGraphicBufferProducer> getHgbp(const sp<IGraphicBufferProducer>& igbp) {
-    sp<HGraphicBufferProducer> hgbp =
-            igbp->getHalInterface<HGraphicBufferProducer>();
-    return hgbp ? hgbp :
-            new B2HGraphicBufferProducer(igbp);
-}
-
-status_t attachToBufferQueue(const C2ConstGraphicBlock& block,
-                             const sp<IGraphicBufferProducer>& igbp,
-                             uint32_t generation,
-                             int32_t* bqSlot) {
-    if (!igbp) {
-        LOG(WARNING) << "attachToBufferQueue -- null producer.";
-        return NO_INIT;
-    }
-
-    sp<GraphicBuffer> graphicBuffer = createGraphicBuffer(block);
-    graphicBuffer->setGenerationNumber(generation);
-
-    LOG(VERBOSE) << "attachToBufferQueue -- attaching buffer:"
-            << " block dimension " << block.width() << "x"
-                                   << block.height()
-            << ", graphicBuffer dimension " << graphicBuffer->getWidth() << "x"
-                                           << graphicBuffer->getHeight()
-            << std::hex << std::setfill('0')
-            << ", format 0x" << std::setw(8) << graphicBuffer->getPixelFormat()
-            << ", usage 0x" << std::setw(16) << graphicBuffer->getUsage()
-            << std::dec << std::setfill(' ')
-            << ", stride " << graphicBuffer->getStride()
-            << ", generation " << graphicBuffer->getGenerationNumber();
-
-    status_t result = igbp->attachBuffer(bqSlot, graphicBuffer);
-    if (result != OK) {
-        LOG(WARNING) << "attachToBufferQueue -- attachBuffer failed: "
-                        "status = " << result << ".";
-        return result;
-    }
-    LOG(VERBOSE) << "attachToBufferQueue -- attachBuffer returned slot #"
-                 << *bqSlot << ".";
-    return OK;
-}
-
-bool getBufferQueueAssignment(const C2ConstGraphicBlock& block,
-                              uint32_t* generation,
-                              uint64_t* bqId,
-                              int32_t* bqSlot) {
-    return _C2BlockFactory::GetBufferQueueData(
-            _C2BlockFactory::GetGraphicBlockPoolData(block),
-            generation, bqId, bqSlot);
-}
-} // unnamed namespace
-
-class OutputBufferQueue::Impl {
-    std::mutex mMutex;
-    sp<IGraphicBufferProducer> mIgbp;
-    uint32_t mGeneration;
-    uint64_t mBqId;
-    std::shared_ptr<int> mOwner;
-    // To migrate existing buffers
-    sp<GraphicBuffer> mBuffers[BufferQueueDefs::NUM_BUFFER_SLOTS]; // find a better way
-    std::weak_ptr<_C2BlockPoolData>
-                    mPoolDatas[BufferQueueDefs::NUM_BUFFER_SLOTS];
-
-public:
-    Impl(): mGeneration(0), mBqId(0) {}
-
-    bool configure(const sp<IGraphicBufferProducer>& igbp,
-                   uint32_t generation,
-                   uint64_t bqId) {
-        size_t tryNum = 0;
-        size_t success = 0;
-        sp<GraphicBuffer> buffers[BufferQueueDefs::NUM_BUFFER_SLOTS];
-        std::weak_ptr<_C2BlockPoolData>
-                poolDatas[BufferQueueDefs::NUM_BUFFER_SLOTS];
-        {
-            std::scoped_lock<std::mutex> l(mMutex);
-            if (generation == mGeneration) {
-                return false;
-            }
-            mIgbp = igbp;
-            mGeneration = generation;
-            mBqId = bqId;
-            mOwner = std::make_shared<int>(0);
-            for (int i = 0; i < BufferQueueDefs::NUM_BUFFER_SLOTS; ++i) {
-                if (mBqId == 0 || !mBuffers[i]) {
-                    continue;
-                }
-                std::shared_ptr<_C2BlockPoolData> data = mPoolDatas[i].lock();
-                if (!data ||
-                    !_C2BlockFactory::BeginAttachBlockToBufferQueue(data)) {
-                    continue;
-                }
-                ++tryNum;
-                int bqSlot;
-                mBuffers[i]->setGenerationNumber(generation);
-                status_t result = igbp->attachBuffer(&bqSlot, mBuffers[i]);
-                if (result != OK) {
-                    continue;
-                }
-                bool attach =
-                        _C2BlockFactory::EndAttachBlockToBufferQueue(
-                                data, mOwner, getHgbp(mIgbp),
-                                generation, bqId, bqSlot);
-                if (!attach) {
-                    igbp->cancelBuffer(bqSlot, Fence::NO_FENCE);
-                    continue;
-                }
-                buffers[bqSlot] = mBuffers[i];
-                poolDatas[bqSlot] = data;
-                ++success;
-            }
-            for (int i = 0; i < BufferQueueDefs::NUM_BUFFER_SLOTS; ++i) {
-                mBuffers[i] = buffers[i];
-                mPoolDatas[i] = poolDatas[i];
-            }
-        }
-        ALOGD("remote graphic buffer migration %zu/%zu", success, tryNum);
-        return true;
-    }
-
-    bool registerBuffer(const C2ConstGraphicBlock& block) {
-        std::shared_ptr<_C2BlockPoolData> data =
-                _C2BlockFactory::GetGraphicBlockPoolData(block);
-        if (!data) {
-            return false;
-        }
-        std::scoped_lock<std::mutex> l(mMutex);
-
-        if (!mIgbp) {
-            return false;
-        }
-
-        uint32_t oldGeneration;
-        uint64_t oldId;
-        int32_t oldSlot;
-        // If the block is not bufferqueue-based, do nothing.
-        if (!_C2BlockFactory::GetBufferQueueData(
-                data, &oldGeneration, &oldId, &oldSlot) || (oldId == 0)) {
-            return false;
-        }
-        // If the block's bqId is the same as the desired bqId, just hold.
-        if ((oldId == mBqId) && (oldGeneration == mGeneration)) {
-            LOG(VERBOSE) << "holdBufferQueueBlock -- import without attaching:"
-                         << " bqId " << oldId
-                         << ", bqSlot " << oldSlot
-                         << ", generation " << mGeneration
-                         << ".";
-            _C2BlockFactory::HoldBlockFromBufferQueue(data, mOwner, getHgbp(mIgbp));
-            mPoolDatas[oldSlot] = data;
-            mBuffers[oldSlot] = createGraphicBuffer(block);
-            mBuffers[oldSlot]->setGenerationNumber(mGeneration);
-            return true;
-        }
-        int32_t d = (int32_t) mGeneration - (int32_t) oldGeneration;
-        LOG(WARNING) << "receiving stale buffer: generation "
-                     << mGeneration << " , diff " << d  << " : slot "
-                     << oldSlot;
-        return false;
-    }
-
-    status_t outputBuffer(
-            const C2ConstGraphicBlock& block,
-            const BnGraphicBufferProducer::QueueBufferInput& input,
-            BnGraphicBufferProducer::QueueBufferOutput* output) {
-        uint32_t generation;
-        uint64_t bqId;
-        int32_t bqSlot;
-        bool display = displayBufferQueueBlock(block);
-        if (!getBufferQueueAssignment(block, &generation, &bqId, &bqSlot) ||
-            bqId == 0) {
-            // Block not from bufferqueue -- it must be attached before queuing.
-
-            mMutex.lock();
-            sp<IGraphicBufferProducer> outputIgbp = mIgbp;
-            uint32_t outputGeneration = mGeneration;
-            mMutex.unlock();
-
-            status_t status = attachToBufferQueue(
-                    block, outputIgbp, outputGeneration, &bqSlot);
-            if (status != OK) {
-                LOG(WARNING) << "outputBuffer -- attaching failed.";
-                return INVALID_OPERATION;
-            }
-
-            status = outputIgbp->queueBuffer(static_cast<int>(bqSlot),
-                                         input, output);
-            if (status != OK) {
-                LOG(ERROR) << "outputBuffer -- queueBuffer() failed "
-                           "on non-bufferqueue-based block. "
-                           "Error = " << status << ".";
-                return status;
-            }
-            return OK;
-        }
-
-        mMutex.lock();
-        sp<IGraphicBufferProducer> outputIgbp = mIgbp;
-        uint32_t outputGeneration = mGeneration;
-        uint64_t outputBqId = mBqId;
-        mMutex.unlock();
-
-        if (!outputIgbp) {
-            LOG(VERBOSE) << "outputBuffer -- output surface is null.";
-            return NO_INIT;
-        }
-
-        if (!display) {
-            LOG(WARNING) << "outputBuffer -- cannot display "
-                         "bufferqueue-based block to the bufferqueue.";
-            return UNKNOWN_ERROR;
-        }
-        if (bqId != outputBqId || generation != outputGeneration) {
-            int32_t diff = (int32_t) outputGeneration - (int32_t) generation;
-            LOG(WARNING) << "outputBuffer -- buffers from old generation to "
-                         << outputGeneration << " , diff: " << diff
-                         << " , slot: " << bqSlot;
-            return DEAD_OBJECT;
-        }
-
-        status_t status = outputIgbp->queueBuffer(static_cast<int>(bqSlot),
-                                              input, output);
-        if (status != OK) {
-            LOG(ERROR) << "outputBuffer -- queueBuffer() failed "
-                       "on bufferqueue-based block. "
-                       "Error = " << status << ".";
-            return status;
-        }
-        return OK;
-    }
-
-    Impl *getPtr() {
-        return this;
-    }
-
-    ~Impl() {}
-};
-
-OutputBufferQueue::OutputBufferQueue(): mImpl(new Impl()) {}
-
-OutputBufferQueue::~OutputBufferQueue() {}
-
-bool OutputBufferQueue::configure(const sp<IGraphicBufferProducer>& igbp,
-                                  uint32_t generation,
-                                  uint64_t bqId) {
-    return mImpl && mImpl->configure(igbp, generation, bqId);
-}
-
-status_t OutputBufferQueue::outputBuffer(
-    const C2ConstGraphicBlock& block,
-    const BnGraphicBufferProducer::QueueBufferInput& input,
-    BnGraphicBufferProducer::QueueBufferOutput* output) {
-    if (mImpl) {
-        return mImpl->outputBuffer(block, input, output);
-    }
-    return DEAD_OBJECT;
-}
-
-void OutputBufferQueue::holdBufferQueueBlocks(
-        const std::list<std::unique_ptr<C2Work>>& workList) {
-    if (!mImpl) {
-        return;
-    }
-    forEachBlock(workList,
-                 std::bind(&OutputBufferQueue::Impl::registerBuffer,
-                           mImpl->getPtr(), std::placeholders::_1));
-}
-
-}  // namespace utils
-}  // namespace V1_0
-}  // namespace c2
-}  // namespace media
-}  // namespace hardware
-}  // namespace android
-
diff --git a/media/codec2/hidl/1.0/utils/OutputBufferQueue.cpp b/media/codec2/hidl/1.0/utils/OutputBufferQueue.cpp
new file mode 100644
index 0000000..c4a72ef
--- /dev/null
+++ b/media/codec2/hidl/1.0/utils/OutputBufferQueue.cpp
@@ -0,0 +1,335 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Codec2-OutputBufferQueue"
+#include <android-base/logging.h>
+
+#include <android/hardware/graphics/bufferqueue/2.0/IGraphicBufferProducer.h>
+#include <codec2/hidl/1.0/OutputBufferQueue.h>
+#include <gui/bufferqueue/2.0/B2HGraphicBufferProducer.h>
+
+#include <C2AllocatorGralloc.h>
+#include <C2BlockInternal.h>
+#include <C2Buffer.h>
+#include <C2PlatformSupport.h>
+
+#include <iomanip>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace c2 {
+namespace V1_0 {
+namespace utils {
+
+using HGraphicBufferProducer = ::android::hardware::graphics::bufferqueue::
+        V2_0::IGraphicBufferProducer;
+using B2HGraphicBufferProducer = ::android::hardware::graphics::bufferqueue::
+        V2_0::utils::B2HGraphicBufferProducer;
+
+namespace /* unnamed */ {
+
+// Create a GraphicBuffer object from a graphic block.
+sp<GraphicBuffer> createGraphicBuffer(const C2ConstGraphicBlock& block) {
+    uint32_t width;
+    uint32_t height;
+    uint32_t format;
+    uint64_t usage;
+    uint32_t stride;
+    uint32_t generation;
+    uint64_t bqId;
+    int32_t bqSlot;
+    _UnwrapNativeCodec2GrallocMetadata(
+            block.handle(), &width, &height, &format, &usage,
+            &stride, &generation, &bqId, reinterpret_cast<uint32_t*>(&bqSlot));
+    native_handle_t *grallocHandle =
+            UnwrapNativeCodec2GrallocHandle(block.handle());
+    sp<GraphicBuffer> graphicBuffer =
+            new GraphicBuffer(grallocHandle,
+                              GraphicBuffer::CLONE_HANDLE,
+                              width, height, format,
+                              1, usage, stride);
+    native_handle_delete(grallocHandle);
+    return graphicBuffer;
+}
+
+template <typename BlockProcessor>
+void forEachBlock(C2FrameData& frameData,
+                  BlockProcessor process) {
+    for (const std::shared_ptr<C2Buffer>& buffer : frameData.buffers) {
+        if (buffer) {
+            for (const C2ConstGraphicBlock& block :
+                    buffer->data().graphicBlocks()) {
+                process(block);
+            }
+        }
+    }
+}
+
+template <typename BlockProcessor>
+void forEachBlock(const std::list<std::unique_ptr<C2Work>>& workList,
+                  BlockProcessor process) {
+    for (const std::unique_ptr<C2Work>& work : workList) {
+        if (!work) {
+            continue;
+        }
+        for (const std::unique_ptr<C2Worklet>& worklet : work->worklets) {
+            if (worklet) {
+                forEachBlock(worklet->output, process);
+            }
+        }
+    }
+}
+
+sp<HGraphicBufferProducer> getHgbp(const sp<IGraphicBufferProducer>& igbp) {
+    sp<HGraphicBufferProducer> hgbp =
+            igbp->getHalInterface<HGraphicBufferProducer>();
+    return hgbp ? hgbp :
+            new B2HGraphicBufferProducer(igbp);
+}
+
+status_t attachToBufferQueue(const C2ConstGraphicBlock& block,
+                             const sp<IGraphicBufferProducer>& igbp,
+                             uint32_t generation,
+                             int32_t* bqSlot) {
+    if (!igbp) {
+        LOG(WARNING) << "attachToBufferQueue -- null producer.";
+        return NO_INIT;
+    }
+
+    sp<GraphicBuffer> graphicBuffer = createGraphicBuffer(block);
+    graphicBuffer->setGenerationNumber(generation);
+
+    LOG(VERBOSE) << "attachToBufferQueue -- attaching buffer:"
+            << " block dimension " << block.width() << "x"
+                                   << block.height()
+            << ", graphicBuffer dimension " << graphicBuffer->getWidth() << "x"
+                                           << graphicBuffer->getHeight()
+            << std::hex << std::setfill('0')
+            << ", format 0x" << std::setw(8) << graphicBuffer->getPixelFormat()
+            << ", usage 0x" << std::setw(16) << graphicBuffer->getUsage()
+            << std::dec << std::setfill(' ')
+            << ", stride " << graphicBuffer->getStride()
+            << ", generation " << graphicBuffer->getGenerationNumber();
+
+    status_t result = igbp->attachBuffer(bqSlot, graphicBuffer);
+    if (result != OK) {
+        LOG(WARNING) << "attachToBufferQueue -- attachBuffer failed: "
+                        "status = " << result << ".";
+        return result;
+    }
+    LOG(VERBOSE) << "attachToBufferQueue -- attachBuffer returned slot #"
+                 << *bqSlot << ".";
+    return OK;
+}
+
+bool getBufferQueueAssignment(const C2ConstGraphicBlock& block,
+                              uint32_t* generation,
+                              uint64_t* bqId,
+                              int32_t* bqSlot) {
+    return _C2BlockFactory::GetBufferQueueData(
+            _C2BlockFactory::GetGraphicBlockPoolData(block),
+            generation, bqId, bqSlot);
+}
+
+} // unnamed namespace
+
+OutputBufferQueue::OutputBufferQueue()
+      : mGeneration{0}, mBqId{0} {
+}
+
+OutputBufferQueue::~OutputBufferQueue() {
+}
+
+bool OutputBufferQueue::configure(const sp<IGraphicBufferProducer>& igbp,
+                                  uint32_t generation,
+                                  uint64_t bqId) {
+    size_t tryNum = 0;
+    size_t success = 0;
+    sp<GraphicBuffer> buffers[BufferQueueDefs::NUM_BUFFER_SLOTS];
+    std::weak_ptr<_C2BlockPoolData>
+            poolDatas[BufferQueueDefs::NUM_BUFFER_SLOTS];
+    {
+        std::scoped_lock<std::mutex> l(mMutex);
+        if (generation == mGeneration) {
+            return false;
+        }
+        mIgbp = igbp;
+        mGeneration = generation;
+        mBqId = bqId;
+        mOwner = std::make_shared<int>(0);
+        for (int i = 0; i < BufferQueueDefs::NUM_BUFFER_SLOTS; ++i) {
+            if (mBqId == 0 || !mBuffers[i]) {
+                continue;
+            }
+            std::shared_ptr<_C2BlockPoolData> data = mPoolDatas[i].lock();
+            if (!data ||
+                !_C2BlockFactory::BeginAttachBlockToBufferQueue(data)) {
+                continue;
+            }
+            ++tryNum;
+            int bqSlot;
+            mBuffers[i]->setGenerationNumber(generation);
+            status_t result = igbp->attachBuffer(&bqSlot, mBuffers[i]);
+            if (result != OK) {
+                continue;
+            }
+            bool attach =
+                    _C2BlockFactory::EndAttachBlockToBufferQueue(
+                            data, mOwner, getHgbp(mIgbp),
+                            generation, bqId, bqSlot);
+            if (!attach) {
+                igbp->cancelBuffer(bqSlot, Fence::NO_FENCE);
+                continue;
+            }
+            buffers[bqSlot] = mBuffers[i];
+            poolDatas[bqSlot] = data;
+            ++success;
+        }
+        for (int i = 0; i < BufferQueueDefs::NUM_BUFFER_SLOTS; ++i) {
+            mBuffers[i] = buffers[i];
+            mPoolDatas[i] = poolDatas[i];
+        }
+    }
+    ALOGD("remote graphic buffer migration %zu/%zu", success, tryNum);
+    return true;
+}
+
+bool OutputBufferQueue::registerBuffer(const C2ConstGraphicBlock& block) {
+    std::shared_ptr<_C2BlockPoolData> data =
+            _C2BlockFactory::GetGraphicBlockPoolData(block);
+    if (!data) {
+        return false;
+    }
+    std::scoped_lock<std::mutex> l(mMutex);
+
+    if (!mIgbp) {
+        return false;
+    }
+
+    uint32_t oldGeneration;
+    uint64_t oldId;
+    int32_t oldSlot;
+    // If the block is not bufferqueue-based, do nothing.
+    if (!_C2BlockFactory::GetBufferQueueData(
+            data, &oldGeneration, &oldId, &oldSlot) || (oldId == 0)) {
+        return false;
+    }
+    // If the block's bqId is the same as the desired bqId, just hold.
+    if ((oldId == mBqId) && (oldGeneration == mGeneration)) {
+        LOG(VERBOSE) << "holdBufferQueueBlock -- import without attaching:"
+                     << " bqId " << oldId
+                     << ", bqSlot " << oldSlot
+                     << ", generation " << mGeneration
+                     << ".";
+        _C2BlockFactory::HoldBlockFromBufferQueue(data, mOwner, getHgbp(mIgbp));
+        mPoolDatas[oldSlot] = data;
+        mBuffers[oldSlot] = createGraphicBuffer(block);
+        mBuffers[oldSlot]->setGenerationNumber(mGeneration);
+        return true;
+    }
+    int32_t d = (int32_t) mGeneration - (int32_t) oldGeneration;
+    LOG(WARNING) << "receiving stale buffer: generation "
+                 << mGeneration << " , diff " << d  << " : slot "
+                 << oldSlot;
+    return false;
+}
+
+status_t OutputBufferQueue::outputBuffer(
+        const C2ConstGraphicBlock& block,
+        const BnGraphicBufferProducer::QueueBufferInput& input,
+        BnGraphicBufferProducer::QueueBufferOutput* output) {
+    uint32_t generation;
+    uint64_t bqId;
+    int32_t bqSlot;
+    bool display = displayBufferQueueBlock(block);
+    if (!getBufferQueueAssignment(block, &generation, &bqId, &bqSlot) ||
+        bqId == 0) {
+        // Block not from bufferqueue -- it must be attached before queuing.
+
+        mMutex.lock();
+        sp<IGraphicBufferProducer> outputIgbp = mIgbp;
+        uint32_t outputGeneration = mGeneration;
+        mMutex.unlock();
+
+        status_t status = attachToBufferQueue(
+                block, outputIgbp, outputGeneration, &bqSlot);
+        if (status != OK) {
+            LOG(WARNING) << "outputBuffer -- attaching failed.";
+            return INVALID_OPERATION;
+        }
+
+        status = outputIgbp->queueBuffer(static_cast<int>(bqSlot),
+                                     input, output);
+        if (status != OK) {
+            LOG(ERROR) << "outputBuffer -- queueBuffer() failed "
+                       "on non-bufferqueue-based block. "
+                       "Error = " << status << ".";
+            return status;
+        }
+        return OK;
+    }
+
+    mMutex.lock();
+    sp<IGraphicBufferProducer> outputIgbp = mIgbp;
+    uint32_t outputGeneration = mGeneration;
+    uint64_t outputBqId = mBqId;
+    mMutex.unlock();
+
+    if (!outputIgbp) {
+        LOG(VERBOSE) << "outputBuffer -- output surface is null.";
+        return NO_INIT;
+    }
+
+    if (!display) {
+        LOG(WARNING) << "outputBuffer -- cannot display "
+                     "bufferqueue-based block to the bufferqueue.";
+        return UNKNOWN_ERROR;
+    }
+    if (bqId != outputBqId || generation != outputGeneration) {
+        int32_t diff = (int32_t) outputGeneration - (int32_t) generation;
+        LOG(WARNING) << "outputBuffer -- buffers from old generation to "
+                     << outputGeneration << " , diff: " << diff
+                     << " , slot: " << bqSlot;
+        return DEAD_OBJECT;
+    }
+
+    status_t status = outputIgbp->queueBuffer(static_cast<int>(bqSlot),
+                                          input, output);
+    if (status != OK) {
+        LOG(ERROR) << "outputBuffer -- queueBuffer() failed "
+                   "on bufferqueue-based block. "
+                   "Error = " << status << ".";
+        return status;
+    }
+    return OK;
+}
+
+void OutputBufferQueue::holdBufferQueueBlocks(
+        const std::list<std::unique_ptr<C2Work>>& workList) {
+    forEachBlock(workList,
+                 std::bind(&OutputBufferQueue::registerBuffer,
+                           this, std::placeholders::_1));
+}
+
+}  // namespace utils
+}  // namespace V1_0
+}  // namespace c2
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
+
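A hedged caller-side sketch (assumed usage, not part of this change) of how the now-flattened OutputBufferQueue is driven; the surrounding object names (igbp, generation, bqId, workItems, block, queueInput, queueOutput) are illustrative:

using ::android::hardware::media::c2::V1_0::utils::OutputBufferQueue;

OutputBufferQueue outputQueue;  // one instance lives alongside the codec client

// When the output surface changes, reconfigure; this also migrates buffers that
// are still held from the previous surface generation.
outputQueue.configure(igbp, generation, bqId);

// As finished work arrives from the component, keep bufferqueue-backed output
// blocks registered so they stay associated with the current surface.
outputQueue.holdBufferQueueBlocks(workItems);

// To render a block, queue it to the surface with the usual BufferQueue
// input/output structs.
status_t err = outputQueue.outputBuffer(block, queueInput, &queueOutput);
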
diff --git a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/ClientBlockHelper.h b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/OutputBufferQueue.h
similarity index 78%
rename from media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/ClientBlockHelper.h
rename to media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/OutputBufferQueue.h
index 0a2298c..80368f7 100644
--- a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/ClientBlockHelper.h
+++ b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/OutputBufferQueue.h
@@ -14,13 +14,15 @@
  * limitations under the License.
  */
 
-#ifndef CLIENT_BLOCK_HELPER_H
-#define CLIENT_BLOCK_HELPER_H
+#ifndef CODEC2_HIDL_V1_0_UTILS_OUTPUT_BUFFER_QUEUE
+#define CODEC2_HIDL_V1_0_UTILS_OUTPUT_BUFFER_QUEUE
 
 #include <gui/IGraphicBufferProducer.h>
 #include <codec2/hidl/1.0/types.h>
 #include <C2Work.h>
 
+struct C2_HIDE _C2BlockPoolData;
+
 namespace android {
 namespace hardware {
 namespace media {
@@ -61,8 +63,16 @@
 
 private:
 
-    class Impl;
-    std::unique_ptr<Impl> mImpl;
+    std::mutex mMutex;
+    sp<IGraphicBufferProducer> mIgbp;
+    uint32_t mGeneration;
+    uint64_t mBqId;
+    std::shared_ptr<int> mOwner;
+    // To migrate existing buffers
+    sp<GraphicBuffer> mBuffers[BufferQueueDefs::NUM_BUFFER_SLOTS]; // find a better way
+    std::weak_ptr<_C2BlockPoolData> mPoolDatas[BufferQueueDefs::NUM_BUFFER_SLOTS];
+
+    bool registerBuffer(const C2ConstGraphicBlock& block);
 };
 
 }  // namespace utils
@@ -72,4 +82,4 @@
 }  // namespace hardware
 }  // namespace android
 
-#endif  // CLIENT_BLOCK_HELPER_H
+#endif  // CODEC2_HIDL_V1_0_UTILS_OUTPUT_BUFFER_QUEUE
diff --git a/media/codec2/hidl/client/include/codec2/hidl/client.h b/media/codec2/hidl/client/include/codec2/hidl/client.h
index c37407f..dca28f7 100644
--- a/media/codec2/hidl/client/include/codec2/hidl/client.h
+++ b/media/codec2/hidl/client/include/codec2/hidl/client.h
@@ -18,7 +18,7 @@
 #define CODEC2_HIDL_CLIENT_H
 
 #include <gui/IGraphicBufferProducer.h>
-#include <codec2/hidl/1.0/ClientBlockHelper.h>
+#include <codec2/hidl/1.0/OutputBufferQueue.h>
 #include <C2PlatformSupport.h>
 #include <C2Component.h>
 #include <C2Buffer.h>
diff --git a/media/codec2/vndk/C2AllocatorGralloc.cpp b/media/codec2/vndk/C2AllocatorGralloc.cpp
index 3a715b1..8fffa5e 100644
--- a/media/codec2/vndk/C2AllocatorGralloc.cpp
+++ b/media/codec2/vndk/C2AllocatorGralloc.cpp
@@ -633,7 +633,7 @@
             int32_t stride;
             if (mMapper2) {
                 stride = int32_t(mInfo2.stride);
-            } if (mMapper3) {
+            } else if (mMapper3) {
                 stride = int32_t(mInfo3.stride);
             } else {
                 stride = int32_t(mInfo4.stride);
@@ -769,7 +769,7 @@
             int32_t stride;
             if (mMapper2) {
                 stride = int32_t(mInfo2.stride);
-            } if (mMapper3) {
+            } else if (mMapper3) {
                 stride = int32_t(mInfo3.stride);
             } else {
                 stride = int32_t(mInfo4.stride);
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index b707d78..a4322a1 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -472,6 +472,8 @@
  * This is intended for developers to use when debugging.
  * It is not for display to users.
  *
+ * Available since API level 26.
+ *
  * @return pointer to a text representation of an AAudio result code.
  */
 AAUDIO_API const char * AAudio_convertResultToText(aaudio_result_t returnCode) __INTRODUCED_IN(26);
@@ -482,6 +484,8 @@
  * This is intended for developers to use when debugging.
  * It is not for display to users.
  *
+ * Available since API level 26.
+ *
  * @return pointer to a text representation of an AAudio state.
  */
 AAUDIO_API const char * AAudio_convertStreamStateToText(aaudio_stream_state_t state)
@@ -502,6 +506,8 @@
  * chosen by the device when it is opened.
  *
  * AAudioStreamBuilder_delete() must be called when you are done using the builder.
+ *
+ * Available since API level 26.
  */
 AAUDIO_API aaudio_result_t AAudio_createStreamBuilder(AAudioStreamBuilder** builder)
         __INTRODUCED_IN(26);
@@ -513,6 +519,8 @@
  * The default, if you do not call this function, is {@link #AAUDIO_UNSPECIFIED},
  * in which case the primary device will be used.
  *
+ * Available since API level 26.
+ *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param deviceId device identifier or {@link #AAUDIO_UNSPECIFIED}
  */
@@ -530,6 +538,8 @@
  * If an exact value is specified then an opened stream will use that value.
  * If a stream cannot be opened with the specified value then the open will fail.
  *
+ * Available since API level 26.
+ *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param sampleRate frames per second. Common rates include 44100 and 48000 Hz.
  */
@@ -547,6 +557,8 @@
  * If an exact value is specified then an opened stream will use that value.
  * If a stream cannot be opened with the specified value then the open will fail.
  *
+ * Available since API level 26.
+ *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param channelCount Number of channels desired.
  */
@@ -556,6 +568,8 @@
 /**
  * Identical to AAudioStreamBuilder_setChannelCount().
  *
+ * Available since API level 26.
+ *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param samplesPerFrame Number of samples in a frame.
  */
@@ -573,6 +587,8 @@
  * If an exact value is specified then an opened stream will use that value.
  * If a stream cannot be opened with the specified value then the open will fail.
  *
+ * Available since API level 26.
+ *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param format common formats are {@link #AAUDIO_FORMAT_PCM_FLOAT} and
  *               {@link #AAUDIO_FORMAT_PCM_I16}.
@@ -588,6 +604,8 @@
  * The requested sharing mode may not be available.
  * The application can query for the actual mode after the stream is opened.
  *
+ * Available since API level 26.
+ *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param sharingMode {@link #AAUDIO_SHARING_MODE_SHARED} or {@link #AAUDIO_SHARING_MODE_EXCLUSIVE}
  */
@@ -599,6 +617,8 @@
  *
  * The default, if you do not call this function, is {@link #AAUDIO_DIRECTION_OUTPUT}.
  *
+ * Available since API level 26.
+ *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param direction {@link #AAUDIO_DIRECTION_OUTPUT} or {@link #AAUDIO_DIRECTION_INPUT}
  */
@@ -611,6 +631,8 @@
  *
  * The default, if you do not call this function, is {@link #AAUDIO_UNSPECIFIED}.
  *
+ * Available since API level 26.
+ *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param numFrames the desired buffer capacity in frames or {@link #AAUDIO_UNSPECIFIED}
  */
@@ -629,6 +651,8 @@
  * You can call AAudioStream_getPerformanceMode()
  * to find out the final mode for the stream.
  *
+ * Available since API level 26.
+ *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param mode the desired performance mode, e.g. {@link #AAUDIO_PERFORMANCE_MODE_LOW_LATENCY}
  */
@@ -644,7 +668,7 @@
  *
  * The default, if you do not call this function, is {@link #AAUDIO_USAGE_MEDIA}.
  *
- * Added in API level 28.
+ * Available since API level 28.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param usage the desired usage, e.g. {@link #AAUDIO_USAGE_GAME}
@@ -661,7 +685,7 @@
  *
  * The default, if you do not call this function, is {@link #AAUDIO_CONTENT_TYPE_MUSIC}.
  *
- * Added in API level 28.
+ * Available since API level 28.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param contentType the type of audio data, e.g. {@link #AAUDIO_CONTENT_TYPE_SPEECH}
@@ -681,7 +705,7 @@
  * That is because VOICE_RECOGNITION is the preset with the lowest latency
  * on many platforms.
  *
- * Added in API level 28.
+ * Available since API level 28.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param inputPreset the desired configuration for recording
@@ -697,7 +721,7 @@
  * Note that an application can also set its global policy, in which case the most restrictive
  * policy is always applied. See {@link android.media.AudioAttributes#setAllowedCapturePolicy(int)}
  *
- * Added in API level 29.
+ * Available since API level 29.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param capturePolicy the desired level of opt-out from being captured.
@@ -727,7 +751,7 @@
  *
  * Allocated session IDs will always be positive and nonzero.
  *
- * Added in API level 28.
+ * Available since API level 28.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param sessionId an allocated sessionID or {@link #AAUDIO_SESSION_ID_ALLOCATE}
@@ -826,6 +850,8 @@
  *
  * Note that the AAudio callbacks will never be called simultaneously from multiple threads.
  *
+ * Available since API level 26.
+ *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param callback pointer to a function that will process audio data.
  * @param userData pointer to an application data structure that will be passed
@@ -854,6 +880,8 @@
  * If you do call this function then the requested size should be less than
  * half the buffer capacity, to allow double buffering.
  *
+ * Available since API level 26.
+ *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param numFrames the desired buffer size in frames or {@link #AAUDIO_UNSPECIFIED}
  */
@@ -905,6 +933,8 @@
  *
  * Note that the AAudio callbacks will never be called simultaneously from multiple threads.
  *
+ * Available since API level 26.
+ *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param callback pointer to a function that will be called if an error occurs.
  * @param userData pointer to an application data structure that will be passed
@@ -919,6 +949,8 @@
  * AAudioStream_close() must be called when finished with the stream to recover
  * the memory and to free the associated resources.
  *
+ * Available since API level 26.
+ *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param stream pointer to a variable to receive the new stream reference
  * @return {@link #AAUDIO_OK} or a negative error.
@@ -929,6 +961,8 @@
 /**
  * Delete the resources associated with the StreamBuilder.
  *
+ * Available since API level 26.
+ *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @return {@link #AAUDIO_OK} or a negative error.
  */
@@ -942,6 +976,8 @@
 /**
  * Free the resources associated with a stream created by AAudioStreamBuilder_openStream()
  *
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return {@link #AAUDIO_OK} or a negative error.
  */
@@ -954,6 +990,8 @@
  * After this call the state will be in {@link #AAUDIO_STREAM_STATE_STARTING} or
  * {@link #AAUDIO_STREAM_STATE_STARTED}.
  *
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return {@link #AAUDIO_OK} or a negative error.
  */
@@ -969,6 +1007,8 @@
  * This will return {@link #AAUDIO_ERROR_UNIMPLEMENTED} for input streams.
  * For input streams use AAudioStream_requestStop().
  *
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return {@link #AAUDIO_OK} or a negative error.
  */
@@ -984,6 +1024,8 @@
  *
  * This will return {@link #AAUDIO_ERROR_UNIMPLEMENTED} for input streams.
  *
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return {@link #AAUDIO_OK} or a negative error.
  */
@@ -995,6 +1037,8 @@
  * After this call the state will be in {@link #AAUDIO_STREAM_STATE_STOPPING} or
  * {@link #AAUDIO_STREAM_STATE_STOPPED}.
  *
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return {@link #AAUDIO_OK} or a negative error.
  */
@@ -1008,6 +1052,8 @@
  * call AAudioStream_waitForStateChange() with currentState
  * set to {@link #AAUDIO_STREAM_STATE_UNKNOWN} and a zero timeout.
  *
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  */
 AAUDIO_API aaudio_stream_state_t AAudioStream_getState(AAudioStream* stream) __INTRODUCED_IN(26);
@@ -1028,6 +1074,8 @@
  * }
  * </code></pre>
  *
+ * Available since API level 26.
+ *
  * @param stream A reference provided by AAudioStreamBuilder_openStream()
  * @param inputState The state we want to avoid.
  * @param nextState Pointer to a variable that will be set to the new state.
@@ -1056,6 +1104,8 @@
  *
  * If the call times out then zero or a partial frame count will be returned.
  *
+ * Available since API level 26.
+ *
  * @param stream A stream created using AAudioStreamBuilder_openStream().
  * @param buffer The address of the first sample.
  * @param numFrames Number of frames to read. Only complete frames will be written.
@@ -1079,6 +1129,8 @@
  *
  * If the call times out then zero or a partial frame count will be returned.
  *
+ * Available since API level 26.
+ *
  * @param stream A stream created using AAudioStreamBuilder_openStream().
  * @param buffer The address of the first sample.
  * @param numFrames Number of frames to write. Only complete frames will be written.
@@ -1104,6 +1156,8 @@
  * You can check the return value or call AAudioStream_getBufferSizeInFrames()
  * to see what the actual final size is.
  *
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @param numFrames requested number of frames that can be filled without blocking
  * @return actual buffer size in frames or a negative error
@@ -1114,6 +1168,8 @@
 /**
  * Query the maximum number of frames that can be filled without blocking.
  *
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return buffer size in frames.
  */
@@ -1129,6 +1185,8 @@
  * For some endpoints, the burst size can vary dynamically.
  * But these tend to be devices with high latency.
  *
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return burst size
  */
@@ -1137,6 +1195,8 @@
 /**
  * Query maximum buffer capacity in frames.
  *
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return  buffer capacity in frames
  */
@@ -1158,6 +1218,8 @@
  * {@link #AAUDIO_UNSPECIFIED} indicates that the callback buffer size for this stream
  * may vary from one dataProc callback to the next.
  *
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return callback buffer size in frames or {@link #AAUDIO_UNSPECIFIED}
  */
@@ -1175,12 +1237,16 @@
  * Note that some INPUT devices may not support this function.
  * In that case a 0 will always be returned.
  *
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return the underrun or overrun count
  */
 AAUDIO_API int32_t AAudioStream_getXRunCount(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return actual sample rate
  */
@@ -1190,6 +1256,8 @@
  * A stream has one or more channels of data.
  * A frame will contain one sample for each channel.
  *
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return actual number of channels
  */
@@ -1198,18 +1266,24 @@
 /**
  * Identical to AAudioStream_getChannelCount().
  *
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return actual number of samples per frame
  */
 AAUDIO_API int32_t AAudioStream_getSamplesPerFrame(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return actual device ID
  */
 AAUDIO_API int32_t AAudioStream_getDeviceId(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return actual data format
  */
@@ -1217,6 +1291,9 @@
 
 /**
  * Provide actual sharing mode.
+ *
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return  actual sharing mode
  */
@@ -1226,12 +1303,16 @@
 /**
  * Get the performance mode used by the stream.
  *
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  */
 AAUDIO_API aaudio_performance_mode_t AAudioStream_getPerformanceMode(AAudioStream* stream)
         __INTRODUCED_IN(26);
 
 /**
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return direction
  */
@@ -1245,6 +1326,8 @@
  *
  * The frame position is monotonically increasing.
  *
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return frames written
  */
@@ -1258,6 +1341,8 @@
  *
  * The frame position is monotonically increasing.
  *
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return frames read
  */
@@ -1281,7 +1366,7 @@
  *
  * The sessionID for a stream should not change once the stream has been opened.
  *
- * Added in API level 28.
+ * Available since API level 28.
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return session ID or {@link #AAUDIO_SESSION_ID_NONE}
@@ -1304,6 +1389,8 @@
  *
  * The position and time passed back are monotonically increasing.
  *
+ * Available since API level 26.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @param clockid CLOCK_MONOTONIC or CLOCK_BOOTTIME
  * @param framePosition pointer to a variable to receive the position
@@ -1316,7 +1403,7 @@
 /**
  * Return the use case for the stream.
  *
- * Added in API level 28.
+ * Available since API level 28.
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return usage, for example {@link #AAUDIO_USAGE_MEDIA}
@@ -1326,7 +1413,7 @@
 /**
  * Return the content type for the stream.
  *
- * Added in API level 28.
+ * Available since API level 28.
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return content type, for example {@link #AAUDIO_CONTENT_TYPE_MUSIC}
@@ -1337,7 +1424,7 @@
 /**
  * Return the input preset for the stream.
  *
- * Added in API level 28.
+ * Available since API level 28.
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return input preset, for example {@link #AAUDIO_INPUT_PRESET_CAMCORDER}
@@ -1349,7 +1436,7 @@
  * Return the policy that determines whether the audio may or may not be captured
  * by other apps or the system.
  *
- * Added in API level 29.
+ * Available since API level 29.
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return the allowed capture policy, for example {@link #AAUDIO_ALLOW_CAPTURE_BY_ALL}
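The annotations above cover the full builder/stream lifecycle documented in this header. A minimal output-stream sketch using only functions declared here; most result codes are left unchecked for brevity, and real code should check every one:

    #include <aaudio/AAudio.h>

    static aaudio_result_t playSilence() {
        AAudioStreamBuilder *builder = nullptr;
        AAudioStream *stream = nullptr;
        aaudio_result_t result = AAudio_createStreamBuilder(&builder);
        if (result != AAUDIO_OK) return result;

        AAudioStreamBuilder_setFormat(builder, AAUDIO_FORMAT_PCM_I16);
        AAudioStreamBuilder_setChannelCount(builder, 2);
        AAudioStreamBuilder_setPerformanceMode(builder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);

        result = AAudioStreamBuilder_openStream(builder, &stream);
        AAudioStreamBuilder_delete(builder);  // builder is no longer needed once opened
        if (result != AAUDIO_OK) return result;

        // A common low-latency choice: size the buffer to a couple of bursts.
        const int32_t burst = AAudioStream_getFramesPerBurst(stream);
        AAudioStream_setBufferSizeInFrames(stream, 2 * burst);

        int16_t frames[2 * 256] = {};  // 256 stereo frames of silence
        AAudioStream_requestStart(stream);
        AAudioStream_write(stream, frames, 256, 100 * 1000 * 1000 /* 100 ms timeout */);
        AAudioStream_requestStop(stream);
        return AAudioStream_close(stream);
    }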
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 6614e5e..7481daa 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -250,6 +250,8 @@
         mTimeOffsetNanos = offsetMicros * AAUDIO_NANOS_PER_MICROSECOND;
     }
 
+    setBufferSize(capacity / 2); // Default buffer size to match Q
+
     setState(AAUDIO_STREAM_STATE_OPEN);
 
     return result;
@@ -739,6 +741,7 @@
     adjustedFrames = std::min(actualFrames, adjustedFrames);
 
     mBufferSizeInFrames = adjustedFrames;
+    ALOGV("%s(%d) returns %d", __func__, requestedFrames, adjustedFrames);
     return (aaudio_result_t) adjustedFrames;
 }
 
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 32904bb..ad79e9c 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -1,7 +1,15 @@
 cc_library_headers {
     name: "libaudioclient_headers",
     vendor_available: true,
-    export_include_dirs: ["include"],
+    header_libs: [
+        "libaudiofoundation_headers",
+    ],
+    export_include_dirs: [
+        "include",
+    ],
+    export_header_lib_headers: [
+        "libaudiofoundation_headers",
+    ],
 }
 
 cc_library_shared {
@@ -63,6 +71,7 @@
         "TrackPlayerBase.cpp",
     ],
     shared_libs: [
+        "libaudiofoundation",
         "libaudioutils",
         "libaudiopolicy",
         "libaudiomanager",
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 46cfb68..6e9a7cf 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -392,20 +392,18 @@
     virtual status_t openOutput(audio_module_handle_t module,
                                 audio_io_handle_t *output,
                                 audio_config_t *config,
-                                audio_devices_t *devices,
-                                const String8& address,
+                                const sp<DeviceDescriptorBase>& device,
                                 uint32_t *latencyMs,
                                 audio_output_flags_t flags)
     {
-        if (output == NULL || config == NULL || devices == NULL || latencyMs == NULL) {
+        if (output == nullptr || config == nullptr || device == nullptr || latencyMs == nullptr) {
             return BAD_VALUE;
         }
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
         data.writeInt32(module);
         data.write(config, sizeof(audio_config_t));
-        data.writeInt32(*devices);
-        data.writeString8(address);
+        data.writeParcelable(*device);
         data.writeInt32((int32_t) flags);
         status_t status = remote()->transact(OPEN_OUTPUT, data, &reply);
         if (status != NO_ERROR) {
@@ -420,7 +418,6 @@
         *output = (audio_io_handle_t)reply.readInt32();
         ALOGV("openOutput() returned output, %d", *output);
         reply.read(config, sizeof(audio_config_t));
-        *devices = (audio_devices_t)reply.readInt32();
         *latencyMs = reply.readInt32();
         return NO_ERROR;
     }
@@ -1198,19 +1195,21 @@
             if (data.read(&config, sizeof(audio_config_t)) != NO_ERROR) {
                 ALOGE("b/23905951");
             }
-            audio_devices_t devices = (audio_devices_t)data.readInt32();
-            String8 address(data.readString8());
+            sp<DeviceDescriptorBase> device = new DeviceDescriptorBase(AUDIO_DEVICE_NONE);
+            status_t status = NO_ERROR;
+            if ((status = data.readParcelable(device.get())) != NO_ERROR) {
+                reply->writeInt32((int32_t)status);
+                return NO_ERROR;
+            }
             audio_output_flags_t flags = (audio_output_flags_t) data.readInt32();
             uint32_t latencyMs = 0;
             audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
-            status_t status = openOutput(module, &output, &config,
-                                         &devices, address, &latencyMs, flags);
+            status = openOutput(module, &output, &config, device, &latencyMs, flags);
             ALOGV("OPEN_OUTPUT output, %d", output);
             reply->writeInt32((int32_t)status);
             if (status == NO_ERROR) {
                 reply->writeInt32((int32_t)output);
                 reply->write(&config, sizeof(audio_config_t));
-                reply->writeInt32(devices);
                 reply->writeInt32(latencyMs);
             }
             return NO_ERROR;
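openOutput now passes the output device as a parcelable DeviceDescriptorBase rather than a raw audio_devices_t plus address string. A minimal sketch of the writeParcelable/readParcelable round trip the proxy and stub rely on; it assumes only that DeviceDescriptorBase keeps its Parcelable read/write symmetry:

    #include <binder/Parcel.h>
    #include <media/DeviceDescriptorBase.h>
    #include <system/audio.h>

    using namespace android;

    status_t roundTripDevice(const sp<DeviceDescriptorBase>& in,
                             sp<DeviceDescriptorBase>* out) {
        Parcel parcel;
        // Proxy side: flatten the descriptor into the transaction parcel.
        status_t status = parcel.writeParcelable(*in);
        if (status != NO_ERROR) return status;

        // Stub side: unflatten into a placeholder descriptor, mirroring OPEN_OUTPUT.
        parcel.setDataPosition(0);
        sp<DeviceDescriptorBase> device = new DeviceDescriptorBase(AUDIO_DEVICE_NONE);
        status = parcel.readParcelable(device.get());
        if (status != NO_ERROR) return status;

        *out = device;
        return NO_ERROR;
    }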
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index b580a88..0a65857 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -27,6 +27,7 @@
 #include <binder/Parcel.h>
 #include <binder/Parcelable.h>
 #include <media/AudioClient.h>
+#include <media/DeviceDescriptorBase.h>
 #include <media/IAudioTrack.h>
 #include <media/IAudioFlingerClient.h>
 #include <system/audio.h>
@@ -416,8 +417,7 @@
     virtual status_t openOutput(audio_module_handle_t module,
                                 audio_io_handle_t *output,
                                 audio_config_t *config,
-                                audio_devices_t *devices,
-                                const String8& address,
+                                const sp<DeviceDescriptorBase>& device,
                                 uint32_t *latencyMs,
                                 audio_output_flags_t flags) = 0;
     virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
diff --git a/media/libaudiofoundation/Android.bp b/media/libaudiofoundation/Android.bp
index 1df0d16..edc06d2 100644
--- a/media/libaudiofoundation/Android.bp
+++ b/media/libaudiofoundation/Android.bp
@@ -15,6 +15,7 @@
 cc_library {
     name: "libaudiofoundation",
     vendor_available: true,
+    double_loadable: true,
 
     srcs: [
         "AudioContainers.cpp",
diff --git a/media/libaudiofoundation/include/media/AudioContainers.h b/media/libaudiofoundation/include/media/AudioContainers.h
index 8347c71..05e68fa 100644
--- a/media/libaudiofoundation/include/media/AudioContainers.h
+++ b/media/libaudiofoundation/include/media/AudioContainers.h
@@ -17,6 +17,7 @@
 #pragma once
 
 #include <algorithm>
+#include <functional>
 #include <iterator>
 #include <set>
 #include <vector>
@@ -78,6 +79,11 @@
     return deviceTypes.size() == 1 && p(*(deviceTypes.begin()));
 }
 
+static inline bool areAllOfSameDeviceType(const DeviceTypeSet& deviceTypes,
+                                          std::function<bool(audio_devices_t)> p) {
+    return std::all_of(deviceTypes.begin(), deviceTypes.end(), p);
+}
+
 static inline void resetDeviceTypes(DeviceTypeSet& deviceTypes, audio_devices_t typeToAdd) {
     deviceTypes.clear();
     deviceTypes.insert(typeToAdd);
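areAllOfSameDeviceType is a thin wrapper over std::all_of with a caller-supplied predicate (and is vacuously true for an empty set). A small usage sketch; audio_is_output_device is used only as a representative predicate:

    #include <media/AudioContainers.h>
    #include <system/audio.h>

    using namespace android;

    bool outputsOnly(const DeviceTypeSet& deviceTypes) {
        // True when every device type in the set is an output device.
        return areAllOfSameDeviceType(deviceTypes, audio_is_output_device);
    }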
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index b07f21d..3d3a5eb 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -229,14 +229,14 @@
 
 status_t DeviceHalHidl::openOutputStream(
         audio_io_handle_t handle,
-        audio_devices_t devices,
+        audio_devices_t deviceType,
         audio_output_flags_t flags,
         struct audio_config *config,
         const char *address,
         sp<StreamOutHalInterface> *outStream) {
     if (mDevice == 0) return NO_INIT;
     DeviceAddress hidlDevice;
-    status_t status = deviceAddressFromHal(devices, address, &hidlDevice);
+    status_t status = deviceAddressFromHal(deviceType, address, &hidlDevice);
     if (status != OK) return status;
     AudioConfig hidlConfig;
     HidlUtils::audioConfigFromHal(*config, &hidlConfig);
diff --git a/media/libaudiohal/impl/DeviceHalLocal.cpp b/media/libaudiohal/impl/DeviceHalLocal.cpp
index ee68252..dfbb6b2 100644
--- a/media/libaudiohal/impl/DeviceHalLocal.cpp
+++ b/media/libaudiohal/impl/DeviceHalLocal.cpp
@@ -104,7 +104,7 @@
 
 status_t DeviceHalLocal::openOutputStream(
         audio_io_handle_t handle,
-        audio_devices_t devices,
+        audio_devices_t deviceType,
         audio_output_flags_t flags,
         struct audio_config *config,
         const char *address,
@@ -112,11 +112,11 @@
     audio_stream_out_t *halStream;
     ALOGV("open_output_stream handle: %d devices: %x flags: %#x"
             "srate: %d format %#x channels %x address %s",
-            handle, devices, flags,
+            handle, deviceType, flags,
             config->sample_rate, config->format, config->channel_mask,
             address);
     int openResult = mDev->open_output_stream(
-            mDev, handle, devices, flags, config, &halStream, address);
+            mDev, handle, deviceType, flags, config, &halStream, address);
     if (openResult == OK) {
         *outStream = new StreamOutHalLocal(halStream, this);
     }
diff --git a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
index e565237..2200a7f 100644
--- a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
@@ -69,7 +69,7 @@
     // by releasing all references to the returned object.
     virtual status_t openOutputStream(
             audio_io_handle_t handle,
-            audio_devices_t devices,
+            audio_devices_t deviceType,
             audio_output_flags_t flags,
             struct audio_config *config,
             const char *address,
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 8e96e8c..ac88448 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -328,11 +328,11 @@
 
     static_libs: [
         "libc_malloc_debug_backtrace", // for memory heap analysis
-        "resourcemanager_aidl_interface-cpp",
+        "resourcemanager_aidl_interface-unstable-cpp",
     ],
 
     export_static_lib_headers: [
-        "resourcemanager_aidl_interface-cpp",
+        "resourcemanager_aidl_interface-unstable-cpp",
     ],
 
     export_include_dirs: [
diff --git a/media/libmediametrics/IMediaAnalyticsService.cpp b/media/libmediametrics/IMediaAnalyticsService.cpp
index 1ab6653..4324f6d 100644
--- a/media/libmediametrics/IMediaAnalyticsService.cpp
+++ b/media/libmediametrics/IMediaAnalyticsService.cpp
@@ -55,13 +55,17 @@
 
         Parcel data;
         data.writeInterfaceToken(IMediaAnalyticsService::getInterfaceDescriptor());
-        item->writeToParcel(&data);
 
-        status_t err = remote()->transact(
+        status_t status = item->writeToParcel(&data);
+        if (status != NO_ERROR) { // assume failure logged in item
+            return status;
+        }
+
+        status = remote()->transact(
                 SUBMIT_ITEM_ONEWAY, data, nullptr /* reply */, IBinder::FLAG_ONEWAY);
-        ALOGW_IF(err != NO_ERROR, "%s: bad response from service for submit, err=%d",
-                __func__, err);
-        return err;
+        ALOGW_IF(status != NO_ERROR, "%s: bad response from service for submit, status=%d",
+                __func__, status);
+        return status;
     }
 };
 
@@ -79,11 +83,14 @@
         CHECK_INTERFACE(IMediaAnalyticsService, data, reply);
 
         MediaAnalyticsItem * const item = MediaAnalyticsItem::create();
-        if (item->readFromParcel(data) < 0) {
-            return BAD_VALUE;
+        status_t status = item->readFromParcel(data);
+        if (status != NO_ERROR) { // assume failure logged in item
+            return status;
         }
+        // TODO: remove this setPid.
         item->setPid(clientPid);
-        const status_t status __unused = submitInternal(item, true /* release */);
+        status = submitInternal(item, true /* release */);
+        // assume failure logged by submitInternal
         return NO_ERROR;
     } break;
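This proxy/stub change, and the MediaAnalyticsItem rewrite below, stop discarding parcel errors and instead return the first failing status_t. The item code chains the calls with the GNU `?:` (Elvis) extension, which yields the left operand when it is nonzero, so NO_ERROR (0) falls through to the next call. A minimal sketch of the idiom with illustrative functions:

    #include <utils/Errors.h>

    using namespace android;

    static status_t writeHeader() { return NO_ERROR; }
    static status_t writeBody()   { return NO_ERROR; }
    static status_t writeFooter() { return NO_ERROR; }

    status_t writeAll() {
        // Each call runs only if the previous one returned NO_ERROR (0);
        // the first nonzero (failing) status is returned immediately.
        return writeHeader()
                ?: writeBody()
                ?: writeFooter();
    }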
 
diff --git a/media/libmediametrics/MediaAnalyticsItem.cpp b/media/libmediametrics/MediaAnalyticsItem.cpp
index a4efa49..14dce79 100644
--- a/media/libmediametrics/MediaAnalyticsItem.cpp
+++ b/media/libmediametrics/MediaAnalyticsItem.cpp
@@ -14,7 +14,6 @@
  * limitations under the License.
  */
 
-#undef LOG_TAG
 #define LOG_TAG "MediaAnalyticsItem"
 
 #include <inttypes.h>
@@ -23,6 +22,7 @@
 #include <sys/types.h>
 
 #include <mutex>
+#include <set>
 
 #include <binder/Parcel.h>
 #include <utils/Errors.h>
@@ -45,18 +45,6 @@
 // the service is off.
 #define SVC_TRIES               2
 
-// So caller doesn't need to know size of allocated space
-MediaAnalyticsItem *MediaAnalyticsItem::create()
-{
-    return MediaAnalyticsItem::create(kKeyNone);
-}
-
-MediaAnalyticsItem *MediaAnalyticsItem::create(MediaAnalyticsItem::Key key)
-{
-    MediaAnalyticsItem *item = new MediaAnalyticsItem(key);
-    return item;
-}
-
 MediaAnalyticsItem* MediaAnalyticsItem::convert(mediametrics_handle_t handle) {
     MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
     return item;
@@ -159,64 +147,50 @@
     return mPkgVersionCode;
 }
 
-// this key is for the overall record -- "codec", "player", "drm", etc
-MediaAnalyticsItem &MediaAnalyticsItem::setKey(MediaAnalyticsItem::Key key) {
-    mKey = key;
-    return *this;
-}
-
-// number of attributes we have in this record
-int32_t MediaAnalyticsItem::count() const {
-    return mPropCount;
-}
 
 // find the proper entry in the list
-size_t MediaAnalyticsItem::findPropIndex(const char *name, size_t len) const
+size_t MediaAnalyticsItem::findPropIndex(const char *name) const
 {
     size_t i = 0;
     for (; i < mPropCount; i++) {
-        if (mProps[i].isNamed(name, len)) break;
+        if (mProps[i].isNamed(name)) break;
     }
     return i;
 }
 
 MediaAnalyticsItem::Prop *MediaAnalyticsItem::findProp(const char *name) const {
-    size_t len = strlen(name);
-    size_t i = findPropIndex(name, len);
+    const size_t i = findPropIndex(name);
     if (i < mPropCount) {
         return &mProps[i];
     }
-    return NULL;
+    return nullptr;
 }
 
 // consider this "find-or-allocate".
 // caller validates type and uses clearPropValue() accordingly
 MediaAnalyticsItem::Prop *MediaAnalyticsItem::allocateProp(const char *name) {
-    size_t len = strlen(name);
-    size_t i = findPropIndex(name, len);
-    Prop *prop;
-
+    const size_t i = findPropIndex(name);
     if (i < mPropCount) {
-        prop = &mProps[i];
-    } else {
-        if (i == mPropSize) {
-            if (growProps() == false) {
-                ALOGE("failed allocation for new props");
-                return NULL;
-            }
-        }
-        i = mPropCount++;
-        prop = &mProps[i];
-        prop->setName(name, len);
+        return &mProps[i]; // already have it, return
     }
 
+    Prop *prop = allocateProp(); // get a new prop
+    if (prop == nullptr) return nullptr;
+    prop->setName(name);
     return prop;
 }
 
+MediaAnalyticsItem::Prop *MediaAnalyticsItem::allocateProp() {
+    if (mPropCount == mPropSize && growProps() == false) {
+        ALOGE("%s: failed allocation for new properties", __func__);
+        return nullptr;
+    }
+    return &mProps[mPropCount++];
+}
+
 // used within the summarizers; return whether property existed
 bool MediaAnalyticsItem::removeProp(const char *name) {
-    size_t len = strlen(name);
-    size_t i = findPropIndex(name, len);
+    const size_t i = findPropIndex(name);
     if (i < mPropCount) {
         mProps[i].clear();
         if (i != mPropCount-1) {
@@ -231,19 +205,15 @@
 
 // remove indicated keys and their values
 // return value is # keys removed
-int32_t MediaAnalyticsItem::filter(int n, MediaAnalyticsItem::Attr attrs[]) {
-    int zapped = 0;
-    if (attrs == NULL || n <= 0) {
-        return -1;
-    }
-    for (ssize_t i = 0 ; i < n ;  i++) {
+size_t MediaAnalyticsItem::filter(size_t n, const char *attrs[]) {
+    size_t zapped = 0;
+    for (size_t i = 0; i < n; ++i) {
         const char *name = attrs[i];
-        size_t len = strlen(name);
-        size_t j = findPropIndex(name, len);
+        size_t j = findPropIndex(name);
         if (j >= mPropCount) {
             // not there
             continue;
-        } else if (j+1 == mPropCount) {
+        } else if (j + 1 == mPropCount) {
             // last one, shorten
             zapped++;
             mProps[j].clear();
@@ -261,35 +231,31 @@
 
 // remove any keys NOT in the provided list
 // return value is # keys removed
-int32_t MediaAnalyticsItem::filterNot(int n, MediaAnalyticsItem::Attr attrs[]) {
-    int zapped = 0;
-    if (attrs == NULL || n <= 0) {
-        return -1;
-    }
-    for (ssize_t i = mPropCount-1 ; i >=0 ;  i--) {
-        Prop *prop = &mProps[i];
-        for (ssize_t j = 0; j < n ; j++) {
-            if (prop->isNamed(attrs[j])) {
-                prop->clear();
-                zapped++;
-                if (i != (ssize_t)(mPropCount-1)) {
-                    *prop = mProps[mPropCount-1];
-                }
-                mProps[mPropCount-1].clear();
-                mPropCount--;
-                break;
-            }
+size_t MediaAnalyticsItem::filterNot(size_t n, const char *attrs[]) {
+    std::set<std::string> check(attrs, attrs + n);
+    size_t zapped = 0;
+    for (size_t j = 0; j < mPropCount;) {
+        if (check.find(mProps[j].getName()) != check.end()) {
+            ++j;
+            continue;
+        }
+        if (j + 1 == mPropCount) {
+            // last one, shorten
+            zapped++;
+            mProps[j].clear();
+            mPropCount--;
+            break;
+        } else {
+            // in the middle, bring last one down and shorten
+            zapped++;
+            mProps[j].clear();
+            mProps[j] = mProps[mPropCount-1];
+            mPropCount--;
         }
     }
     return zapped;
 }
 
-// remove a single key
-// return value is 0 (not found) or 1 (found and removed)
-int32_t MediaAnalyticsItem::filter(MediaAnalyticsItem::Attr name) {
-    return filter(1, &name);
-}
-
 bool MediaAnalyticsItem::growProps(int increment)
 {
     if (increment <= 0) {
@@ -314,98 +280,77 @@
 // Parcel / serialize things for binder calls
 //
 
-int32_t MediaAnalyticsItem::readFromParcel(const Parcel& data) {
-    int32_t version = data.readInt32();
+status_t MediaAnalyticsItem::readFromParcel(const Parcel& data) {
+    int32_t version;
+    status_t status = data.readInt32(&version);
+    if (status != NO_ERROR) return status;
 
-    switch(version) {
-        case 0:
-          return readFromParcel0(data);
-          break;
-        default:
-          ALOGE("Unsupported MediaAnalyticsItem Parcel version: %d", version);
-          return -1;
+    switch (version) {
+    case 0:
+      return readFromParcel0(data);
+    default:
+      ALOGE("%s: unsupported parcel version: %d", __func__, version);
+      return INVALID_OPERATION;
     }
 }
 
-int32_t MediaAnalyticsItem::readFromParcel0(const Parcel& data) {
-    // into 'this' object
-    // .. we make a copy of the string to put away.
-    mKey = data.readCString();
-    mPid = data.readInt32();
-    mUid = data.readInt32();
-    mPkgName = data.readCString();
-    mPkgVersionCode = data.readInt64();
-    // We no longer pay attention to user setting of finalized, BUT it's
-    // still part of the wire packet -- so read & discard.
-    mTimestamp = data.readInt64();
-
-    int count = data.readInt32();
+status_t MediaAnalyticsItem::readFromParcel0(const Parcel& data) {
+    const char *s = data.readCString();
+    mKey = s == nullptr ? "" : s;
+    int32_t pid, uid;
+    status_t status = data.readInt32(&pid) ?: data.readInt32(&uid);
+    if (status != NO_ERROR) return status;
+    mPid = (pid_t)pid;
+    mUid = (uid_t)uid;
+    s = data.readCString();
+    mPkgName = s == nullptr ? "" : s;
+    int32_t count;
+    int64_t version, timestamp;
+    status = data.readInt64(&version) ?: data.readInt64(&timestamp) ?: data.readInt32(&count);
+    if (status != NO_ERROR) return status;
+    if (count < 0) return BAD_VALUE;
+    mPkgVersionCode = version;
+    mTimestamp = timestamp;
     for (int i = 0; i < count ; i++) {
-            MediaAnalyticsItem::Attr attr = data.readCString();
-            int32_t ztype = data.readInt32();
-                switch (ztype) {
-                    case MediaAnalyticsItem::kTypeInt32:
-                            setInt32(attr, data.readInt32());
-                            break;
-                    case MediaAnalyticsItem::kTypeInt64:
-                            setInt64(attr, data.readInt64());
-                            break;
-                    case MediaAnalyticsItem::kTypeDouble:
-                            setDouble(attr, data.readDouble());
-                            break;
-                    case MediaAnalyticsItem::kTypeCString:
-                            setCString(attr, data.readCString());
-                            break;
-                    case MediaAnalyticsItem::kTypeRate:
-                            {
-                                int64_t count = data.readInt64();
-                                int64_t duration = data.readInt64();
-                                setRate(attr, count, duration);
-                            }
-                            break;
-                    default:
-                            ALOGE("reading bad item type: %d, idx %d",
-                                  ztype, i);
-                            return -1;
-                }
+        Prop *prop = allocateProp();
+        if (prop == nullptr) return NO_MEMORY;
+        status_t status = prop->readFromParcel(data);
+        if (status != NO_ERROR) return status;
     }
-
-    return 0;
+    return NO_ERROR;
 }
 
-int32_t MediaAnalyticsItem::writeToParcel(Parcel *data) {
+status_t MediaAnalyticsItem::writeToParcel(Parcel *data) const {
+    if (data == nullptr) return BAD_VALUE;
 
-    if (data == NULL) return -1;
+    const int32_t version = 0;
+    status_t status = data->writeInt32(version);
+    if (status != NO_ERROR) return status;
 
-    int32_t version = 0;
-    data->writeInt32(version);
-
-    switch(version) {
-        case 0:
-          return writeToParcel0(data);
-          break;
-        default:
-          ALOGE("Unsupported MediaAnalyticsItem Parcel version: %d", version);
-          return -1;
+    switch (version) {
+    case 0:
+      return writeToParcel0(data);
+    default:
+      ALOGE("%s: unsupported parcel version: %d", __func__, version);
+      return INVALID_OPERATION;
     }
 }
 
-int32_t MediaAnalyticsItem::writeToParcel0(Parcel *data) {
+status_t MediaAnalyticsItem::writeToParcel0(Parcel *data) const {
+    status_t status =
+        data->writeCString(mKey.c_str())
+        ?: data->writeInt32(mPid)
+        ?: data->writeInt32(mUid)
+        ?: data->writeCString(mPkgName.c_str())
+        ?: data->writeInt64(mPkgVersionCode)
+        ?: data->writeInt64(mTimestamp);
+    if (status != NO_ERROR) return status;
 
-    data->writeCString(mKey.c_str());
-    data->writeInt32(mPid);
-    data->writeInt32(mUid);
-    data->writeCString(mPkgName.c_str());
-    data->writeInt64(mPkgVersionCode);
-    data->writeInt64(mTimestamp);
-
-    // set of items
-    const size_t count = mPropCount;
-    data->writeInt32(count);
-    for (size_t i = 0 ; i < count; i++ ) {
-        mProps[i].writeToParcel(data);
+    data->writeInt32((int32_t)mPropCount);
+    for (size_t i = 0 ; i < mPropCount; ++i) {
+        status = mProps[i].writeToParcel(data);
+        if (status != NO_ERROR) return status;
     }
-    return 0;
+    return NO_ERROR;
 }
 
 const char *MediaAnalyticsItem::toCString() {
@@ -506,7 +451,6 @@
     }
 }
 
-
 //static
 bool MediaAnalyticsItem::isEnabled() {
     // completely skip logging from certain UIDs. We do this here
@@ -634,199 +578,282 @@
     return true;
 }
 
-// a byte array; contents are
-// overall length (uint32) including the length field itself
-// encoding version (uint32)
-// count of properties (uint32)
-// N copies of:
-//     property name as length(int16), bytes
-//         the bytes WILL include the null terminator of the name
-//     type (uint8 -- 1 byte)
-//     size of value field (int16 -- 2 bytes)
-//     value (size based on type)
-//       int32, int64, double -- little endian 4/8/8 bytes respectively
-//       cstring -- N bytes of value [WITH terminator]
+namespace {
 
-enum { kInt32 = 0, kInt64, kDouble, kRate, kCString};
-
-bool MediaAnalyticsItem::dumpAttributes(char **pbuffer, size_t *plength) {
-
-    char *build = NULL;
-
-    if (pbuffer == NULL || plength == NULL)
-        return false;
-
-    // consistency for the caller, who owns whatever comes back in this pointer.
-    *pbuffer = NULL;
-
-    // first, let's calculate sizes
-    int32_t goal = 0;
-    int32_t version = 0;
-
-    goal += sizeof(uint32_t);   // overall length, including the length field
-    goal += sizeof(uint32_t);   // encoding version
-    goal += sizeof(uint32_t);   // # properties
-
-    int32_t count = mPropCount;
-    for (int i = 0 ; i < count; i++ ) {
-        Prop *prop = &mProps[i];
-        goal += sizeof(uint16_t);           // name length
-        goal += strlen(prop->mName) + 1;    // string + null
-        goal += sizeof(uint8_t);            // type
-        goal += sizeof(uint16_t);           // size of value
-        switch (prop->mType) {
-            case MediaAnalyticsItem::kTypeInt32:
-                    goal += sizeof(uint32_t);
-                    break;
-            case MediaAnalyticsItem::kTypeInt64:
-                    goal += sizeof(uint64_t);
-                    break;
-            case MediaAnalyticsItem::kTypeDouble:
-                    goal += sizeof(double);
-                    break;
-            case MediaAnalyticsItem::kTypeRate:
-                    goal += 2 * sizeof(uint64_t);
-                    break;
-            case MediaAnalyticsItem::kTypeCString:
-                    // length + actual string + null
-                    goal += strlen(prop->u.CStringValue) + 1;
-                    break;
-            default:
-                    ALOGE("found bad Prop type: %d, idx %d, name %s",
-                          prop->mType, i, prop->mName);
-                    return false;
-        }
+template <typename T>
+status_t insert(const T& val, char **bufferpptr, char *bufferptrmax)
+{
+    const size_t size = sizeof(val);
+    if (*bufferpptr + size > bufferptrmax) {
+        ALOGE("%s: buffer exceeded with size %zu", __func__, size);
+        return BAD_VALUE;
     }
-
-    // now that we have a size... let's allocate and fill
-    build = (char *)malloc(goal);
-    if (build == NULL)
-        return false;
-
-    memset(build, 0, goal);
-
-    char *filling = build;
-
-#define _INSERT(val, size) \
-    { memcpy(filling, &(val), (size)); filling += (size);}
-#define _INSERTSTRING(val, size) \
-    { memcpy(filling, (val), (size)); filling += (size);}
-
-    _INSERT(goal, sizeof(int32_t));
-    _INSERT(version, sizeof(int32_t));
-    _INSERT(count, sizeof(int32_t));
-
-    for (int i = 0 ; i < count; i++ ) {
-        Prop *prop = &mProps[i];
-        int16_t attrNameLen = strlen(prop->mName) + 1;
-        _INSERT(attrNameLen, sizeof(int16_t));
-        _INSERTSTRING(prop->mName, attrNameLen);    // termination included
-        int8_t elemtype;
-        int16_t elemsize;
-        switch (prop->mType) {
-            case MediaAnalyticsItem::kTypeInt32:
-                {
-                    elemtype = kInt32;
-                    _INSERT(elemtype, sizeof(int8_t));
-                    elemsize = sizeof(int32_t);
-                    _INSERT(elemsize, sizeof(int16_t));
-
-                    _INSERT(prop->u.int32Value, sizeof(int32_t));
-                    break;
-                }
-            case MediaAnalyticsItem::kTypeInt64:
-                {
-                    elemtype = kInt64;
-                    _INSERT(elemtype, sizeof(int8_t));
-                    elemsize = sizeof(int64_t);
-                    _INSERT(elemsize, sizeof(int16_t));
-
-                    _INSERT(prop->u.int64Value, sizeof(int64_t));
-                    break;
-                }
-            case MediaAnalyticsItem::kTypeDouble:
-                {
-                    elemtype = kDouble;
-                    _INSERT(elemtype, sizeof(int8_t));
-                    elemsize = sizeof(double);
-                    _INSERT(elemsize, sizeof(int16_t));
-
-                    _INSERT(prop->u.doubleValue, sizeof(double));
-                    break;
-                }
-            case MediaAnalyticsItem::kTypeRate:
-                {
-                    elemtype = kRate;
-                    _INSERT(elemtype, sizeof(int8_t));
-                    elemsize = 2 * sizeof(uint64_t);
-                    _INSERT(elemsize, sizeof(int16_t));
-
-                    _INSERT(prop->u.rate.count, sizeof(uint64_t));
-                    _INSERT(prop->u.rate.duration, sizeof(uint64_t));
-                    break;
-                }
-            case MediaAnalyticsItem::kTypeCString:
-                {
-                    elemtype = kCString;
-                    _INSERT(elemtype, sizeof(int8_t));
-                    elemsize = strlen(prop->u.CStringValue) + 1;
-                    _INSERT(elemsize, sizeof(int16_t));
-
-                    _INSERTSTRING(prop->u.CStringValue, elemsize);
-                    break;
-                }
-            default:
-                    // error if can't encode; warning if can't decode
-                    ALOGE("found bad Prop type: %d, idx %d, name %s",
-                          prop->mType, i, prop->mName);
-                    goto badness;
-        }
-    }
-
-    if (build + goal != filling) {
-        ALOGE("problems populating; wrote=%d planned=%d",
-              (int)(filling-build), goal);
-        goto badness;
-    }
-
-    *pbuffer = build;
-    *plength = goal;
-
-    return true;
-
-  badness:
-    free(build);
-    return false;
+    memcpy(*bufferpptr, &val, size);
+    *bufferpptr += size;
+    return NO_ERROR;
 }
 
-void MediaAnalyticsItem::Prop::writeToParcel(Parcel *data) const
+template <>
+status_t insert(const char * const& val, char **bufferpptr, char *bufferptrmax)
 {
-   data->writeCString(mName);
-   data->writeInt32(mType);
+    const size_t size = strlen(val) + 1;
+    if (size > UINT16_MAX || *bufferpptr + size > bufferptrmax) {
+        ALOGE("%s: buffer exceeded with size %zu", __func__, size);
+        return BAD_VALUE;
+    }
+    memcpy(*bufferpptr, val, size);
+    *bufferpptr += size;
+    return NO_ERROR;
+}
+
+template <>
+ __unused
+status_t insert(char * const& val, char **bufferpptr, char *bufferptrmax)
+{
+    return insert((const char *)val, bufferpptr, bufferptrmax);
+}
+
+template <typename T>
+status_t extract(T *val, const char **bufferpptr, const char *bufferptrmax)
+{
+    const size_t size = sizeof(*val);
+    if (*bufferpptr + size > bufferptrmax) {
+        ALOGE("%s: buffer exceeded with size %zu", __func__, size);
+        return BAD_VALUE;
+    }
+    memcpy(val, *bufferpptr, size);
+    *bufferpptr += size;
+    return NO_ERROR;
+}
+
+template <>
+status_t extract(char **val, const char **bufferpptr, const char *bufferptrmax)
+{
+    const char *ptr = *bufferpptr;
+    while (ptr < bufferptrmax && *ptr != 0) {
+        ++ptr;
+    }
+    if (ptr >= bufferptrmax) {
+        ALOGE("%s: buffer exceeded", __func__);
+        return BAD_VALUE; // bail out instead of reading past the end of the buffer
+    }
+    const size_t size = (ptr - *bufferpptr) + 1;
+    *val = (char *)malloc(size);
+    memcpy(*val, *bufferpptr, size);
+    *bufferpptr += size;
+    return NO_ERROR;
+}
+
+} // namespace
+
+status_t MediaAnalyticsItem::writeToByteString(char **pbuffer, size_t *plength) const
+{
+    if (pbuffer == nullptr || plength == nullptr)
+        return BAD_VALUE;
+
+    // get size
+    const size_t keySizeZeroTerminated = strlen(mKey.c_str()) + 1;
+    if (keySizeZeroTerminated > UINT16_MAX) {
+        ALOGW("%s: key size %zu too large", __func__, keySizeZeroTerminated);
+        return INVALID_OPERATION;
+    }
+    const uint16_t version = 0;
+    const uint32_t header_len =
+        sizeof(uint32_t)     // overall length
+        + sizeof(header_len) // header length
+        + sizeof(version)    // encoding version
+        + sizeof(uint16_t)   // key length
+        + keySizeZeroTerminated // key, zero terminated
+        + sizeof(int32_t)    // pid
+        + sizeof(int32_t)    // uid
+        + sizeof(int64_t)    // timestamp
+        ;
+
+    uint32_t len = header_len
+        + sizeof(uint32_t) // # properties
+        ;
+    for (size_t i = 0 ; i < mPropCount; ++i) {
+        const size_t size = mProps[i].getByteStringSize();
+        if (size > UINT_MAX - 1) {
+            ALOGW("%s: prop %zu has size %zu", __func__, i, size);
+            return INVALID_OPERATION;
+        }
+        len += size;
+    }
+
+    // TODO: consider package information and timestamp.
+
+    // now that we have a size... let's allocate and fill
+    char *build = (char *)calloc(1 /* nmemb */, len);
+    if (build == nullptr) return NO_MEMORY;
+
+    char *filling = build;
+    char *buildmax = build + len;
+    if (insert(len, &filling, buildmax) != NO_ERROR
+            || insert(header_len, &filling, buildmax) != NO_ERROR
+            || insert(version, &filling, buildmax) != NO_ERROR
+            || insert((uint16_t)keySizeZeroTerminated, &filling, buildmax) != NO_ERROR
+            || insert(mKey.c_str(), &filling, buildmax) != NO_ERROR
+            || insert((int32_t)mPid, &filling, buildmax) != NO_ERROR
+            || insert((int32_t)mUid, &filling, buildmax) != NO_ERROR
+            || insert((int64_t)mTimestamp, &filling, buildmax) != NO_ERROR
+            || insert((uint32_t)mPropCount, &filling, buildmax) != NO_ERROR) {
+        ALOGD("%s:could not write header", __func__);
+        free(build);
+        return INVALID_OPERATION;
+    }
+    for (size_t i = 0 ; i < mPropCount; ++i) {
+        if (mProps[i].writeToByteString(&filling, buildmax) != NO_ERROR) {
+            free(build);
+            ALOGD("%s:could not write prop %zu of %zu", __func__, i, mPropCount);
+            return INVALID_OPERATION;
+        }
+    }
+
+    if (filling != buildmax) {
+        ALOGE("problems populating; wrote=%d planned=%d",
+              (int)(filling - build), len);
+        free(build);
+        return INVALID_OPERATION;
+    }
+    *pbuffer = build;
+    *plength = len;
+    return NO_ERROR;
+}
+
+status_t MediaAnalyticsItem::readFromByteString(const char *bufferptr, size_t length)
+{
+    if (bufferptr == nullptr) return BAD_VALUE;
+
+    const char *read = bufferptr;
+    const char *readend = bufferptr + length;
+
+    uint32_t len;
+    uint32_t header_len;
+    int16_t version;
+    int16_t key_len;
+    char *key = nullptr;
+    int32_t pid;
+    int32_t uid;
+    int64_t timestamp;
+    uint32_t propCount;
+    if (extract(&len, &read, readend) != NO_ERROR
+            || extract(&header_len, &read, readend) != NO_ERROR
+            || extract(&version, &read, readend) != NO_ERROR
+            || extract(&key_len, &read, readend) != NO_ERROR
+            || extract(&key, &read, readend) != NO_ERROR
+            || extract(&pid, &read, readend) != NO_ERROR
+            || extract(&uid, &read, readend) != NO_ERROR
+            || extract(&timestamp, &read, readend) != NO_ERROR
+            || len > length
+            || header_len > len) {
+        free(key);
+        ALOGD("%s: invalid header", __func__);
+        return INVALID_OPERATION;
+    }
+    mKey = key;
+    free(key);
+    const size_t pos = read - bufferptr;
+    if (pos > header_len) {
+        ALOGD("%s: invalid header pos:%zu > header_len:%u",
+                __func__, pos, header_len);
+        return INVALID_OPERATION;
+    } else if (pos < header_len) {
+        ALOGD("%s: mismatched header pos:%zu < header_len:%u, advancing",
+                __func__, pos, header_len);
+        read += (header_len - pos);
+    }
+    if (extract(&propCount, &read, readend) != NO_ERROR) {
+        ALOGD("%s: cannot read prop count", __func__);
+        return INVALID_OPERATION;
+    }
+    mPid = pid;
+    mUid = uid;
+    mTimestamp = timestamp;
+    for (size_t i = 0; i < propCount; ++i) {
+        Prop *prop = allocateProp();
+        if (prop == nullptr) return NO_MEMORY;
+        if (prop->readFromByteString(&read, readend) != NO_ERROR) {
+            ALOGD("%s: cannot read prop %zu", __func__, i);
+            return INVALID_OPERATION;
+        }
+    }
+    return NO_ERROR;
+}
+
+status_t MediaAnalyticsItem::Prop::writeToParcel(Parcel *data) const
+{
    switch (mType) {
    case kTypeInt32:
-       data->writeInt32(u.int32Value);
-       break;
+       return data->writeCString(mName)
+               ?: data->writeInt32(mType)
+               ?: data->writeInt32(u.int32Value);
    case kTypeInt64:
-       data->writeInt64(u.int64Value);
-       break;
+       return data->writeCString(mName)
+               ?: data->writeInt32(mType)
+               ?: data->writeInt64(u.int64Value);
    case kTypeDouble:
-       data->writeDouble(u.doubleValue);
-       break;
+       return data->writeCString(mName)
+               ?: data->writeInt32(mType)
+               ?: data->writeDouble(u.doubleValue);
    case kTypeRate:
-       data->writeInt64(u.rate.count);
-       data->writeInt64(u.rate.duration);
-       break;
+       return data->writeCString(mName)
+               ?: data->writeInt32(mType)
+               ?: data->writeInt64(u.rate.first)
+               ?: data->writeInt64(u.rate.second);
    case kTypeCString:
-       data->writeCString(u.CStringValue);
-       break;
+       return data->writeCString(mName)
+               ?: data->writeInt32(mType)
+               ?: data->writeCString(u.CStringValue);
    default:
        ALOGE("%s: found bad type: %d, name %s", __func__, mType, mName);
-       break;
+       return BAD_VALUE;
    }
 }
 
-void MediaAnalyticsItem::Prop::toString(char *buffer, size_t length) const {
+status_t MediaAnalyticsItem::Prop::readFromParcel(const Parcel& data)
+{
+    const char *key = data.readCString();
+    if (key == nullptr) return BAD_VALUE;
+    int32_t type;
+    status_t status = data.readInt32(&type);
+    if (status != NO_ERROR) return status;
+    switch (type) {
+    case kTypeInt32:
+        status = data.readInt32(&u.int32Value);
+        break;
+    case kTypeInt64:
+        status = data.readInt64(&u.int64Value);
+        break;
+    case kTypeDouble:
+        status = data.readDouble(&u.doubleValue);
+        break;
+    case kTypeCString: {
+        const char *s = data.readCString();
+        if (s == nullptr) return BAD_VALUE;
+        set(s);
+        break;
+        }
+    case kTypeRate: {
+        std::pair<int64_t, int64_t> rate;
+        status = data.readInt64(&rate.first)
+                ?: data.readInt64(&rate.second);
+        if (status == NO_ERROR) {
+            set(rate);
+        }
+        break;
+        }
+    default:
+        ALOGE("%s: reading bad item type: %d", __func__, mType);
+        return BAD_VALUE;
+    }
+    if (status == NO_ERROR) {
+        setName(key);
+        mType = (Type)type;
+    }
+    return status;
+}
+
+void MediaAnalyticsItem::Prop::toString(char *buffer, size_t length) const
+{
     switch (mType) {
     case kTypeInt32:
         snprintf(buffer, length, "%s=%d:", mName, u.int32Value);
@@ -839,7 +866,7 @@
         break;
     case MediaAnalyticsItem::kTypeRate:
         snprintf(buffer, length, "%s=%lld/%lld:",
-                mName, (long long)u.rate.count, (long long)u.rate.duration);
+                mName, (long long)u.rate.first, (long long)u.rate.second);
         break;
     case MediaAnalyticsItem::kTypeCString:
         // TODO sanitize string for ':' '='
@@ -852,5 +879,168 @@
     }
 }
 
-} // namespace android
+size_t MediaAnalyticsItem::Prop::getByteStringSize() const
+{
+    const size_t header =
+        sizeof(uint16_t)      // length
+        + sizeof(uint8_t)     // type
+        + strlen(mName) + 1;  // mName + 0 termination
+    size_t payload = 0;
+    switch (mType) {
+    case MediaAnalyticsItem::kTypeInt32:
+        payload = sizeof(u.int32Value);
+        break;
+    case MediaAnalyticsItem::kTypeInt64:
+        payload = sizeof(u.int64Value);
+        break;
+    case MediaAnalyticsItem::kTypeDouble:
+        payload = sizeof(u.doubleValue);
+        break;
+    case MediaAnalyticsItem::kTypeRate:
+        payload = sizeof(u.rate.first) + sizeof(u.rate.second);
+        break;
+    case MediaAnalyticsItem::kTypeCString:
+        payload = strlen(u.CStringValue) + 1;
+        break;
+    default:
+        ALOGE("%s: found bad prop type: %d, name %s",
+                __func__, mType, mName); // no payload computed
+        break;
+    }
+    return header + payload;
+}
 
+// TODO: fold into a template later.
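+// Note: like the Parcel writers above, these helpers chain writes with the
+// GCC/Clang binary conditional operator (a ?: b), which short-circuits on the
+// first non-zero (error) status and otherwise evaluates the next write.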
+status_t MediaAnalyticsItem::writeToByteString(
+        const char *name, int32_t value, char **bufferpptr, char *bufferptrmax)
+{
+    const size_t len = 2 + 1 + strlen(name) + 1 + sizeof(value);
+    if (len > UINT16_MAX) return BAD_VALUE;
+    return insert((uint16_t)len, bufferpptr, bufferptrmax)
+            ?: insert((uint8_t)kTypeInt32, bufferpptr, bufferptrmax)
+            ?: insert(name, bufferpptr, bufferptrmax)
+            ?: insert(value, bufferpptr, bufferptrmax);
+}
+
+status_t MediaAnalyticsItem::writeToByteString(
+        const char *name, int64_t value, char **bufferpptr, char *bufferptrmax)
+{
+    const size_t len = 2 + 1 + strlen(name) + 1 + sizeof(value);
+    if (len > UINT16_MAX) return BAD_VALUE;
+    return insert((uint16_t)len, bufferpptr, bufferptrmax)
+            ?: insert((uint8_t)kTypeInt64, bufferpptr, bufferptrmax)
+            ?: insert(name, bufferpptr, bufferptrmax)
+            ?: insert(value, bufferpptr, bufferptrmax);
+}
+
+status_t MediaAnalyticsItem::writeToByteString(
+        const char *name, double value, char **bufferpptr, char *bufferptrmax)
+{
+    const size_t len = 2 + 1 + strlen(name) + 1 + sizeof(value);
+    if (len > UINT16_MAX) return BAD_VALUE;
+    return insert((uint16_t)len, bufferpptr, bufferptrmax)
+            ?: insert((uint8_t)kTypeDouble, bufferpptr, bufferptrmax)
+            ?: insert(name, bufferpptr, bufferptrmax)
+            ?: insert(value, bufferpptr, bufferptrmax);
+}
+
+status_t MediaAnalyticsItem::writeToByteString(
+        const char *name, const std::pair<int64_t, int64_t> &value, char **bufferpptr, char *bufferptrmax)
+{
+    const size_t len = 2 + 1 + strlen(name) + 1 + sizeof(value.first) + sizeof(value.second);
+    if (len > UINT16_MAX) return BAD_VALUE;
+    return insert((uint16_t)len, bufferpptr, bufferptrmax)
+            ?: insert((uint8_t)kTypeRate, bufferpptr, bufferptrmax)
+            ?: insert(name, bufferpptr, bufferptrmax)
+            ?: insert(value.first, bufferpptr, bufferptrmax)
+            ?: insert(value.second, bufferpptr, bufferptrmax);
+}
+
+status_t MediaAnalyticsItem::writeToByteString(
+        const char *name, char * const &value, char **bufferpptr, char *bufferptrmax)
+{
+    const size_t len = 2 + 1 + strlen(name) + 1 + strlen(value) + 1;
+    if (len > UINT16_MAX) return BAD_VALUE;
+    return insert((uint16_t)len, bufferpptr, bufferptrmax)
+            ?: insert((uint8_t)kTypeCString, bufferpptr, bufferptrmax)
+            ?: insert(name, bufferpptr, bufferptrmax)
+            ?: insert(value, bufferpptr, bufferptrmax);
+}
+
+status_t MediaAnalyticsItem::writeToByteString(
+        const char *name, const none_t &, char **bufferpptr, char *bufferptrmax)
+{
+    const size_t len = 2 + 1 + strlen(name) + 1;
+    if (len > UINT16_MAX) return BAD_VALUE;
+    return insert((uint16_t)len, bufferpptr, bufferptrmax)
+            ?: insert((uint8_t)kTypeNone, bufferpptr, bufferptrmax)
+            ?: insert(name, bufferpptr, bufferptrmax);
+}
+
+status_t MediaAnalyticsItem::Prop::writeToByteString(
+        char **bufferpptr, char *bufferptrmax) const
+{
+    switch (mType) {
+    case kTypeInt32:
+        return MediaAnalyticsItem::writeToByteString(mName, u.int32Value, bufferpptr, bufferptrmax);
+    case kTypeInt64:
+        return MediaAnalyticsItem::writeToByteString(mName, u.int64Value, bufferpptr, bufferptrmax);
+    case kTypeDouble:
+        return MediaAnalyticsItem::writeToByteString(mName, u.doubleValue, bufferpptr, bufferptrmax);
+    case kTypeRate:
+        return MediaAnalyticsItem::writeToByteString(mName, u.rate, bufferpptr, bufferptrmax);
+    case kTypeCString:
+        return MediaAnalyticsItem::writeToByteString(mName, u.CStringValue, bufferpptr, bufferptrmax);
+    case kTypeNone:
+        return MediaAnalyticsItem::writeToByteString(mName, none_t{}, bufferpptr, bufferptrmax);
+    default:
+        ALOGE("%s: found bad prop type: %d, name %s",
+                __func__, mType, mName);  // no payload sent
+        return BAD_VALUE;
+    }
+}
+
+status_t MediaAnalyticsItem::Prop::readFromByteString(
+        const char **bufferpptr, const char *bufferptrmax)
+{
+    uint16_t len;
+    char *name;
+    uint8_t type;
+    status_t status = extract(&len, bufferpptr, bufferptrmax)
+            ?: extract(&type, bufferpptr, bufferptrmax)
+            ?: extract(&name, bufferpptr, bufferptrmax);
+    if (status != NO_ERROR) return status;
+    if (mName != nullptr) {
+        free(mName);
+    }
+    mName = name;
+    if (mType == kTypeCString) {
+        free(u.CStringValue);
+        u.CStringValue = nullptr;
+    }
+    mType = (Type)type;
+    switch (mType) {
+    case kTypeInt32:
+        return extract(&u.int32Value, bufferpptr, bufferptrmax);
+    case kTypeInt64:
+        return extract(&u.int64Value, bufferpptr, bufferptrmax);
+    case kTypeDouble:
+        return extract(&u.doubleValue, bufferpptr, bufferptrmax);
+    case kTypeRate:
+        return extract(&u.rate.first, bufferpptr, bufferptrmax)
+                ?: extract(&u.rate.second, bufferpptr, bufferptrmax);
+    case kTypeCString:
+        status = extract(&u.CStringValue, bufferpptr, bufferptrmax);
+        if (status != NO_ERROR) mType = kTypeNone;
+        return status;
+    case kTypeNone:
+        return NO_ERROR;
+    default:
+        ALOGE("%s: found bad prop type: %d, name %s",
+                __func__, mType, mName);  // no payload consumed
+        mType = kTypeNone;
+        return BAD_VALUE;
+    }
+}
+
+} // namespace android
diff --git a/media/libmediametrics/MediaMetrics.cpp b/media/libmediametrics/MediaMetrics.cpp
index 360ae0c..cf268e0 100644
--- a/media/libmediametrics/MediaMetrics.cpp
+++ b/media/libmediametrics/MediaMetrics.cpp
@@ -195,6 +195,6 @@
 bool mediametrics_getAttributes(mediametrics_handle_t handle, char **buffer, size_t *length) {
     android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
     if (item == NULL) return false;
-    return item->dumpAttributes(buffer, length);
+    return item->writeToByteString(buffer, length) == android::NO_ERROR;
 
 }
diff --git a/media/libmediametrics/include/MediaAnalyticsItem.h b/media/libmediametrics/include/MediaAnalyticsItem.h
index f0deaaf..5558211 100644
--- a/media/libmediametrics/include/MediaAnalyticsItem.h
+++ b/media/libmediametrics/include/MediaAnalyticsItem.h
@@ -19,6 +19,7 @@
 
 #include "MediaMetrics.h"
 
+#include <algorithm>
 #include <string>
 #include <sys/types.h>
 
@@ -34,8 +35,44 @@
 class IMediaAnalyticsService;
 class Parcel;
 
-// the class interface
-//
+/*
+ * Media Metrics
+ * Byte String format for communication of MediaAnalyticsItem.
+ *
+ * .... begin of item
+ * .... begin of header
+ * (uint32) length: including the length field itself
+ * (uint32) header length, including header_length and length fields.
+ * (uint16) version: 0
+ * (uint16) key length, including zero termination
+ * (int8)+ key string, including 0 termination
+ * (int32) pid
+ * (int32) uid
+ * (int64) timestamp
+ * .... end of header
+ * .... begin body
+ * (uint32) properties
+ * #properties of the following:
+ *     (uint16) property_length, including property_length field itself
+ *     (uint8) type of property
+ *     (int8)+ key string, including 0 termination
+ *      based on type of property (above), one of:
+ *       (int32)
+ *       (int64)
+ *       (double)
+ *       (int8)+ for cstring, including 0 termination
+ *       (int64, int64) for rate
+ * .... end body
+ * .... end of item
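+ *
+ * For example (illustrative only), an int32 property "width" with value 1920
+ * would be serialized as:
+ *     (uint16) 13          property_length: 2 + 1 + strlen("width") + 1 + 4
+ *     (uint8)  kTypeInt32
+ *     (int8)+  "width\0"
+ *     (int32)  1920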
+ */
+
+/**
+ * Media Metrics MediaAnalyticsItem
+ *
+ * A mutable item representing an event or record that will be
+ * logged with the Media Metrics service.
+ *
+ */
 
 class MediaAnalyticsItem {
     friend class MediaMetricsJNI;           // TODO: remove this access
@@ -52,21 +89,9 @@
                 kTypeRate = 5,
             };
 
-    // Key: the record descriminator
-    // values for the record discriminator
-    // values can be "component/component"
-    // basic values: "video", "audio", "drm"
-    // XXX: need to better define the format
-    using Key = std::string;
     static constexpr const char * const kKeyNone = "none";
     static constexpr const char * const kKeyAny = "any";
 
-        // Attr: names for attributes within a record
-        // format "prop1" or "prop/subprop"
-        // XXX: need to better define the format
-        typedef const char *Attr;
-
-
         enum {
             PROTO_V0 = 0,
             PROTO_FIRST = PROTO_V0,
@@ -78,11 +103,36 @@
     template <typename T>
     explicit MediaAnalyticsItem(T key)
         : mKey(key) { }
+    MediaAnalyticsItem() = default;
+
     MediaAnalyticsItem(const MediaAnalyticsItem&) = delete;
     MediaAnalyticsItem &operator=(const MediaAnalyticsItem&) = delete;
 
-        static MediaAnalyticsItem* create(Key key);
-        static MediaAnalyticsItem* create();
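+    // Equality ignores property order: each property in this item is matched
+    // by name against the other item.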
+    bool operator==(const MediaAnalyticsItem& other) const {
+        if (mPropCount != other.mPropCount
+            || mPid != other.mPid
+            || mUid != other.mUid
+            || mPkgName != other.mPkgName
+            || mPkgVersionCode != other.mPkgVersionCode
+            || mKey != other.mKey
+            || mTimestamp != other.mTimestamp) return false;
+         for (size_t i = 0; i < mPropCount; ++i) {
+             Prop *p = other.findProp(mProps[i].getName());
+             if (p == nullptr || mProps[i] != *p) return false;
+         }
+         return true;
+    }
+    bool operator!=(const MediaAnalyticsItem& other) const {
+        return !(*this == other);
+    }
+
+    template <typename T>
+    static MediaAnalyticsItem* create(T key) {
+        return new MediaAnalyticsItem(key);
+    }
+    static MediaAnalyticsItem* create() {
+        return new MediaAnalyticsItem();
+    }
 
         static MediaAnalyticsItem* convert(mediametrics_handle_t);
         static mediametrics_handle_t convert(MediaAnalyticsItem *);
@@ -94,13 +144,14 @@
         void clear();
         MediaAnalyticsItem *dup();
 
-        // set the key discriminator for the record.
-        // most often initialized as part of the constructor
-        MediaAnalyticsItem &setKey(MediaAnalyticsItem::Key);
-        const MediaAnalyticsItem::Key& getKey() const { return mKey; }
+    MediaAnalyticsItem &setKey(const char *key) {
+        mKey = key;
+        return *this;
+    }
+    const std::string& getKey() const { return mKey; }
 
-        // # of attributes in the record
-        int32_t count() const;
+    // # of properties in the record
+    size_t count() const { return mPropCount; }
 
     template<typename S, typename T>
     MediaAnalyticsItem &set(S key, T value) {
@@ -109,19 +160,19 @@
     }
 
     // set values appropriately
-    MediaAnalyticsItem &setInt32(Attr key, int32_t value) {
+    MediaAnalyticsItem &setInt32(const char *key, int32_t value) {
         return set(key, value);
     }
-    MediaAnalyticsItem &setInt64(Attr key, int64_t value) {
+    MediaAnalyticsItem &setInt64(const char *key, int64_t value) {
         return set(key, value);
     }
-    MediaAnalyticsItem &setDouble(Attr key, double value) {
+    MediaAnalyticsItem &setDouble(const char *key, double value) {
         return set(key, value);
     }
-    MediaAnalyticsItem &setRate(Attr key, int64_t count, int64_t duration) {
+    MediaAnalyticsItem &setRate(const char *key, int64_t count, int64_t duration) {
         return set(key, std::make_pair(count, duration));
     }
-    MediaAnalyticsItem &setCString(Attr key, const char *value) {
+    MediaAnalyticsItem &setCString(const char *key, const char *value) {
         return set(key, value);
     }
 
@@ -133,16 +184,16 @@
         return *this;
     }
 
-    MediaAnalyticsItem &addInt32(Attr key, int32_t value) {
+    MediaAnalyticsItem &addInt32(const char *key, int32_t value) {
         return add(key, value);
     }
-    MediaAnalyticsItem &addInt64(Attr key, int64_t value) {
+    MediaAnalyticsItem &addInt64(const char *key, int64_t value) {
         return add(key, value);
     }
-    MediaAnalyticsItem &addDouble(Attr key, double value) {
+    MediaAnalyticsItem &addDouble(const char *key, double value) {
         return add(key, value);
     }
-    MediaAnalyticsItem &addRate(Attr key, int64_t count, int64_t duration) {
+    MediaAnalyticsItem &addRate(const char *key, int64_t count, int64_t duration) {
         return add(key, std::make_pair(count, duration));
     }
 
@@ -155,16 +206,16 @@
         return prop != nullptr && prop->get(value);
     }
 
-    bool getInt32(Attr key, int32_t *value) const {
+    bool getInt32(const char *key, int32_t *value) const {
         return get(key, value);
     }
-    bool getInt64(Attr key, int64_t *value) const {
+    bool getInt64(const char *key, int64_t *value) const {
         return get(key, value);
     }
-    bool getDouble(Attr key, double *value) const {
+    bool getDouble(const char *key, double *value) const {
         return get(key, value);
     }
-    bool getRate(Attr key, int64_t *count, int64_t *duration, double *rate) const {
+    bool getRate(const char *key, int64_t *count, int64_t *duration, double *rate) const {
         std::pair<int64_t, int64_t> value;
         if (!get(key, &value)) return false;
         if (count != nullptr) *count = value.first;
@@ -179,24 +230,29 @@
         return true;
     }
     // Caller owns the returned string
-    bool getCString(Attr key, char **value) const {
-        return get(key, value);
+    bool getCString(const char *key, char **value) const {
+        const char *cs;
+        if (get(key, &cs)) {
+            *value = cs != nullptr ? strdup(cs) : nullptr;
+            return true;
+        }
+        return false;
     }
-    bool getString(Attr key, std::string *value) const {
+    bool getString(const char *key, std::string *value) const {
         return get(key, value);
     }
 
         // Deliver the item to MediaMetrics
         bool selfrecord();
 
-        // remove indicated attributes and their values
-        // filterNot() could also be called keepOnly()
-        // return value is # attributes removed
-        // XXX: perhaps 'remove' instead of 'filter'
-        // XXX: filterNot would become 'keep'
-        int32_t filter(int count, Attr attrs[]);
-        int32_t filterNot(int count, Attr attrs[]);
-        int32_t filter(Attr attr);
+    // remove indicated attributes and their values
+    // filterNot() could also be called keepOnly()
+    // return value is # attributes removed
+    // XXX: perhaps 'remove' instead of 'filter'
+    // XXX: filterNot would become 'keep'
+    size_t filter(size_t count, const char *attrs[]);
+    size_t filterNot(size_t count, const char *attrs[]);
+    size_t filter(const char *attr) { return filter(1, &attr); }
 
         // below here are used on server side or to talk to server
         // clients need not worry about these.
@@ -218,12 +274,26 @@
         MediaAnalyticsItem &setPkgVersionCode(int64_t);
         int64_t getPkgVersionCode() const;
 
-        // our serialization code for binder calls
-        int32_t writeToParcel(Parcel *);
-        int32_t readFromParcel(const Parcel&);
+    // our serialization code for binder calls
+    status_t writeToParcel(Parcel *) const;
+    status_t readFromParcel(const Parcel&);
 
-        // supports the stable interface
-        bool dumpAttributes(char **pbuffer, size_t *plength);
+    status_t writeToByteString(char **bufferptr, size_t *length) const;
+    status_t readFromByteString(const char *bufferptr, size_t length);
+
+    static status_t writeToByteString(
+            const char *name, int32_t value, char **bufferpptr, char *bufferptrmax);
+    static status_t writeToByteString(
+            const char *name, int64_t value, char **bufferpptr, char *bufferptrmax);
+    static status_t writeToByteString(
+            const char *name, double value, char **bufferpptr, char *bufferptrmax);
+    static status_t writeToByteString(
+            const char *name, const std::pair<int64_t, int64_t> &value, char **bufferpptr, char *bufferptrmax);
+    static status_t writeToByteString(
+            const char *name, char * const &value, char **bufferpptr, char *bufferptrmax);
+    struct none_t {}; // for kTypeNone
+    static status_t writeToByteString(
+            const char *name, const none_t &, char **bufferpptr, char *bufferptrmax);
 
         std::string toString() const;
         std::string toString(int version) const;
@@ -233,11 +303,6 @@
         // are we collecting analytics data
         static bool isEnabled();
 
-    private:
-        // handle Parcel version 0
-        int32_t writeToParcel0(Parcel *);
-        int32_t readFromParcel0(const Parcel&);
-
     protected:
 
         // merge fields from arg into this
@@ -246,18 +311,32 @@
         // caller continues to own 'incoming'
         bool merge(MediaAnalyticsItem *incoming);
 
+private:
+    // handle Parcel version 0
+    int32_t writeToParcel0(Parcel *) const;
+    int32_t readFromParcel0(const Parcel&);
+
     // enabled 1, disabled 0
     static constexpr const char * const EnabledProperty = "media.metrics.enabled";
     static constexpr const char * const EnabledPropertyPersist = "persist.media.metrics.enabled";
     static const int EnabledProperty_default = 1;
 
-    private:
-
     // let's reuse a binder connection
     static sp<IMediaAnalyticsService> sAnalyticsService;
     static sp<IMediaAnalyticsService> getInstance();
     static void dropInstance();
 
+    // checks equality even with nullptr.
+    static bool stringEquals(const char *a, const char *b) {
+        if (a == nullptr) {
+            return b == nullptr;
+        } else {
+            return b != nullptr && strcmp(a, b) == 0;
+        }
+    }
+
+public:
+
     class Prop {
     friend class MediaMetricsJNI;           // TODO: remove this access
     public:
@@ -271,7 +350,6 @@
             } else {
                 mName = nullptr;
             }
-            mNameLen = other.mNameLen;
             mType = other.mType;
             switch (mType) {
             case kTypeInt32:
@@ -287,7 +365,7 @@
                 u.CStringValue = strdup(other.u.CStringValue);
                 break;
             case kTypeRate:
-                u.rate = {other.u.rate.count, other.u.rate.duration};
+                u.rate = other.u.rate;
                 break;
             case kTypeNone:
                 break;
@@ -297,11 +375,32 @@
             }
             return *this;
         }
+        bool operator==(const Prop& other) const {
+            if (!stringEquals(mName, other.mName)
+                    || mType != other.mType) return false;
+            switch (mType) {
+            case kTypeInt32:
+                return u.int32Value == other.u.int32Value;
+            case kTypeInt64:
+                return u.int64Value == other.u.int64Value;
+            case kTypeDouble:
+                return u.doubleValue == other.u.doubleValue;
+            case kTypeCString:
+                return stringEquals(u.CStringValue, other.u.CStringValue);
+            case kTypeRate:
+                return u.rate == other.u.rate;
+            case kTypeNone:
+            default:
+                return true;
+            }
+        }
+        bool operator!=(const Prop& other) const {
+            return !(*this == other);
+        }
 
         void clear() {
             free(mName);
             mName = nullptr;
-            mNameLen = 0;
             clearValue();
         }
         void clearValue() {
@@ -322,29 +421,19 @@
 
         void swap(Prop& other) {
             std::swap(mName, other.mName);
-            std::swap(mNameLen, other.mNameLen);
             std::swap(mType, other.mType);
             std::swap(u, other.u);
         }
 
-        void setName(const char *name, size_t len) {
+        void setName(const char *name) {
             free(mName);
             if (name != nullptr) {
-                mName = (char *)malloc(len + 1);
-                mNameLen = len;
-                strncpy(mName, name, len);
-                mName[len] = 0;
+                mName = strdup(name);
             } else {
                 mName = nullptr;
-                mNameLen = 0;
             }
         }
 
-        bool isNamed(const char *name, size_t len) const {
-            return len == mNameLen && memcmp(name, mName, len) == 0;
-        }
-
-        // TODO: remove duplicate but different definition
         bool isNamed(const char *name) const {
             return strcmp(name, mName) == 0;
         }
@@ -369,9 +458,9 @@
            return true;
         }
         template <>
-        bool get(char** value) const {
+        bool get(const char** value) const {
             if (mType != kTypeCString) return false;
-            if (value != nullptr) *value = strdup(u.CStringValue);
+            if (value != nullptr) *value = u.CStringValue;
             return true;
         }
         template <>
@@ -384,8 +473,7 @@
         bool get(std::pair<int64_t, int64_t> *value) const {
            if (mType != kTypeRate) return false;
            if (value != nullptr) {
-               value->first = u.rate.count;
-               value->second = u.rate.duration;
+               *value = u.rate;
            }
            return true;
         }
@@ -416,7 +504,13 @@
             if (value == nullptr) {
                 u.CStringValue = nullptr;
             } else {
-                u.CStringValue = strdup(value);
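+                // Cap the stored string at UINT16_MAX - 1 bytes; the
+                // byte-string property length is a uint16_t.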
+                size_t len = strlen(value);
+                if (len > UINT16_MAX - 1) {
+                    len = UINT16_MAX - 1;
+                }
+                u.CStringValue = (char *)malloc(len + 1);
+                strncpy(u.CStringValue, value, len);
+                u.CStringValue[len] = 0;
             }
         }
         template <>
@@ -456,33 +550,79 @@
         template <>
         void add(const std::pair<int64_t, int64_t>& value) {
             if (mType == kTypeRate) {
-                u.rate.count += value.first;
-                u.rate.duration += value.second;
+                u.rate.first += value.first;
+                u.rate.second += value.second;
             } else {
                 mType = kTypeRate;
-                u.rate = {value.first, value.second};
+                u.rate = value;
             }
         }
 
-        void writeToParcel(Parcel *data) const;
+        status_t writeToParcel(Parcel *data) const;
+        status_t readFromParcel(const Parcel& data);
         void toString(char *buffer, size_t length) const;
+        size_t getByteStringSize() const;
+        status_t writeToByteString(char **bufferpptr, char *bufferptrmax) const;
+        status_t readFromByteString(const char **bufferpptr, const char *bufferptrmax);
 
-    // TODO: make private
+    // TODO: make private (and consider converting to std::variant)
     // private:
         char *mName = nullptr;
-        size_t mNameLen = 0;    // the strlen(), doesn't include the null
         Type mType = kTypeNone;
-        union {
+        union u__ {
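+            // Moves are done with a raw memcpy and the source is zeroed so
+            // that ownership of CStringValue transfers without a double free.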
+            u__() { zero(); }
+            u__(u__ &&other) {
+                *this = std::move(other);
+            }
+            u__& operator=(u__ &&other) {
+                memcpy(this, &other, sizeof(*this));
+                other.zero();
+                return *this;
+            }
+            void zero() { memset(this, 0, sizeof(*this)); }
+
             int32_t int32Value;
             int64_t int64Value;
             double doubleValue;
             char *CStringValue;
-            struct { int64_t count, duration; } rate;
+            std::pair<int64_t, int64_t> rate;
         } u;
     };
 
-    size_t findPropIndex(const char *name, size_t len) const;
+    class iterator {
+    public:
+        iterator(size_t pos, const MediaAnalyticsItem &_item)
+            : i(std::min(pos, _item.count()))
+            , item(_item) { }
+        iterator &operator++() {
+            i = std::min(i + 1, item.count());
+            return *this;
+        }
+        bool operator!=(const iterator &other) const {
+            return i != other.i;
+        }
+        Prop &operator*() const {
+            return item.mProps[i];
+        }
+
+    private:
+        size_t i;
+        const MediaAnalyticsItem &item;
+    };
+
+    iterator begin() const {
+        return iterator(0, *this);
+    }
+    iterator end() const {
+        return iterator(SIZE_MAX, *this);
+    }
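+    // Illustrative use: for (const auto &prop : item) visits each Prop.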
+
+private:
+
+    // TODO: make prop management class
+    size_t findPropIndex(const char *name) const;
     Prop *findProp(const char *name) const;
+    Prop *allocateProp();
 
         enum {
             kGrowProps = 10
@@ -490,6 +630,7 @@
         bool growProps(int increment = kGrowProps);
         Prop *allocateProp(const char *name);
         bool removeProp(const char *name);
+    Prop *allocateProp(const std::string& name) { return allocateProp(name.c_str()); }
 
         size_t mPropCount = 0;
         size_t mPropSize = 0;
@@ -499,7 +640,7 @@
     uid_t         mUid = -1;
     std::string   mPkgName;
     int64_t       mPkgVersionCode = 0;
-    Key           mKey{kKeyNone};
+    std::string   mKey{kKeyNone};
     nsecs_t       mTimestamp = 0;
 };
 
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 703da4b..c61ed1b 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -32,6 +32,7 @@
 #include <cutils/atomic.h>
 #include <cutils/properties.h> // for property_get
 #include <gui/IGraphicBufferProducer.h>
+#include <mediautils/ServiceUtilities.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <system/audio.h>
@@ -44,7 +45,6 @@
 namespace android {
 
 const char* cameraPermission = "android.permission.CAMERA";
-const char* recordAudioPermission = "android.permission.RECORD_AUDIO";
 
 static bool checkPermission(const char* permissionString) {
     if (getpid() == IPCThreadState::self()->getCallingPid()) return true;
@@ -118,7 +118,16 @@
 status_t MediaRecorderClient::setAudioSource(int as)
 {
     ALOGV("setAudioSource(%d)", as);
-    if (!checkPermission(recordAudioPermission)) {
+    if (as < AUDIO_SOURCE_DEFAULT
+            || (as >= AUDIO_SOURCE_CNT && as != AUDIO_SOURCE_FM_TUNER)) {
+        ALOGE("Invalid audio source: %d", as);
+        return BAD_VALUE;
+    }
+    pid_t pid = IPCThreadState::self()->getCallingPid();
+    uid_t uid = IPCThreadState::self()->getCallingUid();
+
+    if ((as == AUDIO_SOURCE_FM_TUNER && !captureAudioOutputAllowed(pid, uid))
+            || !recordingAllowed(String16(""), pid, uid)) {
         return PERMISSION_DENIED;
     }
     Mutex::Autolock lock(mLock);
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 63681fa..954ccc9 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -232,11 +232,6 @@
 
 status_t StagefrightRecorder::setAudioSource(audio_source_t as) {
     ALOGV("setAudioSource: %d", as);
-    if (as < AUDIO_SOURCE_DEFAULT ||
-        (as >= AUDIO_SOURCE_CNT && as != AUDIO_SOURCE_FM_TUNER)) {
-        ALOGE("Invalid audio source: %d", as);
-        return BAD_VALUE;
-    }
 
     if (as == AUDIO_SOURCE_DEFAULT) {
         mAudioSource = AUDIO_SOURCE_MIC;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 95c973a..4d9872a 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -641,7 +641,7 @@
             mAnalyticsItem->setUid(mClientUid);
         }
     } else {
-        ALOGV("nothing to record (only %d fields)", mAnalyticsItem->count());
+        ALOGV("nothing to record (only %zu fields)", mAnalyticsItem->count());
     }
 }
 
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 965e5a6..ef9d253 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -2436,7 +2436,7 @@
         }
         rate = (OMX_U32)(rateFloat * 65536.0f + 0.5f);
     } else {
-        if (rateFloat > UINT_MAX) {
+        if (rateFloat > (float)UINT_MAX) {
             return BAD_VALUE;
         }
         rate = (OMX_U32)(rateFloat);
@@ -3342,6 +3342,7 @@
     { MEDIA_MIMETYPE_VIDEO_VP9, OMX_VIDEO_CodingVP9 },
     { MEDIA_MIMETYPE_VIDEO_DOLBY_VISION, OMX_VIDEO_CodingDolbyVision },
     { MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC, OMX_VIDEO_CodingImageHEIC },
+    { MEDIA_MIMETYPE_VIDEO_AV1, OMX_VIDEO_CodingAV1 },
 };
 
 static status_t GetVideoCodingTypeFromMime(
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index e504327..a48faca 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -122,6 +122,53 @@
     },
 }
 
+cc_library_shared {
+    name: "libstagefright_framecapture_utils",
+
+    srcs: [
+        "FrameCaptureLayer.cpp",
+        "FrameCaptureProcessor.cpp",
+    ],
+
+    shared_libs: [
+        "libbase",
+        "libcutils",
+        "libEGL",
+        "libGLESv1_CM",
+        "libGLESv2",
+        "libgui",
+        "liblog",
+        "libprocessgroup",
+        "libstagefright_foundation",
+        "libsync",
+        "libui",
+        "libutils",
+    ],
+
+    static_libs: [
+        "librenderengine",
+    ],
+
+    export_include_dirs: [
+        "include",
+    ],
+
+    cflags: [
+        "-Wno-multichar",
+        "-Werror",
+        "-Wno-error=deprecated-declarations",
+        "-Wall",
+    ],
+
+    sanitize: {
+        // TODO: re-enable cfi for this lib after b/139945549 is fixed
+        cfi: false,
+        misc_undefined: [
+            "unsigned-integer-overflow",
+            "signed-integer-overflow",
+        ],
+    },
+}
 cc_library {
     name: "libstagefright",
 
@@ -174,6 +221,7 @@
     ],
 
     shared_libs: [
+        "libstagefright_framecapture_utils",
         "libaudioutils",
         "libbase",
         "libbinder",
diff --git a/media/libstagefright/FrameCaptureLayer.cpp b/media/libstagefright/FrameCaptureLayer.cpp
new file mode 100644
index 0000000..29642be
--- /dev/null
+++ b/media/libstagefright/FrameCaptureLayer.cpp
@@ -0,0 +1,247 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "FrameCaptureLayer"
+
+#include <include/FrameCaptureLayer.h>
+#include <media/stagefright/FrameCaptureProcessor.h>
+#include <gui/BufferQueue.h>
+#include <gui/GLConsumer.h>
+#include <gui/IGraphicBufferConsumer.h>
+#include <gui/Surface.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaErrors.h>
+#include <renderengine/RenderEngine.h>
+#include <utils/Log.h>
+
+namespace android {
+
+static const int64_t kAcquireBufferTimeoutNs = 100000000LL;
+
+ui::Dataspace translateDataspace(ui::Dataspace dataspace) {
+    ui::Dataspace updatedDataspace = dataspace;
+    // translate legacy dataspaces to modern dataspaces
+    switch (dataspace) {
+        case ui::Dataspace::SRGB:
+            updatedDataspace = ui::Dataspace::V0_SRGB;
+            break;
+        case ui::Dataspace::SRGB_LINEAR:
+            updatedDataspace = ui::Dataspace::V0_SRGB_LINEAR;
+            break;
+        case ui::Dataspace::JFIF:
+            updatedDataspace = ui::Dataspace::V0_JFIF;
+            break;
+        case ui::Dataspace::BT601_625:
+            updatedDataspace = ui::Dataspace::V0_BT601_625;
+            break;
+        case ui::Dataspace::BT601_525:
+            updatedDataspace = ui::Dataspace::V0_BT601_525;
+            break;
+        case ui::Dataspace::BT709:
+            updatedDataspace = ui::Dataspace::V0_BT709;
+            break;
+        default:
+            break;
+    }
+
+    return updatedDataspace;
+}
+
+bool isHdrY410(const BufferItem &bi) {
+    ui::Dataspace dataspace = translateDataspace(static_cast<ui::Dataspace>(bi.mDataSpace));
+    // pixel format is HDR Y410 masquerading as RGBA_1010102
+    return (dataspace == ui::Dataspace::BT2020_ITU_PQ &&
+            bi.mGraphicBuffer->getPixelFormat() == HAL_PIXEL_FORMAT_RGBA_1010102);
+}
+
+struct FrameCaptureLayer::BufferLayer : public FrameCaptureProcessor::Layer {
+    BufferLayer(const BufferItem &bi) : mBufferItem(bi) {}
+    void getLayerSettings(
+            const Rect &sourceCrop, uint32_t textureName,
+            renderengine::LayerSettings *layerSettings) override;
+    BufferItem mBufferItem;
+};
+
+void FrameCaptureLayer::BufferLayer::getLayerSettings(
+        const Rect &sourceCrop, uint32_t textureName,
+        renderengine::LayerSettings *layerSettings) {
+    layerSettings->geometry.boundaries = sourceCrop.toFloatRect();
+    layerSettings->alpha = 1.0f;
+
+    layerSettings->sourceDataspace = translateDataspace(
+            static_cast<ui::Dataspace>(mBufferItem.mDataSpace));
+
+    // from BufferLayer
+    layerSettings->source.buffer.buffer = mBufferItem.mGraphicBuffer;
+    layerSettings->source.buffer.isOpaque = true;
+    layerSettings->source.buffer.fence = mBufferItem.mFence;
+    layerSettings->source.buffer.textureName = textureName;
+    layerSettings->source.buffer.usePremultipliedAlpha = false;
+    layerSettings->source.buffer.isY410BT2020 = isHdrY410(mBufferItem);
+
+    // Set filtering to false since the capture itself doesn't involve
+    // any scaling; the metadata retriever JNI scales the bitmap if the
+    // display size differs from the decoded size. If that scaling needs
+    // to be handled on the server side, consider enabling this based on
+    // display size vs decoded size.
+    const bool useFiltering = false;
+    layerSettings->source.buffer.useTextureFiltering = useFiltering;
+
+    float textureMatrix[16];
+    GLConsumer::computeTransformMatrix(
+            textureMatrix, mBufferItem.mGraphicBuffer,
+            mBufferItem.mCrop, mBufferItem.mTransform, useFiltering);
+
+    // Flip y-coordinates because GLConsumer expects OpenGL convention.
+    mat4 tr = mat4::translate(vec4(.5, .5, 0, 1)) * mat4::scale(vec4(1, -1, 1, 1)) *
+            mat4::translate(vec4(-.5, -.5, 0, 1));
+
+    layerSettings->source.buffer.textureTransform =
+            mat4(static_cast<const float*>(textureMatrix)) * tr;
+}
+
+status_t FrameCaptureLayer::init() {
+    if (FrameCaptureProcessor::getInstance() == nullptr) {
+        ALOGE("failed to get capture processor");
+        return ERROR_UNSUPPORTED;
+    }
+
+    // Mimic surfaceflinger's BufferQueueLayer::onFirstRef() to create a
+    // BufferQueue for the decoder output.
+    sp<IGraphicBufferProducer> producer;
+    sp<IGraphicBufferConsumer> consumer;
+
+    BufferQueue::createBufferQueue(&producer, &consumer);
+    // We don't need HW_COMPOSER usage since we're not using hwc to compose.
+    // The buffer is only used as a GL texture.
+    consumer->setConsumerUsageBits(GraphicBuffer::USAGE_HW_TEXTURE);
+    consumer->setConsumerName(String8("FrameDecoder"));
+
+    status_t err = consumer->consumerConnect(
+            new BufferQueue::ProxyConsumerListener(this), false);
+    if (NO_ERROR != err) {
+        ALOGE("Error connecting to BufferQueue: %s (%d)", strerror(-err), err);
+        return err;
+    }
+
+    mConsumer = consumer;
+    mSurface = new Surface(producer);
+
+    return OK;
+}
+
+status_t FrameCaptureLayer::capture(const ui::PixelFormat reqPixelFormat,
+        const Rect &sourceCrop, sp<GraphicBuffer> *outBuffer) {
+    ALOGV("capture: reqPixelFormat %d, crop {%d, %d, %d, %d}", reqPixelFormat,
+            sourceCrop.left, sourceCrop.top, sourceCrop.right, sourceCrop.bottom);
+
+    BufferItem bi;
+    status_t err = acquireBuffer(&bi);
+    if (err != OK) {
+        return err;
+    }
+
+    // create out buffer
+    const uint32_t usage =
+            GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN |
+            GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_TEXTURE;
+    sp<GraphicBuffer> buffer = new GraphicBuffer(
+            sourceCrop.getWidth(), sourceCrop.getHeight(),
+            static_cast<android_pixel_format>(reqPixelFormat),
+            1, usage, std::string("thumbnail"));
+
+    err = FrameCaptureProcessor::getInstance()->capture(
+            new BufferLayer(bi), sourceCrop, buffer);
+    if (err == OK) {
+        *outBuffer = buffer;
+    }
+
+    (void)releaseBuffer(bi);
+    return err;
+}
+
+FrameCaptureLayer::FrameCaptureLayer() : mFrameAvailable(false) {}
+
+void FrameCaptureLayer::onFrameAvailable(const BufferItem& /*item*/) {
+    ALOGV("onFrameAvailable");
+    Mutex::Autolock _lock(mLock);
+
+    mFrameAvailable = true;
+    mCondition.signal();
+}
+
+void FrameCaptureLayer::onBuffersReleased() {
+    ALOGV("onBuffersReleased");
+    Mutex::Autolock _lock(mLock);
+
+    uint64_t mask = 0;
+    mConsumer->getReleasedBuffers(&mask);
+    for (int i = 0; i < BufferQueue::NUM_BUFFER_SLOTS; i++) {
+        if (mask & (1ULL << i)) {
+            mSlotToBufferMap[i] = nullptr;
+        }
+    }
+}
+
+void FrameCaptureLayer::onSidebandStreamChanged() {
+    ALOGV("onSidebandStreamChanged");
+}
+
+status_t FrameCaptureLayer::acquireBuffer(BufferItem *bi) {
+    ALOGV("acquireBuffer");
+    Mutex::Autolock _lock(mLock);
+
+    if (!mFrameAvailable) {
+        // The output buffer is already released to the codec at this point.
+        // Use a small timeout of 100ms in case the buffer hasn't arrived
+        // at the consumer end of the output surface yet.
+        if (mCondition.waitRelative(mLock, kAcquireBufferTimeoutNs) != OK) {
+            ALOGE("wait for buffer timed out");
+            return TIMED_OUT;
+        }
+    }
+    mFrameAvailable = false;
+
+    status_t err = mConsumer->acquireBuffer(bi, 0);
+    if (err != OK) {
+        ALOGE("failed to acquire buffer!");
+        return err;
+    }
+
+    if (bi->mGraphicBuffer != nullptr) {
+        mSlotToBufferMap[bi->mSlot] = bi->mGraphicBuffer;
+    } else {
+        bi->mGraphicBuffer = mSlotToBufferMap[bi->mSlot];
+    }
+
+    if (bi->mGraphicBuffer == nullptr) {
+        ALOGE("acquired null buffer!");
+        return BAD_VALUE;
+    }
+    return OK;
+}
+
+status_t FrameCaptureLayer::releaseBuffer(const BufferItem &bi) {
+    ALOGV("releaseBuffer");
+    Mutex::Autolock _lock(mLock);
+
+    return mConsumer->releaseBuffer(bi.mSlot, bi.mFrameNumber,
+            EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, bi.mFence);
+}
+
+}  // namespace android
diff --git a/media/libstagefright/FrameCaptureProcessor.cpp b/media/libstagefright/FrameCaptureProcessor.cpp
new file mode 100644
index 0000000..c517e33
--- /dev/null
+++ b/media/libstagefright/FrameCaptureProcessor.cpp
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "FrameCaptureProcessor"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/FrameCaptureProcessor.h>
+#include <media/stagefright/MediaErrors.h>
+#include <renderengine/RenderEngine.h>
+#include <ui/Fence.h>
+#include <ui/PixelFormat.h>
+#include <utils/Log.h>
+
+namespace android {
+
+//static
+Mutex FrameCaptureProcessor::sLock;
+//static
+sp<FrameCaptureProcessor> FrameCaptureProcessor::sInstance;
+
+//static
+sp<FrameCaptureProcessor> FrameCaptureProcessor::getInstance() {
+    Mutex::Autolock _l(sLock);
+    if (sInstance == nullptr) {
+        sInstance = new FrameCaptureProcessor();
+        sInstance->createRenderEngine();
+    }
+    // Init only once; if it failed, nullptr will be returned afterwards.
+    return (sInstance->initCheck() == OK) ? sInstance : nullptr;
+}
+
+//static
+status_t FrameCaptureProcessor::PostAndAwaitResponse(
+        const sp<AMessage> &msg, sp<AMessage> *response) {
+    status_t err = msg->postAndAwaitResponse(response);
+
+    if (err != OK) {
+        return err;
+    }
+
+    if (!(*response)->findInt32("err", &err)) {
+        err = OK;
+    }
+
+    return err;
+}
+
+//static
+void FrameCaptureProcessor::PostReplyWithError(
+        const sp<AReplyToken> &replyID, status_t err) {
+    sp<AMessage> response = new AMessage;
+    if (err != OK) {
+        response->setInt32("err", err);
+    }
+    response->postReply(replyID);
+}
+
+FrameCaptureProcessor::FrameCaptureProcessor()
+    : mInitStatus(NO_INIT), mTextureName(0) {}
+
+FrameCaptureProcessor::~FrameCaptureProcessor() {
+    if (mLooper != nullptr) {
+        mLooper->unregisterHandler(id());
+        mLooper->stop();
+    }
+}
+
+void FrameCaptureProcessor::createRenderEngine() {
+    // this method should only be called once, immediately after ctor
+    CHECK(mInitStatus == NO_INIT);
+
+    mLooper = new ALooper();
+    mLooper->setName("capture_looper");
+    mLooper->start(); // default priority
+    mLooper->registerHandler(this);
+
+    sp<AMessage> response;
+    status_t err = PostAndAwaitResponse(new AMessage(kWhatCreate, this), &response);
+    if (err != OK) {
+        mInitStatus = ERROR_UNSUPPORTED;
+
+        mLooper->unregisterHandler(id());
+        mLooper->stop();
+        mLooper.clear();
+        return;
+    }
+
+    // only need one texture name
+    mRE->genTextures(1, &mTextureName);
+
+    mInitStatus = OK;
+}
+
+status_t FrameCaptureProcessor::capture(
+        const sp<Layer> &layer, const Rect &sourceCrop, const sp<GraphicBuffer> &buffer) {
+    sp<AMessage> msg = new AMessage(kWhatCapture, this);
+    msg->setObject("layer", layer);
+    msg->setRect("crop", sourceCrop.left, sourceCrop.top, sourceCrop.right, sourceCrop.bottom);
+    msg->setObject("buffer", buffer);
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
+status_t FrameCaptureProcessor::onCreate() {
+    mRE = renderengine::RenderEngine::create(
+            renderengine::RenderEngineCreationArgs::Builder()
+                .setPixelFormat(static_cast<int>(ui::PixelFormat::RGBA_8888))
+                .setImageCacheSize(2 /*maxFrameBufferAcquiredBuffers*/)
+                .setUseColorManagerment(true)
+                .setEnableProtectedContext(false)
+                .setPrecacheToneMapperShaderOnly(true)
+                .setContextPriority(renderengine::RenderEngine::ContextPriority::LOW)
+                .build());
+
+    if (mRE == nullptr) {
+        return ERROR_UNSUPPORTED;
+    }
+    return OK;
+}
+
+status_t FrameCaptureProcessor::onCapture(const sp<Layer> &layer,
+        const Rect &sourceCrop, const sp<GraphicBuffer> &buffer) {
+    renderengine::DisplaySettings clientCompositionDisplay;
+    std::vector<renderengine::LayerSettings> clientCompositionLayers;
+
+    clientCompositionDisplay.physicalDisplay = sourceCrop;
+    clientCompositionDisplay.clip = sourceCrop;
+
+    clientCompositionDisplay.outputDataspace = ui::Dataspace::V0_SRGB;
+    clientCompositionDisplay.maxLuminance = sDefaultMaxLumiance;
+    clientCompositionDisplay.clearRegion = Region::INVALID_REGION;
+
+    // from Layer && BufferLayer
+    renderengine::LayerSettings layerSettings;
+
+    layer->getLayerSettings(sourceCrop, mTextureName, &layerSettings);
+
+    clientCompositionLayers.push_back(layerSettings);
+
+    // Use an empty fence for the buffer fence, since we just created the buffer so
+    // there is no need for synchronization with the GPU.
+    base::unique_fd bufferFence;
+    base::unique_fd drawFence;
+    mRE->useProtectedContext(false);
+    status_t err = mRE->drawLayers(clientCompositionDisplay, clientCompositionLayers, buffer.get(),
+            /*useFramebufferCache=*/false, std::move(bufferFence), &drawFence);
+
+    sp<Fence> fence = new Fence(std::move(drawFence));
+
+    if (err != OK) {
+        ALOGE("drawLayers returned err %d", err);
+        return err;
+    }
+
+    err = fence->wait(500);
+    if (err != OK) {
+        ALOGW("wait for fence returned err %d", err);
+    }
+    return OK;
+}
+
+void FrameCaptureProcessor::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatCreate:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            status_t err = onCreate();
+
+            PostReplyWithError(replyID, err);
+            break;
+        }
+        case kWhatCapture:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            sp<RefBase> layerObj, bufferObj;
+            int32_t left, top, right, bottom;
+            CHECK(msg->findObject("layer", &layerObj));
+            CHECK(msg->findRect("crop", &left, &top, &right, &bottom));
+            CHECK(msg->findObject("buffer", &bufferObj));
+
+            sp<GraphicBuffer> buffer = static_cast<GraphicBuffer*>(bufferObj.get());
+            sp<Layer> layer = static_cast<Layer*>(layerObj.get());
+
+            PostReplyWithError(replyID,
+                    onCapture(layer, Rect(left, top, right, bottom), buffer));
+
+            break;
+        }
+        default:
+            TRESPASS();
+    }
+}
+
+}  // namespace android
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 03415d5..d75b317 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -18,10 +18,10 @@
 #define LOG_TAG "FrameDecoder"
 
 #include "include/FrameDecoder.h"
+#include "include/FrameCaptureLayer.h"
 #include <binder/MemoryBase.h>
 #include <binder/MemoryHeapBase.h>
 #include <gui/Surface.h>
-#include <gui/SurfaceComposerClient.h>
 #include <inttypes.h>
 #include <mediadrm/ICrypto.h>
 #include <media/IMediaSource.h>
@@ -31,6 +31,7 @@
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/foundation/ColorUtils.h>
 #include <media/stagefright/ColorConverter.h>
+#include <media/stagefright/FrameCaptureProcessor.h>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaDefs.h>
@@ -512,7 +513,7 @@
     }
 
     if (isHDR(videoFormat)) {
-        *window = initSurfaceControl();
+        *window = initSurface();
         if (*window == NULL) {
             ALOGE("Failed to init surface control for HDR, fallback to non-hdr");
         } else {
@@ -589,7 +590,7 @@
     }
 
     if (!outputFormat->findInt32("stride", &stride)) {
-        if (mSurfaceControl == NULL) {
+        if (mCaptureLayer == NULL) {
             ALOGE("format must have stride for byte buffer mode: %s",
                     outputFormat->debugString().c_str());
             return ERROR_MALFORMED;
@@ -613,7 +614,7 @@
                 0,
                 0,
                 dstBpp(),
-                mSurfaceControl != nullptr /*allocRotated*/);
+                mCaptureLayer != nullptr /*allocRotated*/);
         mFrame = static_cast<VideoFrame*>(frameMem->unsecurePointer());
 
         setFrame(frameMem);
@@ -621,8 +622,8 @@
 
     mFrame->mDurationUs = durationUs;
 
-    if (mSurfaceControl != nullptr) {
-        return captureSurfaceControl();
+    if (mCaptureLayer != nullptr) {
+        return captureSurface();
     }
 
     ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat());
@@ -655,70 +656,26 @@
     return ERROR_UNSUPPORTED;
 }
 
-sp<Surface> VideoFrameDecoder::initSurfaceControl() {
-    sp<SurfaceComposerClient> client = new SurfaceComposerClient();
-    if (client->initCheck() != NO_ERROR) {
-        ALOGE("failed to get SurfaceComposerClient");
-        return NULL;
+sp<Surface> VideoFrameDecoder::initSurface() {
+    // create the consumer listener interface, and hold an sp to it so that
+    // it lives as long as this decoder.
+    sp<FrameCaptureLayer> captureLayer = new FrameCaptureLayer();
+    if (captureLayer->init() != OK) {
+        ALOGE("failed to init capture layer");
+        return nullptr;
     }
+    mCaptureLayer = captureLayer;
 
-    // create a container layer to hold the capture layer, so that we can
-    // use full frame drop. If without the container, the crop will be set
-    // to display size.
-    sp<SurfaceControl> parent = client->createSurface(
-            String8("parent"),
-            0 /* width */, 0 /* height */,
-            PIXEL_FORMAT_RGBA_8888,
-            ISurfaceComposerClient::eFXSurfaceContainer );
-
-    if (!parent) {
-        ALOGE("failed to get surface control parent");
-        return NULL;
-    }
-
-    // create the surface with unknown size 1x1 for now, real size will
-    // be set before the capture when we have output format info.
-    sp<SurfaceControl> surfaceControl = client->createSurface(
-            String8("thumbnail"),
-            1 /* width */, 1 /* height */,
-            PIXEL_FORMAT_RGBA_8888,
-            ISurfaceComposerClient::eFXSurfaceBufferQueue,
-            parent.get());
-
-    if (!surfaceControl) {
-        ALOGE("failed to get surface control");
-        return NULL;
-    }
-
-    SurfaceComposerClient::Transaction t;
-    t.hide(parent)
-            .show(surfaceControl)
-            .apply(true);
-
-    mSurfaceControl = surfaceControl;
-    mParent = parent;
-
-    return surfaceControl->getSurface();
+    return captureLayer->getSurface();
 }
 
-status_t VideoFrameDecoder::captureSurfaceControl() {
-    // set the layer size to the output size before the capture
-    SurfaceComposerClient::Transaction()
-        .setSize(mSurfaceControl, mFrame->mWidth, mFrame->mHeight)
-        .apply(true);
-
+status_t VideoFrameDecoder::captureSurface() {
     sp<GraphicBuffer> outBuffer;
-    status_t err = ScreenshotClient::captureChildLayers(
-            mParent->getHandle(),
-            ui::Dataspace::V0_SRGB,
-            captureFormat(),
-            Rect(0, 0, mFrame->mWidth, mFrame->mHeight),
-            {},
-            1.0f /*frameScale*/,
-            &outBuffer);
+    status_t err = mCaptureLayer->capture(
+            captureFormat(), Rect(0, 0, mFrame->mWidth, mFrame->mHeight), &outBuffer);
 
     if (err != OK) {
-        ALOGE("failed to captureLayers: err %d", err);
+        ALOGE("failed to capture layer (err %d)", err);
         return err;
     }
 
@@ -917,12 +874,6 @@
     }
     converter.setSrcColorSpace(standard, range, transfer);
 
-    int32_t dstLeft, dstTop, dstRight, dstBottom;
-    dstLeft = mTilesDecoded % mGridCols * width;
-    dstTop = mTilesDecoded / mGridCols * height;
-    dstRight = dstLeft + width - 1;
-    dstBottom = dstTop + height - 1;
-
     int32_t crop_left, crop_top, crop_right, crop_bottom;
     if (!outputFormat->findRect("crop", &crop_left, &crop_top, &crop_right, &crop_bottom)) {
         crop_left = crop_top = 0;
@@ -930,15 +881,25 @@
         crop_bottom = height - 1;
     }
 
+    int32_t crop_width, crop_height;
+    crop_width = crop_right - crop_left + 1;
+    crop_height = crop_bottom - crop_top + 1;
+
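+    // Tiles are laid out in the output image using the cropped tile size, so
+    // the destination rect is derived from crop_width/crop_height.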
+    int32_t dstLeft, dstTop, dstRight, dstBottom;
+    dstLeft = mTilesDecoded % mGridCols * crop_width;
+    dstTop = mTilesDecoded / mGridCols * crop_height;
+    dstRight = dstLeft + crop_width - 1;
+    dstBottom = dstTop + crop_height - 1;
+
     // apply crop on bottom-right
     // TODO: need to move this into the color converter itself.
     if (dstRight >= mWidth) {
-        crop_right = mWidth - dstLeft - 1;
-        dstRight = dstLeft + crop_right;
+        crop_right = crop_left + mWidth - dstLeft - 1;
+        dstRight = mWidth - 1;
     }
     if (dstBottom >= mHeight) {
-        crop_bottom = mHeight - dstTop - 1;
-        dstBottom = dstTop + crop_bottom;
+        crop_bottom = crop_top + mHeight - dstTop - 1;
+        dstBottom = mHeight - 1;
     }
 
     *done = (++mTilesDecoded >= mTargetTiles);
diff --git a/media/libstagefright/MediaClock.cpp b/media/libstagefright/MediaClock.cpp
index 4f9bc6d..24608a7 100644
--- a/media/libstagefright/MediaClock.cpp
+++ b/media/libstagefright/MediaClock.cpp
@@ -281,7 +281,7 @@
             it = mTimers.erase(it);
         } else {
             if (mPlaybackRate != 0.0
-                && (double)diffMediaUs < INT64_MAX * (double)mPlaybackRate) {
+                && (double)diffMediaUs < (double)INT64_MAX * (double)mPlaybackRate) {
                 int64_t targetRealUs = diffMediaUs / (double)mPlaybackRate;
                 if (targetRealUs < nextLapseRealUs) {
                     nextLapseRealUs = targetRealUs;
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index ac4d087..fc70091 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -1874,7 +1874,7 @@
     if (msg->findInt32("frame-rate", &fps) && fps > 0) {
         meta->setInt32(kKeyFrameRate, fps);
     } else if (msg->findFloat("frame-rate", &fpsFloat)
-            && fpsFloat >= 1 && fpsFloat <= INT32_MAX) {
+            && fpsFloat >= 1 && fpsFloat <= (float)INT32_MAX) {
         // truncate values to distinguish between e.g. 24 vs 23.976 fps
         meta->setInt32(kKeyFrameRate, (int32_t)fpsFloat);
     }
diff --git a/media/libstagefright/codecs/mp3dec/src/pv_mp3dec_fxd_op_c_equivalent.h b/media/libstagefright/codecs/mp3dec/src/pv_mp3dec_fxd_op_c_equivalent.h
index adb0dd4..f9d91b1 100644
--- a/media/libstagefright/codecs/mp3dec/src/pv_mp3dec_fxd_op_c_equivalent.h
+++ b/media/libstagefright/codecs/mp3dec/src/pv_mp3dec_fxd_op_c_equivalent.h
@@ -44,7 +44,7 @@
 #endif
 
 #include "pvmp3_audio_type_defs.h"
-#define Qfmt_31(a)   (Int32)((float)(a)*0x7FFFFFFF)
+#define Qfmt_31(a)   (Int32)((float)(a)*(float)0x7FFFFFFF)
 
 #define Qfmt15(x)   (Int16)((x)*((Int32)1<<15) + ((x)>=0?0.5F:-0.5F))
 
diff --git a/media/libstagefright/codecs/mp3dec/src/pvmp3_alias_reduction.cpp b/media/libstagefright/codecs/mp3dec/src/pvmp3_alias_reduction.cpp
index af738ba..6c8102b 100644
--- a/media/libstagefright/codecs/mp3dec/src/pvmp3_alias_reduction.cpp
+++ b/media/libstagefright/codecs/mp3dec/src/pvmp3_alias_reduction.cpp
@@ -169,7 +169,7 @@
 
     int32 i, j;
 
-    *used_freq_lines = fxp_mul32_Q32(*used_freq_lines << 16, (int32)(0x7FFFFFFF / (float)18 - 1.0f)) >> 15;
+    *used_freq_lines = fxp_mul32_Q32(*used_freq_lines << 16, (int32)((float)0x7FFFFFFF / (float)18 - 1.0f)) >> 15;
 
 
     if (gr_info->window_switching_flag &&  gr_info->block_type == 2)
diff --git a/media/libstagefright/codecs/mp3dec/src/pvmp3_dct_9.cpp b/media/libstagefright/codecs/mp3dec/src/pvmp3_dct_9.cpp
index bbb247d..9cd0e91 100644
--- a/media/libstagefright/codecs/mp3dec/src/pvmp3_dct_9.cpp
+++ b/media/libstagefright/codecs/mp3dec/src/pvmp3_dct_9.cpp
@@ -77,7 +77,7 @@
 ; Include all pre-processor statements here. Include conditional
 ; compile variables also.
 ----------------------------------------------------------------------------*/
-#define Qfmt31(a)   (int32)((a)*(0x7FFFFFFF))
+#define Qfmt31(a)   (int32)((a)*((float)0x7FFFFFFF))
 
 #define cos_pi_9    Qfmt31( 0.93969262078591f)
 #define cos_2pi_9   Qfmt31( 0.76604444311898f)
diff --git a/media/libstagefright/foundation/OpusHeader.cpp b/media/libstagefright/foundation/OpusHeader.cpp
index 513e41f..f5687e0 100644
--- a/media/libstagefright/foundation/OpusHeader.cpp
+++ b/media/libstagefright/foundation/OpusHeader.cpp
@@ -292,6 +292,10 @@
         *opusHeadSize = data_size;
         return true;
     } else if (memcmp(AOPUS_CSD_MARKER_PREFIX, data, AOPUS_CSD_MARKER_PREFIX_SIZE) == 0) {
+        if (data_size < AOPUS_UNIFIED_CSD_MINSIZE || data_size > AOPUS_UNIFIED_CSD_MAXSIZE) {
+            ALOGD("Unexpected size for unified opus csd %zu", data_size);
+            return false;
+        }
         size_t i = 0;
         bool found = false;
         while (i <= data_size - AOPUS_MARKER_SIZE - AOPUS_LENGTH_SIZE) {
diff --git a/media/libstagefright/include/FrameCaptureLayer.h b/media/libstagefright/include/FrameCaptureLayer.h
new file mode 100644
index 0000000..23fd5e5
--- /dev/null
+++ b/media/libstagefright/include/FrameCaptureLayer.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FRAME_CAPTURE_LAYER_H_
+#define FRAME_CAPTURE_LAYER_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <gui/IConsumerListener.h>
+#include <ui/GraphicTypes.h>
+#include <utils/Mutex.h>
+#include <utils/Condition.h>
+
+namespace android {
+
+class GraphicBuffer;
+class IGraphicBufferConsumer;
+class Rect;
+class Surface;
+
+/*
+ * This class is a simple BufferQueue consumer implementation to
+ * obtain a decoded buffer output from MediaCodec. The output
+ * buffer is then sent to FrameCaptureProcessor to be converted
+ * to sRGB properly.
+ */
+struct FrameCaptureLayer : public ConsumerListener {
+    FrameCaptureLayer();
+    ~FrameCaptureLayer() = default;
+
+    // ConsumerListener
+    void onFrameAvailable(const BufferItem& /*item*/) override;
+    void onBuffersReleased() override;
+    void onSidebandStreamChanged() override;
+
+    status_t init();
+
+    sp<Surface> getSurface() { return mSurface; }
+
+    status_t capture(const ui::PixelFormat reqPixelFormat,
+            const Rect &sourceCrop, sp<GraphicBuffer> *outBuffer);
+
+private:
+    struct BufferLayer;
+    // Note: do not hold any sp ref to GraphicBufferSource.
+    // GraphicBufferSource holds an sp to us; holding an sp ref back to
+    // GraphicBufferSource would create a circular dependency and neither
+    // object would be released.
+    sp<IGraphicBufferConsumer> mConsumer;
+    sp<Surface> mSurface;
+    std::map<int32_t, sp<GraphicBuffer> > mSlotToBufferMap;
+
+    Mutex mLock;
+    Condition mCondition;
+    bool mFrameAvailable GUARDED_BY(mLock);
+
+    status_t acquireBuffer(BufferItem *bi);
+    status_t releaseBuffer(const BufferItem &bi);
+
+    DISALLOW_EVIL_CONSTRUCTORS(FrameCaptureLayer);
+};
+
+}  // namespace android
+
+#endif  // FRAME_CAPTURE_LAYER_H_
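
Going by the interface above, a caller constructs the layer, calls init(), hands getSurface() to the decoder as its output surface, and finally pulls a converted buffer with capture(). A hedged sketch of that flow; the include path, crop rectangle and error handling are assumptions, not code from FrameDecoder:

    #include <media/stagefright/FrameCaptureLayer.h>  // assumed include path
    #include <gui/Surface.h>
    #include <ui/GraphicBuffer.h>
    #include <ui/Rect.h>
    #include <utils/Errors.h>

    using namespace android;

    static sp<GraphicBuffer> captureOneFrame() {
        sp<FrameCaptureLayer> layer = new FrameCaptureLayer();
        if (layer->init() != OK) {
            return nullptr;                            // consumer/processor setup failed
        }
        sp<Surface> surface = layer->getSurface();     // use as the MediaCodec output surface

        // ... decode and render one frame into |surface| ...

        sp<GraphicBuffer> outBuffer;
        Rect crop(0, 0, 1920, 1080);                   // assumed source crop
        if (layer->capture(ui::PixelFormat::RGBA_8888, crop, &outBuffer) != OK) {
            return nullptr;
        }
        return outBuffer;
    }
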
diff --git a/media/libstagefright/include/FrameDecoder.h b/media/libstagefright/include/FrameDecoder.h
index 8e42fcf..353c957 100644
--- a/media/libstagefright/include/FrameDecoder.h
+++ b/media/libstagefright/include/FrameDecoder.h
@@ -33,7 +33,6 @@
 class IMediaSource;
 class MediaCodecBuffer;
 class Surface;
-class SurfaceControl;
 class VideoFrame;
 
 struct FrameRect {
@@ -101,6 +100,7 @@
 
     DISALLOW_EVIL_CONSTRUCTORS(FrameDecoder);
 };
+struct FrameCaptureLayer;
 
 struct VideoFrameDecoder : public FrameDecoder {
     VideoFrameDecoder(
@@ -133,8 +133,7 @@
             bool *done) override;
 
 private:
-    sp<SurfaceControl> mSurfaceControl;
-    sp<SurfaceControl> mParent;
+    sp<FrameCaptureLayer> mCaptureLayer;
     VideoFrame *mFrame;
     bool mIsAvcOrHevc;
     MediaSource::ReadOptions::SeekMode mSeekMode;
@@ -142,8 +141,8 @@
     List<int64_t> mSampleDurations;
     int64_t mDefaultSampleDurationUs;
 
-    sp<Surface> initSurfaceControl();
-    status_t captureSurfaceControl();
+    sp<Surface> initSurface();
+    status_t captureSurface();
 };
 
 struct ImageDecoder : public FrameDecoder {
diff --git a/media/libstagefright/include/media/stagefright/FrameCaptureProcessor.h b/media/libstagefright/include/media/stagefright/FrameCaptureProcessor.h
new file mode 100644
index 0000000..66e5daa
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/FrameCaptureProcessor.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FRAME_CAPTURE_PROCESSOR_H_
+#define FRAME_CAPTURE_PROCESSOR_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/AHandler.h>
+
+namespace android {
+
+struct AMessage;
+class GraphicBuffer;
+class Rect;
+
+namespace renderengine {
+class RenderEngine;
+struct LayerSettings;
+}
+
+/*
+ * Process a decoded graphic buffer through RenderEngine to
+ * convert it to sRGB.
+ *
+ * This class is a singleton that holds one instance of RenderEngine
+ * and its event queue (on which the GL context runs). The RenderEngine
+ * is created upon the first getInstance().
+ */
+class FrameCaptureProcessor : public AHandler {
+
+public:
+
+    struct Layer : public RefBase {
+        virtual void getLayerSettings(
+                const Rect &sourceCrop, uint32_t textureName,
+                renderengine::LayerSettings *layerSettings) = 0;
+    };
+
+    static sp<FrameCaptureProcessor> getInstance();
+
+    status_t capture(
+            const sp<Layer> &layer,
+            const Rect &sourceCrop, const sp<GraphicBuffer> &outBuffer);
+
+protected:
+    virtual ~FrameCaptureProcessor();
+    void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    FrameCaptureProcessor();
+
+    enum {
+        kWhatCreate,
+        kWhatCapture,
+    };
+
+    static Mutex sLock;
+    static sp<FrameCaptureProcessor> sInstance GUARDED_BY(sLock);
+
+    constexpr static float sDefaultMaxLumiance = 500.0f;
+
+    status_t mInitStatus;
+    sp<ALooper> mLooper;
+    std::unique_ptr<renderengine::RenderEngine> mRE;
+    uint32_t mTextureName;
+
+    static status_t PostAndAwaitResponse(
+            const sp<AMessage> &msg, sp<AMessage> *response);
+    static void PostReplyWithError(
+            const sp<AReplyToken> &replyID, status_t err);
+
+    status_t initCheck() { return mInitStatus; }
+    void createRenderEngine();
+
+    // message handlers
+    status_t onCreate();
+    status_t onCapture(const sp<Layer> &layer,
+            const Rect &sourceCrop, const sp<GraphicBuffer> &outBuffer);
+
+    DISALLOW_EVIL_CONSTRUCTORS(FrameCaptureProcessor);
+};
+
+}  // namespace android
+
+#endif  // FRAME_CAPTURE_PROCESSOR_H_
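
The class comment above describes the integration point: clients fetch the singleton with getInstance() and implement Layer::getLayerSettings() so the processor can describe the source buffer to RenderEngine. A rough sketch under those assumptions; the LayerSettings fields are left as comments because they depend on renderengine details not shown here:

    #include <media/stagefright/FrameCaptureProcessor.h>
    #include <ui/GraphicBuffer.h>
    #include <ui/Rect.h>
    #include <utils/Errors.h>

    using namespace android;

    // Sketch only: a Layer that would describe a decoded GraphicBuffer to RenderEngine.
    struct CaptureSource : public FrameCaptureProcessor::Layer {
        void getLayerSettings(const Rect &sourceCrop, uint32_t textureName,
                              renderengine::LayerSettings *settings) override {
            // Assumption: populate |settings| (source buffer, geometry, alpha, ...)
            // from the decoded buffer owned by this object.
            (void)sourceCrop; (void)textureName; (void)settings;
        }
    };

    static status_t convertToSrgb(const sp<GraphicBuffer> &dstRgbaBuffer) {
        sp<FrameCaptureProcessor> processor = FrameCaptureProcessor::getInstance();
        if (processor == nullptr) {
            return NO_INIT;
        }
        return processor->capture(new CaptureSource(),
                                  Rect(0, 0, 1920, 1080) /* assumed crop */,
                                  dstRgbaBuffer);
    }
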
diff --git a/media/libstagefright/include/media/stagefright/MediaErrors.h b/media/libstagefright/include/media/stagefright/MediaErrors.h
index 09639e2..6f48c5d 100644
--- a/media/libstagefright/include/media/stagefright/MediaErrors.h
+++ b/media/libstagefright/include/media/stagefright/MediaErrors.h
@@ -99,7 +99,13 @@
     ERROR_CAS_DEVICE_REVOKED                 = CAS_ERROR_BASE - 9,
     ERROR_CAS_RESOURCE_BUSY                  = CAS_ERROR_BASE - 10,
     ERROR_CAS_INSUFFICIENT_OUTPUT_PROTECTION = CAS_ERROR_BASE - 11,
-    ERROR_CAS_LAST_USED_ERRORCODE            = CAS_ERROR_BASE - 11,
+    ERROR_CAS_NEED_ACTIVATION                = CAS_ERROR_BASE - 12,
+    ERROR_CAS_NEED_PAIRING                   = CAS_ERROR_BASE - 13,
+    ERROR_CAS_NO_CARD                        = CAS_ERROR_BASE - 14,
+    ERROR_CAS_CARD_MUTE                      = CAS_ERROR_BASE - 15,
+    ERROR_CAS_CARD_INVALID                   = CAS_ERROR_BASE - 16,
+    ERROR_CAS_BLACKOUT                       = CAS_ERROR_BASE - 17,
+    ERROR_CAS_LAST_USED_ERRORCODE            = CAS_ERROR_BASE - 17,
 
     ERROR_CAS_VENDOR_MAX                     = CAS_ERROR_BASE - 500,
     ERROR_CAS_VENDOR_MIN                     = CAS_ERROR_BASE - 999,
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index aac6d71..032c5e2 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -90,7 +90,6 @@
         "libutils",
         "libcutils",
         "libnativewindow",
-        "libbinder",
         "libhidlbase",
         "libgui",
         "libui",
diff --git a/media/ndk/include/media/NdkImage.h b/media/ndk/include/media/NdkImage.h
index 3e60de0..62b8624 100644
--- a/media/ndk/include/media/NdkImage.h
+++ b/media/ndk/include/media/NdkImage.h
@@ -570,6 +570,8 @@
  * return {@link AMEDIA_ERROR_INVALID_OBJECT}. Application still needs to call this method on those
  * {@link AImage} objects to fully delete the {@link AImage} object from memory.</p>
  *
+ * Available since API level 24.
+ *
  * @param image The {@link AImage} to be deleted.
  */
 void AImage_delete(AImage* image) __INTRODUCED_IN(24);
@@ -577,6 +579,8 @@
 /**
  * Query the width of the input {@link AImage}.
  *
+ * Available since API level 24.
+ *
  * @param image the {@link AImage} of interest.
  * @param width the width of the image will be filled here if the method call succeeds.
  *
@@ -591,6 +595,8 @@
 /**
  * Query the height of the input {@link AImage}.
  *
+ * Available since API level 24.
+ *
  * @param image the {@link AImage} of interest.
  * @param height the height of the image will be filled here if the method call succeeds.
  *
@@ -607,6 +613,8 @@
  *
  * <p>The format value will be one of AIMAGE_FORMAT_* enum value.</p>
  *
+ * Available since API level 24.
+ *
  * @param image the {@link AImage} of interest.
  * @param format the format of the image will be filled here if the method call succeeds.
  *
@@ -624,6 +632,8 @@
  * <p>The crop rectangle specifies the region of valid pixels in the image, using coordinates in the
  * largest-resolution plane.</p>
  *
+ * Available since API level 24.
+ *
  * @param image the {@link AImage} of interest.
  * @param rect the cropped rectangle of the image will be filled here if the method call succeeds.
  *
@@ -648,6 +658,8 @@
  * {@link ACameraCaptureSession_captureCallbacks#onCaptureCompleted} callback.
  * </p>
  *
+ * Available since API level 24.
+ *
  * @param image the {@link AImage} of interest.
  * @param timestampNs the timestamp of the image will be filled here if the method call succeeds.
  *
@@ -665,6 +677,8 @@
  * <p>The number of plane of an {@link AImage} is determined by its format, which can be queried by
  * {@link AImage_getFormat} method.</p>
  *
+ * Available since API level 24.
+ *
  * @param image the {@link AImage} of interest.
  * @param numPlanes the number of planes of the image will be filled here if the method call
  *         succeeds.
@@ -687,6 +701,8 @@
  * being returned.
  * For formats where pixel stride is well defined, the pixel stride is always greater than 0.</p>
  *
+ * Available since API level 24.
+ *
  * @param image the {@link AImage} of interest.
  * @param planeIdx the index of the plane. Must be less than the number of planes of input image.
  * @param pixelStride the pixel stride of the image will be filled here if the method call succeeds.
@@ -714,6 +730,8 @@
  * being returned.
  * For formats where row stride is well defined, the row stride is always greater than 0.</p>
  *
+ * Available since API level 24.
+ *
  * @param image the {@link AImage} of interest.
  * @param planeIdx the index of the plane. Must be less than the number of planes of input image.
  * @param rowStride the row stride of the image will be filled here if the method call succeeds.
@@ -739,6 +757,8 @@
  * pointer from previous AImage_getPlaneData call becomes invalid. Do NOT use it after the
  * {@link AImage} or the parent {@link AImageReader} is deleted.</p>
  *
+ * Available since API level 24.
+ *
  * @param image the {@link AImage} of interest.
  * @param planeIdx the index of the plane. Must be less than the number of planes of input image.
  * @param data the data pointer of the image will be filled here if the method call succeeds.
@@ -769,6 +789,8 @@
  * signal the release of the hardware buffer back to the {@link AImageReader}'s queue using
  * releaseFenceFd.</p>
  *
+ * Available since API level 26.
+ *
  * @param image The {@link AImage} to be deleted.
  * @param releaseFenceFd A sync fence fd defined in {@link sync.h}, which signals the release of
  *         underlying {@link AHardwareBuffer}.
@@ -794,6 +816,8 @@
  * {@link AImageReader_setBufferRemovedListener} to be notified when the buffer is no longer used
  * by {@link AImageReader}.</p>
  *
+ * Available since API level 26.
+ *
  * @param image the {@link AImage} of interest.
  * @param outBuffer The memory area pointed to by buffer will contain the acquired AHardwareBuffer
  *         handle.
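
Tying the notes above together: plane pointers come from AImage_getPlaneData, they stay valid only while the image (and its reader) is alive, and the row stride may be wider than the visible width. A small hedged example that copies out the luma plane of a YUV_420_888 image:

    #include <media/NdkImage.h>
    #include <cstring>

    // Sketch: copy the Y plane of an AIMAGE_FORMAT_YUV_420_888 image into |dst|,
    // which is assumed to hold at least width * height bytes.
    static bool copyLuma(AImage *image, uint8_t *dst) {
        uint8_t *data = nullptr;
        int dataLength = 0;
        if (AImage_getPlaneData(image, /*planeIdx=*/0, &data, &dataLength) != AMEDIA_OK) {
            return false;
        }
        int32_t rowStride = 0, width = 0, height = 0;
        AImage_getPlaneRowStride(image, 0, &rowStride);
        AImage_getWidth(image, &width);
        AImage_getHeight(image, &height);
        for (int32_t y = 0; y < height; ++y) {
            std::memcpy(dst + y * width, data + y * rowStride, width);  // rowStride may exceed width
        }
        return true;
    }
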
diff --git a/media/ndk/include/media/NdkImageReader.h b/media/ndk/include/media/NdkImageReader.h
index e5d863c..600ffc9 100644
--- a/media/ndk/include/media/NdkImageReader.h
+++ b/media/ndk/include/media/NdkImageReader.h
@@ -67,6 +67,8 @@
  * The valid sizes and formats depend on the source of the image data.
  * </p>
  *
+ * Available since API level 24.
+ *
  * @param width The default width in pixels of the Images that this reader will produce.
  * @param height The default height in pixels of the Images that this reader will produce.
  * @param format The format of the Image that this reader will produce. This must be one of the
@@ -101,6 +103,8 @@
  * making any of data pointers obtained from {@link AImage_getPlaneData} invalid. Do NOT access
  * the reader object or any of those data pointers after this method returns.</p>
  *
+ * Available since API level 24.
+ *
  * @param reader The image reader to be deleted.
  */
 void AImageReader_delete(AImageReader* reader) __INTRODUCED_IN(24);
@@ -108,6 +112,8 @@
 /**
  * Get a {@link ANativeWindow} that can be used to produce {@link AImage} for this image reader.
  *
+ * Available since API level 24.
+ *
  * @param reader The image reader of interest.
  * @param window The output {@link ANativeWindow} will be filled here if the method call succeeds.
  *                The {@link ANativeWindow} is managed by this image reader. Do NOT call
@@ -126,6 +132,8 @@
  * {@link ANativeWindow}. If so, the actual width of the images can be found using
  * {@link AImage_getWidth}.</p>
  *
+ * Available since API level 24.
+ *
  * @param reader The image reader of interest.
  * @param width the default width of the reader will be filled here if the method call succeeds.
  *
@@ -142,6 +150,8 @@
  * {@link ANativeWindow}. If so, the actual height of the images can be found using
  * {@link AImage_getHeight}.</p>
  *
+ * Available since API level 24.
+ *
  * @param reader The image reader of interest.
  * @param height the default height of the reader will be filled here if the method call succeeds.
  *
@@ -154,6 +164,8 @@
 /**
  * Query the format of the {@link AImage} generated by this reader.
  *
+ * Available since API level 24.
+ *
  * @param reader The image reader of interest.
  * @param format the format of the reader will be filled here if the method call succeeds. The
  *                value will be one of the AIMAGE_FORMAT_* enum values defined in {@link NdkImage.h}.
@@ -167,6 +179,8 @@
 /**
  * Query the maximum number of concurrently acquired {@link AImage}s of this reader.
  *
+ * Available since API level 24.
+ *
  * @param reader The image reader of interest.
  * @param maxImages the maximum number of concurrently acquired images of the reader will be filled
  *                here if the method call succeeds.
@@ -197,6 +211,8 @@
  * {@link AImage_delete}.
  * </p>
  *
+ * Available since API level 24.
+ *
  * @param reader The image reader of interest.
  * @param image the acquired {@link AImage} will be filled here if the method call succeeds.
  *
@@ -214,7 +230,6 @@
 media_status_t AImageReader_acquireNextImage(AImageReader* reader, /*out*/AImage** image) __INTRODUCED_IN(24);
 
 /**
-
  * Acquire the latest {@link AImage} from the image reader's queue, dropping older images.
  *
  * <p>
@@ -241,6 +256,8 @@
  * {@link AImage_delete}.
  * </p>
  *
+ * Available since API level 24.
+ *
  * @param reader The image reader of interest.
  * @param image the acquired {@link AImage} will be filled here if the method call succeeds.
  *
@@ -290,6 +307,8 @@
  *
  * Calling this method will replace previously registered listeners.
  *
+ * Available since API level 24.
+ *
  * @param reader The image reader of interest.
  * @param listener The {@link AImageReader_ImageListener} to be registered. Set this to NULL if
  *                 the application no longer needs to listen to new images.
@@ -356,6 +375,9 @@
  *   {@link AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE}, or combined</td>
  * </tr>
  * </table>
+ *
+ * Available since API level 26.
+ *
  * @return <ul>
  *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
  *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if reader is NULL, or one or more of width,
@@ -377,6 +399,8 @@
  * additional parameter for the sync fence. All other parameters and the return values are
  * identical to those passed to {@link AImageReader_acquireNextImage}.</p>
  *
+ * Available since API level 26.
+ *
  * @param acquireFenceFd A sync fence fd defined in {@link sync.h}, which is used to signal when the
  *         buffer is ready to consume. When synchronization fence is not needed, fence will be set
  *         to -1 and the {@link AImage} returned is ready for use immediately. Otherwise, user shall
@@ -397,6 +421,8 @@
  * additional parameter for the sync fence. All other parameters and the return values are
  * identical to those passed to {@link AImageReader_acquireLatestImage}.</p>
  *
+ * Available since API level 26.
+ *
  * @param acquireFenceFd A sync fence fd defined in {@link sync.h}, which is used to signal when the
  *         buffer is ready to consume. When synchronization fence is not needed, fence will be set
  *         to -1 and the {@link AImage} returned is ready for use immediately. Otherwise, user shall
@@ -408,6 +434,7 @@
  */
 media_status_t AImageReader_acquireLatestImageAsync(
         AImageReader* reader, /*out*/AImage** image, /*out*/int* acquireFenceFd) __INTRODUCED_IN(26);
+
 /**
  * Signature of the callback which is called when {@link AImageReader} is about to remove a buffer.
  *
@@ -451,6 +478,8 @@
  *
  * <p>Note that calling this method will replace previously registered listeners.</p>
  *
+ * Available since API level 26.
+ *
  * @param reader The image reader of interest.
  * @param listener the {@link AImageReader_BufferRemovedListener} to be registered. Set this to
  * NULL if application no longer needs to listen to buffer removed events.
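
For reference, the lifecycle implied by these functions: create the reader, hand its ANativeWindow to the producer, acquire and delete images as they arrive, and delete the reader last. A minimal sketch; the 640x480 YUV format and maxImages value are illustrative only:

    #include <media/NdkImageReader.h>

    static void drainImages() {
        AImageReader *reader = nullptr;
        if (AImageReader_new(640, 480, AIMAGE_FORMAT_YUV_420_888, /*maxImages=*/4,
                             &reader) != AMEDIA_OK) {
            return;
        }
        ANativeWindow *window = nullptr;
        AImageReader_getWindow(reader, &window);   // producer (e.g. camera) draws here

        AImage *image = nullptr;
        while (AImageReader_acquireNextImage(reader, &image) == AMEDIA_OK) {
            // ... inspect format / planes / timestamp ...
            AImage_delete(image);                  // release before maxImages is exhausted
        }
        AImageReader_delete(reader);               // also invalidates |window|
    }
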
diff --git a/media/ndk/include/media/NdkMediaCodec.h b/media/ndk/include/media/NdkMediaCodec.h
index b3ee853..1823fbc 100644
--- a/media/ndk/include/media/NdkMediaCodec.h
+++ b/media/ndk/include/media/NdkMediaCodec.h
@@ -127,27 +127,37 @@
  * Create codec by name. Use this if you know the exact codec you want to use.
  * When configuring, you will need to specify whether to use the codec as an
  * encoder or decoder.
+ *
+ * Available since API level 21.
  */
 AMediaCodec* AMediaCodec_createCodecByName(const char *name) __INTRODUCED_IN(21);
 
 /**
  * Create codec by mime type. Most applications will use this, specifying a
  * mime type obtained from media extractor.
+ *
+ * Available since API level 21.
  */
 AMediaCodec* AMediaCodec_createDecoderByType(const char *mime_type) __INTRODUCED_IN(21);
 
 /**
  * Create encoder by name.
+ *
+ * Available since API level 21.
  */
 AMediaCodec* AMediaCodec_createEncoderByType(const char *mime_type) __INTRODUCED_IN(21);
 
 /**
- * delete the codec and free its resources
+ * Delete the codec and free its resources.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaCodec_delete(AMediaCodec*) __INTRODUCED_IN(21);
 
 /**
  * Configure the codec. For decoding you would typically get the format from an extractor.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaCodec_configure(
         AMediaCodec*,
@@ -159,29 +169,39 @@
 /**
  * Start the codec. A codec must be configured before it can be started, and must be started
  * before buffers can be sent to it.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaCodec_start(AMediaCodec*) __INTRODUCED_IN(21);
 
 /**
  * Stop the codec.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaCodec_stop(AMediaCodec*) __INTRODUCED_IN(21);
 
 /*
  * Flush the codec's input and output. All indices previously returned from calls to
  * AMediaCodec_dequeueInputBuffer and AMediaCodec_dequeueOutputBuffer become invalid.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaCodec_flush(AMediaCodec*) __INTRODUCED_IN(21);
 
 /**
  * Get an input buffer. The specified buffer index must have been previously obtained from
  * dequeueInputBuffer, and not yet queued.
+ *
+ * Available since API level 21.
  */
 uint8_t* AMediaCodec_getInputBuffer(AMediaCodec*, size_t idx, size_t *out_size) __INTRODUCED_IN(21);
 
 /**
  * Get an output buffer. The specified buffer index must have been previously obtained from
  * dequeueOutputBuffer, and not yet queued.
+ *
+ * Available since API level 21.
  */
 uint8_t* AMediaCodec_getOutputBuffer(AMediaCodec*, size_t idx, size_t *out_size) __INTRODUCED_IN(21);
 
@@ -189,6 +209,8 @@
  * Get the index of the next available input buffer. An app will typically use this with
  * getInputBuffer() to get a pointer to the buffer, then copy the data to be encoded or decoded
  * into the buffer before passing it to the codec.
+ *
+ * Available since API level 21.
  */
 ssize_t AMediaCodec_dequeueInputBuffer(AMediaCodec*, int64_t timeoutUs) __INTRODUCED_IN(21);
 
@@ -218,6 +240,8 @@
 
 /**
  * Send the specified buffer to the codec for processing.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaCodec_queueInputBuffer(AMediaCodec*, size_t idx,
                                             _off_t_compat offset, size_t size,
@@ -225,6 +249,8 @@
 
 /**
  * Send the specified buffer to the codec for processing.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaCodec_queueSecureInputBuffer(AMediaCodec*, size_t idx,
                                                   _off_t_compat offset,
@@ -235,15 +261,23 @@
 
 /**
  * Get the index of the next available buffer of processed data.
+ *
+ * Available since API level 21.
  */
 ssize_t AMediaCodec_dequeueOutputBuffer(AMediaCodec*, AMediaCodecBufferInfo *info,
         int64_t timeoutUs) __INTRODUCED_IN(21);
+
+/**
+ * Available since API level 21.
+ */
 AMediaFormat* AMediaCodec_getOutputFormat(AMediaCodec*) __INTRODUCED_IN(21);
 
 /**
  * If you are done with a buffer, use this call to return the buffer to
  * the codec. If you previously specified a surface when configuring this
  * video decoder you can optionally render the buffer.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaCodec_releaseOutputBuffer(AMediaCodec*, size_t idx, bool render) __INTRODUCED_IN(21);
 
@@ -256,6 +290,8 @@
  *  to ImageReader (software readable) output.
  *
  * For more details, see the Java documentation for MediaCodec.setOutputSurface.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaCodec_setOutputSurface(AMediaCodec*, ANativeWindow* surface) __INTRODUCED_IN(21);
 
@@ -266,6 +302,8 @@
  * this call will simply return the buffer to the codec.
  *
  * For more details, see the Java documentation for MediaCodec.releaseOutputBuffer.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaCodec_releaseOutputBufferAtTime(
         AMediaCodec *mData, size_t idx, int64_t timestampNs) __INTRODUCED_IN(21);
@@ -282,6 +320,8 @@
  * ANativeWindow_release() when done.
  *
  * For more details, see the Java documentation for MediaCodec.createInputSurface.
+ *
+ * Available since API level 26.
  */
 media_status_t AMediaCodec_createInputSurface(
         AMediaCodec *mData, ANativeWindow **surface) __INTRODUCED_IN(26);
@@ -298,6 +338,8 @@
  * ANativeWindow_release() when done.
  *
  * For more details, see the Java documentation for MediaCodec.createPersistentInputSurface.
+ *
+ * Available since API level 26.
  */
 media_status_t AMediaCodec_createPersistentInputSurface(
         ANativeWindow **surface) __INTRODUCED_IN(26);
@@ -311,6 +353,8 @@
  * AMediaCodec_configure(..); and before AMediaCodec_start() has been called.
  *
  * For more details, see the Java documentation for MediaCodec.setInputSurface.
+ *
+ * Available since API level 26.
  */
 media_status_t AMediaCodec_setInputSurface(
         AMediaCodec *mData, ANativeWindow *surface) __INTRODUCED_IN(26);
@@ -322,6 +366,8 @@
  * after AMediaCodec_start() has been called.
  *
  * NOTE: Some of these parameter changes may silently fail to apply.
+ *
+ * Available since API level 26.
  */
 media_status_t AMediaCodec_setParameters(
         AMediaCodec *mData, const AMediaFormat* params) __INTRODUCED_IN(26);
@@ -339,6 +385,8 @@
  * Returns AMEDIA_OK when completed successfully.
  *
  * For more details, see the Java documentation for MediaCodec.signalEndOfInputStream.
+ *
+ * Available since API level 26.
  */
 media_status_t AMediaCodec_signalEndOfInputStream(AMediaCodec *mData) __INTRODUCED_IN(26);
 
@@ -349,6 +397,8 @@
 /**
  * Get format of the buffer. The specified buffer index must have been previously obtained from
  * dequeueOutputBuffer.
+ *
+ * Available since API level 28.
  */
 AMediaFormat* AMediaCodec_getBufferFormat(AMediaCodec*, size_t index) __INTRODUCED_IN(28);
 
@@ -356,11 +406,15 @@
  * Get the component name. If the codec was created by createDecoderByType
  * or createEncoderByType, what component is chosen is not known beforehand.
  * Caller shall call AMediaCodec_releaseName to free the returned pointer.
+ *
+ * Available since API level 28.
  */
 media_status_t AMediaCodec_getName(AMediaCodec*, char** out_name) __INTRODUCED_IN(28);
 
 /**
  * Free the memory pointed by name which is returned by AMediaCodec_getName.
+ *
+ * Available since API level 28.
  */
 void AMediaCodec_releaseName(AMediaCodec*, char* name) __INTRODUCED_IN(28);
 
@@ -382,6 +436,8 @@
  * All callbacks are fired on one NDK internal thread.
  * AMediaCodec_setAsyncNotifyCallback should not be called on the callback thread.
  * No heavy duty task should be performed on callback thread.
+ *
+ * Available since API level 28.
  */
 media_status_t AMediaCodec_setAsyncNotifyCallback(
         AMediaCodec*,
@@ -390,6 +446,8 @@
 
 /**
  * Release the crypto if applicable.
+ *
+ * Available since API level 28.
  */
 media_status_t AMediaCodec_releaseCrypto(AMediaCodec*) __INTRODUCED_IN(28);
 
@@ -397,12 +455,16 @@
  * Call this after AMediaCodec_configure() returns successfully to get the input
  * format accepted by the codec. Do this to determine what optional configuration
  * parameters were supported by the codec.
+ *
+ * Available since API level 28.
  */
 AMediaFormat* AMediaCodec_getInputFormat(AMediaCodec*) __INTRODUCED_IN(28);
 
 /**
  * Returns true if the codec cannot proceed further, but can be recovered by stopping,
  * configuring, and starting again.
+ *
+ * Available since API level 28.
  */
 bool AMediaCodecActionCode_isRecoverable(int32_t actionCode) __INTRODUCED_IN(28);
 
@@ -410,6 +472,8 @@
  * Returns true if the codec error is a transient issue, perhaps due to
  * resource constraints, and that the method (or encoding/decoding) may be
  * retried at a later time.
+ *
+ * Available since API level 28.
  */
 bool AMediaCodecActionCode_isTransient(int32_t actionCode) __INTRODUCED_IN(28);
 
@@ -440,6 +504,8 @@
  * numBytesOfClearData can be null to indicate that all data is encrypted.
  * This information encapsulates per-sample metadata as outlined in
  * ISO/IEC FDIS 23001-7:2011 "Common encryption in ISO base media file format files".
+ *
+ * Available since API level 21.
  */
 AMediaCodecCryptoInfo *AMediaCodecCryptoInfo_new(
         int numsubsamples,
@@ -450,13 +516,17 @@
         size_t *encryptedbytes) __INTRODUCED_IN(21);
 
 /**
- * delete an AMediaCodecCryptoInfo created previously with AMediaCodecCryptoInfo_new, or
- * obtained from AMediaExtractor
+ * Delete an AMediaCodecCryptoInfo created previously with AMediaCodecCryptoInfo_new, or
+ * obtained from AMediaExtractor.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaCodecCryptoInfo_delete(AMediaCodecCryptoInfo*) __INTRODUCED_IN(21);
 
 /**
- * Set the crypto pattern on an AMediaCryptoInfo object
+ * Set the crypto pattern on an AMediaCryptoInfo object.
+ *
+ * Available since API level 21.
  */
 void AMediaCodecCryptoInfo_setPattern(
         AMediaCodecCryptoInfo *info,
@@ -464,32 +534,44 @@
 
 /**
  * The number of subsamples that make up the buffer's contents.
+ *
+ * Available since API level 21.
  */
 size_t AMediaCodecCryptoInfo_getNumSubSamples(AMediaCodecCryptoInfo*) __INTRODUCED_IN(21);
 
 /**
- * A 16-byte opaque key
+ * A 16-byte opaque key.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaCodecCryptoInfo_getKey(AMediaCodecCryptoInfo*, uint8_t *dst) __INTRODUCED_IN(21);
 
 /**
- * A 16-byte initialization vector
+ * A 16-byte initialization vector.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaCodecCryptoInfo_getIV(AMediaCodecCryptoInfo*, uint8_t *dst) __INTRODUCED_IN(21);
 
 /**
  * The type of encryption that has been applied,
  * one of AMEDIACODECRYPTOINFO_MODE_CLEAR or AMEDIACODECRYPTOINFO_MODE_AES_CTR.
+ *
+ * Available since API level 21.
  */
 cryptoinfo_mode_t AMediaCodecCryptoInfo_getMode(AMediaCodecCryptoInfo*) __INTRODUCED_IN(21);
 
 /**
  * The number of leading unencrypted bytes in each subsample.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaCodecCryptoInfo_getClearBytes(AMediaCodecCryptoInfo*, size_t *dst) __INTRODUCED_IN(21);
 
 /**
  * The number of trailing encrypted bytes in each subsample.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaCodecCryptoInfo_getEncryptedBytes(AMediaCodecCryptoInfo*, size_t *dst) __INTRODUCED_IN(21);
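
The functions documented above are usually combined into a dequeue/queue loop. A hedged sketch of a synchronous decode loop for an already-configured codec; feedInput is a hypothetical helper that supplies compressed input and returns 0 at end of stream, and real code would also pass per-sample timestamps:

    #include <media/NdkMediaCodec.h>

    static void decodeLoop(AMediaCodec *codec, ssize_t (*feedInput)(uint8_t *, size_t)) {
        AMediaCodec_start(codec);
        bool inputDone = false, outputDone = false;
        while (!outputDone) {
            if (!inputDone) {
                ssize_t inIdx = AMediaCodec_dequeueInputBuffer(codec, 10000 /*us*/);
                if (inIdx >= 0) {
                    size_t cap = 0;
                    uint8_t *buf = AMediaCodec_getInputBuffer(codec, inIdx, &cap);
                    ssize_t n = feedInput(buf, cap);
                    inputDone = (n <= 0);
                    AMediaCodec_queueInputBuffer(codec, inIdx, 0, n > 0 ? n : 0,
                                                 /*presentationTimeUs=*/0,
                                                 inputDone ? AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM : 0);
                }
            }
            AMediaCodecBufferInfo info;
            ssize_t outIdx = AMediaCodec_dequeueOutputBuffer(codec, &info, 10000 /*us*/);
            if (outIdx >= 0) {
                outputDone = (info.flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM) != 0;
                AMediaCodec_releaseOutputBuffer(codec, outIdx, /*render=*/false);
            }
        }
        AMediaCodec_stop(codec);
    }
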
 
diff --git a/media/ndk/include/media/NdkMediaCrypto.h b/media/ndk/include/media/NdkMediaCrypto.h
index bcdf9a0..3fa07c7 100644
--- a/media/ndk/include/media/NdkMediaCrypto.h
+++ b/media/ndk/include/media/NdkMediaCrypto.h
@@ -49,12 +49,24 @@
 
 #if __ANDROID_API__ >= 21
 
+/**
+ * Available since API level 21.
+ */
 bool AMediaCrypto_isCryptoSchemeSupported(const AMediaUUID uuid) __INTRODUCED_IN(21);
 
+/**
+ * Available since API level 21.
+ */
 bool AMediaCrypto_requiresSecureDecoderComponent(const char *mime) __INTRODUCED_IN(21);
 
+/**
+ * Available since API level 21.
+ */
 AMediaCrypto* AMediaCrypto_new(const AMediaUUID uuid, const void *initData, size_t initDataSize) __INTRODUCED_IN(21);
 
+/**
+ * Available since API level 21.
+ */
 void AMediaCrypto_delete(AMediaCrypto* crypto) __INTRODUCED_IN(21);
 
 #endif /* __ANDROID_API__ >= 21 */
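
A short example of the usual probe-then-create order for these functions; the 16-byte UUID shown is the commonly published Widevine UUID and is used here purely as an illustration:

    #include <media/NdkMediaCrypto.h>

    static bool schemeAvailable() {
        static const AMediaUUID kUuid = {
            0xED, 0xEF, 0x8B, 0xA9, 0x79, 0xD6, 0x4A, 0xCE,
            0xA3, 0xC8, 0x27, 0xDC, 0xD5, 0x1D, 0x21, 0xED,
        };
        return AMediaCrypto_isCryptoSchemeSupported(kUuid);
    }
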
diff --git a/media/ndk/include/media/NdkMediaDataSource.h b/media/ndk/include/media/NdkMediaDataSource.h
index 16b1eb3..0577df2 100644
--- a/media/ndk/include/media/NdkMediaDataSource.h
+++ b/media/ndk/include/media/NdkMediaDataSource.h
@@ -88,6 +88,8 @@
 /**
  * Create new media data source. Returns NULL if memory allocation
  * for the new data source object fails.
+ *
+ * Available since API level 28.
  */
 AMediaDataSource* AMediaDataSource_new() __INTRODUCED_IN(28);
 
@@ -116,6 +118,7 @@
  * ...
  * key_values[(numheaders - 1) * 2]:key_values[(numheaders - 1) * 2 + 1]
  *
+ * Available since API level 29.
  */
 AMediaDataSource* AMediaDataSource_newUri(const char *uri,
         int numheaders,
@@ -125,12 +128,16 @@
 
 /**
  * Delete a previously created media data source.
+ *
+ * Available since API level 28.
  */
 void AMediaDataSource_delete(AMediaDataSource*) __INTRODUCED_IN(28);
 
 /**
  * Set a user-provided opaque handle. This opaque handle is passed as
  * the first argument to the data source callbacks.
+ *
+ * Available since API level 28.
  */
 void AMediaDataSource_setUserdata(
         AMediaDataSource*, void *userdata) __INTRODUCED_IN(28);
@@ -145,6 +152,8 @@
  *
  * Please refer to the definition of AMediaDataSourceReadAt for
  * additional details.
+ *
+ * Available since API level 28.
  */
 void AMediaDataSource_setReadAt(
         AMediaDataSource*,
@@ -156,6 +165,8 @@
  *
  * Please refer to the definition of AMediaDataSourceGetSize for
  * additional details.
+ *
+ * Available since API level 28.
  */
 void AMediaDataSource_setGetSize(
         AMediaDataSource*,
@@ -167,6 +178,8 @@
  *
  * Please refer to the definition of AMediaDataSourceClose for
  * additional details.
+ *
+ * Available since API level 28.
  */
 void AMediaDataSource_setClose(
         AMediaDataSource*,
@@ -181,6 +194,8 @@
  *
  * Please refer to the definition of AMediaDataSourceClose for
  * additional details.
+ *
+ * Available since API level 29.
  */
 void AMediaDataSource_close(AMediaDataSource*) __INTRODUCED_IN(29);
 
@@ -191,6 +206,8 @@
  *
  * Please refer to the definition of AMediaDataSourceGetAvailableSize
  * for additional details.
+ *
+ * Available since API level 29.
  */
 void AMediaDataSource_setGetAvailableSize(
         AMediaDataSource*,
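
The setters above wire application callbacks into an AMediaDataSource. A hedged sketch that backs a data source with a plain FILE*, assuming the AMediaDataSourceReadAt/GetSize/Close callback typedefs declared earlier in this header and a 64-bit off_t:

    #include <media/NdkMediaDataSource.h>
    #include <cstdio>

    static ssize_t readAt(void *userdata, off64_t offset, void *buffer, size_t size) {
        FILE *f = static_cast<FILE *>(userdata);
        if (fseeko(f, offset, SEEK_SET) != 0) return -1;
        size_t n = fread(buffer, 1, size, f);
        return n > 0 ? (ssize_t)n : -1;        // -1 signals end of stream / error
    }

    static ssize_t getSize(void *userdata) {
        FILE *f = static_cast<FILE *>(userdata);
        off64_t cur = ftello(f);
        fseeko(f, 0, SEEK_END);
        off64_t end = ftello(f);
        fseeko(f, cur, SEEK_SET);
        return (ssize_t)end;
    }

    static void closeSource(void *userdata) {
        fclose(static_cast<FILE *>(userdata));
    }

    static AMediaDataSource *makeSource(FILE *f) {
        AMediaDataSource *src = AMediaDataSource_new();
        AMediaDataSource_setUserdata(src, f);
        AMediaDataSource_setReadAt(src, readAt);
        AMediaDataSource_setGetSize(src, getSize);
        AMediaDataSource_setClose(src, closeSource);
        return src;                            // pass to AMediaExtractor_setDataSourceCustom()
    }
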
diff --git a/media/ndk/include/media/NdkMediaDrm.h b/media/ndk/include/media/NdkMediaDrm.h
index 2e438d9..31f5c7d 100644
--- a/media/ndk/include/media/NdkMediaDrm.h
+++ b/media/ndk/include/media/NdkMediaDrm.h
@@ -174,41 +174,53 @@
  * uuid identifies the universal unique ID of the crypto scheme. uuid must be 16 bytes.
  * mimeType is the MIME type of the media container, e.g. "video/mp4".  If mimeType
  * is not known or required, it can be provided as NULL.
+ *
+ * Available since API level 21.
  */
 bool AMediaDrm_isCryptoSchemeSupported(const uint8_t *uuid,
         const char *mimeType) __INTRODUCED_IN(21);
 
 /**
- * Create a MediaDrm instance from a UUID
+ * Create a MediaDrm instance from a UUID.
  * uuid identifies the universal unique ID of the crypto scheme. uuid must be 16 bytes.
+ *
+ * Available since API level 21.
  */
 AMediaDrm* AMediaDrm_createByUUID(const uint8_t *uuid) __INTRODUCED_IN(21);
 
 /**
- * Release a MediaDrm object
+ * Release a MediaDrm object.
+ *
+ * Available since API level 21.
  */
 void AMediaDrm_release(AMediaDrm *) __INTRODUCED_IN(21);
 
 /**
- * Register a callback to be invoked when an event occurs
+ * Register a callback to be invoked when an event occurs.
  *
- * listener is the callback that will be invoked on event
+ * listener is the callback that will be invoked on event.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaDrm_setOnEventListener(AMediaDrm *,
         AMediaDrmEventListener listener) __INTRODUCED_IN(21);
 
 /**
- * Register a callback to be invoked when an expiration update event occurs
+ * Register a callback to be invoked when an expiration update event occurs.
  *
- * listener is the callback that will be invoked on event
+ * listener is the callback that will be invoked on event.
+ *
+ * Available since API level 29.
  */
 media_status_t AMediaDrm_setOnExpirationUpdateListener(AMediaDrm *,
         AMediaDrmExpirationUpdateListener listener) __INTRODUCED_IN(29);
 
 /**
- * Register a callback to be invoked when a key status change event occurs
+ * Register a callback to be invoked when a key status change event occurs.
  *
- * listener is the callback that will be invoked on event
+ * listener is the callback that will be invoked on event.
+ *
+ * Available since API level 29.
  */
 media_status_t AMediaDrm_setOnKeysChangeListener(AMediaDrm *,
         AMediaDrmKeysChangeListener listener) __INTRODUCED_IN(29);
@@ -216,8 +228,10 @@
 /**
  * Open a new session with the MediaDrm object.  A session ID is returned.
  *
- * returns MEDIADRM_NOT_PROVISIONED_ERROR if provisioning is needed
- * returns MEDIADRM_RESOURCE_BUSY_ERROR if required resources are in use
+ * Returns MEDIADRM_NOT_PROVISIONED_ERROR if provisioning is needed.
+ * Returns MEDIADRM_RESOURCE_BUSY_ERROR if required resources are in use.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaDrm_openSession(AMediaDrm *,
         AMediaDrmSessionId *sessionId) __INTRODUCED_IN(21);
@@ -225,6 +239,8 @@
 /**
  * Close a session on the MediaDrm object that was previously opened
  * with AMediaDrm_openSession.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaDrm_closeSession(AMediaDrm *,
         const AMediaDrmSessionId *sessionId) __INTRODUCED_IN(21);
@@ -272,9 +288,11 @@
  *       MediaDrm object is released.
  *   2. keyRequestSize will be set to the size of the request
  *
- * returns MEDIADRM_NOT_PROVISIONED_ERROR if reprovisioning is needed, due to a
+ * Returns MEDIADRM_NOT_PROVISIONED_ERROR if reprovisioning is needed, due to a
  * problem with the device certificate.
-*/
+ *
+ * Available since API level 21.
+ */
 media_status_t AMediaDrm_getKeyRequest(AMediaDrm *, const AMediaDrmScope *scope,
         const uint8_t *init, size_t initSize, const char *mimeType, AMediaDrmKeyType keyType,
         const AMediaDrmKeyValue *optionalParameters, size_t numOptionalParameters,
@@ -295,8 +313,9 @@
  *
  * response points to the opaque response from the server
  * responseSize should be set to the size of the response in bytes
+ *
+ * Available since API level 21.
  */
-
 media_status_t AMediaDrm_provideKeyResponse(AMediaDrm *, const AMediaDrmScope *scope,
         const uint8_t *response, size_t responseSize,
         AMediaDrmKeySetId *keySetId) __INTRODUCED_IN(21);
@@ -305,8 +324,10 @@
  * Restore persisted offline keys into a new session.  keySetId identifies the
  * keys to load, obtained from a prior call to AMediaDrm_provideKeyResponse.
  *
- * sessionId is the session ID for the DRM session
- * keySetId identifies the saved key set to restore
+ * sessionId is the session ID for the DRM session.
+ * keySetId identifies the saved key set to restore.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaDrm_restoreKeys(AMediaDrm *, const AMediaDrmSessionId *sessionId,
         const AMediaDrmKeySetId *keySetId) __INTRODUCED_IN(21);
@@ -314,7 +335,9 @@
 /**
  * Remove the current keys from a session.
  *
- * keySetId identifies keys to remove
+ * keySetId identifies keys to remove.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaDrm_removeKeys(AMediaDrm *,
         const AMediaDrmSessionId *keySetId) __INTRODUCED_IN(21);
@@ -331,6 +354,8 @@
  * to the number of entries written to the array.  If the number of {key, value} pairs
  * to be returned is greater than *numPairs, MEDIADRM_SHORT_BUFFER will be returned
  * and numPairs will be set to the number of pairs available.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaDrm_queryKeyStatus(AMediaDrm *, const AMediaDrmSessionId *sessionId,
         AMediaDrmKeyValue *keyValuePairs, size_t *numPairs) __INTRODUCED_IN(21);
@@ -350,6 +375,8 @@
  *    3. serverUrl will reference a NULL terminated string containing the URL
  *       the provisioning request should be sent to.  It will remain accessible until
  *       the next call to getProvisionRequest.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaDrm_getProvisionRequest(AMediaDrm *, const uint8_t **provisionRequest,
         size_t *provisionRequestSize, const char **serverUrl) __INTRODUCED_IN(21);
@@ -363,8 +390,10 @@
  *   DRM engine plugin.
  * responseSize is the length of the provisioning response in bytes.
  *
- * returns MEDIADRM_DEVICE_REVOKED_ERROR if the response indicates that the
+ * Returns MEDIADRM_DEVICE_REVOKED_ERROR if the response indicates that the
  * server rejected the request
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaDrm_provideProvisionResponse(AMediaDrm *,
         const uint8_t *response, size_t responseSize) __INTRODUCED_IN(21);
@@ -390,6 +419,8 @@
  * If *numSecureStops is too small for the number of secure stops available,
  * MEDIADRM_SHORT_BUFFER will be returned and *numSecureStops will be set to the
  * number required.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaDrm_getSecureStops(AMediaDrm *,
         AMediaDrmSecureStop *secureStops, size_t *numSecureStops) __INTRODUCED_IN(21);
@@ -399,6 +430,8 @@
  * the message, remove the SecureStops identified in the response.
  *
  * ssRelease is the server response indicating which secure stops to release
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaDrm_releaseSecureStops(AMediaDrm *,
         const AMediaDrmSecureStop *ssRelease) __INTRODUCED_IN(21);
@@ -432,6 +465,8 @@
  * On return, propertyValue will be set to point to the property value.  The
  * memory that the value resides in is owned by the NDK MediaDrm API and
  * will remain valid until the next call to AMediaDrm_getPropertyString.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaDrm_getPropertyString(AMediaDrm *, const char *propertyName,
         const char **propertyValue) __INTRODUCED_IN(21);
@@ -447,18 +482,24 @@
  * On return, *propertyValue will be set to point to the property value.  The
  * memory that the value resides in is owned by the NDK MediaDrm API and
  * will remain valid until the next call to AMediaDrm_getPropertyByteArray.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaDrm_getPropertyByteArray(AMediaDrm *, const char *propertyName,
         AMediaDrmByteArray *propertyValue) __INTRODUCED_IN(21);
 
 /**
  * Set a DRM engine plugin String property value.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaDrm_setPropertyString(AMediaDrm *, const char *propertyName,
         const char *value) __INTRODUCED_IN(21);
 
 /**
  * Set a DRM engine plugin byte array property value.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaDrm_setPropertyByteArray(AMediaDrm *, const char *propertyName,
         const uint8_t *value, size_t valueSize) __INTRODUCED_IN(21);
@@ -487,6 +528,8 @@
  * ensure that the output buffer is large enough to accept dataSize bytes. The key
  * to use is identified by the 16 byte keyId.  The key must have been loaded into
  * the session using provideKeyResponse.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaDrm_encrypt(AMediaDrm *, const AMediaDrmSessionId *sessionId,
         const char *cipherAlgorithm, uint8_t *keyId, uint8_t *iv,
@@ -498,6 +541,8 @@
  * ensure that the output buffer is large enough to accept dataSize bytes.  The key
  * to use is identified by the 16 byte keyId.  The key must have been loaded into
  * the session using provideKeyResponse.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaDrm_decrypt(AMediaDrm *, const AMediaDrmSessionId *sessionId,
         const char *cipherAlgorithm, uint8_t *keyId, uint8_t *iv,
@@ -511,6 +556,8 @@
  * *signatureSize is set to the buffer size required.  The key to use is identified
  * by the 16 byte keyId.  The key must have been loaded into the session using
  * provideKeyResponse.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaDrm_sign(AMediaDrm *, const AMediaDrmSessionId *sessionId,
         const char *macAlgorithm, uint8_t *keyId, uint8_t *message, size_t messageSize,
@@ -522,6 +569,8 @@
  * if the signature matches, otherwise MEDIADRM_VERIFY_FAILED is returned. The key to
  * use is identified by the 16 byte keyId.  The key must have been loaded into the
  * session using provideKeyResponse.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaDrm_verify(AMediaDrm *, const AMediaDrmSessionId *sessionId,
         const char *macAlgorithm, uint8_t *keyId, const uint8_t *message, size_t messageSize,
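
The key-exchange functions above compose into the usual round trip: open a session, build a key request, send it to the license server out of band, and feed the response back. A hedged sketch of that flow; sendToLicenseServer is a hypothetical application helper and error handling is abbreviated:

    #include <media/NdkMediaDrm.h>

    static media_status_t fetchLicense(
            const uint8_t uuid[16], const uint8_t *initData, size_t initSize,
            const uint8_t *(*sendToLicenseServer)(const uint8_t *, size_t, size_t *)) {
        AMediaDrm *drm = AMediaDrm_createByUUID(uuid);
        if (drm == nullptr) return AMEDIA_ERROR_UNSUPPORTED;

        AMediaDrmSessionId session;
        media_status_t err = AMediaDrm_openSession(drm, &session);
        if (err != AMEDIA_OK) {
            AMediaDrm_release(drm);
            return err;
        }

        const uint8_t *request = nullptr;
        size_t requestSize = 0;
        err = AMediaDrm_getKeyRequest(drm, &session, initData, initSize, "video/mp4",
                                      KEY_TYPE_STREAMING, nullptr, 0, &request, &requestSize);
        if (err == AMEDIA_OK) {
            size_t responseSize = 0;
            const uint8_t *response = sendToLicenseServer(request, requestSize, &responseSize);
            if (response == nullptr) {
                err = AMEDIA_ERROR_UNKNOWN;
            } else {
                AMediaDrmKeySetId keySetId;
                err = AMediaDrm_provideKeyResponse(drm, &session, response, responseSize, &keySetId);
            }
        }
        AMediaDrm_closeSession(drm, &session);
        AMediaDrm_release(drm);
        return err;
    }
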
diff --git a/media/ndk/include/media/NdkMediaExtractor.h b/media/ndk/include/media/NdkMediaExtractor.h
index e3d9fe6..14319c4 100644
--- a/media/ndk/include/media/NdkMediaExtractor.h
+++ b/media/ndk/include/media/NdkMediaExtractor.h
@@ -52,23 +52,31 @@
 #if __ANDROID_API__ >= 21
 
 /**
- * Create new media extractor
+ * Create new media extractor.
+ *
+ * Available since API level 21.
  */
 AMediaExtractor* AMediaExtractor_new() __INTRODUCED_IN(21);
 
 /**
- * Delete a previously created media extractor
+ * Delete a previously created media extractor.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaExtractor_delete(AMediaExtractor*) __INTRODUCED_IN(21);
 
 /**
- *  Set the file descriptor from which the extractor will read.
+ * Set the file descriptor from which the extractor will read.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaExtractor_setDataSourceFd(AMediaExtractor*, int fd, off64_t offset,
         off64_t length) __INTRODUCED_IN(21);
 
 /**
  * Set the URI from which the extractor will read.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaExtractor_setDataSource(AMediaExtractor*,
         const char *location) __INTRODUCED_IN(21);
@@ -77,6 +85,8 @@
 
 /**
  * Set the custom data source implementation from which the extractor will read.
+ *
+ * Available since API level 28.
  */
 media_status_t AMediaExtractor_setDataSourceCustom(AMediaExtractor*,
         AMediaDataSource *src) __INTRODUCED_IN(28);
@@ -85,11 +95,15 @@
 
 /**
  * Return the number of tracks in the previously specified media file
+ *
+ * Available since API level 21.
  */
 size_t AMediaExtractor_getTrackCount(AMediaExtractor*) __INTRODUCED_IN(21);
 
 /**
  * Return the format of the specified track. The caller must free the returned format
+ *
+ * Available since API level 21.
  */
 AMediaFormat* AMediaExtractor_getTrackFormat(AMediaExtractor*, size_t idx) __INTRODUCED_IN(21);
 
@@ -98,41 +112,55 @@
  * getSampleTime only retrieve information for the subset of tracks selected.
  * Selecting the same track multiple times has no effect, the track is
  * only selected once.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaExtractor_selectTrack(AMediaExtractor*, size_t idx) __INTRODUCED_IN(21);
 
 /**
  * Unselect the specified track. Subsequent calls to readSampleData, getSampleTrackIndex and
- * getSampleTime only retrieve information for the subset of tracks selected..
+ * getSampleTime only retrieve information for the subset of tracks selected.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaExtractor_unselectTrack(AMediaExtractor*, size_t idx) __INTRODUCED_IN(21);
 
 /**
  * Read the current sample.
+ *
+ * Available since API level 21.
  */
 ssize_t AMediaExtractor_readSampleData(AMediaExtractor*,
         uint8_t *buffer, size_t capacity) __INTRODUCED_IN(21);
 
 /**
  * Read the current sample's flags.
+ *
+ * Available since API level 21.
  */
 uint32_t AMediaExtractor_getSampleFlags(AMediaExtractor*) __INTRODUCED_IN(21);
 
 /**
  * Returns the track index the current sample originates from (or -1
  * if no more samples are available)
+ *
+ * Available since API level 21.
  */
 int AMediaExtractor_getSampleTrackIndex(AMediaExtractor*) __INTRODUCED_IN(21);
 
 /**
  * Returns the current sample's presentation time in microseconds,
  * or -1 if no more samples are available.
+ *
+ * Available since API level 21.
  */
 int64_t AMediaExtractor_getSampleTime(AMediaExtractor*) __INTRODUCED_IN(21);
 
 /**
  * Advance to the next sample. Returns false if no more sample data
  * is available (end of stream).
+ *
+ * Available since API level 21.
  */
 bool AMediaExtractor_advance(AMediaExtractor*) __INTRODUCED_IN(21);
 
@@ -143,7 +171,7 @@
 } SeekMode;
 
 /**
- *
+ * Available since API level 21.
  */
 media_status_t AMediaExtractor_seekTo(AMediaExtractor*,
         int64_t seekPosUs, SeekMode mode) __INTRODUCED_IN(21);
@@ -167,10 +195,14 @@
 
 /**
  * Get the PSSH info if present.
+ *
+ * Available since API level 21.
  */
 PsshInfo* AMediaExtractor_getPsshInfo(AMediaExtractor*) __INTRODUCED_IN(21);
 
-
+/**
+ * Available since API level 21.
+ */
 AMediaCodecCryptoInfo *AMediaExtractor_getSampleCryptoInfo(AMediaExtractor *) __INTRODUCED_IN(21);
 
 enum {
@@ -186,6 +218,8 @@
  *
  * This function will always return a format; however, the format could be empty
  * (no key-value pairs) if the media container does not provide format information.
+ *
+ * Available since API level 28.
  */
 AMediaFormat* AMediaExtractor_getFileFormat(AMediaExtractor*) __INTRODUCED_IN(28);
 
@@ -198,6 +232,7 @@
  * uint8_t *buf = new uint8_t[sampleSize];
  * AMediaExtractor_readSampleData(ex, buf, sampleSize);
  *
+ * Available since API level 28.
  */
 ssize_t AMediaExtractor_getSampleSize(AMediaExtractor*) __INTRODUCED_IN(28);
 
@@ -211,6 +246,8 @@
  * Returns -1 when the extractor is not reading from a network data source, or when the
  * cached duration cannot be calculated (bitrate, duration, and file size information
  * not available).
+ *
+ * Available since API level 28.
  */
 int64_t AMediaExtractor_getCachedDuration(AMediaExtractor *) __INTRODUCED_IN(28);
 
@@ -222,6 +259,8 @@
  * Returns AMEDIA_OK on success or AMEDIA_ERROR_* to indicate failure reason.
  * Existing key-value pairs in |fmt| would be removed if this API returns AMEDIA_OK.
  * The contents of |fmt| is undefined if this API returns AMEDIA_ERROR_*.
+ *
+ * Available since API level 28.
  */
 media_status_t AMediaExtractor_getSampleFormat(AMediaExtractor *ex,
         AMediaFormat *fmt) __INTRODUCED_IN(28);
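
Put together, the extractor is driven as a select/read/advance loop. A minimal sketch for the first track of an already-open file descriptor; the 1 MiB buffer is an arbitrary choice, and real code would size it with AMediaExtractor_getSampleSize:

    #include <media/NdkMediaExtractor.h>
    #include <media/NdkMediaFormat.h>
    #include <cstdint>
    #include <vector>

    static void dumpSamples(int fd, off64_t length) {
        AMediaExtractor *ex = AMediaExtractor_new();
        if (AMediaExtractor_setDataSourceFd(ex, fd, /*offset=*/0, length) != AMEDIA_OK ||
                AMediaExtractor_getTrackCount(ex) == 0) {
            AMediaExtractor_delete(ex);
            return;
        }
        AMediaFormat *format = AMediaExtractor_getTrackFormat(ex, 0);
        // ... read keys of interest, e.g. AMEDIAFORMAT_KEY_MIME ...
        AMediaFormat_delete(format);           // caller owns the returned format

        AMediaExtractor_selectTrack(ex, 0);
        std::vector<uint8_t> buf(1 << 20);
        do {
            ssize_t n = AMediaExtractor_readSampleData(ex, buf.data(), buf.size());
            int64_t ptsUs = AMediaExtractor_getSampleTime(ex);
            if (n < 0 || ptsUs < 0) break;     // no more samples
            // ... feed |n| bytes at |ptsUs| to a decoder ...
        } while (AMediaExtractor_advance(ex));
        AMediaExtractor_delete(ex);
    }
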
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index fd43f36..41c2378 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -48,40 +48,78 @@
 
 #if __ANDROID_API__ >= 21
 
+/**
+ * Available since API level 21.
+ */
 AMediaFormat *AMediaFormat_new() __INTRODUCED_IN(21);
+
+/**
+ * Available since API level 21.
+ */
 media_status_t AMediaFormat_delete(AMediaFormat*) __INTRODUCED_IN(21);
 
 /**
  * Human readable representation of the format. The returned string is owned by the format,
  * and remains valid until the next call to toString, or until the format is deleted.
+ *
+ * Available since API level 21.
  */
 const char* AMediaFormat_toString(AMediaFormat*) __INTRODUCED_IN(21);
 
+/**
+ * Available since API level 21.
+ */
 bool AMediaFormat_getInt32(AMediaFormat*, const char *name, int32_t *out) __INTRODUCED_IN(21);
+/**
+ * Available since API level 21.
+ */
 bool AMediaFormat_getInt64(AMediaFormat*, const char *name, int64_t *out) __INTRODUCED_IN(21);
+/**
+ * Available since API level 21.
+ */
 bool AMediaFormat_getFloat(AMediaFormat*, const char *name, float *out) __INTRODUCED_IN(21);
+/**
+ * Available since API level 21.
+ */
 bool AMediaFormat_getSize(AMediaFormat*, const char *name, size_t *out) __INTRODUCED_IN(21);
 /**
  * The returned data is owned by the format and remains valid as long as the named entry
  * is part of the format.
+ *
+ * Available since API level 21.
  */
 bool AMediaFormat_getBuffer(AMediaFormat*, const char *name, void** data, size_t *size) __INTRODUCED_IN(21);
 /**
  * The returned string is owned by the format, and remains valid until the next call to getString,
  * or until the format is deleted.
+ *
+ * Available since API level 21.
  */
 bool AMediaFormat_getString(AMediaFormat*, const char *name, const char **out) __INTRODUCED_IN(21);
 
 
+/**
+ * Available since API level 21.
+ */
 void AMediaFormat_setInt32(AMediaFormat*, const char* name, int32_t value) __INTRODUCED_IN(21);
+/**
+ * Available since API level 21.
+ */
 void AMediaFormat_setInt64(AMediaFormat*, const char* name, int64_t value) __INTRODUCED_IN(21);
+/**
+ * Available since API level 21.
+ */
 void AMediaFormat_setFloat(AMediaFormat*, const char* name, float value) __INTRODUCED_IN(21);
 /**
  * The provided string is copied into the format.
+ *
+ * Available since API level 21.
  */
 void AMediaFormat_setString(AMediaFormat*, const char* name, const char* value) __INTRODUCED_IN(21);
 /**
  * The provided data is copied into the format.
+ *
+ * Available since API level 21.
  */
 void AMediaFormat_setBuffer(AMediaFormat*, const char* name, const void* data, size_t size) __INTRODUCED_IN(21);
 
@@ -155,24 +193,43 @@
 #endif /* __ANDROID_API__ >= 21 */
 
 #if __ANDROID_API__ >= 28
+/**
+ * Available since API level 28.
+ */
 bool AMediaFormat_getDouble(AMediaFormat*, const char *name, double *out) __INTRODUCED_IN(28);
+/**
+ * Available since API level 28.
+ */
 bool AMediaFormat_getRect(AMediaFormat*, const char *name,
         int32_t *left, int32_t *top, int32_t *right, int32_t *bottom) __INTRODUCED_IN(28);
 
+/**
+ * Available since API level 28.
+ */
 void AMediaFormat_setDouble(AMediaFormat*, const char* name, double value) __INTRODUCED_IN(28);
+/**
+ * Available since API level 28.
+ */
 void AMediaFormat_setSize(AMediaFormat*, const char* name, size_t value) __INTRODUCED_IN(28);
+/**
+ * Available since API level 28.
+ */
 void AMediaFormat_setRect(AMediaFormat*, const char* name,
         int32_t left, int32_t top, int32_t right, int32_t bottom) __INTRODUCED_IN(28);
 #endif /* __ANDROID_API__ >= 28 */
 
 #if __ANDROID_API__ >= 29
 /**
- * remove all key/value pairs from the given AMediaFormat
+ * Remove all key/value pairs from the given AMediaFormat.
+ *
+ * Available since API level 29.
  */
 void AMediaFormat_clear(AMediaFormat*) __INTRODUCED_IN(29);
 
 /**
- * copy one AMediaFormat to another
+ * Copy one AMediaFormat to another.
+ *
+ * Available since API level 29.
  */
 media_status_t AMediaFormat_copy(AMediaFormat *to, AMediaFormat *from) __INTRODUCED_IN(29);
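
A small example of the getter/setter pairs documented above, building a decoder input format; the AMEDIAFORMAT_KEY_* constants are the standard keys from this header:

    #include <media/NdkMediaFormat.h>

    static AMediaFormat *makeVideoFormat() {
        AMediaFormat *fmt = AMediaFormat_new();
        AMediaFormat_setString(fmt, AMEDIAFORMAT_KEY_MIME, "video/avc");
        AMediaFormat_setInt32(fmt, AMEDIAFORMAT_KEY_WIDTH, 1920);
        AMediaFormat_setInt32(fmt, AMEDIAFORMAT_KEY_HEIGHT, 1080);
        return fmt;                 // release with AMediaFormat_delete() when done
    }
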
 
diff --git a/media/ndk/include/media/NdkMediaMuxer.h b/media/ndk/include/media/NdkMediaMuxer.h
index 7393867..3fdeea4 100644
--- a/media/ndk/include/media/NdkMediaMuxer.h
+++ b/media/ndk/include/media/NdkMediaMuxer.h
@@ -56,12 +56,16 @@
 #if __ANDROID_API__ >= 21
 
 /**
- * Create new media muxer
+ * Create new media muxer.
+ *
+ * Available since API level 21.
  */
 AMediaMuxer* AMediaMuxer_new(int fd, OutputFormat format) __INTRODUCED_IN(21);
 
 /**
- * Delete a previously created media muxer
+ * Delete a previously created media muxer.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaMuxer_delete(AMediaMuxer*) __INTRODUCED_IN(21);
 
@@ -75,6 +79,8 @@
  * Both values are specified in degrees.
  * Latitude must be in the range [-90, 90].
  * Longitude must be in the range [-180, 180].
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaMuxer_setLocation(AMediaMuxer*,
         float latitude, float longitude) __INTRODUCED_IN(21);
@@ -90,6 +96,8 @@
  * during playback.
  * The angle is specified in degrees, clockwise.
  * The supported angles are 0, 90, 180, and 270 degrees.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaMuxer_setOrientationHint(AMediaMuxer*, int degrees) __INTRODUCED_IN(21);
 
@@ -97,18 +105,24 @@
  * Adds a track with the specified format.
  * Returns the index of the new track or a negative value in case of failure,
  * which can be interpreted as a media_status_t.
+ *
+ * Available since API level 21.
  */
 ssize_t AMediaMuxer_addTrack(AMediaMuxer*, const AMediaFormat* format) __INTRODUCED_IN(21);
 
 /**
  * Start the muxer. Should be called after AMediaMuxer_addTrack and
  * before AMediaMuxer_writeSampleData.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaMuxer_start(AMediaMuxer*) __INTRODUCED_IN(21);
 
 /**
  * Stops the muxer.
  * Once the muxer stops, it can not be restarted.
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaMuxer_stop(AMediaMuxer*) __INTRODUCED_IN(21);
 
@@ -118,6 +132,8 @@
  * the right tracks. Also, it needs to make sure the samples for each track
  * are written in chronological order (e.g. in the order they are provided
  * by the encoder.)
+ *
+ * Available since API level 21.
  */
 media_status_t AMediaMuxer_writeSampleData(AMediaMuxer *muxer,
         size_t trackIdx, const uint8_t *data,
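
The muxer calls above follow a fixed order: add tracks, start, write samples, stop. A hedged sketch that writes one encoded sample; in practice the format and sample bytes come from an encoder's output:

    #include <media/NdkMediaMuxer.h>
    #include <media/NdkMediaCodec.h>

    static void writeOneSample(int fd, const AMediaFormat *format,
                               const uint8_t *sample, size_t sampleSize, int64_t ptsUs) {
        AMediaMuxer *muxer = AMediaMuxer_new(fd, AMEDIAMUXER_OUTPUT_FORMAT_MPEG_4);
        ssize_t track = AMediaMuxer_addTrack(muxer, format);   // must precede start()
        AMediaMuxer_start(muxer);

        AMediaCodecBufferInfo info;
        info.offset = 0;
        info.size = (int32_t)sampleSize;
        info.presentationTimeUs = ptsUs;
        info.flags = 0;                                        // e.g. sync-frame flag if applicable
        AMediaMuxer_writeSampleData(muxer, (size_t)track, sample, &info);

        AMediaMuxer_stop(muxer);
        AMediaMuxer_delete(muxer);
    }
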
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 990f318..971ae9f 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -61,12 +61,12 @@
 
 static bool checkRecordingInternal(const String16& opPackageName, pid_t pid,
         uid_t uid, bool start) {
-    // Okay to not track in app ops as audio server is us and if
+    // Okay to not track in app ops as audio server or media server is us and if
     // device is rooted security model is considered compromised.
     // system_server loses its RECORD_AUDIO permission when a secondary
     // user is active, but it is a core system service so let it through.
     // TODO(b/141210120): UserManager.DISALLOW_RECORD_AUDIO should not affect system user 0
-    if (isAudioServerOrSystemServerOrRootUid(uid)) return true;
+    if (isAudioServerOrMediaServerOrSystemServerOrRootUid(uid)) return true;
 
     // We specify a pid and uid here as mediaserver (aka MediaRecorder or StageFrightRecorder)
     // may open a record track on behalf of a client.  Note that pid may be a tid.
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
index e1089d5..2595761 100644
--- a/media/utils/include/mediautils/ServiceUtilities.h
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -58,10 +58,11 @@
     return multiuser_get_app_id(uid) == AID_SYSTEM || uid == AID_AUDIOSERVER;
 }
 
-// used for calls that should come from system_server or audio_server and
+// used for calls that should come from system_server, audio_server or media server and
 // include AID_ROOT for command-line tests.
-static inline bool isAudioServerOrSystemServerOrRootUid(uid_t uid) {
-    return multiuser_get_app_id(uid) == AID_SYSTEM || uid == AID_AUDIOSERVER || uid == AID_ROOT;
+static inline bool isAudioServerOrMediaServerOrSystemServerOrRootUid(uid_t uid) {
+    return multiuser_get_app_id(uid) == AID_SYSTEM || uid == AID_AUDIOSERVER
+              || uid == AID_MEDIA || uid == AID_ROOT;
 }
 
 // Mediaserver may forward the client PID and UID as part of a binder interface call;
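As a usage sketch (the caller below is hypothetical; only the helper is real), the renamed check is meant to be used exactly like its predecessor, now also letting mediaserver through:

    // Hypothetical caller: bypass app-op tracking for trusted native services.
    static bool shouldTrackAppOp(uid_t uid) {
        return !isAudioServerOrMediaServerOrSystemServerOrRootUid(uid);
    }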
diff --git a/services/audioflinger/Android.bp b/services/audioflinger/Android.bp
index d50a556..de8c7e7 100644
--- a/services/audioflinger/Android.bp
+++ b/services/audioflinger/Android.bp
@@ -34,6 +34,7 @@
     ],
 
     shared_libs: [
+        "libaudiofoundation",
         "libaudiohal",
         "libaudioprocessing",
         "libaudiospdif",
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 65261da..9d80425 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -381,7 +381,7 @@
 
 AudioHwDevice* AudioFlinger::findSuitableHwDev_l(
         audio_module_handle_t module,
-        audio_devices_t devices)
+        audio_devices_t deviceType)
 {
     // if module is 0, the request comes from an old policy manager and we should load
     // well known modules
@@ -396,7 +396,7 @@
             sp<DeviceHalInterface> dev = audioHwDevice->hwDevice();
             uint32_t supportedDevices;
             if (dev->getSupportedDevices(&supportedDevices) == OK &&
-                    (supportedDevices & devices) == devices) {
+                    (supportedDevices & deviceType) == deviceType) {
                 return audioHwDevice;
             }
         }
@@ -1573,7 +1573,7 @@
     proposed.format = format;
 
     sp<DeviceHalInterface> dev = mPrimaryHardwareDev->hwDevice();
-    size_t frames;
+    size_t frames = 0;
     for (;;) {
         // Note: config is currently a const parameter for get_input_buffer_size()
         // but we use a copy from proposed in case config changes from the call.
@@ -2304,13 +2304,13 @@
 
 
 sp<AudioFlinger::ThreadBase> AudioFlinger::openOutput_l(audio_module_handle_t module,
-                                                            audio_io_handle_t *output,
-                                                            audio_config_t *config,
-                                                            audio_devices_t devices,
-                                                            const String8& address,
-                                                            audio_output_flags_t flags)
+                                                        audio_io_handle_t *output,
+                                                        audio_config_t *config,
+                                                        audio_devices_t deviceType,
+                                                        const String8& address,
+                                                        audio_output_flags_t flags)
 {
-    AudioHwDevice *outHwDev = findSuitableHwDev_l(module, devices);
+    AudioHwDevice *outHwDev = findSuitableHwDev_l(module, deviceType);
     if (outHwDev == NULL) {
         return 0;
     }
@@ -2351,7 +2351,7 @@
     status_t status = outHwDev->openOutputStream(
             &outputStream,
             *output,
-            devices,
+            deviceType,
             flags,
             config,
             address.string());
@@ -2362,7 +2362,7 @@
         if (flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) {
             sp<MmapPlaybackThread> thread =
                     new MmapPlaybackThread(this, *output, outHwDev, outputStream,
-                                          devices, AUDIO_DEVICE_NONE, mSystemReady);
+                                           deviceType, AUDIO_DEVICE_NONE, mSystemReady);
             mMmapThreads.add(*output, thread);
             ALOGV("openOutput_l() created mmap playback thread: ID %d thread %p",
                   *output, thread.get());
@@ -2370,17 +2370,18 @@
         } else {
             sp<PlaybackThread> thread;
             if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
-                thread = new OffloadThread(this, outputStream, *output, devices, mSystemReady);
+                thread = new OffloadThread(this, outputStream, *output, deviceType, mSystemReady);
                 ALOGV("openOutput_l() created offload output: ID %d thread %p",
                       *output, thread.get());
             } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
                     || !isValidPcmSinkFormat(config->format)
                     || !isValidPcmSinkChannelMask(config->channel_mask)) {
-                thread = new DirectOutputThread(this, outputStream, *output, devices, mSystemReady);
+                thread = new DirectOutputThread(
+                        this, outputStream, *output, deviceType, mSystemReady);
                 ALOGV("openOutput_l() created direct output: ID %d thread %p",
                       *output, thread.get());
             } else {
-                thread = new MixerThread(this, outputStream, *output, devices, mSystemReady);
+                thread = new MixerThread(this, outputStream, *output, deviceType, mSystemReady);
                 ALOGV("openOutput_l() created mixer output: ID %d thread %p",
                       *output, thread.get());
             }
@@ -2396,27 +2397,29 @@
 status_t AudioFlinger::openOutput(audio_module_handle_t module,
                                   audio_io_handle_t *output,
                                   audio_config_t *config,
-                                  audio_devices_t *devices,
-                                  const String8& address,
+                                  const sp<DeviceDescriptorBase>& device,
                                   uint32_t *latencyMs,
                                   audio_output_flags_t flags)
 {
-    ALOGI("openOutput() this %p, module %d Device %#x, SamplingRate %d, Format %#08x, "
+    ALOGI("openOutput() this %p, module %d Device %s, SamplingRate %d, Format %#08x, "
               "Channels %#x, flags %#x",
               this, module,
-              (devices != NULL) ? *devices : 0,
+              device->toString().c_str(),
               config->sample_rate,
               config->format,
               config->channel_mask,
               flags);
 
-    if (devices == NULL || *devices == AUDIO_DEVICE_NONE) {
+    audio_devices_t deviceType = device->type();
+    const String8 address = String8(device->address().c_str());
+
+    if (deviceType == AUDIO_DEVICE_NONE) {
         return BAD_VALUE;
     }
 
     Mutex::Autolock _l(mLock);
 
-    sp<ThreadBase> thread = openOutput_l(module, output, config, *devices, address, flags);
+    sp<ThreadBase> thread = openOutput_l(module, output, config, deviceType, address, flags);
     if (thread != 0) {
         if ((flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) == 0) {
             PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
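A sketch of the caller-side contract after this refactor; the wrapper function and the direct AudioFlinger reference are illustrative (real callers go through the client interface), and only openOutput() plus the descriptor accessors shown in the diff are assumed:

    // Illustrative only: open an output for a device descriptor instead of a
    // (type bitmask, address) pair; openOutput_l() derives both from the descriptor.
    status_t openForDevice(AudioFlinger& af, audio_module_handle_t module,
                           const sp<DeviceDescriptorBase>& device,
                           audio_config_t *config, audio_output_flags_t flags) {
        audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
        uint32_t latencyMs = 0;
        // openOutput() rejects descriptors whose type() is AUDIO_DEVICE_NONE with BAD_VALUE.
        return af.openOutput(module, &output, config, device, &latencyMs, flags);
    }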
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index d2de5fe..65be06d 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -175,8 +175,7 @@
     virtual status_t openOutput(audio_module_handle_t module,
                                 audio_io_handle_t *output,
                                 audio_config_t *config,
-                                audio_devices_t *devices,
-                                const String8& address,
+                                const sp<DeviceDescriptorBase>& device,
                                 uint32_t *latencyMs,
                                 audio_output_flags_t flags);
 
@@ -372,7 +371,7 @@
     virtual     void        onFirstRef();
 
     AudioHwDevice*          findSuitableHwDev_l(audio_module_handle_t module,
-                                                audio_devices_t devices);
+                                                audio_devices_t deviceType);
 
     // Set kEnableExtendedChannels to true to enable greater than stereo output
     // for the MixerThread and device sink.  Number of channels allowed is
@@ -678,11 +677,11 @@
                                            audio_devices_t outputDevice,
                                            const String8& outputDeviceAddress);
               sp<ThreadBase> openOutput_l(audio_module_handle_t module,
-                                              audio_io_handle_t *output,
-                                              audio_config_t *config,
-                                              audio_devices_t devices,
-                                              const String8& address,
-                                              audio_output_flags_t flags);
+                                          audio_io_handle_t *output,
+                                          audio_config_t *config,
+                                          audio_devices_t deviceType,
+                                          const String8& address,
+                                          audio_output_flags_t flags);
 
               void closeOutputFinish(const sp<PlaybackThread>& thread);
               void closeInputFinish(const sp<RecordThread>& thread);
diff --git a/services/audioflinger/AudioHwDevice.cpp b/services/audioflinger/AudioHwDevice.cpp
index b109d06..dda164c 100644
--- a/services/audioflinger/AudioHwDevice.cpp
+++ b/services/audioflinger/AudioHwDevice.cpp
@@ -34,7 +34,7 @@
 status_t AudioHwDevice::openOutputStream(
         AudioStreamOut **ppStreamOut,
         audio_io_handle_t handle,
-        audio_devices_t devices,
+        audio_devices_t deviceType,
         audio_output_flags_t flags,
         struct audio_config *config,
         const char *address)
@@ -50,7 +50,7 @@
             config->sample_rate,
             config->format,
             config->channel_mask);
-    status_t status = outputStream->open(handle, devices, config, address);
+    status_t status = outputStream->open(handle, deviceType, config, address);
 
     if (status != NO_ERROR) {
         delete outputStream;
@@ -75,7 +75,7 @@
         if (wrapperNeeded) {
             if (SPDIFEncoder::isFormatSupported(originalConfig.format)) {
                 outputStream = new SpdifStreamOut(this, flags, originalConfig.format);
-                status = outputStream->open(handle, devices, &originalConfig, address);
+                status = outputStream->open(handle, deviceType, &originalConfig, address);
                 if (status != NO_ERROR) {
                     ALOGE("ERROR - openOutputStream(), SPDIF open returned %d",
                         status);
diff --git a/services/audioflinger/AudioHwDevice.h b/services/audioflinger/AudioHwDevice.h
index d4299b0..6709d17 100644
--- a/services/audioflinger/AudioHwDevice.h
+++ b/services/audioflinger/AudioHwDevice.h
@@ -76,7 +76,7 @@
     status_t openOutputStream(
             AudioStreamOut **ppStreamOut,
             audio_io_handle_t handle,
-            audio_devices_t devices,
+            audio_devices_t deviceType,
             audio_output_flags_t flags,
             struct audio_config *config,
             const char *address);
diff --git a/services/audioflinger/AudioStreamOut.cpp b/services/audioflinger/AudioStreamOut.cpp
index a60a5f2..d13cb8f 100644
--- a/services/audioflinger/AudioStreamOut.cpp
+++ b/services/audioflinger/AudioStreamOut.cpp
@@ -118,7 +118,7 @@
 
 status_t AudioStreamOut::open(
         audio_io_handle_t handle,
-        audio_devices_t devices,
+        audio_devices_t deviceType,
         struct audio_config *config,
         const char *address)
 {
@@ -130,7 +130,7 @@
 
     int status = hwDev()->openOutputStream(
             handle,
-            devices,
+            deviceType,
             customFlags,
             config,
             address,
@@ -152,7 +152,7 @@
 
         status = hwDev()->openOutputStream(
                 handle,
-                devices,
+                deviceType,
                 customFlags,
                 &customConfig,
                 address,
diff --git a/services/audioflinger/AudioStreamOut.h b/services/audioflinger/AudioStreamOut.h
index b16b1af..16fbcf2 100644
--- a/services/audioflinger/AudioStreamOut.h
+++ b/services/audioflinger/AudioStreamOut.h
@@ -47,7 +47,7 @@
 
     virtual status_t open(
             audio_io_handle_t handle,
-            audio_devices_t devices,
+            audio_devices_t deviceType,
             struct audio_config *config,
             const char *address);
 
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index d5257bd..d87239d 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -25,7 +25,8 @@
     ~OpRecordAudioMonitor() override;
     bool hasOpRecordAudio() const;
 
-    static sp<OpRecordAudioMonitor> createIfNeeded(uid_t uid, const String16& opPackageName);
+    static sp<OpRecordAudioMonitor> createIfNeeded(
+            uid_t uid, const audio_attributes_t& attr, const String16& opPackageName);
 
 private:
     OpRecordAudioMonitor(uid_t uid, const String16& opPackageName);
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 41a71d5..23c2209 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -1898,7 +1898,7 @@
 // static
 sp<AudioFlinger::RecordThread::OpRecordAudioMonitor>
 AudioFlinger::RecordThread::OpRecordAudioMonitor::createIfNeeded(
-            uid_t uid, const String16& opPackageName)
+            uid_t uid, const audio_attributes_t& attr, const String16& opPackageName)
 {
     if (isServiceUid(uid)) {
         ALOGV("not silencing record for service uid:%d pack:%s",
@@ -1906,6 +1906,13 @@
         return nullptr;
     }
 
+    // Capturing from FM TUNER output is not controlled by OP_RECORD_AUDIO because
+    // it does not affect users' privacy the way capturing from an actual microphone does.
+    if (attr.source == AUDIO_SOURCE_FM_TUNER) {
+        ALOGV("not muting FM TUNER capture for uid %d", uid);
+        return nullptr;
+    }
+
     if (opPackageName.size() == 0) {
         Vector<String16> packages;
         // no package name, happens with SL ES clients
@@ -2071,7 +2078,7 @@
         mRecordBufferConverter(NULL),
         mFlags(flags),
         mSilenced(false),
-        mOpRecordAudioMonitor(OpRecordAudioMonitor::createIfNeeded(uid, opPackageName))
+        mOpRecordAudioMonitor(OpRecordAudioMonitor::createIfNeeded(uid, attr, opPackageName))
 {
     if (mCblk == NULL) {
         return;
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 35126ad..0d3e614 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -19,6 +19,7 @@
 
 #include <media/AudioSystem.h>
 #include <media/AudioPolicy.h>
+#include <media/DeviceDescriptorBase.h>
 #include <utils/String8.h>
 
 namespace android {
@@ -296,8 +297,7 @@
     virtual status_t openOutput(audio_module_handle_t module,
                                 audio_io_handle_t *output,
                                 audio_config_t *config,
-                                audio_devices_t *devices,
-                                const String8& address,
+                                const sp<DeviceDescriptorBase>& device,
                                 uint32_t *latencyMs,
                                 audio_output_flags_t flags) = 0;
     // creates a special output that is duplicated to the two outputs passed as arguments. The duplication is performed by
diff --git a/services/audiopolicy/common/include/Volume.h b/services/audiopolicy/common/include/Volume.h
index c3a02f9..7c8ce83 100644
--- a/services/audiopolicy/common/include/Volume.h
+++ b/services/audiopolicy/common/include/Volume.h
@@ -22,6 +22,8 @@
 #include <utils/Log.h>
 #include <math.h>
 
+#include "policy.h"
+
 namespace android {
 
 /**
@@ -85,43 +87,14 @@
      */
     static audio_devices_t getDeviceForVolume(const android::DeviceTypeSet& deviceTypes)
     {
-        audio_devices_t deviceType = AUDIO_DEVICE_NONE;
         if (deviceTypes.empty()) {
             // this happens when forcing a route update and no track is active on an output.
             // In this case the returned category is not important.
-            deviceType = AUDIO_DEVICE_OUT_SPEAKER;
-        } else if (deviceTypes.size() > 1) {
-            // Multiple device selection is either:
-            //  - speaker + one other device: give priority to speaker in this case.
-            //  - one A2DP device + another device: happens with duplicated output. In this case
-            // retain the device on the A2DP output as the other must not correspond to an active
-            // selection if not the speaker.
-            //  - HDMI-CEC system audio mode only output: give priority to available item in order.
-            if (deviceTypes.count(AUDIO_DEVICE_OUT_SPEAKER) != 0) {
-                deviceType = AUDIO_DEVICE_OUT_SPEAKER;
-            } else if (deviceTypes.count(AUDIO_DEVICE_OUT_SPEAKER_SAFE) != 0) {
-                deviceType = AUDIO_DEVICE_OUT_SPEAKER_SAFE;
-            } else if (deviceTypes.count(AUDIO_DEVICE_OUT_HDMI_ARC) != 0) {
-                deviceType = AUDIO_DEVICE_OUT_HDMI_ARC;
-            } else if (deviceTypes.count(AUDIO_DEVICE_OUT_AUX_LINE) != 0) {
-                deviceType = AUDIO_DEVICE_OUT_AUX_LINE;
-            } else if (deviceTypes.count(AUDIO_DEVICE_OUT_SPDIF) != 0) {
-                deviceType = AUDIO_DEVICE_OUT_SPDIF;
-            } else {
-                std::vector<audio_devices_t> a2dpDevices = android::Intersection(
-                        deviceTypes, android::getAudioDeviceOutAllA2dpSet());
-                if (a2dpDevices.size() > 1) {
-                    ALOGW("getDeviceForVolume() invalid device combination: %s",
-                          android::dumpDeviceTypes(deviceTypes).c_str());
-                }
-                if (!a2dpDevices.empty()) {
-                    deviceType = a2dpDevices[0];
-                }
-            }
-        } else {
-            deviceType = *(deviceTypes.begin());
+            return AUDIO_DEVICE_OUT_SPEAKER;
         }
 
+        audio_devices_t deviceType = apm_extract_one_audio_device(deviceTypes);
+
         /*SPEAKER_SAFE is an alias of SPEAKER for purposes of volume control*/
         if (deviceType == AUDIO_DEVICE_OUT_SPEAKER_SAFE) {
             deviceType = AUDIO_DEVICE_OUT_SPEAKER;
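A hedged sketch of the behaviour the simplified helper preserves, assuming getDeviceForVolume() stays a static member of the Volume class and DeviceTypeSet is the std::set<audio_devices_t> alias from AudioContainers.h:

    // Illustrative expectations only.
    #include <cassert>

    void volumeDeviceExamples() {
        android::DeviceTypeSet speakerAndA2dp =
                {AUDIO_DEVICE_OUT_SPEAKER, AUDIO_DEVICE_OUT_BLUETOOTH_A2DP};
        android::DeviceTypeSet speakerSafeOnly = {AUDIO_DEVICE_OUT_SPEAKER_SAFE};

        // Speaker wins over any other device in a multi-device selection.
        assert(Volume::getDeviceForVolume(speakerAndA2dp) == AUDIO_DEVICE_OUT_SPEAKER);
        // SPEAKER_SAFE is treated as an alias of SPEAKER for volume purposes.
        assert(Volume::getDeviceForVolume(speakerSafeOnly) == AUDIO_DEVICE_OUT_SPEAKER);
    }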
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index 630efc1..0537365 100644
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -19,6 +19,8 @@
 #include <system/audio.h>
 #include <vector>
 
+#include <media/AudioContainers.h>
+
 namespace android {
 
 using StreamTypeVector = std::vector<audio_stream_type_t>;
@@ -199,3 +201,43 @@
 {
     return hasStream(streams, AUDIO_STREAM_VOICE_CALL);
 }
+
+/**
+ * @brief extract one device relevant from multiple device selection
+ * @param deviceTypes collection of audio device type
+ * @return the device type that is selected
+ */
+static inline audio_devices_t apm_extract_one_audio_device(
+        const android::DeviceTypeSet& deviceTypes) {
+    if (deviceTypes.empty()) {
+        return AUDIO_DEVICE_NONE;
+    } else if (deviceTypes.size() == 1) {
+        return *(deviceTypes.begin());
+    } else {
+        // Multiple device selection is either:
+        //  - speaker + one other device: give priority to speaker in this case.
+        //  - one A2DP device + another device: happens with duplicated output. In this case
+        // retain the device on the A2DP output as the other must not correspond to an active
+        // selection if not the speaker.
+        //  - HDMI-CEC system audio mode only output: give priority to available item in order.
+        if (deviceTypes.count(AUDIO_DEVICE_OUT_SPEAKER) != 0) {
+            return AUDIO_DEVICE_OUT_SPEAKER;
+        } else if (deviceTypes.count(AUDIO_DEVICE_OUT_SPEAKER_SAFE) != 0) {
+            return AUDIO_DEVICE_OUT_SPEAKER_SAFE;
+        } else if (deviceTypes.count(AUDIO_DEVICE_OUT_HDMI_ARC) != 0) {
+            return AUDIO_DEVICE_OUT_HDMI_ARC;
+        } else if (deviceTypes.count(AUDIO_DEVICE_OUT_AUX_LINE) != 0) {
+            return AUDIO_DEVICE_OUT_AUX_LINE;
+        } else if (deviceTypes.count(AUDIO_DEVICE_OUT_SPDIF) != 0) {
+            return AUDIO_DEVICE_OUT_SPDIF;
+        } else {
+            std::vector<audio_devices_t> a2dpDevices = android::Intersection(
+                    deviceTypes, android::getAudioDeviceOutAllA2dpSet());
+            if (a2dpDevices.empty() || a2dpDevices.size() > 1) {
+                ALOGW("%s invalid device combination: %s",
+                      __func__, android::dumpDeviceTypes(deviceTypes).c_str());
+            }
+            return a2dpDevices.empty() ? AUDIO_DEVICE_NONE : a2dpDevices[0];
+        }
+    }
+}
\ No newline at end of file
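A short sketch of the priority order this helper encodes; the device sets are illustrative:

    // Illustrative only: speaker outranks other outputs, a singleton set is returned
    // as-is, and an empty set maps to AUDIO_DEVICE_NONE.
    void extractExamples() {
        android::DeviceTypeSet hdmiAndSpeaker =
                {AUDIO_DEVICE_OUT_HDMI_ARC, AUDIO_DEVICE_OUT_SPEAKER};
        android::DeviceTypeSet headsetOnly = {AUDIO_DEVICE_OUT_WIRED_HEADSET};

        audio_devices_t a = apm_extract_one_audio_device(hdmiAndSpeaker); // AUDIO_DEVICE_OUT_SPEAKER
        audio_devices_t b = apm_extract_one_audio_device(headsetOnly);    // AUDIO_DEVICE_OUT_WIRED_HEADSET
        audio_devices_t c = apm_extract_one_audio_device({});             // AUDIO_DEVICE_NONE
        (void)a; (void)b; (void)c;
    }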
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 158215d..7faf90e 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -130,6 +130,14 @@
     DeviceVector getFirstDevicesFromTypes(std::vector<audio_devices_t> orderedTypes) const;
     sp<DeviceDescriptor> getFirstExistingDevice(std::vector<audio_devices_t> orderedTypes) const;
 
+    // Return the device descriptor that should be used to open an input/output stream.
+    // A null pointer will be returned if
+    //     1) this collection is empty
+    //     2) the device descriptors are not of the same category (input or output)
+    //     3) there is more than one device type for the input case
+    //     4) the combination of all devices is invalid for selection
+    sp<DeviceDescriptor> getDeviceForOpening() const;
+
     // If there are devices with the given type and the devices to add is not empty,
     // remove all the devices with the given type and add all the devices to add.
     void replaceDevicesByType(audio_devices_t typeToRemove, const DeviceVector &devicesToAdd);
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 4ff69ee..dd51658 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -467,8 +467,11 @@
                                        audio_io_handle_t *output)
 {
     mDevices = devices;
-    const String8& address = devices.getFirstValidAddress();
-    DeviceTypeSet deviceTypes = devices.types();
+    sp<DeviceDescriptor> device = devices.getDeviceForOpening();
+    LOG_ALWAYS_FATAL_IF(device == nullptr,
+                        "%s failed to get device descriptor for opening "
+                        "with the requested devices, all device types: %s",
+                        __func__, dumpDeviceTypes(devices.types()).c_str());
 
     audio_config_t lConfig;
     if (config == nullptr) {
@@ -500,25 +503,19 @@
     ALOGV("opening output for device %s profile %p name %s",
           mDevices.toString().c_str(), mProfile.get(), mProfile->getName().c_str());
 
-    // FIXME: Stop using device types as bit mask when the interface updated.
-    audio_devices_t deviceType = deviceTypesToBitMask(deviceTypes);
     status_t status = mClientInterface->openOutput(mProfile->getModuleHandle(),
                                                    output,
                                                    &lConfig,
-                                                   &deviceType,
-                                                   address,
+                                                   device,
                                                    &mLatency,
                                                    mFlags);
-    deviceTypes = deviceTypesFromBitMask(deviceType);
-    LOG_ALWAYS_FATAL_IF(mDevices.types() != deviceTypes,
-                        "%s openOutput returned device %s when given device %s",
-                        __FUNCTION__, dumpDeviceTypes(mDevices.types()).c_str(),
-                        dumpDeviceTypes(deviceTypes).c_str());
 
     if (status == NO_ERROR) {
         LOG_ALWAYS_FATAL_IF(*output == AUDIO_IO_HANDLE_NONE,
-                            "%s openOutput returned output handle %d for device %s",
-                            __FUNCTION__, *output, dumpDeviceTypes(deviceTypes).c_str());
+                            "%s openOutput returned output handle %d for device %s, "
+                            "selected device %s for opening",
+                            __FUNCTION__, *output, devices.toString().c_str(),
+                            device->toString().c_str());
         mSamplingRate = lConfig.sample_rate;
         mChannelMask = lConfig.channel_mask;
         mFormat = lConfig.format;
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 4e8c01c..0587041 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -328,6 +328,24 @@
     return device;
 }
 
+sp<DeviceDescriptor> DeviceVector::getDeviceForOpening() const
+{
+    if (isEmpty()) {
+        // Return nullptr if this collection is empty.
+        return nullptr;
+    } else if (areAllOfSameDeviceType(types(), audio_is_input_device)) {
+        // For the input case, return the device only when there is exactly one; otherwise nullptr.
+        return size() > 1 ? nullptr : *begin();
+    } else if (areAllOfSameDeviceType(types(), audio_is_output_device)) {
+        // For the output case, return the device descriptor selected according to the APM strategy.
+        audio_devices_t deviceType = apm_extract_one_audio_device(types());
+        return deviceType == AUDIO_DEVICE_NONE ? nullptr :
+                getDevice(deviceType, String8(""), AUDIO_FORMAT_DEFAULT);
+    }
+    // Return nullptr if the devices are a mix of input and output devices.
+    return nullptr;
+}
+
 void DeviceVector::replaceDevicesByType(
         audio_devices_t typeToRemove, const DeviceVector &devicesToAdd) {
     DeviceVector devicesToRemove = getDevicesFromType(typeToRemove);
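The selection rules above, restated as a sketch; makeOutputDevices()/makeInputDevices() are hypothetical helpers standing in for however the DeviceVector gets populated, and only getDeviceForOpening() itself is real:

    // Hypothetical construction helpers; expected results annotated per the implementation above.
    void openingSelectionExamples() {
        DeviceVector outs = makeOutputDevices(
                {AUDIO_DEVICE_OUT_SPEAKER, AUDIO_DEVICE_OUT_BLUETOOTH_A2DP});
        sp<DeviceDescriptor> d1 = outs.getDeviceForOpening();  // -> the SPEAKER descriptor (APM priority)

        DeviceVector ins = makeInputDevices(
                {AUDIO_DEVICE_IN_BUILTIN_MIC, AUDIO_DEVICE_IN_WIRED_HEADSET});
        sp<DeviceDescriptor> d2 = ins.getDeviceForOpening();   // -> nullptr (more than one input device)

        DeviceVector empty;
        sp<DeviceDescriptor> d3 = empty.getDeviceForOpening(); // -> nullptr (empty collection)
    }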
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index d51cc6e..6de0c80 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -39,8 +39,7 @@
 status_t AudioPolicyService::AudioPolicyClient::openOutput(audio_module_handle_t module,
                                                            audio_io_handle_t *output,
                                                            audio_config_t *config,
-                                                           audio_devices_t *devices,
-                                                           const String8& address,
+                                                           const sp<DeviceDescriptorBase>& device,
                                                            uint32_t *latencyMs,
                                                            audio_output_flags_t flags)
 {
@@ -49,7 +48,7 @@
         ALOGW("%s: could not get AudioFlinger", __func__);
         return PERMISSION_DENIED;
     }
-    return af->openOutput(module, output, config, devices, address, latencyMs, flags);
+    return af->openOutput(module, output, config, device, latencyMs, flags);
 }
 
 audio_io_handle_t AudioPolicyService::AudioPolicyClient::openDuplicateOutput(
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 389f861..875f51d 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -377,8 +377,10 @@
         pid = callingPid;
     }
 
-    // check calling permissions
-    if (!recordingAllowed(opPackageName, pid, uid)) {
+    // check calling permissions.
+    // Capturing from the FM_TUNER source is controlled by captureAudioOutputAllowed() only, as it
+    // does not affect users' privacy the way capturing from an actual microphone does.
+    if (!(recordingAllowed(opPackageName, pid, uid) || attr->source == AUDIO_SOURCE_FM_TUNER)) {
         ALOGE("%s permission denied: recording not allowed for uid %d pid %d",
                 __func__, uid, pid);
         return PERMISSION_DENIED;
@@ -388,7 +390,8 @@
     if ((attr->source == AUDIO_SOURCE_VOICE_UPLINK ||
         attr->source == AUDIO_SOURCE_VOICE_DOWNLINK ||
         attr->source == AUDIO_SOURCE_VOICE_CALL ||
-        attr->source == AUDIO_SOURCE_ECHO_REFERENCE) &&
+        attr->source == AUDIO_SOURCE_ECHO_REFERENCE ||
+        attr->source == AUDIO_SOURCE_FM_TUNER) &&
         !canCaptureOutput) {
         return PERMISSION_DENIED;
     }
@@ -494,7 +497,8 @@
     }
 
     // check calling permissions
-    if (!startRecording(client->opPackageName, client->pid, client->uid)) {
+    if (!(startRecording(client->opPackageName, client->pid, client->uid)
+            || client->attributes.source == AUDIO_SOURCE_FM_TUNER)) {
         ALOGE("%s permission denied: recording not allowed for uid %d pid %d",
                 __func__, client->uid, client->pid);
         return PERMISSION_DENIED;
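The combined effect of the two checks above, restated as a standalone sketch (this helper does not exist in the patch; it only summarizes these two gates and ignores the function's other checks):

    // FM_TUNER capture skips the record-audio permission/op but still requires
    // CAPTURE_AUDIO_OUTPUT; the voice-call and echo-reference sources keep requiring
    // both, and plain microphone sources are unchanged.
    bool captureAllowedForSource(audio_source_t source, bool recordingAllowed,
                                 bool canCaptureOutput) {
        if (source == AUDIO_SOURCE_FM_TUNER) {
            return canCaptureOutput;
        }
        bool needsOutputCapture = source == AUDIO_SOURCE_VOICE_UPLINK ||
                source == AUDIO_SOURCE_VOICE_DOWNLINK ||
                source == AUDIO_SOURCE_VOICE_CALL ||
                source == AUDIO_SOURCE_ECHO_REFERENCE;
        return recordingAllowed && (!needsOutputCapture || canCaptureOutput);
    }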
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 2319838..c8d7d0c 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -977,7 +977,8 @@
 
 void AudioPolicyService::UidPolicy::onUidStateChanged(uid_t uid,
                                                       int32_t procState,
-                                                      int64_t procStateSeq __unused) {
+                                                      int64_t procStateSeq __unused,
+                                                      int32_t capability __unused) {
     if (procState != ActivityManager::PROCESS_STATE_UNKNOWN) {
         updateUid(&mCachedUids, uid, true, procState, true);
     }
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 939df2c..17e0437 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -371,7 +371,8 @@
         void onUidActive(uid_t uid) override;
         void onUidGone(uid_t uid, bool disabled) override;
         void onUidIdle(uid_t uid, bool disabled) override;
-        void onUidStateChanged(uid_t uid, int32_t procState, int64_t procStateSeq);
+        void onUidStateChanged(uid_t uid, int32_t procState, int64_t procStateSeq,
+                int32_t capability);
 
         void addOverrideUid(uid_t uid, bool active) { updateOverrideUid(uid, active, true); }
         void removeOverrideUid(uid_t uid) { updateOverrideUid(uid, false, false); }
@@ -622,8 +623,7 @@
         virtual status_t openOutput(audio_module_handle_t module,
                                     audio_io_handle_t *output,
                                     audio_config_t *config,
-                                    audio_devices_t *devices,
-                                    const String8& address,
+                                    const sp<DeviceDescriptorBase>& device,
                                     uint32_t *latencyMs,
                                     audio_output_flags_t flags);
         // creates a special output that is duplicated to the two outputs passed as arguments. The duplication is performed by
diff --git a/services/audiopolicy/tests/AudioPolicyManagerTestClient.h b/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
index afe6f20..c2a92d7 100644
--- a/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
@@ -34,8 +34,7 @@
     status_t openOutput(audio_module_handle_t module,
                         audio_io_handle_t *output,
                         audio_config_t * /*config*/,
-                        audio_devices_t * /*devices*/,
-                        const String8 & /*address*/,
+                        const sp<DeviceDescriptorBase>& /*device*/,
                         uint32_t * /*latencyMs*/,
                         audio_output_flags_t /*flags*/) override {
         if (module >= mNextModuleHandle) {
diff --git a/services/audiopolicy/tests/AudioPolicyTestClient.h b/services/audiopolicy/tests/AudioPolicyTestClient.h
index e4c64e5..b92a2e6 100644
--- a/services/audiopolicy/tests/AudioPolicyTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyTestClient.h
@@ -31,8 +31,7 @@
     status_t openOutput(audio_module_handle_t /*module*/,
                         audio_io_handle_t* /*output*/,
                         audio_config_t* /*config*/,
-                        audio_devices_t* /*devices*/,
-                        const String8& /*address*/,
+                        const sp<DeviceDescriptorBase>& /*device*/,
                         uint32_t* /*latencyMs*/,
                         audio_output_flags_t /*flags*/) override { return NO_INIT; }
     audio_io_handle_t openDuplicateOutput(audio_io_handle_t /*output1*/,
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index a02178e..c10adbb 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -2855,7 +2855,7 @@
 }
 
 void CameraService::UidPolicy::onUidStateChanged(uid_t uid, int32_t procState,
-        int64_t /*procStateSeq*/) {
+        int64_t procStateSeq __unused, int32_t capability __unused) {
     bool procStateChange = false;
     {
         Mutex::Autolock _l(mUidLock);
@@ -3448,10 +3448,21 @@
         ALOGE("%s: Invalid camera id %s, skipping", __FUNCTION__, cameraId.string());
         return;
     }
-
+    bool supportsHAL3 = false;
+    // supportsCameraApi also holds mInterfaceMutex, so we can't call it from the
+    // HIDL onStatusChanged wrapper (we would hold mStatusListenerLock and
+    // mInterfaceMutex together, which can lead to deadlock).
+    binder::Status sRet =
+            supportsCameraApi(String16(cameraId), hardware::ICameraService::API_VERSION_2,
+                    &supportsHAL3);
+    if (!sRet.isOk()) {
+        ALOGW("%s: Failed to determine if device supports HAL3 %s, supportsCameraApi call failed",
+                __FUNCTION__, cameraId.string());
+        return;
+    }
     // Update the status for this camera state, then send the onStatusChangedCallbacks to each
     // of the listeners with both the mStatusStatus and mStatusListenerLock held
-    state->updateStatus(status, cameraId, rejectSourceStates, [this, &deviceKind]
+    state->updateStatus(status, cameraId, rejectSourceStates, [this, &deviceKind, &supportsHAL3]
             (const String8& cameraId, StatusInternal status) {
 
             if (status != StatusInternal::ENUMERATING) {
@@ -3473,9 +3484,11 @@
             Mutex::Autolock lock(mStatusListenerLock);
 
             for (auto& listener : mListenerList) {
-                if (shouldSkipStatusUpdates(deviceKind, listener->isVendorListener(),
-                        listener->getListenerPid(), listener->getListenerUid())) {
-                    ALOGV("Skipping camera discovery callback for system-only camera %s",
+                bool isVendorListener = listener->isVendorListener();
+                if (shouldSkipStatusUpdates(deviceKind, isVendorListener,
+                        listener->getListenerPid(), listener->getListenerUid()) ||
+                    (isVendorListener && !supportsHAL3)) {
+                    ALOGV("Skipping discovery callback for system-only camera/HAL1 device %s",
                             cameraId.c_str());
                     continue;
                 }
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 8765fbf..829a3ee 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -574,7 +574,8 @@
         void onUidGone(uid_t uid, bool disabled);
         void onUidActive(uid_t uid);
         void onUidIdle(uid_t uid, bool disabled);
-        void onUidStateChanged(uid_t uid, int32_t procState, int64_t procStateSeq);
+        void onUidStateChanged(uid_t uid, int32_t procState, int64_t procStateSeq,
+                int32_t capability);
 
         void addOverrideUid(uid_t uid, String16 callingPackage, bool active);
         void removeOverrideUid(uid_t uid, String16 callingPackage);
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
index e33bbad..26459f9 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
@@ -31,7 +31,6 @@
 #include <mediadrm/ICrypto.h>
 #include <media/MediaCodecBuffer.h>
 #include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/foundation/MediaDefs.h>
 #include <media/stagefright/MediaCodecConstants.h>
 
@@ -61,12 +60,13 @@
         mUseGrid(false),
         mAppSegmentStreamId(-1),
         mAppSegmentSurfaceId(-1),
-        mAppSegmentBufferAcquired(false),
         mMainImageStreamId(-1),
         mMainImageSurfaceId(-1),
         mYuvBufferAcquired(false),
         mProducerListener(new ProducerListener()),
-        mOutputBufferCounter(0),
+        mDequeuedOutputBufferCnt(0),
+        mLockedAppSegmentBufferCnt(0),
+        mCodecOutputCounter(0),
         mGridTimestampUs(0) {
 }
 
@@ -132,7 +132,7 @@
     sp<IGraphicBufferProducer> producer;
     sp<IGraphicBufferConsumer> consumer;
     BufferQueue::createBufferQueue(&producer, &consumer);
-    mAppSegmentConsumer = new CpuConsumer(consumer, 1);
+    mAppSegmentConsumer = new CpuConsumer(consumer, kMaxAcquiredAppSegment);
     mAppSegmentConsumer->setFrameAvailableListener(this);
     mAppSegmentConsumer->setName(String8("Camera3-HeicComposite-AppSegmentStream"));
     mAppSegmentSurface = new Surface(producer);
@@ -231,6 +231,8 @@
     if (bufferInfo.mError) return;
 
     mCodecOutputBufferTimestamps.push(bufferInfo.mTimestamp);
+    ALOGV("%s: [%" PRId64 "]: Adding codecOutputBufferTimestamp (%zu timestamps in total)",
+            __FUNCTION__, bufferInfo.mTimestamp, mCodecOutputBufferTimestamps.size());
 }
 
 // We need to get the settings early to handle the case where the codec output
@@ -361,6 +363,8 @@
             mCodecOutputBuffers.push_back(outputBufferInfo);
             mInputReadyCondition.signal();
         } else {
+            ALOGV("%s: Releasing output buffer: size %d flags: 0x%x ", __FUNCTION__,
+                outputBufferInfo.size, outputBufferInfo.flags);
             mCodec->releaseOutputBuffer(outputBufferInfo.index);
         }
     } else {
@@ -414,8 +418,10 @@
         mNumOutputTiles = 1;
     }
 
-    ALOGV("%s: mNumOutputTiles is %zu", __FUNCTION__, mNumOutputTiles);
     mFormat = newFormat;
+
+    ALOGV("%s: mNumOutputTiles is %zu", __FUNCTION__, mNumOutputTiles);
+    mInputReadyCondition.signal();
 }
 
 void HeicCompositeStream::onHeicCodecError() {
@@ -459,9 +465,8 @@
 
     // Cannot use SourceSurface buffer count since it could be codec's 512*512 tile
     // buffer count.
-    int maxProducerBuffers = 1;
     if ((res = native_window_set_buffer_count(
-                    anwConsumer, maxProducerBuffers + maxConsumerBuffers)) != OK) {
+                    anwConsumer, kMaxOutputSurfaceProducerCount + maxConsumerBuffers)) != OK) {
         ALOGE("%s: Unable to set buffer count for stream %d", __FUNCTION__, mMainImageStreamId);
         return res;
     }
@@ -505,6 +510,8 @@
     }
 
     if (mSettingsByFrameNumber.find(resultExtras.frameNumber) != mSettingsByFrameNumber.end()) {
+        ALOGV("%s: [%" PRId64 "]: frameNumber %" PRId64, __FUNCTION__,
+                timestamp, resultExtras.frameNumber);
         mFrameNumberMap.emplace(resultExtras.frameNumber, timestamp);
         mSettingsByTimestamp[timestamp] = mSettingsByFrameNumber[resultExtras.frameNumber];
         mSettingsByFrameNumber.erase(resultExtras.frameNumber);
@@ -520,12 +527,12 @@
         mSettingsByTimestamp.erase(it);
     }
 
-    while (!mInputAppSegmentBuffers.empty() && !mAppSegmentBufferAcquired) {
+    while (!mInputAppSegmentBuffers.empty()) {
         CpuConsumer::LockedBuffer imgBuffer;
         auto it = mInputAppSegmentBuffers.begin();
         auto res = mAppSegmentConsumer->lockNextBuffer(&imgBuffer);
         if (res == NOT_ENOUGH_DATA) {
-            // Canot not lock any more buffers.
+            // Cannot lock any more buffers.
             break;
         } else if ((res != OK) || (*it != imgBuffer.timestamp)) {
             if (res != OK) {
@@ -535,6 +542,7 @@
                 ALOGE("%s: Expecting JPEG_APP_SEGMENTS buffer with time stamp: %" PRId64
                         " received buffer with time stamp: %" PRId64, __FUNCTION__,
                         *it, imgBuffer.timestamp);
+                mAppSegmentConsumer->unlockBuffer(imgBuffer);
             }
             mPendingInputFrames[*it].error = true;
             mInputAppSegmentBuffers.erase(it);
@@ -546,7 +554,7 @@
             mAppSegmentConsumer->unlockBuffer(imgBuffer);
         } else {
             mPendingInputFrames[imgBuffer.timestamp].appSegmentBuffer = imgBuffer;
-            mAppSegmentBufferAcquired = true;
+            mLockedAppSegmentBufferCnt++;
         }
         mInputAppSegmentBuffers.erase(it);
     }
@@ -556,7 +564,7 @@
         auto it = mInputYuvBuffers.begin();
         auto res = mMainImageConsumer->lockNextBuffer(&imgBuffer);
         if (res == NOT_ENOUGH_DATA) {
-            // Canot not lock any more buffers.
+            // Cannot lock any more buffers.
             break;
         } else if (res != OK) {
             ALOGE("%s: Error locking YUV_888 image buffer: %s (%d)", __FUNCTION__,
@@ -593,13 +601,15 @@
         } else {
             // Direct mapping between camera timestamp (in ns) and codec timestamp (in us).
             bufferTime = mCodecOutputBufferTimestamps.front();
-            mOutputBufferCounter++;
-            if (mOutputBufferCounter == mNumOutputTiles) {
+            mCodecOutputCounter++;
+            if (mCodecOutputCounter == mNumOutputTiles) {
                 mCodecOutputBufferTimestamps.pop();
-                mOutputBufferCounter = 0;
+                mCodecOutputCounter = 0;
             }
 
             mPendingInputFrames[bufferTime].codecOutputBuffers.push_back(*it);
+            ALOGV("%s: [%" PRId64 "]: Pushing codecOutputBuffers (time %" PRId64 " us)",
+                    __FUNCTION__, bufferTime, it->timeUs);
         }
         mCodecOutputBuffers.erase(it);
     }
@@ -607,6 +617,7 @@
     while (!mFrameNumberMap.empty()) {
         auto it = mFrameNumberMap.begin();
         mPendingInputFrames[it->second].frameNumber = it->first;
+        ALOGV("%s: [%" PRId64 "]: frameNumber is %" PRId64, __FUNCTION__, it->second, it->first);
         mFrameNumberMap.erase(it);
     }
 
@@ -675,16 +686,29 @@
     }
 
     bool newInputAvailable = false;
-    for (const auto& it : mPendingInputFrames) {
+    for (auto& it : mPendingInputFrames) {
+        // New input is considered to be available only if:
+        // 1. the codec input (YUV) buffers are ready, or
+        // 2. the app segment is ready and the muxer has been created, or
+        // 3. a codec output tile is ready and an output buffer is available.
+        // This makes sure the muxer gets created only once an output tile has
+        // been generated, because right now we only handle one HEIC output
+        // buffer at a time (max dequeued buffer count is 1).
         bool appSegmentReady = (it.second.appSegmentBuffer.data != nullptr) &&
-                !it.second.appSegmentWritten && it.second.result != nullptr;
+                !it.second.appSegmentWritten && it.second.result != nullptr &&
+                it.second.muxer != nullptr;
         bool codecOutputReady = !it.second.codecOutputBuffers.empty();
         bool codecInputReady = (it.second.yuvBuffer.data != nullptr) &&
                 (!it.second.codecInputBuffers.empty());
+        bool hasOutputBuffer = it.second.muxer != nullptr ||
+                (mDequeuedOutputBufferCnt < kMaxOutputSurfaceProducerCount);
         if ((!it.second.error) &&
                 (it.first < *currentTs) &&
-                (appSegmentReady || codecOutputReady || codecInputReady)) {
+                (appSegmentReady || (codecOutputReady && hasOutputBuffer) || codecInputReady)) {
             *currentTs = it.first;
+            if (it.second.format == nullptr && mFormat != nullptr) {
+                it.second.format = mFormat->dup();
+            }
             newInputAvailable = true;
             break;
         }
@@ -716,15 +740,17 @@
     status_t res = OK;
 
     bool appSegmentReady = inputFrame.appSegmentBuffer.data != nullptr &&
-            !inputFrame.appSegmentWritten && inputFrame.result != nullptr;
+            !inputFrame.appSegmentWritten && inputFrame.result != nullptr &&
+            inputFrame.muxer != nullptr;
     bool codecOutputReady = inputFrame.codecOutputBuffers.size() > 0;
     bool codecInputReady = inputFrame.yuvBuffer.data != nullptr &&
-           !inputFrame.codecInputBuffers.empty();
+            !inputFrame.codecInputBuffers.empty();
+    bool hasOutputBuffer = inputFrame.muxer != nullptr ||
+            (mDequeuedOutputBufferCnt < kMaxOutputSurfaceProducerCount);
 
-    if (!appSegmentReady && !codecOutputReady && !codecInputReady) {
-        ALOGW("%s: No valid appSegmentBuffer/codec input/outputBuffer available!", __FUNCTION__);
-        return OK;
-    }
+    ALOGV("%s: [%" PRId64 "]: appSegmentReady %d, codecOutputReady %d, codecInputReady %d,"
+            " dequeuedOutputBuffer %d", __FUNCTION__, timestamp, appSegmentReady,
+            codecOutputReady, codecInputReady, mDequeuedOutputBufferCnt);
 
     // Handle inputs for Hevc tiling
     if (codecInputReady) {
@@ -736,7 +762,13 @@
         }
     }
 
-    // Initialize and start muxer if not yet done so
+    if (!(codecOutputReady && hasOutputBuffer) && !appSegmentReady) {
+        return OK;
+    }
+
+    // Initialize and start the muxer if not already done. At this point
+    // codecOutputReady must be true; otherwise appSegmentReady would be
+    // guaranteed false and the function would have returned early above.
     if (inputFrame.muxer == nullptr) {
         res = startMuxerForInputFrame(timestamp, inputFrame);
         if (res != OK) {
@@ -747,7 +779,7 @@
     }
 
     // Write JPEG APP segments data to the muxer.
-    if (appSegmentReady && inputFrame.muxer != nullptr) {
+    if (appSegmentReady) {
         res = processAppSegment(timestamp, inputFrame);
         if (res != OK) {
             ALOGE("%s: Failed to process JPEG APP segments: %s (%d)", __FUNCTION__,
@@ -766,12 +798,18 @@
         }
     }
 
-    if (inputFrame.appSegmentWritten && inputFrame.pendingOutputTiles == 0) {
-        res = processCompletedInputFrame(timestamp, inputFrame);
-        if (res != OK) {
-            ALOGE("%s: Failed to process completed input frame: %s (%d)", __FUNCTION__,
-                    strerror(-res), res);
-            return res;
+    if (inputFrame.pendingOutputTiles == 0) {
+        if (inputFrame.appSegmentWritten) {
+            res = processCompletedInputFrame(timestamp, inputFrame);
+            if (res != OK) {
+                ALOGE("%s: Failed to process completed input frame: %s (%d)", __FUNCTION__,
+                        strerror(-res), res);
+                return res;
+            }
+        } else if (mLockedAppSegmentBufferCnt == kMaxAcquiredAppSegment) {
+            ALOGE("%s: Out-of-order app segment buffers reaches limit %u", __FUNCTION__,
+                    kMaxAcquiredAppSegment);
+            return INVALID_OPERATION;
         }
     }
 
@@ -780,11 +818,6 @@
 
 status_t HeicCompositeStream::startMuxerForInputFrame(nsecs_t timestamp, InputFrame &inputFrame) {
     sp<ANativeWindow> outputANW = mOutputSurface;
-    if (inputFrame.codecOutputBuffers.size() == 0) {
-        // No single codec output buffer has been generated. Continue to
-        // wait.
-        return OK;
-    }
 
     auto res = outputANW->dequeueBuffer(mOutputSurface.get(), &inputFrame.anb, &inputFrame.fenceFd);
     if (res != OK) {
@@ -792,6 +825,7 @@
                 res);
         return res;
     }
+    mDequeuedOutputBufferCnt++;
 
     // Combine current thread id, stream id and timestamp to uniquely identify image.
     std::ostringstream tempOutputFile;
@@ -828,7 +862,7 @@
         }
     }
 
-    ssize_t trackId = inputFrame.muxer->addTrack(mFormat);
+    ssize_t trackId = inputFrame.muxer->addTrack(inputFrame.format);
     if (trackId < 0) {
         ALOGE("%s: Failed to addTrack to the muxer: %zd", __FUNCTION__, trackId);
         return NO_INIT;
@@ -844,6 +878,8 @@
         return res;
     }
 
+    ALOGV("%s: [%" PRId64 "]: Muxer started for inputFrame", __FUNCTION__,
+            timestamp);
     return OK;
 }
 
@@ -852,9 +888,6 @@
     auto appSegmentSize = findAppSegmentsSize(inputFrame.appSegmentBuffer.data,
             inputFrame.appSegmentBuffer.width * inputFrame.appSegmentBuffer.height,
             &app1Size);
-    ALOGV("%s: appSegmentSize is %zu, width %d, height %d, app1Size %zu", __FUNCTION__,
-          appSegmentSize, inputFrame.appSegmentBuffer.width,
-          inputFrame.appSegmentBuffer.height, app1Size);
     if (appSegmentSize == 0) {
         ALOGE("%s: Failed to find JPEG APP segment size", __FUNCTION__);
         return NO_INIT;
@@ -910,7 +943,16 @@
                 __FUNCTION__, strerror(-res), res);
         return res;
     }
+
+    ALOGV("%s: [%" PRId64 "]: appSegmentSize is %zu, width %d, height %d, app1Size %zu",
+          __FUNCTION__, timestamp, appSegmentSize, inputFrame.appSegmentBuffer.width,
+          inputFrame.appSegmentBuffer.height, app1Size);
+
     inputFrame.appSegmentWritten = true;
+    // Release the buffer now so any pending input app segments can be processed
+    mAppSegmentConsumer->unlockBuffer(inputFrame.appSegmentBuffer);
+    inputFrame.appSegmentBuffer.data = nullptr;
+    mLockedAppSegmentBufferCnt--;
 
     return OK;
 }
@@ -934,8 +976,9 @@
                 mOutputWidth - tileX * mGridWidth : mGridWidth;
         size_t height = (tileY == static_cast<size_t>(mGridRows) - 1) ?
                 mOutputHeight - tileY * mGridHeight : mGridHeight;
-        ALOGV("%s: inputBuffer tileIndex [%zu, %zu], top %zu, left %zu, width %zu, height %zu",
-                __FUNCTION__, tileX, tileY, top, left, width, height);
+        ALOGV("%s: inputBuffer tileIndex [%zu, %zu], top %zu, left %zu, width %zu, height %zu,"
+                " timeUs %" PRId64, __FUNCTION__, tileX, tileY, top, left, width, height,
+                inputBuffer.timeUs);
 
         res = copyOneYuvTile(buffer, inputFrame.yuvBuffer, top, left, width, height);
         if (res != OK) {
@@ -990,6 +1033,9 @@
     }
 
     inputFrame.codecOutputBuffers.erase(inputFrame.codecOutputBuffers.begin());
+
+    ALOGV("%s: [%" PRId64 "]: Output buffer index %d",
+        __FUNCTION__, timestamp, it->index);
     return OK;
 }
 
@@ -1046,7 +1092,9 @@
         return res;
     }
     inputFrame.anb = nullptr;
+    mDequeuedOutputBufferCnt--;
 
+    ALOGV("%s: [%" PRId64 "]", __FUNCTION__, timestamp);
     ATRACE_ASYNC_END("HEIC capture", inputFrame.frameNumber);
     return OK;
 }
@@ -1060,7 +1108,6 @@
     if (inputFrame->appSegmentBuffer.data != nullptr) {
         mAppSegmentConsumer->unlockBuffer(inputFrame->appSegmentBuffer);
         inputFrame->appSegmentBuffer.data = nullptr;
-        mAppSegmentBufferAcquired = false;
     }
 
     while (!inputFrame->codecOutputBuffers.empty()) {
@@ -1098,11 +1145,13 @@
     }
 }
 
-void HeicCompositeStream::releaseInputFramesLocked(int64_t currentTs) {
+void HeicCompositeStream::releaseInputFramesLocked() {
     auto it = mPendingInputFrames.begin();
     while (it != mPendingInputFrames.end()) {
-        if (it->first <= currentTs) {
-            releaseInputFrameLocked(&it->second);
+        auto& inputFrame = it->second;
+        if (inputFrame.error ||
+            (inputFrame.appSegmentWritten && inputFrame.pendingOutputTiles == 0)) {
+            releaseInputFrameLocked(&inputFrame);
             it = mPendingInputFrames.erase(it);
         } else {
             it++;
@@ -1506,7 +1555,7 @@
             // In case we landed in error state, return any pending buffers and
             // halt all further processing.
             compilePendingInputLocked();
-            releaseInputFramesLocked(currentTs);
+            releaseInputFramesLocked();
             return false;
         }
 
@@ -1548,11 +1597,7 @@
         mPendingInputFrames[currentTs].error = true;
     }
 
-    if (mPendingInputFrames[currentTs].error ||
-            (mPendingInputFrames[currentTs].appSegmentWritten &&
-            mPendingInputFrames[currentTs].pendingOutputTiles == 0)) {
-        releaseInputFramesLocked(currentTs);
-    }
+    releaseInputFramesLocked();
 
     return true;
 }
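For readability, the hasOutputBuffer gating added in the hunks above boils down to the following predicate; the standalone function is illustrative (it ignores the separate appSegmentReady path) and uses InputFrame plus the counters/constants introduced in HeicCompositeStream.h below:

    // A frame may start or continue muxing only when a codec output tile is ready AND
    // either its muxer already owns a dequeued output buffer, or fewer than
    // kMaxOutputSurfaceProducerCount (currently 1) output buffers are dequeued.
    bool canMuxCodecOutput(const InputFrame& frame, int32_t dequeuedOutputBufferCnt) {
        bool codecOutputReady = !frame.codecOutputBuffers.empty();
        bool hasOutputBuffer = frame.muxer != nullptr ||
                dequeuedOutputBufferCnt < kMaxOutputSurfaceProducerCount;
        return codecOutputReady && hasOutputBuffer;
    }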
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.h b/services/camera/libcameraservice/api2/HeicCompositeStream.h
index 260c68e..04e7b83 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.h
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.h
@@ -25,6 +25,7 @@
 #include <media/hardware/VideoAPI.h>
 #include <media/MediaCodecBuffer.h>
 #include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaMuxer.h>
 
@@ -157,6 +158,7 @@
         bool                      errorNotified;
         int64_t                   frameNumber;
 
+        sp<AMessage>              format;
         sp<MediaMuxer>            muxer;
         int                       fenceFd;
         int                       fileFd;
@@ -187,7 +189,7 @@
     status_t processCompletedInputFrame(nsecs_t timestamp, InputFrame &inputFrame);
 
     void releaseInputFrameLocked(InputFrame *inputFrame /*out*/);
-    void releaseInputFramesLocked(int64_t currentTs);
+    void releaseInputFramesLocked();
 
     size_t findAppSegmentsSize(const uint8_t* appSegmentBuffer, size_t maxSize,
             size_t* app1SegmentSize);
@@ -205,11 +207,13 @@
             static_cast<android_dataspace>(HAL_DATASPACE_JPEG_APP_SEGMENTS);
     static const android_dataspace kHeifDataSpace =
             static_cast<android_dataspace>(HAL_DATASPACE_HEIF);
+    // Use the pipeline depth limit in the API spec as the maximum number of acquired
+    // app segment buffers.
+    static const uint32_t kMaxAcquiredAppSegment = 8;
 
     int               mAppSegmentStreamId, mAppSegmentSurfaceId;
     sp<CpuConsumer>   mAppSegmentConsumer;
     sp<Surface>       mAppSegmentSurface;
-    bool              mAppSegmentBufferAcquired;
     size_t            mAppSegmentMaxSize;
     CameraMetadata    mStaticInfo;
 
@@ -218,9 +222,10 @@
     sp<CpuConsumer>   mMainImageConsumer; // Only applicable for HEVC codec.
     bool              mYuvBufferAcquired; // Only applicable to HEVC codec
 
+    static const int32_t kMaxOutputSurfaceProducerCount = 1;
     sp<Surface>       mOutputSurface;
     sp<ProducerListener> mProducerListener;
-
+    int32_t           mDequeuedOutputBufferCnt;
 
     // Map from frame number to JPEG setting of orientation+quality
     std::map<int64_t, std::pair<int32_t, int32_t>> mSettingsByFrameNumber;
@@ -229,11 +234,12 @@
 
     // Keep all incoming APP segment Blob buffer pending further processing.
     std::vector<int64_t> mInputAppSegmentBuffers;
+    int32_t           mLockedAppSegmentBufferCnt;
 
     // Keep all incoming HEIC blob buffer pending further processing.
     std::vector<CodecOutputBufferInfo> mCodecOutputBuffers;
     std::queue<int64_t> mCodecOutputBufferTimestamps;
-    size_t mOutputBufferCounter;
+    size_t mCodecOutputCounter;
 
     // Keep all incoming Yuv buffer pending tiling and encoding (for HEVC YUV tiling only)
     std::vector<int64_t> mInputYuvBuffers;
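
The header changes above replace the single mAppSegmentBufferAcquired flag with mLockedAppSegmentBufferCnt, bounded by kMaxAcquiredAppSegment. A toy sketch of that bookkeeping, assuming hypothetical acquire/release hooks around the CpuConsumer calls (only the constant mirrors the patch):

#include <cstdint>

class AppSegmentBufferBudget {
  public:
    static constexpr uint32_t kMaxAcquired = 8;  // pipeline-depth limit from the API spec

    bool canAcquire() const { return mLockedCnt < kMaxAcquired; }
    void onAcquired() { ++mLockedCnt; }                        // after lockNextBuffer() succeeds
    void onReleased() { if (mLockedCnt > 0) --mLockedCnt; }    // after unlockBuffer()

  private:
    uint32_t mLockedCnt = 0;  // plays the role of mLockedAppSegmentBufferCnt
};
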
diff --git a/services/camera/libcameraservice/hidl/HidlCameraService.cpp b/services/camera/libcameraservice/hidl/HidlCameraService.cpp
index 74cfe42..1daa035 100644
--- a/services/camera/libcameraservice/hidl/HidlCameraService.cpp
+++ b/services/camera/libcameraservice/hidl/HidlCameraService.cpp
@@ -191,6 +191,14 @@
       _hidl_cb(status, {});
       return Void();
     }
+    cameraStatusAndIds.erase(std::remove_if(cameraStatusAndIds.begin(), cameraStatusAndIds.end(),
+            [this](const hardware::CameraStatus& s) {
+              bool supportsHAL3 = false;
+              binder::Status sRet =
+                            mAidlICameraService->supportsCameraApi(String16(s.cameraId),
+                                    hardware::ICameraService::API_VERSION_2, &supportsHAL3);
+              return !sRet.isOk() || !supportsHAL3;
+            }), cameraStatusAndIds.end());
     hidl_vec<HCameraStatusAndId> hCameraStatusAndIds;
     //Convert cameraStatusAndIds to HIDL and call callback
     convertToHidl(cameraStatusAndIds, &hCameraStatusAndIds);
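
The filter added above drops any camera for which the API2 query fails or reports no support, so HIDL clients only ever see HAL3-capable devices. A self-contained sketch of the same erase/remove_if pattern, with supportsApi2() as a hypothetical stand-in for the binder call:

#include <algorithm>
#include <string>
#include <vector>

// Hypothetical stand-in for the AIDL supportsCameraApi() query; a real call
// could fail (return false) or report support via *result.
static bool supportsApi2(const std::string& cameraId, bool* result) {
    *result = (cameraId != "legacy0");  // pretend "legacy0" is HAL1-only
    return true;
}

// Keep only the cameras whose API2 query succeeds and reports support.
static void filterToApi2Only(std::vector<std::string>& ids) {
    ids.erase(std::remove_if(ids.begin(), ids.end(),
            [](const std::string& id) {
                bool api2 = false;
                const bool ok = supportsApi2(id, &api2);
                return !ok || !api2;  // drop on failure or missing support
            }), ids.end());
}
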
diff --git a/services/mediaanalytics/tests/mediametrics_tests.cpp b/services/mediaanalytics/tests/mediametrics_tests.cpp
index 7a6f5a4..09ca114 100644
--- a/services/mediaanalytics/tests/mediametrics_tests.cpp
+++ b/services/mediaanalytics/tests/mediametrics_tests.cpp
@@ -160,6 +160,27 @@
   }
 }
 
+TEST(mediametrics_tests, superbig_item_removal2) {
+  MediaAnalyticsItem item("TheOne");
+  constexpr size_t count = 10000;
+
+  for (size_t i = 0; i < count; ++i) {
+    item.setInt32(std::to_string(i).c_str(), i);
+  }
+  static const char *attrs[] = { "1", };
+  item.filterNot(1, attrs);
+
+  for (size_t i = 0; i < count; ++i) {
+    int32_t i32;
+    if (i == 1) { // check to see that there is only one
+        ASSERT_TRUE(item.getInt32(std::to_string(i).c_str(), &i32));
+        ASSERT_EQ((int32_t)i, i32);
+    } else {
+        ASSERT_FALSE(item.getInt32(std::to_string(i).c_str(), &i32));
+    }
+  }
+}
+
 TEST(mediametrics_tests, item_transmutation) {
   MediaAnalyticsItem item("Alchemist's Stone");
 
@@ -175,3 +196,88 @@
   ASSERT_TRUE(item.getInt32("convert", &i32));   // check it is i32 and 2 (123 is discarded).
   ASSERT_EQ(2, i32);
 }
+
+TEST(mediametrics_tests, item_binderization) {
+  MediaAnalyticsItem item;
+  item.setInt32("i32", 1)
+      .setInt64("i64", 2)
+      .setDouble("double", 3.1)
+      .setCString("string", "abc")
+      .setRate("rate", 11, 12);
+
+  Parcel p;
+  item.writeToParcel(&p);
+
+  p.setDataPosition(0); // rewind for reading
+  MediaAnalyticsItem item2;
+  item2.readFromParcel(p);
+
+  ASSERT_EQ(item, item2);
+}
+
+TEST(mediametrics_tests, item_byteserialization) {
+  MediaAnalyticsItem item;
+  item.setInt32("i32", 1)
+      .setInt64("i64", 2)
+      .setDouble("double", 3.1)
+      .setCString("string", "abc")
+      .setRate("rate", 11, 12);
+
+  char *data;
+  size_t length;
+  ASSERT_EQ(0, item.writeToByteString(&data, &length));
+  ASSERT_GT(length, (size_t)0);
+
+  MediaAnalyticsItem item2;
+  item2.readFromByteString(data, length);
+
+  printf("item: %s\n", item.toString().c_str());
+  printf("item2: %s\n", item2.toString().c_str());
+  ASSERT_EQ(item, item2);
+
+  free(data);
+}
+
+TEST(mediametrics_tests, item_iteration) {
+  MediaAnalyticsItem item;
+  item.setInt32("i32", 1)
+      .setInt64("i64", 2)
+      .setDouble("double", 3.125)
+      .setCString("string", "abc")
+      .setRate("rate", 11, 12);
+
+  int mask = 0;
+  for (auto &prop : item) {
+      const char *name = prop.getName();
+      if (!strcmp(name, "i32")) {
+          int32_t i32;
+          ASSERT_TRUE(prop.get(&i32));
+          ASSERT_EQ(1, i32);
+          mask |= 1;
+      } else if (!strcmp(name, "i64")) {
+          int64_t i64;
+          ASSERT_TRUE(prop.get(&i64));
+          ASSERT_EQ(2, i64);
+          mask |= 2;
+      } else if (!strcmp(name, "double")) {
+          double d;
+          ASSERT_TRUE(prop.get(&d));
+          ASSERT_EQ(3.125, d);
+          mask |= 4;
+      } else if (!strcmp(name, "string")) {
+          const char *s;
+          ASSERT_TRUE(prop.get(&s));
+          ASSERT_EQ(0, strcmp(s, "abc"));
+          mask |= 8;
+      } else if (!strcmp(name, "rate")) {
+          std::pair<int64_t, int64_t> r;
+          ASSERT_TRUE(prop.get(&r));
+          ASSERT_EQ(11, r.first);
+          ASSERT_EQ(12, r.second);
+          mask |= 16;
+      } else {
+          FAIL();
+      }
+  }
+  ASSERT_EQ(31, mask);
+}
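
The superbig_item_removal2 test above relies on filterNot(n, attrs) keeping only the named attributes and discarding everything else. A minimal sketch of that keep-only semantics on a plain std::map, for illustration only (keepOnly() is a hypothetical helper, not part of MediaAnalyticsItem):

#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>
#include <string>

// Erase every property whose name is not in the given list.
static void keepOnly(std::map<std::string, int32_t>& props,
                     size_t count, const char* const* names) {
    for (auto it = props.begin(); it != props.end(); ) {
        bool keep = false;
        for (size_t i = 0; i < count; ++i) {
            if (it->first == names[i]) {
                keep = true;
                break;
            }
        }
        it = keep ? std::next(it) : props.erase(it);
    }
}
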