Merge "Audioflinger: tracks monitor OP_PLAY_AUDIO"
diff --git a/Android.bp b/Android.bp
deleted file mode 100644
index e4f12c8..0000000
--- a/Android.bp
+++ /dev/null
@@ -1,7 +0,0 @@
-subdirs = [
-    "camera",
-    "drm/*",
-    "media/*",
-    "services/*",
-    "soundtrigger",
-]
diff --git a/apex/ld.config.txt b/apex/ld.config.txt
index b342206..2daeeac 100644
--- a/apex/ld.config.txt
+++ b/apex/ld.config.txt
@@ -48,7 +48,9 @@
 # TODO: replace the following when apex has a way to auto-generate this list
 # namespace.default.link.platform.shared_libs  = %LLNDK_LIBRARIES%
 # namespace.default.link.platform.shared_libs += %SANITIZER_RUNTIME_LIBRARIES%
-namespace.default.link.platform.shared_libs = libEGL.so:libGLESv1_CM.so:libGLESv2.so:libGLESv3.so:libRS.so:libandroid_net.so:libc.so:libclang_rt.asan-aarch64-android.so:libclang_rt.asan-arm-android.so:libclang_rt.hwasan-aarch64-android.so:libclang_rt.asan-i686-android.so:libclang_rt.asan-x86_64-android.so:libdl.so:libft2.so:liblog.so:libm.so:libmediandk.so:libnativewindow.so:libneuralnetworks.so:libsync.so:libvndksupport.so:libvulkan.so
+namespace.default.link.platform.shared_libs = libEGL.so:libGLESv1_CM.so:libGLESv2.so:libGLESv3.so:libRS.so:libandroid_net.so:libc.so:libcgrouprc.so:libclang_rt.asan-aarch64-android.so:libclang_rt.asan-arm-android.so:libclang_rt.hwasan-aarch64-android.so:libclang_rt.asan-i686-android.so:libclang_rt.asan-x86_64-android.so:libdl.so:libft2.so:liblog.so:libm.so:libmediandk.so:libnativewindow.so:libneuralnetworks.so:libsync.so:libvndksupport.so:libvulkan.so
+# FIXME: b/129552044
+namespace.default.link.platform.shared_libs += libz.so
 
 ###############################################################################
 # "platform" namespace
diff --git a/apex/manifest.json b/apex/manifest.json
index e2df3a3..c6c63f6 100644
--- a/apex/manifest.json
+++ b/apex/manifest.json
@@ -1,4 +1,4 @@
 {
   "name": "com.android.media",
-  "version": 1
+  "version": 200000000
 }
diff --git a/apex/manifest_codec.json b/apex/manifest_codec.json
index e2bceec..4f31b15 100644
--- a/apex/manifest_codec.json
+++ b/apex/manifest_codec.json
@@ -1,4 +1,4 @@
 {
   "name": "com.android.media.swcodec",
-  "version": 1
+  "version": 200000000
 }
diff --git a/camera/Android.bp b/camera/Android.bp
index 21588d4..2800595 100644
--- a/camera/Android.bp
+++ b/camera/Android.bp
@@ -12,8 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-subdirs = ["ndk"]
-
 cc_library_shared {
     name: "libcamera_client",
 
diff --git a/camera/CaptureResult.cpp b/camera/CaptureResult.cpp
index 928a6bc..1d8e8c4 100644
--- a/camera/CaptureResult.cpp
+++ b/camera/CaptureResult.cpp
@@ -39,6 +39,16 @@
     parcel->readInt64(&frameNumber);
     parcel->readInt32(&partialResultCount);
     parcel->readInt32(&errorStreamId);
+    auto physicalCameraIdPresent = parcel->readBool();
+    if (physicalCameraIdPresent) {
+        String16 cameraId;
+        status_t res = OK;
+        if ((res = parcel->readString16(&cameraId)) != OK) {
+            ALOGE("%s: Failed to read camera id: %d", __FUNCTION__, res);
+            return res;
+        }
+        errorPhysicalCameraId = cameraId;
+    }
 
     return OK;
 }
@@ -56,6 +66,16 @@
     parcel->writeInt64(frameNumber);
     parcel->writeInt32(partialResultCount);
     parcel->writeInt32(errorStreamId);
+    if (errorPhysicalCameraId.size() > 0) {
+        parcel->writeBool(true);
+        status_t res = OK;
+        if ((res = parcel->writeString16(errorPhysicalCameraId)) != OK) {
+            ALOGE("%s: Failed to write physical camera ID to parcel: %d", __FUNCTION__, res);
+            return res;
+        }
+    } else {
+        parcel->writeBool(false);
+    }
 
     return OK;
 }
diff --git a/camera/include/camera/CaptureResult.h b/camera/include/camera/CaptureResult.h
index 56fa178..ef830b5 100644
--- a/camera/include/camera/CaptureResult.h
+++ b/camera/include/camera/CaptureResult.h
@@ -70,6 +70,13 @@
     int32_t errorStreamId;
 
     /**
+     * For capture result errors, the physical camera ID in case the respective request contains
+     * a reference to a physical camera device.
+     * Empty otherwise.
+     */
+    String16  errorPhysicalCameraId;
+
+    /**
      * Constructor initializes object as invalid by setting requestId to be -1.
      */
     CaptureResultExtras()
@@ -79,7 +86,8 @@
           precaptureTriggerId(0),
           frameNumber(0),
           partialResultCount(0),
-          errorStreamId(-1) {
+          errorStreamId(-1),
+          errorPhysicalCameraId() {
     }
 
     /**
diff --git a/camera/ndk/impl/ACameraDevice.cpp b/camera/ndk/impl/ACameraDevice.cpp
index c9db01e..25a81eb 100644
--- a/camera/ndk/impl/ACameraDevice.cpp
+++ b/camera/ndk/impl/ACameraDevice.cpp
@@ -45,6 +45,7 @@
 const char* CameraDevice::kSequenceIdKey     = "SequenceId";
 const char* CameraDevice::kFrameNumberKey    = "FrameNumber";
 const char* CameraDevice::kAnwKey            = "Anw";
+const char* CameraDevice::kFailingPhysicalCameraId = "FailingPhysicalCameraId";
 
 /**
  * CameraDevice Implementation
@@ -867,10 +868,19 @@
         failure->wasImageCaptured = (errorCode ==
                 hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT);
 
-        sp<AMessage> msg = new AMessage(kWhatCaptureFail, mHandler);
+        sp<AMessage> msg = new AMessage(cbh.mIsLogicalCameraCallback ? kWhatLogicalCaptureFail :
+                kWhatCaptureFail, mHandler);
         msg->setPointer(kContextKey, cbh.mContext);
         msg->setObject(kSessionSpKey, session);
-        msg->setPointer(kCallbackFpKey, (void*) onError);
+        if (cbh.mIsLogicalCameraCallback) {
+            if (resultExtras.errorPhysicalCameraId.size() > 0) {
+                String8 cameraId(resultExtras.errorPhysicalCameraId);
+                msg->setString(kFailingPhysicalCameraId, cameraId.string(), cameraId.size());
+            }
+            msg->setPointer(kCallbackFpKey, (void*) cbh.mOnLogicalCameraCaptureFailed);
+        } else {
+            msg->setPointer(kCallbackFpKey, (void*) onError);
+        }
         msg->setObject(kCaptureRequestKey, request);
         msg->setObject(kCaptureFailureKey, failure);
         postSessionMsgAndCleanup(msg);
@@ -895,6 +905,7 @@
         case kWhatCaptureResult:
         case kWhatLogicalCaptureResult:
         case kWhatCaptureFail:
+        case kWhatLogicalCaptureFail:
         case kWhatCaptureSeqEnd:
         case kWhatCaptureSeqAbort:
         case kWhatCaptureBufferLost:
@@ -966,6 +977,7 @@
         case kWhatCaptureResult:
         case kWhatLogicalCaptureResult:
         case kWhatCaptureFail:
+        case kWhatLogicalCaptureFail:
         case kWhatCaptureSeqEnd:
         case kWhatCaptureSeqAbort:
         case kWhatCaptureBufferLost:
@@ -984,6 +996,7 @@
                 case kWhatCaptureResult:
                 case kWhatLogicalCaptureResult:
                 case kWhatCaptureFail:
+                case kWhatLogicalCaptureFail:
                 case kWhatCaptureBufferLost:
                     found = msg->findObject(kCaptureRequestKey, &obj);
                     if (!found) {
@@ -1138,6 +1151,39 @@
                     freeACaptureRequest(request);
                     break;
                 }
+                case kWhatLogicalCaptureFail:
+                {
+                    ACameraCaptureSession_logicalCamera_captureCallback_failed onFail;
+                    found = msg->findPointer(kCallbackFpKey, (void**) &onFail);
+                    if (!found) {
+                        ALOGE("%s: Cannot find capture fail callback!", __FUNCTION__);
+                        return;
+                    }
+                    if (onFail == nullptr) {
+                        return;
+                    }
+
+                    found = msg->findObject(kCaptureFailureKey, &obj);
+                    if (!found) {
+                        ALOGE("%s: Cannot find capture failure!", __FUNCTION__);
+                        return;
+                    }
+                    sp<CameraCaptureFailure> failureSp(
+                            static_cast<CameraCaptureFailure*>(obj.get()));
+                    ALogicalCameraCaptureFailure failure;
+                    AString physicalCameraId;
+                    found = msg->findString(kFailingPhysicalCameraId, &physicalCameraId);
+                    if (found && !physicalCameraId.empty()) {
+                        failure.physicalCameraId = physicalCameraId.c_str();
+                    } else {
+                        failure.physicalCameraId = nullptr;
+                    }
+                    failure.captureFailure = *failureSp;
+                    ACaptureRequest* request = allocateACaptureRequest(requestSp, mId);
+                    (*onFail)(context, session.get(), request, &failure);
+                    freeACaptureRequest(request);
+                    break;
+                }
                 case kWhatCaptureSeqEnd:
                 {
                     ACameraCaptureSession_captureCallback_sequenceEnd onSeqEnd;
@@ -1233,6 +1279,7 @@
 
     if (cbs != nullptr) {
         mOnCaptureCompleted = cbs->onCaptureCompleted;
+        mOnCaptureFailed = cbs->onCaptureFailed;
     }
 }
 
@@ -1248,6 +1295,7 @@
 
     if (lcbs != nullptr) {
         mOnLogicalCameraCaptureCompleted = lcbs->onLogicalCameraCaptureCompleted;
+        mOnLogicalCameraCaptureFailed = lcbs->onLogicalCameraCaptureFailed;
     }
 }
 
diff --git a/camera/ndk/impl/ACameraDevice.h b/camera/ndk/impl/ACameraDevice.h
index 56741ce..c92a95f 100644
--- a/camera/ndk/impl/ACameraDevice.h
+++ b/camera/ndk/impl/ACameraDevice.h
@@ -214,6 +214,7 @@
         kWhatCaptureResult,    // onCaptureProgressed, onCaptureCompleted
         kWhatLogicalCaptureResult, // onLogicalCameraCaptureCompleted
         kWhatCaptureFail,      // onCaptureFailed
+        kWhatLogicalCaptureFail, // onLogicalCameraCaptureFailed
         kWhatCaptureSeqEnd,    // onCaptureSequenceCompleted
         kWhatCaptureSeqAbort,  // onCaptureSequenceAborted
         kWhatCaptureBufferLost,// onCaptureBufferLost
@@ -233,6 +234,7 @@
     static const char* kSequenceIdKey;
     static const char* kFrameNumberKey;
     static const char* kAnwKey;
+    static const char* kFailingPhysicalCameraId;
 
     class CallbackHandler : public AHandler {
       public:
@@ -281,6 +283,7 @@
             mOnCaptureProgressed = nullptr;
             mOnCaptureCompleted = nullptr;
             mOnLogicalCameraCaptureCompleted = nullptr;
+            mOnLogicalCameraCaptureFailed = nullptr;
             mOnCaptureFailed = nullptr;
             mOnCaptureSequenceCompleted = nullptr;
             mOnCaptureSequenceAborted = nullptr;
@@ -289,7 +292,6 @@
                 mContext = cbs->context;
                 mOnCaptureStarted = cbs->onCaptureStarted;
                 mOnCaptureProgressed = cbs->onCaptureProgressed;
-                mOnCaptureFailed = cbs->onCaptureFailed;
                 mOnCaptureSequenceCompleted = cbs->onCaptureSequenceCompleted;
                 mOnCaptureSequenceAborted = cbs->onCaptureSequenceAborted;
                 mOnCaptureBufferLost = cbs->onCaptureBufferLost;
@@ -305,6 +307,7 @@
         ACameraCaptureSession_captureCallback_result mOnCaptureProgressed;
         ACameraCaptureSession_captureCallback_result mOnCaptureCompleted;
         ACameraCaptureSession_logicalCamera_captureCallback_result mOnLogicalCameraCaptureCompleted;
+        ACameraCaptureSession_logicalCamera_captureCallback_failed mOnLogicalCameraCaptureFailed;
         ACameraCaptureSession_captureCallback_failed mOnCaptureFailed;
         ACameraCaptureSession_captureCallback_sequenceEnd mOnCaptureSequenceCompleted;
         ACameraCaptureSession_captureCallback_sequenceAbort mOnCaptureSequenceAborted;
diff --git a/camera/ndk/include/camera/NdkCameraCaptureSession.h b/camera/ndk/include/camera/NdkCameraCaptureSession.h
index d13a818..07176cf 100644
--- a/camera/ndk/include/camera/NdkCameraCaptureSession.h
+++ b/camera/ndk/include/camera/NdkCameraCaptureSession.h
@@ -676,6 +676,41 @@
         size_t physicalResultCount, const char** physicalCameraIds,
         const ACameraMetadata** physicalResults);
 
+/// Struct to describe a logical camera capture failure
+typedef struct ALogicalCameraCaptureFailure {
+    /**
+     * The {@link ACameraCaptureFailure} contains information about the regular logical
+     * device capture failure.
+     */
+    struct ACameraCaptureFailure captureFailure;
+
+    /**
+     * The physical camera device ID in case the capture failure comes from a capture request
+     * with physical camera streams configured for a logical camera. physicalCameraId will be
+     * set to NULL if the capture request has no associated physical camera device.
+     */
+    const char*    physicalCameraId;
+} ALogicalCameraCaptureFailure;
+
+/**
+ * The definition of logical camera capture failure callback.
+ *
+ * @param context The optional application context provided by user in
+ *                {@link ACameraCaptureSession_captureCallbacks}.
+ * @param session The camera capture session of interest.
+ * @param request The capture request of interest. Note that this pointer points to a copy of
+ *                the capture request sent by the application, so the address is different from
+ *                what the application sent, but the content will match. This request will be
+ *                freed by the framework immediately after this callback returns.
+ * @param failure The {@link ALogicalCameraCaptureFailure} describes the capture failure. The memory
+ *                is managed by camera framework. Do not access this pointer after this callback
+ *                returns.
+ */
+typedef void (*ACameraCaptureSession_logicalCamera_captureCallback_failed)(
+        void* context, ACameraCaptureSession* session,
+        ACaptureRequest* request, ALogicalCameraCaptureFailure* failure);
+
 /**
  * This has the same functionality as ACameraCaptureSession_captureCallbacks,
  * with the exception that an onLogicalCameraCaptureCompleted callback is
@@ -708,9 +743,24 @@
     ACameraCaptureSession_logicalCamera_captureCallback_result onLogicalCameraCaptureCompleted;
 
     /**
+     * This callback is called instead of {@link onLogicalCameraCaptureCompleted} when the
+     * camera device failed to produce a capture result for the request.
+     *
+     * <p>Other requests are unaffected, and some or all image buffers from
+     * the capture may have been pushed to their respective output
+     * streams.</p>
+     *
+     * <p>Note that the ACaptureRequest pointer in the callback will not match what the
+     * application submitted, but its contents will match what the application submitted.</p>
+     *
+     * @see ALogicalCameraCaptureFailure
+     */
+    ACameraCaptureSession_logicalCamera_captureCallback_failed onLogicalCameraCaptureFailed;
+
+    /**
      * Same as ACameraCaptureSession_captureCallbacks
      */
-    ACameraCaptureSession_captureCallback_failed        onCaptureFailed;
     ACameraCaptureSession_captureCallback_sequenceEnd   onCaptureSequenceCompleted;
     ACameraCaptureSession_captureCallback_sequenceAbort onCaptureSequenceAborted;
     ACameraCaptureSession_captureCallback_bufferLost    onCaptureBufferLost;
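
For reference, a minimal application-side sketch of how the new onLogicalCameraCaptureFailed
callback could be wired up. It assumes the ACameraCaptureSession_logicalCamera_captureCallbacks
layout shown above; the log tag and helper name are illustrative, and device/session setup is
omitted:

    #include <inttypes.h>
    #include <android/log.h>
    #include <camera/NdkCameraCaptureSession.h>

    // Invoked instead of onLogicalCameraCaptureCompleted when a capture fails.
    static void onLogicalCaptureFailed(void* /*context*/, ACameraCaptureSession* /*session*/,
            ACaptureRequest* /*request*/, ALogicalCameraCaptureFailure* failure) {
        // physicalCameraId is NULL when the failed request had no physical camera streams.
        __android_log_print(ANDROID_LOG_WARN, "SampleApp",
                "capture failed on %s, frame %" PRId64,
                failure->physicalCameraId ? failure->physicalCameraId : "logical device",
                failure->captureFailure.frameNumber);
    }

    static void initLogicalCallbacks(ACameraCaptureSession_logicalCamera_captureCallbacks* cbs) {
        *cbs = {};  // leave unused callbacks null
        cbs->onLogicalCameraCaptureFailed = onLogicalCaptureFailed;
        // cbs is then passed to the logical-camera capture entry points in place of the
        // regular ACameraCaptureSession_captureCallbacks.
    }
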
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index acf6999..99b613e 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -5695,6 +5695,8 @@
      * <p>In both cases, all images generated for a particular capture request still carry the same
      * timestamps, so that they can be used to look up the matching frame number and
      * onCaptureStarted callback.</p>
+     * <p>This tag is only applicable if the logical camera device supports concurrent physical
+     * streams from different physical cameras.</p>
      */
     ACAMERA_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE =             // byte (acamera_metadata_enum_android_logical_multi_camera_sensor_sync_type_t)
             ACAMERA_LOGICAL_MULTI_CAMERA_START + 1,
@@ -7581,14 +7583,23 @@
     ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_MOTION_TRACKING           = 10,
 
     /**
-     * <p>The camera device is a logical camera backed by two or more physical cameras. In
-     * API level 28, the physical cameras must also be exposed to the application via
-     * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraIdList">CameraManager#getCameraIdList</a>. Starting from API
-     * level 29, some or all physical cameras may not be independently exposed to the
-     * application, in which case the physical camera IDs will not be available in
-     * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraIdList">CameraManager#getCameraIdList</a>. But the application
-     * can still query the physical cameras' characteristics by calling
-     * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraCharacteristics">CameraManager#getCameraCharacteristics</a>.</p>
+     * <p>The camera device is a logical camera backed by two or more physical cameras.</p>
+     * <p>In API level 28, the physical cameras must also be exposed to the application via
+     * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraIdList">CameraManager#getCameraIdList</a>.</p>
+     * <p>Starting from API level 29, some or all physical cameras may not be independently
+     * exposed to the application, in which case the physical camera IDs will not be
+     * available in <a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraIdList">CameraManager#getCameraIdList</a>. But the
+     * application can still query the physical cameras' characteristics by calling
+     * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraCharacteristics">CameraManager#getCameraCharacteristics</a>. Additionally,
+     * if a physical camera is hidden from camera ID list, the mandatory stream combinations
+     * for that physical camera must be supported through the logical camera using physical
+     * streams.</p>
+     * <p>Combinations of logical and physical streams, or physical streams from different
+     * physical cameras are not guaranteed. However, if the camera device supports
+     * {@link ACameraDevice_isSessionConfigurationSupported },
+     * the application must be able to query whether a stream combination involving physical
+     * streams is supported by calling
+     * {@link ACameraDevice_isSessionConfigurationSupported }.</p>
      * <p>Camera application shouldn't assume that there are at most 1 rear camera and 1 front
      * camera in the system. For an application that switches between front and back cameras,
      * the recommendation is to switch between the first rear camera and the first front
@@ -7613,24 +7624,6 @@
      *   the same.</li>
      * <li>The logical camera must be LIMITED or higher device.</li>
      * </ul>
-     * <p>Both the logical camera device and its underlying physical devices support the
-     * mandatory stream combinations required for their device levels.</p>
-     * <p>Additionally, for each guaranteed stream combination, the logical camera supports:</p>
-     * <ul>
-     * <li>For each guaranteed stream combination, the logical camera supports replacing one
-     *   logical {@link AIMAGE_FORMAT_YUV_420_888 YUV_420_888}
-     *   or raw stream with two physical streams of the same size and format, each from a
-     *   separate physical camera, given that the size and format are supported by both
-     *   physical cameras.</li>
-     * <li>If the logical camera doesn't advertise RAW capability, but the underlying physical
-     *   cameras do, the logical camera will support guaranteed stream combinations for RAW
-     *   capability, except that the RAW streams will be physical streams, each from a separate
-     *   physical camera. This is usually the case when the physical cameras have different
-     *   sensor sizes.</li>
-     * </ul>
-     * <p>Using physical streams in place of a logical stream of the same size and format will
-     * not slow down the frame rate of the capture, as long as the minimum frame duration
-     * of the physical and logical streams are the same.</p>
      * <p>A logical camera device's dynamic metadata may contain
      * ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID to notify the application of the current
      * active physical camera Id. An active physical camera is the physical camera from which
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
index d7d774b..b18c897 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
@@ -55,6 +55,7 @@
 const char* CameraDevice::kSequenceIdKey     = "SequenceId";
 const char* CameraDevice::kFrameNumberKey    = "FrameNumber";
 const char* CameraDevice::kAnwKey            = "Anw";
+const char* CameraDevice::kFailingPhysicalCameraId = "FailingPhysicalCameraId";
 
 /**
  * CameraDevice Implementation
@@ -299,7 +300,6 @@
     OutputConfigurationWrapper outConfigW;
     OutputConfiguration &outConfig = outConfigW.mOutputConfiguration;
     outConfig.rotation = utils::convertToHidl(output->mRotation);
-    outConfig.windowGroupId = -1; // ndk doesn't support inter OutputConfiguration buffer sharing.
     outConfig.windowHandles.resize(output->mSharedWindows.size() + 1);
     outConfig.windowHandles[0] = output->mWindow;
     outConfig.physicalCameraId = output->mPhysicalCameraId;
@@ -894,10 +894,19 @@
         failure->sequenceId  = sequenceId;
         failure->wasImageCaptured = (errorCode == ErrorCode::CAMERA_RESULT);
 
-        sp<AMessage> msg = new AMessage(kWhatCaptureFail, mHandler);
+        sp<AMessage> msg = new AMessage(cbh.mIsLogicalCameraCallback ? kWhatLogicalCaptureFail :
+                kWhatCaptureFail, mHandler);
         msg->setPointer(kContextKey, cbh.mContext);
         msg->setObject(kSessionSpKey, session);
-        msg->setPointer(kCallbackFpKey, (void*) onError);
+        if (cbh.mIsLogicalCameraCallback) {
+            if (resultExtras.errorPhysicalCameraId.size() > 0) {
+                msg->setString(kFailingPhysicalCameraId, resultExtras.errorPhysicalCameraId.c_str(),
+                        resultExtras.errorPhysicalCameraId.size());
+            }
+            msg->setPointer(kCallbackFpKey, (void*) cbh.mOnLogicalCameraCaptureFailed);
+        } else {
+            msg->setPointer(kCallbackFpKey, (void*) onError);
+        }
         msg->setObject(kCaptureRequestKey, request);
         msg->setObject(kCaptureFailureKey, failure);
         postSessionMsgAndCleanup(msg);
@@ -919,6 +928,7 @@
         case kWhatCaptureResult:
         case kWhatLogicalCaptureResult:
         case kWhatCaptureFail:
+        case kWhatLogicalCaptureFail:
         case kWhatCaptureSeqEnd:
         case kWhatCaptureSeqAbort:
         case kWhatCaptureBufferLost:
@@ -990,6 +1000,7 @@
         case kWhatCaptureResult:
         case kWhatLogicalCaptureResult:
         case kWhatCaptureFail:
+        case kWhatLogicalCaptureFail:
         case kWhatCaptureSeqEnd:
         case kWhatCaptureSeqAbort:
         case kWhatCaptureBufferLost:
@@ -1009,6 +1020,7 @@
                 case kWhatCaptureResult:
                 case kWhatLogicalCaptureResult:
                 case kWhatCaptureFail:
+                case kWhatLogicalCaptureFail:
                 case kWhatCaptureBufferLost:
                     found = msg->findObject(kCaptureRequestKey, &obj);
                     if (!found) {
@@ -1161,6 +1173,39 @@
                     freeACaptureRequest(request);
                     break;
                 }
+                case kWhatLogicalCaptureFail:
+                {
+                    ACameraCaptureSession_logicalCamera_captureCallback_failed onFail;
+                    found = msg->findPointer(kCallbackFpKey, (void**) &onFail);
+                    if (!found) {
+                        ALOGE("%s: Cannot find capture fail callback!", __FUNCTION__);
+                        return;
+                    }
+                    if (onFail == nullptr) {
+                        return;
+                    }
+
+                    found = msg->findObject(kCaptureFailureKey, &obj);
+                    if (!found) {
+                        ALOGE("%s: Cannot find capture failure!", __FUNCTION__);
+                        return;
+                    }
+                    sp<CameraCaptureFailure> failureSp(
+                            static_cast<CameraCaptureFailure*>(obj.get()));
+                    ALogicalCameraCaptureFailure failure;
+                    AString physicalCameraId;
+                    found = msg->findString(kFailingPhysicalCameraId, &physicalCameraId);
+                    if (found && !physicalCameraId.empty()) {
+                        failure.physicalCameraId = physicalCameraId.c_str();
+                    } else {
+                        failure.physicalCameraId = nullptr;
+                    }
+                    failure.captureFailure = *failureSp;
+                    ACaptureRequest* request = allocateACaptureRequest(requestSp, device->getId());
+                    (*onFail)(context, session.get(), request, &failure);
+                    freeACaptureRequest(request);
+                    break;
+                }
                 case kWhatCaptureSeqEnd:
                 {
                     ACameraCaptureSession_captureCallback_sequenceEnd onSeqEnd;
@@ -1256,6 +1301,7 @@
 
     if (cbs != nullptr) {
         mOnCaptureCompleted = cbs->onCaptureCompleted;
+        mOnCaptureFailed = cbs->onCaptureFailed;
     }
 }
 
@@ -1271,6 +1317,7 @@
 
     if (lcbs != nullptr) {
         mOnLogicalCameraCaptureCompleted = lcbs->onLogicalCameraCaptureCompleted;
+        mOnLogicalCameraCaptureFailed = lcbs->onLogicalCameraCaptureFailed;
     }
 }
 
@@ -1368,8 +1415,9 @@
 CameraDevice::ServiceCallback::onDeviceError(
         ErrorCode errorCode,
         const CaptureResultExtras& resultExtras) {
-    ALOGD("Device error received, code %d, frame number %" PRId64 ", request ID %d, subseq ID %d",
-            errorCode, resultExtras.frameNumber, resultExtras.requestId, resultExtras.burstId);
+    ALOGD("Device error received, code %d, frame number %" PRId64 ", request ID %d, subseq ID %d,"
+            " physical camera ID %s", errorCode, resultExtras.frameNumber, resultExtras.requestId,
+            resultExtras.burstId, resultExtras.errorPhysicalCameraId.c_str());
     auto ret = Void();
     sp<CameraDevice> dev = mDevice.promote();
     if (dev == nullptr) {
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.h b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
index 47e6f56..7036017 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
@@ -242,6 +242,7 @@
         kWhatCaptureResult,    // onCaptureProgressed, onCaptureCompleted
         kWhatLogicalCaptureResult, // onLogicalCameraCaptureCompleted
         kWhatCaptureFail,      // onCaptureFailed
+        kWhatLogicalCaptureFail, // onLogicalCameraCaptureFailed
         kWhatCaptureSeqEnd,    // onCaptureSequenceCompleted
         kWhatCaptureSeqAbort,  // onCaptureSequenceAborted
         kWhatCaptureBufferLost,// onCaptureBufferLost
@@ -261,6 +262,7 @@
     static const char* kSequenceIdKey;
     static const char* kFrameNumberKey;
     static const char* kAnwKey;
+    static const char* kFailingPhysicalCameraId;
 
     class CallbackHandler : public AHandler {
       public:
@@ -307,6 +309,7 @@
             mOnCaptureProgressed = nullptr;
             mOnCaptureCompleted = nullptr;
             mOnLogicalCameraCaptureCompleted = nullptr;
+            mOnLogicalCameraCaptureFailed = nullptr;
             mOnCaptureFailed = nullptr;
             mOnCaptureSequenceCompleted = nullptr;
             mOnCaptureSequenceAborted = nullptr;
@@ -315,7 +318,6 @@
                 mContext = cbs->context;
                 mOnCaptureStarted = cbs->onCaptureStarted;
                 mOnCaptureProgressed = cbs->onCaptureProgressed;
-                mOnCaptureFailed = cbs->onCaptureFailed;
                 mOnCaptureSequenceCompleted = cbs->onCaptureSequenceCompleted;
                 mOnCaptureSequenceAborted = cbs->onCaptureSequenceAborted;
                 mOnCaptureBufferLost = cbs->onCaptureBufferLost;
@@ -332,6 +334,7 @@
         ACameraCaptureSession_captureCallback_result mOnCaptureProgressed;
         ACameraCaptureSession_captureCallback_result mOnCaptureCompleted;
         ACameraCaptureSession_logicalCamera_captureCallback_result mOnLogicalCameraCaptureCompleted;
+        ACameraCaptureSession_logicalCamera_captureCallback_failed mOnLogicalCameraCaptureFailed;
         ACameraCaptureSession_captureCallback_failed mOnCaptureFailed;
         ACameraCaptureSession_captureCallback_sequenceEnd mOnCaptureSequenceCompleted;
         ACameraCaptureSession_captureCallback_sequenceAbort mOnCaptureSequenceAborted;
diff --git a/camera/ndk/ndk_vendor/impl/utils.h b/camera/ndk/ndk_vendor/impl/utils.h
index 2f1006d..a03c7bc 100644
--- a/camera/ndk/ndk_vendor/impl/utils.h
+++ b/camera/ndk/ndk_vendor/impl/utils.h
@@ -99,7 +99,15 @@
         return mOutputConfiguration;
     }
 
-    OutputConfigurationWrapper() = default;
+    OutputConfigurationWrapper() {
+        mOutputConfiguration.rotation = OutputConfiguration::Rotation::R0;
+        // The ndk currently doesn't support deferred surfaces
+        mOutputConfiguration.isDeferred = false;
+        mOutputConfiguration.width = 0;
+        mOutputConfiguration.height = 0;
+        // ndk doesn't support inter OutputConfiguration buffer sharing.
+        mOutputConfiguration.windowGroupId = -1;
+    }
 
     OutputConfigurationWrapper(OutputConfiguration &outputConfiguration)
             : mOutputConfiguration((outputConfiguration)) { }
diff --git a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
index c51f93b..7368775 100644
--- a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
+++ b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
@@ -323,7 +323,13 @@
             }
             ch->mCompletedCaptureCallbackCount++;
         },
-        nullptr, // onCaptureFailed
+        [] (void * /*ctx*/, ACameraCaptureSession* /*session*/, ACaptureRequest* /*request*/,
+                ALogicalCameraCaptureFailure* failure) {
+            if (failure->physicalCameraId) {
+                ALOGD("%s: Physical camera id: %s result failure", __FUNCTION__,
+                        failure->physicalCameraId);
+            }
+        },
         nullptr, // onCaptureSequenceCompleted
         nullptr, // onCaptureSequenceAborted
         nullptr, // onCaptureBufferLost
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index c361690..7aa655f 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -48,6 +48,7 @@
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaCodecConstants.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaMuxer.h>
 #include <media/stagefright/PersistentSurface.h>
@@ -83,8 +84,6 @@
 using android::DISPLAY_ORIENTATION_0;
 using android::DISPLAY_ORIENTATION_180;
 using android::DISPLAY_ORIENTATION_90;
-using android::INFO_FORMAT_CHANGED;
-using android::INFO_OUTPUT_BUFFERS_CHANGED;
 using android::INVALID_OPERATION;
 using android::NAME_NOT_FOUND;
 using android::NO_ERROR;
@@ -113,6 +112,7 @@
 static uint32_t gVideoHeight = 0;
 static uint32_t gBitRate = 20000000;     // 20Mbps
 static uint32_t gTimeLimitSec = kMaxTimeLimitSec;
+static uint32_t gBframes = 0;
 
 // Set by signal handler to stop recording.
 static volatile bool gStopRequested = false;
@@ -184,13 +184,18 @@
     }
 
     sp<AMessage> format = new AMessage;
-    format->setInt32("width", gVideoWidth);
-    format->setInt32("height", gVideoHeight);
-    format->setString("mime", kMimeTypeAvc);
-    format->setInt32("color-format", OMX_COLOR_FormatAndroidOpaque);
-    format->setInt32("bitrate", gBitRate);
-    format->setFloat("frame-rate", displayFps);
-    format->setInt32("i-frame-interval", 10);
+    format->setInt32(KEY_WIDTH, gVideoWidth);
+    format->setInt32(KEY_HEIGHT, gVideoHeight);
+    format->setString(KEY_MIME, kMimeTypeAvc);
+    format->setInt32(KEY_COLOR_FORMAT, OMX_COLOR_FormatAndroidOpaque);
+    format->setInt32(KEY_BIT_RATE, gBitRate);
+    format->setFloat(KEY_FRAME_RATE, displayFps);
+    format->setInt32(KEY_I_FRAME_INTERVAL, 10);
+    format->setInt32(KEY_MAX_B_FRAMES, gBframes);
+    if (gBframes > 0) {
+        format->setInt32(KEY_PROFILE, AVCProfileMain);
+        format->setInt32(KEY_LEVEL, AVCLevel41);
+    }
 
     sp<android::ALooper> looper = new android::ALooper;
     looper->setName("screenrecord_looper");
@@ -478,7 +483,7 @@
         case -EAGAIN:                       // INFO_TRY_AGAIN_LATER
             ALOGV("Got -EAGAIN, looping");
             break;
-        case INFO_FORMAT_CHANGED:           // INFO_OUTPUT_FORMAT_CHANGED
+        case android::INFO_FORMAT_CHANGED:    // INFO_OUTPUT_FORMAT_CHANGED
             {
                 // Format includes CSD, which we must provide to muxer.
                 ALOGV("Encoder format changed");
@@ -495,7 +500,7 @@
                 }
             }
             break;
-        case INFO_OUTPUT_BUFFERS_CHANGED:   // INFO_OUTPUT_BUFFERS_CHANGED
+        case android::INFO_OUTPUT_BUFFERS_CHANGED:   // INFO_OUTPUT_BUFFERS_CHANGED
             // Not expected for an encoder; handle it anyway.
             ALOGV("Encoder buffers changed");
             err = encoder->getOutputBuffers(&buffers);
@@ -960,6 +965,7 @@
         { "codec-name",         required_argument,  NULL, 'N' },
         { "monotonic-time",     no_argument,        NULL, 'm' },
         { "persistent-surface", no_argument,        NULL, 'p' },
+        { "bframes",            required_argument,  NULL, 'B' },
         { NULL,                 0,                  NULL, 0 }
     };
 
@@ -1052,6 +1058,11 @@
         case 'p':
             gPersistentSurface = true;
             break;
+        case 'B':
+            if (parseValueWithUnit(optarg, &gBframes) != NO_ERROR) {
+                return 2;
+            }
+            break;
         default:
             if (ic != '?') {
                 fprintf(stderr, "getopt_long returned unexpected value 0x%x\n", ic);
diff --git a/drm/libdrmframework/Android.bp b/drm/libdrmframework/Android.bp
index 43ba72b..940c17d 100644
--- a/drm/libdrmframework/Android.bp
+++ b/drm/libdrmframework/Android.bp
@@ -39,4 +39,3 @@
     cflags: ["-Werror"],
 }
 
-subdirs = ["plugins/*"]
diff --git a/drm/libdrmframework/plugins/common/Android.bp b/drm/libdrmframework/plugins/common/Android.bp
deleted file mode 100644
index 213e57f..0000000
--- a/drm/libdrmframework/plugins/common/Android.bp
+++ /dev/null
@@ -1 +0,0 @@
-subdirs = ["util"]
diff --git a/drm/libdrmframework/plugins/forward-lock/Android.bp b/drm/libdrmframework/plugins/forward-lock/Android.bp
deleted file mode 100644
index f884c14..0000000
--- a/drm/libdrmframework/plugins/forward-lock/Android.bp
+++ /dev/null
@@ -1,4 +0,0 @@
-subdirs = [
-    "FwdLockEngine",
-    "internal-format",
-]
diff --git a/drm/libdrmframework/plugins/forward-lock/internal-format/Android.bp b/drm/libdrmframework/plugins/forward-lock/internal-format/Android.bp
deleted file mode 100644
index 9f58e26..0000000
--- a/drm/libdrmframework/plugins/forward-lock/internal-format/Android.bp
+++ /dev/null
@@ -1,5 +0,0 @@
-subdirs = [
-    "common",
-    "converter",
-    "decoder",
-]
diff --git a/drm/mediadrm/Android.bp b/drm/mediadrm/Android.bp
deleted file mode 100644
index b9f07f1..0000000
--- a/drm/mediadrm/Android.bp
+++ /dev/null
@@ -1 +0,0 @@
-subdirs = ["plugins/*"]
diff --git a/drm/mediadrm/plugins/clearkey/default/Android.bp b/drm/mediadrm/plugins/clearkey/default/Android.bp
index 7ba5708..9803d32 100644
--- a/drm/mediadrm/plugins/clearkey/default/Android.bp
+++ b/drm/mediadrm/plugins/clearkey/default/Android.bp
@@ -61,7 +61,3 @@
     },
 }
 
-//########################################################################
-// Build unit tests
-
-subdirs = ["tests"]
diff --git a/include/soundtrigger/ISoundTriggerHwService.h b/include/soundtrigger/ISoundTriggerHwService.h
index ae0cb01..1faeb0f 100644
--- a/include/soundtrigger/ISoundTriggerHwService.h
+++ b/include/soundtrigger/ISoundTriggerHwService.h
@@ -33,12 +33,14 @@
 
     DECLARE_META_INTERFACE(SoundTriggerHwService);
 
-    virtual status_t listModules(struct sound_trigger_module_descriptor *modules,
+    virtual status_t listModules(const String16& opPackageName,
+                                 struct sound_trigger_module_descriptor *modules,
                                  uint32_t *numModules) = 0;
 
-    virtual status_t attach(const sound_trigger_module_handle_t handle,
-                                      const sp<ISoundTriggerClient>& client,
-                                      sp<ISoundTrigger>& module) = 0;
+    virtual status_t attach(const String16& opPackageName,
+                            const sound_trigger_module_handle_t handle,
+                            const sp<ISoundTriggerClient>& client,
+                            sp<ISoundTrigger>& module) = 0;
 
     virtual status_t setCaptureState(bool active) = 0;
 };
diff --git a/include/soundtrigger/SoundTrigger.h b/include/soundtrigger/SoundTrigger.h
index 2e2ff7a..ccc61dc 100644
--- a/include/soundtrigger/SoundTrigger.h
+++ b/include/soundtrigger/SoundTrigger.h
@@ -36,10 +36,12 @@
 
     virtual ~SoundTrigger();
 
-    static  status_t listModules(struct sound_trigger_module_descriptor *modules,
+    static  status_t listModules(const String16& opPackageName,
+                                 struct sound_trigger_module_descriptor *modules,
                                  uint32_t *numModules);
-    static  sp<SoundTrigger> attach(const sound_trigger_module_handle_t module,
-                                       const sp<SoundTriggerCallback>& callback);
+    static  sp<SoundTrigger> attach(const String16& opPackageName,
+                                    const sound_trigger_module_handle_t module,
+                                    const sp<SoundTriggerCallback>& callback);
 
     static  status_t setCaptureState(bool active);
 
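
For reference, a hedged caller-side sketch of the updated listModules()/attach() signatures
above. The package name, array size, and helper name are placeholders, and error handling is
minimal:

    #include <soundtrigger/SoundTrigger.h>
    #include <utils/Errors.h>
    #include <utils/String16.h>

    using namespace android;

    // Attaches to the first available sound trigger module on behalf of the named package,
    // so the service can attribute the request to the caller.
    sp<SoundTrigger> attachFirstModule(const sp<SoundTriggerCallback>& callback) {
        const String16 opPackageName("com.example.assistant");  // hypothetical package
        struct sound_trigger_module_descriptor descriptors[8];
        uint32_t numModules = 8;
        status_t status = SoundTrigger::listModules(opPackageName, descriptors, &numModules);
        if (status != NO_ERROR || numModules == 0) {
            return nullptr;
        }
        return SoundTrigger::attach(opPackageName, descriptors[0].handle, callback);
    }
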
diff --git a/media/codec2/components/aac/C2SoftAacEnc.cpp b/media/codec2/components/aac/C2SoftAacEnc.cpp
index 137e775..71eb1ac 100644
--- a/media/codec2/components/aac/C2SoftAacEnc.cpp
+++ b/media/codec2/components/aac/C2SoftAacEnc.cpp
@@ -153,7 +153,7 @@
       mSentCodecSpecificData(false),
       mInputTimeSet(false),
       mInputSize(0),
-      mInputTimeUs(-1ll),
+      mInputTimeUs(0),
       mSignalledError(false),
       mOutIndex(0u) {
 }
@@ -179,7 +179,7 @@
     mSentCodecSpecificData = false;
     mInputTimeSet = false;
     mInputSize = 0u;
-    mInputTimeUs = -1ll;
+    mInputTimeUs = 0;
     mSignalledError = false;
     return C2_OK;
 }
@@ -197,6 +197,7 @@
     mSentCodecSpecificData = false;
     mInputTimeSet = false;
     mInputSize = 0u;
+    mInputTimeUs = 0;
     return C2_OK;
 }
 
@@ -558,6 +559,7 @@
     mSentCodecSpecificData = false;
     mInputTimeSet = false;
     mInputSize = 0u;
+    mInputTimeUs = 0;
 
     // TODO: we don't have any pending work at this time to drain.
     return C2_OK;
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 55a525e..d1fa920 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -1525,6 +1525,7 @@
     mPending.splice(mPending.end(), mStash);
     mDepth = depth;
 }
+
 void CCodecBufferChannel::ReorderStash::setKey(C2Config::ordinal_key_t key) {
     mPending.splice(mPending.end(), mStash);
     mKey = key;
@@ -1547,13 +1548,25 @@
         int64_t timestamp,
         int32_t flags,
         const C2WorkOrdinalStruct &ordinal) {
-    auto it = mStash.begin();
-    for (; it != mStash.end(); ++it) {
-        if (less(ordinal, it->ordinal)) {
-            break;
+    bool eos = flags & MediaCodec::BUFFER_FLAG_EOS;
+    if (!buffer && eos) {
+        // TRICKY: we may be violating ordering of the stash here. Because we
+        // don't expect any more emplace() calls after this, the ordering should
+        // not matter.
+        mStash.emplace_back(buffer, timestamp, flags, ordinal);
+    } else {
+        flags = flags & ~MediaCodec::BUFFER_FLAG_EOS;
+        auto it = mStash.begin();
+        for (; it != mStash.end(); ++it) {
+            if (less(ordinal, it->ordinal)) {
+                break;
+            }
+        }
+        mStash.emplace(it, buffer, timestamp, flags, ordinal);
+        if (eos) {
+            mStash.back().flags = mStash.back().flags | MediaCodec::BUFFER_FLAG_EOS;
         }
     }
-    mStash.emplace(it, buffer, timestamp, flags, ordinal);
     while (!mStash.empty() && mStash.size() > mDepth) {
         mPending.push_back(mStash.front());
         mStash.pop_front();
@@ -2815,8 +2828,9 @@
 
         outBuffer->meta()->setInt64("timeUs", entry.timestamp);
         outBuffer->meta()->setInt32("flags", entry.flags);
-        ALOGV("[%s] sendOutputBuffers: out buffer index = %zu [%p] => %p + %zu",
-                mName, index, outBuffer.get(), outBuffer->data(), outBuffer->size());
+        ALOGV("[%s] sendOutputBuffers: out buffer index = %zu [%p] => %p + %zu (%lld)",
+                mName, index, outBuffer.get(), outBuffer->data(), outBuffer->size(),
+                (long long)entry.timestamp);
         mCallback->onOutputBufferAvailable(index, outBuffer);
     }
 }
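
The ReorderStash change above keeps the EOS flag attached to whatever entry ends up last in the
stash. A condensed, standalone illustration of that rule (simplified ordinal type and flag value;
not the CCodec code itself):

    #include <cstdint>
    #include <list>

    struct Entry { int64_t ordinal; uint32_t flags; bool hasBuffer; };
    constexpr uint32_t kFlagEos = 0x4;  // stands in for MediaCodec::BUFFER_FLAG_EOS

    void emplaceOrdered(std::list<Entry>& stash, Entry e) {
        const bool eos = e.flags & kFlagEos;
        if (!e.hasBuffer && eos) {
            stash.push_back(e);  // EOS-only entry: nothing follows, so ordering is moot
            return;
        }
        e.flags &= ~kFlagEos;    // strip EOS before the ordered insert
        auto it = stash.begin();
        while (it != stash.end() && it->ordinal <= e.ordinal) ++it;
        stash.insert(it, e);
        if (eos) {
            stash.back().flags |= kFlagEos;  // EOS always marks the last stashed entry
        }
    }
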
diff --git a/media/extractors/Android.bp b/media/extractors/Android.bp
deleted file mode 100644
index e8176cf..0000000
--- a/media/extractors/Android.bp
+++ /dev/null
@@ -1,3 +0,0 @@
-subdirs = [
-    "*",
-]
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 5ff1c59..5a31c58 100755
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -24,6 +24,7 @@
 #include <stdlib.h>
 #include <string.h>
 
+#include <log/log.h>
 #include <utils/Log.h>
 
 #include "AC4Parser.h"
@@ -81,7 +82,7 @@
                 const Trex *trex,
                 off64_t firstMoofOffset,
                 const sp<ItemTable> &itemTable,
-                int32_t elstShiftStartTicks);
+                uint64_t elstShiftStartTicks);
     virtual status_t init();
 
     virtual media_status_t start();
@@ -147,7 +148,7 @@
 
     // Start offset from composition time to presentation time.
     // Support shift only for video tracks through mElstShiftStartTicks for now.
-    int32_t mElstShiftStartTicks;
+    uint64_t mElstShiftStartTicks;
 
     size_t parseNALSize(const uint8_t *data) const;
     status_t parseChunk(off64_t *offset);
@@ -397,7 +398,6 @@
     while (track) {
         Track *next = track->next;
 
-        AMediaFormat_delete(track->meta);
         delete track;
         track = next;
     }
@@ -672,7 +672,6 @@
 
             ALOGV("adding HEIF image track %u", imageIndex);
             Track *track = new Track;
-            track->next = NULL;
             if (mLastTrack != NULL) {
                 mLastTrack->next = track;
             } else {
@@ -682,10 +681,7 @@
 
             track->meta = meta;
             AMediaFormat_setInt32(track->meta, AMEDIAFORMAT_KEY_TRACK_ID, imageIndex);
-            track->includes_expensive_metadata = false;
-            track->skipTrack = false;
             track->timescale = 1000000;
-            track->elstShiftStartTicks = 0;
         }
     }
 
@@ -967,7 +963,6 @@
 
                 ALOGV("adding new track");
                 Track *track = new Track;
-                track->next = NULL;
                 if (mLastTrack) {
                     mLastTrack->next = track;
                 } else {
@@ -975,15 +970,9 @@
                 }
                 mLastTrack = track;
 
-                track->includes_expensive_metadata = false;
-                track->skipTrack = false;
-                track->timescale = 0;
                 track->meta = AMediaFormat_new();
                 AMediaFormat_setString(track->meta,
                         AMEDIAFORMAT_KEY_MIME, "application/octet-stream");
-                track->has_elst = false;
-                track->subsample_encryption = false;
-                track->elstShiftStartTicks = 0;
             }
 
             off64_t stop_offset = *offset + chunk_size;
@@ -1033,6 +1022,7 @@
                     mLastTrack->skipTrack = true;
                 }
 
+
                 if (mLastTrack->skipTrack) {
                     ALOGV("skipping this track...");
                     Track *cur = mFirstTrack;
@@ -1053,6 +1043,21 @@
 
                     return OK;
                 }
+
+                // place things we built elsewhere into their final locations
+
+                // put aggregated tx3g data into the metadata
+                if (mLastTrack->mTx3gFilled > 0) {
+                    ALOGV("Putting %zu bytes of tx3g data into meta data",
+                          mLastTrack->mTx3gFilled);
+                    AMediaFormat_setBuffer(mLastTrack->meta,
+                        AMEDIAFORMAT_KEY_TEXT_FORMAT_DATA,
+                        mLastTrack->mTx3gBuffer, mLastTrack->mTx3gFilled);
+                    // drop it now to reduce our footprint
+                    free(mLastTrack->mTx3gBuffer);
+                    mLastTrack->mTx3gBuffer = NULL;
+                }
+
             } else if (chunk_type == FOURCC("moov")) {
                 mInitCheck = OK;
 
@@ -2553,41 +2558,55 @@
             if (mLastTrack == NULL)
                 return ERROR_MALFORMED;
 
-            void *data;
-            size_t size = 0;
-            if (!AMediaFormat_getBuffer(mLastTrack->meta,
-                    AMEDIAFORMAT_KEY_TEXT_FORMAT_DATA, &data, &size)) {
-                size = 0;
-            }
-
-            if ((chunk_size > SIZE_MAX) || (SIZE_MAX - chunk_size <= size)) {
+            // complain about ridiculous chunks
+            if (chunk_size > kMaxAtomSize) {
                 return ERROR_MALFORMED;
             }
 
-            uint8_t *buffer = new (std::nothrow) uint8_t[size + chunk_size];
-            if (buffer == NULL) {
+            // complain about empty atoms
+            if (chunk_data_size <= 0) {
+                ALOGE("b/124330204");
+                android_errorWriteLog(0x534e4554, "124330204");
                 return ERROR_MALFORMED;
             }
 
-            if (size > 0) {
-                memcpy(buffer, data, size);
+            // should fill buffer based on "data_offset" and "chunk_data_size"
+            // instead of *offset and chunk_size;
+            // but we've been feeding the extra data to consumers for multiple releases and
+            // if those apps are compensating for it, we'd break them with such a change
+            //
+
+            if (mLastTrack->mTx3gSize - mLastTrack->mTx3gFilled < chunk_size) {
+                size_t growth = kTx3gGrowth;
+                if (growth < chunk_size) {
+                    growth = chunk_size;
+                }
+                // although this disallows 2 tx3g atoms of nearly kMaxAtomSize...
+                if ((uint64_t) mLastTrack->mTx3gSize + growth > kMaxAtomSize) {
+                    ALOGE("b/124330204 - too much space");
+                    android_errorWriteLog(0x534e4554, "124330204");
+                    return ERROR_MALFORMED;
+                }
+                uint8_t *updated = (uint8_t *)realloc(mLastTrack->mTx3gBuffer,
+                                                mLastTrack->mTx3gSize + growth);
+                if (updated == NULL) {
+                    return ERROR_MALFORMED;
+                }
+                mLastTrack->mTx3gBuffer = updated;
+                mLastTrack->mTx3gSize += growth;
             }
 
-            if ((size_t)(mDataSource->readAt(*offset, buffer + size, chunk_size))
+            if ((size_t)(mDataSource->readAt(*offset,
+                                             mLastTrack->mTx3gBuffer + mLastTrack->mTx3gFilled,
+                                             chunk_size))
                     < chunk_size) {
-                delete[] buffer;
-                buffer = NULL;
 
                 // advance read pointer so we don't end up reading this again
                 *offset += chunk_size;
                 return ERROR_IO;
             }
 
-            AMediaFormat_setBuffer(mLastTrack->meta,
-                    AMEDIAFORMAT_KEY_TEXT_FORMAT_DATA, buffer, size + chunk_size);
-
-            delete[] buffer;
-
+            mLastTrack->mTx3gFilled += chunk_size;
             *offset += chunk_size;
             break;
         }
@@ -4040,7 +4059,7 @@
 
     if (track->has_elst and !strncasecmp("video/", mime, 6) and track->elst_media_time > 0) {
         track->elstShiftStartTicks = track->elst_media_time;
-        ALOGV("video track->elstShiftStartTicks :%" PRId64, track->elst_media_time);
+        ALOGV("video track->elstShiftStartTicks :%" PRIu64, track->elstShiftStartTicks);
     }
 
     MPEG4Source *source =  new MPEG4Source(
@@ -4450,7 +4469,7 @@
         const Trex *trex,
         off64_t firstMoofOffset,
         const sp<ItemTable> &itemTable,
-        int32_t elstShiftStartTicks)
+        uint64_t elstShiftStartTicks)
     : mFormat(format),
       mDataSource(dataSource),
       mTimescale(timeScale),
@@ -4576,7 +4595,7 @@
             // Start offset should be less or equal to composition time of first sample.
             // ISO : sample_composition_time_offset, version 0 (unsigned) for major brands.
             mElstShiftStartTicks = std::min(mElstShiftStartTicks,
-                (*mCurrentSamples.begin()).compositionOffset);
+                                            (uint64_t)(*mCurrentSamples.begin()).compositionOffset);
         }
         return err;
     }
@@ -4586,7 +4605,7 @@
         err = mSampleTable->getMetaDataForSample(0, NULL, NULL, &firstSampleCTS);
         // Start offset should be less or equal to composition time of first sample.
         // Composition time stamp of first sample cannot be negative.
-        mElstShiftStartTicks = std::min(mElstShiftStartTicks, (int32_t)firstSampleCTS);
+        mElstShiftStartTicks = std::min(mElstShiftStartTicks, firstSampleCTS);
     }
 
     return err;
@@ -5239,8 +5258,30 @@
         sampleCtsOffset = 0;
     }
 
-    if (size < (off64_t)sampleCount * bytesPerSample) {
-        return -EINVAL;
+    if (bytesPerSample != 0) {
+        if (size < (off64_t)sampleCount * bytesPerSample) {
+            return -EINVAL;
+        }
+    } else {
+        if (sampleDuration == 0) {
+            ALOGW("b/123389881 sampleDuration == 0");
+            android_errorWriteLog(0x534e4554, "124389881 zero");
+            return -EINVAL;
+        }
+
+        // apply some sanity (vs strict legality) checks
+        //
+        // clamp the count of entries in the trun box, to avoid spending forever parsing
+        // this box. Clamping (vs error) lets us play *something*.
+        // 1 million is about 400 msecs on a Pixel3, should be no more than a couple seconds
+        // on the slowest devices.
+        static constexpr uint32_t kMaxTrunSampleCount = 1000000;
+        if (sampleCount > kMaxTrunSampleCount) {
+            ALOGW("b/123389881 clamp sampleCount(%u) @ kMaxTrunSampleCount(%u)",
+                  sampleCount, kMaxTrunSampleCount);
+            android_errorWriteLog(0x534e4554, "124389881 count");
+
+            sampleCount = kMaxTrunSampleCount;
+        }
     }
 
     Sample tmp;
@@ -5496,7 +5537,11 @@
             err = mSampleTable->getMetaDataForSample(
                     mCurrentSampleIndex, &offset, &size, &cts, &isSyncSample, &stts);
             if(err == OK) {
-                cts -= mElstShiftStartTicks;
+                /* Composition time stamp cannot be negative. Some files have video sample
+                 * time (STTS) delta with zero value (b/117402420). Hence subtract only
+                 * min(cts, mElstShiftStartTicks), so that audio tracks can be played.
+                 */
+                cts -= std::min(cts, mElstShiftStartTicks);
             }
 
         } else {
@@ -5780,8 +5825,8 @@
     if (options && options->getSeekTo(&seekTimeUs, &mode)) {
 
         seekTimeUs += ((long double)mElstShiftStartTicks * 1000000) / mTimescale;
-        ALOGV("shifted seekTimeUs :%" PRId64 ", mElstShiftStartTicks:%" PRId32, seekTimeUs,
-                mElstShiftStartTicks);
+        ALOGV("shifted seekTimeUs :%" PRId64 ", mElstShiftStartTicks:%" PRIu64, seekTimeUs,
+              mElstShiftStartTicks);
 
         int numSidxEntries = mSegments.size();
         if (numSidxEntries != 0) {
@@ -5837,7 +5882,7 @@
 
     off64_t offset = 0;
     size_t size = 0;
-    uint32_t cts = 0;
+    uint64_t cts = 0;
     bool isSyncSample = false;
     bool newBuffer = false;
     if (mBuffer == NULL || mCurrentSampleIndex >= mCurrentSamples.size()) {
@@ -5869,7 +5914,11 @@
         offset = smpl->offset;
         size = smpl->size;
         cts = mCurrentTime + smpl->compositionOffset;
-        cts -= mElstShiftStartTicks;
+        /* Composition time stamp cannot be negative. Some files have video sample
+         * time (STTS) delta with zero value (b/117402420). Hence subtract only
+         * min(cts, mElstShiftStartTicks), so that audio tracks can be played.
+         */
+        cts -= std::min(cts, mElstShiftStartTicks);
 
         mCurrentTime += smpl->duration;
         isSyncSample = (mCurrentSampleIndex == 0);
diff --git a/media/extractors/mp4/MPEG4Extractor.h b/media/extractors/mp4/MPEG4Extractor.h
index fadfb50..031e793 100644
--- a/media/extractors/mp4/MPEG4Extractor.h
+++ b/media/extractors/mp4/MPEG4Extractor.h
@@ -83,12 +83,47 @@
         bool includes_expensive_metadata;
         bool skipTrack;
         bool has_elst;
+        /* signed int, ISO Spec allows media_time = -1 for other use cases.
+         * but we don't support empty edits for now.
+         */
         int64_t elst_media_time;
         uint64_t elst_segment_duration;
-        int32_t elstShiftStartTicks;
+        // unsigned int, shift start offset only when media_time > 0.
+        uint64_t elstShiftStartTicks;
         bool subsample_encryption;
+
+        uint8_t *mTx3gBuffer;
+        size_t mTx3gSize, mTx3gFilled;
+
+
+        Track() {
+            next = NULL;
+            meta = NULL;
+            timescale = 0;
+            includes_expensive_metadata = false;
+            skipTrack = false;
+            has_elst = false;
+            elst_media_time = 0;
+            elstShiftStartTicks = 0;
+            subsample_encryption = false;
+            mTx3gBuffer = NULL;
+            mTx3gSize = mTx3gFilled = 0;
+        }
+        ~Track() {
+            if (meta) {
+                AMediaFormat_delete(meta);
+                meta = NULL;
+            }
+            free (mTx3gBuffer);
+            mTx3gBuffer = NULL;
+        }
+
+      private:
+        DISALLOW_EVIL_CONSTRUCTORS(Track);
     };
 
+    static const int kTx3gGrowth = 16 * 1024;
+
     Vector<SidxEntry> mSidxEntries;
     off64_t mMoofOffset;
     bool mMoofFound;
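
The tx3g handling above replaces the per-atom copy-and-reallocate pattern with a single growable,
capped buffer per track. A condensed standalone illustration of the same append pattern (names
and the cap value are illustrative, not the extractor's own):

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    struct GrowBuf {
        uint8_t* data = nullptr;
        size_t size = 0;    // allocated
        size_t filled = 0;  // used
    };

    static constexpr size_t kGrowth = 16 * 1024;             // mirrors kTx3gGrowth
    static constexpr uint64_t kMaxTotal = 64 * 1024 * 1024;  // stands in for kMaxAtomSize

    // Appends len bytes, growing by at least kGrowth; refuses totals above kMaxTotal.
    bool appendBounded(GrowBuf& buf, const uint8_t* src, size_t len) {
        if (buf.size - buf.filled < len) {
            size_t growth = kGrowth < len ? len : kGrowth;
            if ((uint64_t)buf.size + growth > kMaxTotal) return false;
            uint8_t* updated = (uint8_t*)realloc(buf.data, buf.size + growth);
            if (updated == nullptr) return false;
            buf.data = updated;
            buf.size += growth;
        }
        memcpy(buf.data + buf.filled, src, len);
        buf.filled += len;
        return true;
    }
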
diff --git a/media/libaaudio/Android.bp b/media/libaaudio/Android.bp
index 4857008..16958f9 100644
--- a/media/libaaudio/Android.bp
+++ b/media/libaaudio/Android.bp
@@ -35,4 +35,3 @@
     version_script: "libaaudio.map.txt",
 }
 
-subdirs = ["*"]
diff --git a/media/libaaudio/examples/Android.bp b/media/libaaudio/examples/Android.bp
index 639fab2..49bd5ee 100644
--- a/media/libaaudio/examples/Android.bp
+++ b/media/libaaudio/examples/Android.bp
@@ -1,5 +1,3 @@
-subdirs = ["*"]
-
 cc_library_headers {
     name: "libaaudio_example_utils",
     export_include_dirs: ["utils"],
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
index c7c42eb..49d921f 100644
--- a/media/libaaudio/examples/loopback/src/loopback.cpp
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -40,7 +40,8 @@
 // V0.4.01 = add -h hang option
 //           fix -n option to set output buffer for -tm
 //           plot first glitch
-#define APP_VERSION             "0.4.01"
+// V0.4.02 = allow -n0 for minimal buffer size
+#define APP_VERSION             "0.4.02"
 
 // Tag for machine readable results as property = value pairs
 #define RESULT_TAG              "RESULT: "
@@ -535,7 +536,7 @@
     printf("INPUT  stream ----------------------------------------\n");
     // Use different parameters for the input.
     argParser.setDeviceId(requestedInputDeviceId);
-    argParser.setNumberOfBursts(AAUDIO_UNSPECIFIED);
+    argParser.setNumberOfBursts(AAudioParameters::kDefaultNumberOfBursts);
     argParser.setFormat(requestedInputFormat);
     argParser.setPerformanceMode(inputPerformanceLevel);
     argParser.setChannelCount(requestedInputChannelCount);
diff --git a/media/libaaudio/examples/utils/AAudioArgsParser.h b/media/libaaudio/examples/utils/AAudioArgsParser.h
index f5ed7aa..9115778 100644
--- a/media/libaaudio/examples/utils/AAudioArgsParser.h
+++ b/media/libaaudio/examples/utils/AAudioArgsParser.h
@@ -36,11 +36,14 @@
                                 aaudio_content_type_t contentType) = nullptr;
 static void (*s_setInputPreset)(AAudioStreamBuilder* builder,
                                 aaudio_input_preset_t inputPreset) = nullptr;
+static void (*s_setAllowedCapturePolicy)(AAudioStreamBuilder* builder,
+                                          aaudio_allowed_capture_policy_t policy) = nullptr;
 
 static bool s_loadAttempted = false;
 static aaudio_usage_t (*s_getUsage)(AAudioStream *stream) = nullptr;
 static aaudio_content_type_t (*s_getContentType)(AAudioStream *stream) = nullptr;
 static aaudio_input_preset_t (*s_getInputPreset)(AAudioStream *stream) = nullptr;
+static aaudio_allowed_capture_policy_t (*s_getAllowedCapturePolicy)(AAudioStream *stream) = nullptr;
 
 // Link to test functions in shared library.
 static void loadFutureFunctions() {
@@ -61,6 +64,10 @@
                 dlsym(handle, "AAudioStreamBuilder_setInputPreset");
         if (s_setInputPreset == nullptr) goto error;
 
+        s_setAllowedCapturePolicy = (void (*)(AAudioStreamBuilder *, aaudio_allowed_capture_policy_t))
+                dlsym(handle, "AAudioStreamBuilder_setAllowedCapturePolicy");
+        if (s_setAllowedCapturePolicy == nullptr) goto error;
+
         s_getUsage = (aaudio_usage_t (*)(AAudioStream *))
                 dlsym(handle, "AAudioStream_getUsage");
         if (s_getUsage == nullptr) goto error;
@@ -72,6 +79,10 @@
         s_getInputPreset = (aaudio_input_preset_t (*)(AAudioStream *))
                 dlsym(handle, "AAudioStream_getInputPreset");
         if (s_getInputPreset == nullptr) goto error;
+
+        s_getAllowedCapturePolicy = (aaudio_allowed_capture_policy_t (*)(AAudioStream *))
+                dlsym(handle, "AAudioStream_getAllowedCapturePolicy");
+        if (s_getAllowedCapturePolicy == nullptr) goto error;
     }
     return;
 
@@ -169,6 +180,14 @@
         mInputPreset = inputPreset;
     }
 
+    aaudio_allowed_capture_policy_t getAllowedCapturePolicy() const {
+        return mAllowedCapturePolicy;
+    }
+
+    void setAllowedCapturePolicy(aaudio_allowed_capture_policy_t policy) {
+        mAllowedCapturePolicy = policy;
+    }
+
     int32_t getDeviceId() const {
         return mDeviceId;
     }
@@ -223,8 +242,17 @@
         } else if (mUsage != AAUDIO_UNSPECIFIED){
             printf("WARNING: setInputPreset not supported");
         }
+
+        // Call Q functions if supported.
+        if (s_setAllowedCapturePolicy != nullptr) {
+            s_setAllowedCapturePolicy(builder, mAllowedCapturePolicy);
+        } else if (mAllowedCapturePolicy != AAUDIO_UNSPECIFIED){
+            printf("WARNING: setAllowedCapturePolicy not supported");
+        }
     }
 
+    static constexpr int32_t   kDefaultNumberOfBursts = 2;
+
 private:
     int32_t                    mChannelCount    = AAUDIO_UNSPECIFIED;
     aaudio_format_t            mFormat          = AAUDIO_FORMAT_UNSPECIFIED;
@@ -238,8 +266,9 @@
     aaudio_usage_t             mUsage           = AAUDIO_UNSPECIFIED;
     aaudio_content_type_t      mContentType     = AAUDIO_UNSPECIFIED;
     aaudio_input_preset_t      mInputPreset     = AAUDIO_UNSPECIFIED;
+    aaudio_allowed_capture_policy_t mAllowedCapturePolicy     = AAUDIO_UNSPECIFIED;
 
-    int32_t                    mNumberOfBursts  = AAUDIO_UNSPECIFIED;
+    int32_t                    mNumberOfBursts  = kDefaultNumberOfBursts;
     int32_t                    mFramesPerCallback = AAUDIO_UNSPECIFIED;
 };
 
@@ -267,6 +296,9 @@
                 case 'c':
                     setChannelCount(atoi(&arg[2]));
                     break;
+                case 'C':
+                    setAllowedCapturePolicy(parseAllowedCapturePolicy(arg[2]));
+                    break;
                 case 'd':
                     setDeviceId(atoi(&arg[2]));
                     break;
@@ -341,6 +373,10 @@
         printf("      Default values are UNSPECIFIED unless otherwise stated.\n");
         printf("      -b{bufferCapacity} frames\n");
         printf("      -c{channels} for example 2 for stereo\n");
+        printf("      -C{a|s|n} set playback capture policy\n");
+        printf("          a = _ALL (default)\n");
+        printf("          s = _SYSTEM\n");
+        printf("          n = _NONE\n");
         printf("      -d{deviceId} default is %d\n", AAUDIO_UNSPECIFIED);
         printf("      -f{0|1|2} set format\n");
         printf("          0 = UNSPECIFIED\n");
@@ -352,7 +388,7 @@
         printf("          1 = _NEVER, never use MMAP\n");
         printf("          2 = _AUTO, use MMAP if available, default for -m with no number\n");
         printf("          3 = _ALWAYS, use MMAP or fail\n");
-        printf("      -n{numberOfBursts} for setBufferSize\n");
+        printf("      -n{numberOfBursts} for setBufferSize, default %d\n", kDefaultNumberOfBursts);
         printf("      -p{performanceMode} set output AAUDIO_PERFORMANCE_MODE*, default NONE\n");
         printf("          n for _NONE\n");
         printf("          l for _LATENCY\n");
@@ -365,6 +401,25 @@
         printf("      -z{callbackSize} or block size, in frames, default = 0\n");
     }
 
+    static aaudio_allowed_capture_policy_t parseAllowedCapturePolicy(char c) {
+        aaudio_allowed_capture_policy_t policy = AAUDIO_ALLOW_CAPTURE_BY_ALL;
+        switch (c) {
+            case 'a':
+                policy = AAUDIO_ALLOW_CAPTURE_BY_ALL;
+                break;
+            case 's':
+                policy = AAUDIO_ALLOW_CAPTURE_BY_SYSTEM;
+                break;
+            case 'n':
+                policy = AAUDIO_ALLOW_CAPTURE_BY_NONE;
+                break;
+            default:
+                printf("ERROR: invalid playback capture policy %c\n", c);
+                break;
+        }
+        return policy;
+    }
+
     static aaudio_performance_mode_t parsePerformanceMode(char c) {
         aaudio_performance_mode_t mode = AAUDIO_PERFORMANCE_MODE_NONE;
         switch (c) {
@@ -407,17 +462,28 @@
                getFormat(), AAudioStream_getFormat(stream));
 
         int32_t framesPerBurst = AAudioStream_getFramesPerBurst(stream);
-        int32_t sizeFrames = AAudioStream_getBufferSizeInFrames(stream);
         printf("  Buffer:       burst     = %d\n", framesPerBurst);
+
+        int32_t sizeFrames = AAudioStream_getBufferSizeInFrames(stream);
         if (framesPerBurst > 0) {
-            printf("  Buffer:       size      = %d = (%d * %d) + %d\n",
+            int32_t requestedSize = getNumberOfBursts() * framesPerBurst;
+            printf("  BufferSize:   requested = %4d, actual = %4d = (%d * %d) + %d\n",
+                   requestedSize,
                    sizeFrames,
                    (sizeFrames / framesPerBurst),
                    framesPerBurst,
                    (sizeFrames % framesPerBurst));
+        } else {
+             printf("  BufferSize:    %d\n", sizeFrames);
         }
-        printf("  Capacity:     requested = %d, actual = %d\n", getBufferCapacity(),
-               AAudioStream_getBufferCapacityInFrames(stream));
+
+        int32_t capacityFrames = AAudioStream_getBufferCapacityInFrames(stream);
+        printf("  Capacity:     requested = %4d, actual = %4d = (%d * %d) + %d\n",
+               getBufferCapacity(),
+               capacityFrames,
+               (capacityFrames / framesPerBurst),
+               framesPerBurst,
+               (capacityFrames % framesPerBurst));
 
         printf("  CallbackSize: requested = %d, actual = %d\n", getFramesPerCallback(),
                AAudioStream_getFramesPerDataCallback(stream));
@@ -449,6 +515,11 @@
         printf("  Is MMAP used? %s\n", AAudioStream_isMMapUsed(stream)
                ? "yes" : "no");
 
+        if (s_getAllowedCapturePolicy != nullptr) {
+            printf("  ContentType:  requested = %d, actual = %d\n",
+                   getAllowedCapturePolicy(), s_getAllowedCapturePolicy(stream));
+        }
+
     }
 
     int32_t getDurationSeconds() const {
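
The parser resolves the Q-only entry points through dlsym so the example tools still run against an older libaaudio. A condensed sketch of that pattern (standalone helper, assuming the Q header introduced by this patch is available at build time; the real code caches one dlopen handle for all lookups):

    #include <dlfcn.h>
    #include <cstdio>

    #include <aaudio/AAudio.h>

    typedef void (*SetCapturePolicyFn)(AAudioStreamBuilder *,
                                       aaudio_allowed_capture_policy_t);

    // Resolve the Q API at runtime and fall back gracefully when it is absent.
    // The handle is intentionally not closed so the resolved symbol stays valid.
    static void maybeSetCapturePolicy(AAudioStreamBuilder *builder,
                                      aaudio_allowed_capture_policy_t policy) {
        void *handle = dlopen("libaaudio.so", RTLD_NOW);
        if (handle == nullptr) return;
        SetCapturePolicyFn fn = reinterpret_cast<SetCapturePolicyFn>(
                dlsym(handle, "AAudioStreamBuilder_setAllowedCapturePolicy"));
        if (fn != nullptr) {
            fn(builder, policy);
        } else if (policy != AAUDIO_UNSPECIFIED) {
            printf("WARNING: setAllowedCapturePolicy not supported\n");
        }
    }
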
diff --git a/media/libaaudio/examples/utils/AAudioSimplePlayer.h b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
index 4373fa9..fd1fc45 100644
--- a/media/libaaudio/examples/utils/AAudioSimplePlayer.h
+++ b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
@@ -120,10 +120,9 @@
 
         if (result == AAUDIO_OK) {
             int32_t sizeInBursts = parameters.getNumberOfBursts();
-            if (sizeInBursts > 0) {
-                int32_t framesPerBurst = AAudioStream_getFramesPerBurst(mStream);
-                AAudioStream_setBufferSizeInFrames(mStream, sizeInBursts * framesPerBurst);
-            }
+            int32_t framesPerBurst = AAudioStream_getFramesPerBurst(mStream);
+            int32_t bufferSizeFrames = sizeInBursts * framesPerBurst;
+            AAudioStream_setBufferSizeInFrames(mStream, bufferSizeFrames);
         }
 
         AAudioStreamBuilder_delete(builder);
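
With kDefaultNumberOfBursts the player now always requests an explicit buffer size, and -n0 asks for the minimum; the burst floor added to AudioStreamTrack::setBufferSize later in this change keeps the legacy path from going below one burst. A small sketch of sizing a buffer in bursts (the helper name is illustrative):

    #include <aaudio/AAudio.h>

    // Request a buffer of numBursts bursts; returns the actual size in frames
    // or a negative error. numBursts == 0 asks for the smallest buffer the
    // framework will allow (at least one burst on the legacy path after this change).
    static aaudio_result_t setBufferSizeInBursts(AAudioStream *stream, int32_t numBursts) {
        int32_t framesPerBurst = AAudioStream_getFramesPerBurst(stream);
        return AAudioStream_setBufferSizeInFrames(stream, numBursts * framesPerBurst);
    }
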
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
index 2b05f10..ca60233 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
@@ -31,7 +31,7 @@
 #include "AAudioSimplePlayer.h"
 #include "AAudioArgsParser.h"
 
-#define APP_VERSION  "0.1.6"
+#define APP_VERSION  "0.1.7"
 
 constexpr int32_t kDefaultHangTimeMSec = 10;
 
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 8e36c77..ee5d089 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -37,7 +37,7 @@
 
 /**
  * This is used to represent a value that has not been specified.
- * For example, an application could use AAUDIO_UNSPECIFIED to indicate
+ * For example, an application could use {@link #AAUDIO_UNSPECIFIED} to indicate
 * that it did not care what the specific value of a parameter was
  * and would accept whatever it was given.
  */
@@ -232,7 +232,8 @@
  * This information is used by certain platforms or routing policies
  * to make more refined volume or routing decisions.
  *
- * Note that these match the equivalent values in AudioAttributes in the Android Java API.
+ * Note that these match the equivalent values in {@link android.media.AudioAttributes}
+ * in the Android Java API.
  *
  * Added in API level 28.
  */
@@ -303,12 +304,13 @@
 /**
  * The CONTENT_TYPE attribute describes "what" you are playing.
  * It expresses the general category of the content. This information is optional.
- * But in case it is known (for instance {@link #AAUDIO_CONTENT_TYPE_MOVIE} for a
- * movie streaming service or {@link #AAUDIO_CONTENT_TYPE_SPEECH} for
+ * But in case it is known (for instance AAUDIO_CONTENT_TYPE_MOVIE for a
+ * movie streaming service or AAUDIO_CONTENT_TYPE_SPEECH for
  * an audio book application) this information might be used by the audio framework to
  * enforce audio focus.
  *
- * Note that these match the equivalent values in AudioAttributes in the Android Java API.
+ * Note that these match the equivalent values in {@link android.media.AudioAttributes}
+ * in the Android Java API.
  *
  * Added in API level 28.
  */
@@ -384,6 +386,48 @@
 typedef int32_t aaudio_input_preset_t;
 
 /**
+ * Specifies whether the audio may or may not be captured by other apps or the system.
+ *
+ * Note that these match the equivalent values in {@link android.media.AudioAttributes}
+ * in the Android Java API.
+ *
+ * Added in API level 29.
+ */
+enum {
+    /**
+     * Indicates that the audio may be captured by any app.
+     *
+     * For privacy, the following usages cannot be recorded: AAUDIO_USAGE_VOICE_COMMUNICATION*,
+     * AAUDIO_USAGE_NOTIFICATION*, AAUDIO_USAGE_ASSISTANCE* and {@link #AAUDIO_USAGE_ASSISTANT}.
+     *
+     * On {@link android.os.Build.VERSION_CODES#Q}, this means only {@link #AAUDIO_USAGE_MEDIA}
+     * and {@link #AAUDIO_USAGE_GAME} may be captured.
+     *
+     * See {@link android.media.AudioAttributes#ALLOW_CAPTURE_BY_ALL}.
+     */
+    AAUDIO_ALLOW_CAPTURE_BY_ALL = 1,
+    /**
+     * Indicates that the audio may only be captured by system apps.
+     *
+     * System apps can capture for many purposes, like accessibility or user guidance,
+     * but are subject to strong restrictions. See
+     * {@link android.media.AudioAttributes#ALLOW_CAPTURE_BY_SYSTEM} for what system apps
+     * can do with the captured audio.
+     */
+    AAUDIO_ALLOW_CAPTURE_BY_SYSTEM = 2,
+    /**
+     * Indicates that the audio may not be recorded by any app, even if it is a system app.
+     *
+     * Using {@link #AAUDIO_ALLOW_CAPTURE_BY_SYSTEM} instead of this value is encouraged,
+     * as system apps provide significant and useful features for the user (e.g. accessibility).
+     * See {@link android.media.AudioAttributes#ALLOW_CAPTURE_BY_NONE}.
+     */
+    AAUDIO_ALLOW_CAPTURE_BY_NONE = 3,
+};
+
+typedef int32_t aaudio_allowed_capture_policy_t;
+
+/**
  * These may be used with AAudioStreamBuilder_setSessionId().
  *
  * Added in API level 28.
@@ -452,8 +496,8 @@
  *
  * The deviceId is initially unspecified, meaning that the current default device will be used.
  *
- * The default direction is AAUDIO_DIRECTION_OUTPUT.
- * The default sharing mode is AAUDIO_SHARING_MODE_SHARED.
+ * The default direction is {@link #AAUDIO_DIRECTION_OUTPUT}.
+ * The default sharing mode is {@link #AAUDIO_SHARING_MODE_SHARED}.
  * The data format, samplesPerFrames and sampleRate are unspecified and will be
  * chosen by the device when it is opened.
  *
@@ -466,11 +510,11 @@
  * Request an audio device identified device using an ID.
  * On Android, for example, the ID could be obtained from the Java AudioManager.
  *
- * The default, if you do not call this function, is AAUDIO_UNSPECIFIED,
+ * The default, if you do not call this function, is {@link #AAUDIO_UNSPECIFIED},
  * in which case the primary device will be used.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
- * @param deviceId device identifier or AAUDIO_UNSPECIFIED
+ * @param deviceId device identifier or {@link #AAUDIO_UNSPECIFIED}
  */
 AAUDIO_API void AAudioStreamBuilder_setDeviceId(AAudioStreamBuilder* builder,
                                                 int32_t deviceId) __INTRODUCED_IN(26);
@@ -478,7 +522,7 @@
 /**
  * Request a sample rate in Hertz.
  *
- * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
+ * The default, if you do not call this function, is {@link #AAUDIO_UNSPECIFIED}.
  * An optimal value will then be chosen when the stream is opened.
  * After opening a stream with an unspecified value, the application must
  * query for the actual value, which may vary by device.
@@ -495,7 +539,7 @@
 /**
  * Request a number of channels for the stream.
  *
- * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
+ * The default, if you do not call this function, is {@link #AAUDIO_UNSPECIFIED}.
  * An optimal value will then be chosen when the stream is opened.
  * After opening a stream with an unspecified value, the application must
  * query for the actual value, which may vary by device.
@@ -519,9 +563,9 @@
                                                        int32_t samplesPerFrame) __INTRODUCED_IN(26);
 
 /**
- * Request a sample data format, for example AAUDIO_FORMAT_PCM_I16.
+ * Request a sample data format, for example {@link #AAUDIO_FORMAT_PCM_I16}.
  *
- * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
+ * The default, if you do not call this function, is {@link #AAUDIO_UNSPECIFIED}.
  * An optimal value will then be chosen when the stream is opened.
  * After opening a stream with an unspecified value, the application must
  * query for the actual value, which may vary by device.
@@ -530,7 +574,8 @@
  * If a stream cannot be opened with the specified value then the open will fail.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
- * @param format common formats are AAUDIO_FORMAT_PCM_FLOAT and AAUDIO_FORMAT_PCM_I16.
+ * @param format common formats are {@link #AAUDIO_FORMAT_PCM_FLOAT} and
+ *               {@link #AAUDIO_FORMAT_PCM_I16}.
  */
 AAUDIO_API void AAudioStreamBuilder_setFormat(AAudioStreamBuilder* builder,
                                               aaudio_format_t format) __INTRODUCED_IN(26);
@@ -538,13 +583,13 @@
 /**
  * Request a mode for sharing the device.
  *
- * The default, if you do not call this function, is AAUDIO_SHARING_MODE_SHARED.
+ * The default, if you do not call this function, is {@link #AAUDIO_SHARING_MODE_SHARED}.
  *
  * The requested sharing mode may not be available.
  * The application can query for the actual mode after the stream is opened.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
- * @param sharingMode AAUDIO_SHARING_MODE_SHARED or AAUDIO_SHARING_MODE_EXCLUSIVE
+ * @param sharingMode {@link #AAUDIO_SHARING_MODE_SHARED} or {@link #AAUDIO_SHARING_MODE_EXCLUSIVE}
  */
 AAUDIO_API void AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder* builder,
         aaudio_sharing_mode_t sharingMode) __INTRODUCED_IN(26);
@@ -552,10 +597,10 @@
 /**
  * Request the direction for a stream.
  *
- * The default, if you do not call this function, is AAUDIO_DIRECTION_OUTPUT.
+ * The default, if you do not call this function, is {@link #AAUDIO_DIRECTION_OUTPUT}.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
- * @param direction AAUDIO_DIRECTION_OUTPUT or AAUDIO_DIRECTION_INPUT
+ * @param direction {@link #AAUDIO_DIRECTION_OUTPUT} or {@link #AAUDIO_DIRECTION_INPUT}
  */
 AAUDIO_API void AAudioStreamBuilder_setDirection(AAudioStreamBuilder* builder,
         aaudio_direction_t direction) __INTRODUCED_IN(26);
@@ -564,10 +609,10 @@
  * Set the requested buffer capacity in frames.
  * The final AAudioStream capacity may differ, but will probably be at least this big.
  *
- * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
+ * The default, if you do not call this function, is {@link #AAUDIO_UNSPECIFIED}.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
- * @param numFrames the desired buffer capacity in frames or AAUDIO_UNSPECIFIED
+ * @param numFrames the desired buffer capacity in frames or {@link #AAUDIO_UNSPECIFIED}
  */
 AAUDIO_API void AAudioStreamBuilder_setBufferCapacityInFrames(AAudioStreamBuilder* builder,
         int32_t numFrames) __INTRODUCED_IN(26);
@@ -575,16 +620,17 @@
 /**
  * Set the requested performance mode.
  *
- * Supported modes are AAUDIO_PERFORMANCE_MODE_NONE, AAUDIO_PERFORMANCE_MODE_POWER_SAVING
- * and AAUDIO_PERFORMANCE_MODE_LOW_LATENCY.
+ * Supported modes are {@link #AAUDIO_PERFORMANCE_MODE_NONE},
+ * {@link #AAUDIO_PERFORMANCE_MODE_POWER_SAVING} and {@link #AAUDIO_PERFORMANCE_MODE_LOW_LATENCY}.
  *
- * The default, if you do not call this function, is AAUDIO_PERFORMANCE_MODE_NONE.
+ * The default, if you do not call this function, is {@link #AAUDIO_PERFORMANCE_MODE_NONE}.
  *
  * You may not get the mode you requested.
- * You can call AAudioStream_getPerformanceMode() to find out the final mode for the stream.
+ * You can call AAudioStream_getPerformanceMode()
+ * to find out the final mode for the stream.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
- * @param mode the desired performance mode, eg. AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
+ * @param mode the desired performance mode, eg. {@link #AAUDIO_PERFORMANCE_MODE_LOW_LATENCY}
  */
 AAUDIO_API void AAudioStreamBuilder_setPerformanceMode(AAudioStreamBuilder* builder,
         aaudio_performance_mode_t mode) __INTRODUCED_IN(26);
@@ -596,12 +642,12 @@
  * behavior of the stream.
  * This could, for example, affect how volume and focus is handled for the stream.
  *
- * The default, if you do not call this function, is AAUDIO_USAGE_MEDIA.
+ * The default, if you do not call this function, is {@link #AAUDIO_USAGE_MEDIA}.
  *
  * Added in API level 28.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
- * @param usage the desired usage, eg. AAUDIO_USAGE_GAME
+ * @param usage the desired usage, eg. {@link #AAUDIO_USAGE_GAME}
  */
 AAUDIO_API void AAudioStreamBuilder_setUsage(AAudioStreamBuilder* builder,
         aaudio_usage_t usage) __INTRODUCED_IN(28);
@@ -613,12 +659,12 @@
  * behavior of the stream.
  * This could, for example, affect whether a stream is paused when a notification occurs.
  *
- * The default, if you do not call this function, is AAUDIO_CONTENT_TYPE_MUSIC.
+ * The default, if you do not call this function, is {@link #AAUDIO_CONTENT_TYPE_MUSIC}.
  *
  * Added in API level 28.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
- * @param contentType the type of audio data, eg. AAUDIO_CONTENT_TYPE_SPEECH
+ * @param contentType the type of audio data, eg. {@link #AAUDIO_CONTENT_TYPE_SPEECH}
  */
 AAUDIO_API void AAudioStreamBuilder_setContentType(AAudioStreamBuilder* builder,
         aaudio_content_type_t contentType) __INTRODUCED_IN(28);
@@ -631,7 +677,7 @@
  * This could, for example, affect which microphones are used and how the
  * recorded data is processed.
  *
- * The default, if you do not call this function, is AAUDIO_INPUT_PRESET_VOICE_RECOGNITION.
+ * The default, if you do not call this function, is {@link #AAUDIO_INPUT_PRESET_VOICE_RECOGNITION}.
  * That is because VOICE_RECOGNITION is the preset with the lowest latency
  * on many platforms.
  *
@@ -643,14 +689,30 @@
 AAUDIO_API void AAudioStreamBuilder_setInputPreset(AAudioStreamBuilder* builder,
         aaudio_input_preset_t inputPreset) __INTRODUCED_IN(28);
 
+/**
+ * Specify whether audio from this stream may or may not be captured by other apps or the system.
+ *
+ * The default is {@link #AAUDIO_ALLOW_CAPTURE_BY_ALL}.
+ *
+ * Note that an application can also set its global policy, in which case the most restrictive
+ * policy is always applied. See {@link android.media.AudioAttributes#setAllowedCapturePolicy(int)}.
+ *
+ * Added in API level 29.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param capturePolicy the desired level of opt-out from being captured.
+ */
+AAUDIO_API void AAudioStreamBuilder_setAllowedCapturePolicy(AAudioStreamBuilder* builder,
+        aaudio_allowed_capture_policy_t capturePolicy) __INTRODUCED_IN(29);
+
 /** Set the requested session ID.
  *
  * The session ID can be used to associate a stream with effects processors.
  * The effects are controlled using the Android AudioEffect Java API.
  *
- * The default, if you do not call this function, is AAUDIO_SESSION_ID_NONE.
+ * The default, if you do not call this function, is {@link #AAUDIO_SESSION_ID_NONE}.
  *
- * If set to AAUDIO_SESSION_ID_ALLOCATE then a session ID will be allocated
+ * If set to {@link #AAUDIO_SESSION_ID_ALLOCATE} then a session ID will be allocated
  * when the stream is opened.
  *
  * The allocated session ID can be obtained by calling AAudioStream_getSessionId()
@@ -668,7 +730,7 @@
  * Added in API level 28.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
- * @param sessionId an allocated sessionID or AAUDIO_SESSION_ID_ALLOCATE
+ * @param sessionId an allocated sessionID or {@link #AAUDIO_SESSION_ID_ALLOCATE}
  */
 AAUDIO_API void AAudioStreamBuilder_setSessionId(AAudioStreamBuilder* builder,
         aaudio_session_id_t sessionId) __INTRODUCED_IN(28);
@@ -751,15 +813,16 @@
  *
  * Note that when using this callback, the audio data will be passed in or out
  * of the function as an argument.
- * So you cannot call AAudioStream_write() or AAudioStream_read() on the same stream
- * that has an active data callback.
+ * So you cannot call AAudioStream_write() or AAudioStream_read()
+ * on the same stream that has an active data callback.
  *
- * The callback function will start being called after AAudioStream_requestStart() is called.
+ * The callback function will start being called after AAudioStream_requestStart()
+ * is called.
  * It will stop being called after AAudioStream_requestPause() or
  * AAudioStream_requestStop() is called.
  *
  * This callback function will be called on a real-time thread owned by AAudio. See
- * {@link AAudioStream_dataCallback} for more information.
+ * {@link #AAudioStream_dataCallback} for more information.
  *
  * Note that the AAudio callbacks will never be called simultaneously from multiple threads.
  *
@@ -773,9 +836,9 @@
 
 /**
  * Set the requested data callback buffer size in frames.
- * See {@link AAudioStream_dataCallback}.
+ * See {@link #AAudioStream_dataCallback}.
  *
- * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
+ * The default, if you do not call this function, is {@link #AAUDIO_UNSPECIFIED}.
  *
  * For the lowest possible latency, do not call this function. AAudio will then
  * call the dataProc callback function with whatever size is optimal.
@@ -792,7 +855,7 @@
  * half the buffer capacity, to allow double buffering.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
- * @param numFrames the desired buffer size in frames or AAUDIO_UNSPECIFIED
+ * @param numFrames the desired buffer size in frames or {@link #AAUDIO_UNSPECIFIED}
  */
 AAUDIO_API void AAudioStreamBuilder_setFramesPerDataCallback(AAudioStreamBuilder* builder,
                                                              int32_t numFrames) __INTRODUCED_IN(26);
@@ -853,12 +916,12 @@
 /**
  * Open a stream based on the options in the StreamBuilder.
  *
- * AAudioStream_close must be called when finished with the stream to recover
+ * AAudioStream_close() must be called when finished with the stream to recover
  * the memory and to free the associated resources.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param stream pointer to a variable to receive the new stream reference
- * @return AAUDIO_OK or a negative error.
+ * @return {@link #AAUDIO_OK} or a negative error.
  */
 AAUDIO_API aaudio_result_t  AAudioStreamBuilder_openStream(AAudioStreamBuilder* builder,
         AAudioStream** stream) __INTRODUCED_IN(26);
@@ -867,7 +930,7 @@
  * Delete the resources associated with the StreamBuilder.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
- * @return AAUDIO_OK or a negative error.
+ * @return {@link #AAUDIO_OK} or a negative error.
  */
 AAUDIO_API aaudio_result_t  AAudioStreamBuilder_delete(AAudioStreamBuilder* builder)
     __INTRODUCED_IN(26);
@@ -880,7 +943,7 @@
  * Free the resources associated with a stream created by AAudioStreamBuilder_openStream()
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
- * @return AAUDIO_OK or a negative error.
+ * @return {@link #AAUDIO_OK} or a negative error.
  */
 AAUDIO_API aaudio_result_t  AAudioStream_close(AAudioStream* stream) __INTRODUCED_IN(26);
 
@@ -888,24 +951,26 @@
  * Asynchronously request to start playing the stream. For output streams, one should
  * write to the stream to fill the buffer before starting.
  * Otherwise it will underflow.
- * After this call the state will be in AAUDIO_STREAM_STATE_STARTING or AAUDIO_STREAM_STATE_STARTED.
+ * After this call the state will be in {@link #AAUDIO_STREAM_STATE_STARTING} or
+ * {@link #AAUDIO_STREAM_STATE_STARTED}.
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
- * @return AAUDIO_OK or a negative error.
+ * @return {@link #AAUDIO_OK} or a negative error.
  */
 AAUDIO_API aaudio_result_t  AAudioStream_requestStart(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
  * Asynchronous request for the stream to pause.
  * Pausing a stream will freeze the data flow but not flush any buffers.
- * Use AAudioStream_Start() to resume playback after a pause.
- * After this call the state will be in AAUDIO_STREAM_STATE_PAUSING or AAUDIO_STREAM_STATE_PAUSED.
+ * Use AAudioStream_requestStart() to resume playback after a pause.
+ * After this call the state will be in {@link #AAUDIO_STREAM_STATE_PAUSING} or
+ * {@link #AAUDIO_STREAM_STATE_PAUSED}.
  *
- * This will return AAUDIO_ERROR_UNIMPLEMENTED for input streams.
+ * This will return {@link #AAUDIO_ERROR_UNIMPLEMENTED} for input streams.
  * For input streams use AAudioStream_requestStop().
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
- * @return AAUDIO_OK or a negative error.
+ * @return {@link #AAUDIO_OK} or a negative error.
  */
 AAUDIO_API aaudio_result_t  AAudioStream_requestPause(AAudioStream* stream) __INTRODUCED_IN(26);
 
@@ -914,32 +979,34 @@
  * Flushing will discard any pending data.
  * This call only works if the stream is pausing or paused. TODO review
  * Frame counters are not reset by a flush. They may be advanced.
- * After this call the state will be in AAUDIO_STREAM_STATE_FLUSHING or AAUDIO_STREAM_STATE_FLUSHED.
+ * After this call the state will be in {@link #AAUDIO_STREAM_STATE_FLUSHING} or
+ * {@link #AAUDIO_STREAM_STATE_FLUSHED}.
  *
- * This will return AAUDIO_ERROR_UNIMPLEMENTED for input streams.
+ * This will return {@link #AAUDIO_ERROR_UNIMPLEMENTED} for input streams.
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
- * @return AAUDIO_OK or a negative error.
+ * @return {@link #AAUDIO_OK} or a negative error.
  */
 AAUDIO_API aaudio_result_t  AAudioStream_requestFlush(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
  * Asynchronous request for the stream to stop.
  * The stream will stop after all of the data currently buffered has been played.
- * After this call the state will be in AAUDIO_STREAM_STATE_STOPPING or AAUDIO_STREAM_STATE_STOPPED.
+ * After this call the state will be in {@link #AAUDIO_STREAM_STATE_STOPPING} or
+ * {@link #AAUDIO_STREAM_STATE_STOPPED}.
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
- * @return AAUDIO_OK or a negative error.
+ * @return {@link #AAUDIO_OK} or a negative error.
  */
 AAUDIO_API aaudio_result_t  AAudioStream_requestStop(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
- * Query the current state of the client, eg. AAUDIO_STREAM_STATE_PAUSING
+ * Query the current state of the client, eg. {@link #AAUDIO_STREAM_STATE_PAUSING}
  *
  * This function will immediately return the state without updating the state.
  * If you want to update the client state based on the server state then
  * call AAudioStream_waitForStateChange() with currentState
- * set to AAUDIO_STREAM_STATE_UNKNOWN and a zero timeout.
+ * set to {@link #AAUDIO_STREAM_STATE_UNKNOWN} and a zero timeout.
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  */
@@ -965,7 +1032,7 @@
  * @param inputState The state we want to avoid.
  * @param nextState Pointer to a variable that will be set to the new state.
  * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
- * @return AAUDIO_OK or a negative error.
+ * @return {@link #AAUDIO_OK} or a negative error.
  */
 AAUDIO_API aaudio_result_t AAudioStream_waitForStateChange(AAudioStream* stream,
         aaudio_stream_state_t inputState, aaudio_stream_state_t *nextState,
@@ -1082,17 +1149,17 @@
  * This call can be used if the application needs to know the value of numFrames before
  * the stream is started. This is not normally necessary.
  *
- * If a specific size was requested by calling AAudioStreamBuilder_setFramesPerDataCallback()
- * then this will be the same size.
+ * If a specific size was requested by calling
+ * AAudioStreamBuilder_setFramesPerDataCallback() then this will be the same size.
  *
  * If AAudioStreamBuilder_setFramesPerDataCallback() was not called then this will
- * return the size chosen by AAudio, or AAUDIO_UNSPECIFIED.
+ * return the size chosen by AAudio, or {@link #AAUDIO_UNSPECIFIED}.
  *
- * AAUDIO_UNSPECIFIED indicates that the callback buffer size for this stream
+ * {@link #AAUDIO_UNSPECIFIED} indicates that the callback buffer size for this stream
  * may vary from one dataProc callback to the next.
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
- * @return callback buffer size in frames or AAUDIO_UNSPECIFIED
+ * @return callback buffer size in frames or {@link #AAUDIO_UNSPECIFIED}
  */
 AAUDIO_API int32_t AAudioStream_getFramesPerDataCallback(AAudioStream* stream) __INTRODUCED_IN(26);
 
@@ -1202,21 +1269,22 @@
  * The session ID can be used to associate a stream with effects processors.
  * The effects are controlled using the Android AudioEffect Java API.
  *
- * If AAudioStreamBuilder_setSessionId() was called with AAUDIO_SESSION_ID_ALLOCATE
+ * If AAudioStreamBuilder_setSessionId() was
+ * called with {@link #AAUDIO_SESSION_ID_ALLOCATE}
  * then a new session ID should be allocated once when the stream is opened.
  *
  * If AAudioStreamBuilder_setSessionId() was called with a previously allocated
  * session ID then that value should be returned.
  *
  * If AAudioStreamBuilder_setSessionId() was not called then this function should
- * return AAUDIO_SESSION_ID_NONE.
+ * return {@link #AAUDIO_SESSION_ID_NONE}.
  *
  * The sessionID for a stream should not change once the stream has been opened.
  *
  * Added in API level 28.
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
- * @return session ID or AAUDIO_SESSION_ID_NONE
+ * @return session ID or {@link #AAUDIO_SESSION_ID_NONE}
  */
 AAUDIO_API aaudio_session_id_t AAudioStream_getSessionId(AAudioStream* stream) __INTRODUCED_IN(28);
 
@@ -1225,11 +1293,11 @@
  * This can be used to synchronize audio with video or MIDI.
  * It can also be used to align a recorded stream with a playback stream.
  *
- * Timestamps are only valid when the stream is in AAUDIO_STREAM_STATE_STARTED.
- * AAUDIO_ERROR_INVALID_STATE will be returned if the stream is not started.
+ * Timestamps are only valid when the stream is in {@link #AAUDIO_STREAM_STATE_STARTED}.
+ * {@link #AAUDIO_ERROR_INVALID_STATE} will be returned if the stream is not started.
  * Note that because requestStart() is asynchronous, timestamps will not be valid until
  * a short time after calling requestStart().
- * So AAUDIO_ERROR_INVALID_STATE should not be considered a fatal error.
+ * So {@link #AAUDIO_ERROR_INVALID_STATE} should not be considered a fatal error.
  * Just try calling again later.
  *
  * If an error occurs, then the position and time will not be modified.
@@ -1240,7 +1308,7 @@
  * @param clockid CLOCK_MONOTONIC or CLOCK_BOOTTIME
  * @param framePosition pointer to a variable to receive the position
  * @param timeNanoseconds pointer to a variable to receive the time
- * @return AAUDIO_OK or a negative error
+ * @return {@link #AAUDIO_OK} or a negative error
  */
 AAUDIO_API aaudio_result_t AAudioStream_getTimestamp(AAudioStream* stream,
         clockid_t clockid, int64_t *framePosition, int64_t *timeNanoseconds) __INTRODUCED_IN(26);
@@ -1261,7 +1329,7 @@
  * Added in API level 28.
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
- * @return content type, for example AAUDIO_CONTENT_TYPE_MUSIC
+ * @return content type, for example {@link #AAUDIO_CONTENT_TYPE_MUSIC}
  */
 AAUDIO_API aaudio_content_type_t AAudioStream_getContentType(AAudioStream* stream)
         __INTRODUCED_IN(28);
@@ -1272,11 +1340,23 @@
  * Added in API level 28.
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
- * @return input preset, for example AAUDIO_INPUT_PRESET_CAMCORDER
+ * @return input preset, for example {@link #AAUDIO_INPUT_PRESET_CAMCORDER}
  */
 AAUDIO_API aaudio_input_preset_t AAudioStream_getInputPreset(AAudioStream* stream)
         __INTRODUCED_IN(28);
 
+/**
+ * Return the policy that determines whether the audio may or may not be captured
+ * by other apps or the system.
+ *
+ * Added in API level 29.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return the allowed capture policy, for example {@link #AAUDIO_ALLOW_CAPTURE_BY_ALL}
+ */
+AAUDIO_API aaudio_allowed_capture_policy_t AAudioStream_getAllowedCapturePolicy(
+        AAudioStream* stream) __INTRODUCED_IN(29);
+
 #ifdef __cplusplus
 }
 #endif
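
A minimal usage sketch of the capture-policy API added to AAudio.h above (error handling trimmed; openPrivateStream is an illustrative name, not part of the patch):

    #include <aaudio/AAudio.h>

    // Open an output stream whose audio may not be captured by other apps.
    static aaudio_result_t openPrivateStream(AAudioStream **streamOut) {
        AAudioStreamBuilder *builder = nullptr;
        aaudio_result_t result = AAudio_createStreamBuilder(&builder);
        if (result != AAUDIO_OK) return result;

        AAudioStreamBuilder_setAllowedCapturePolicy(builder, AAUDIO_ALLOW_CAPTURE_BY_NONE);
        result = AAudioStreamBuilder_openStream(builder, streamOut);
        AAudioStreamBuilder_delete(builder);

        if (result == AAUDIO_OK) {
            // The opened stream reports the policy that was actually applied.
            aaudio_allowed_capture_policy_t policy =
                    AAudioStream_getAllowedCapturePolicy(*streamOut);
            (void) policy;
        }
        return result;
    }
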
diff --git a/media/libaaudio/libaaudio.map.txt b/media/libaaudio/libaaudio.map.txt
index cbf5921..a87ede3 100644
--- a/media/libaaudio/libaaudio.map.txt
+++ b/media/libaaudio/libaaudio.map.txt
@@ -20,6 +20,7 @@
     AAudioStreamBuilder_setUsage;       # introduced=28
     AAudioStreamBuilder_setContentType; # introduced=28
     AAudioStreamBuilder_setInputPreset; # introduced=28
+    AAudioStreamBuilder_setAllowedCapturePolicy; # introduced=29
     AAudioStreamBuilder_setSessionId;   # introduced=28
     AAudioStreamBuilder_openStream;
     AAudioStreamBuilder_delete;
@@ -49,6 +50,7 @@
     AAudioStream_getUsage;       # introduced=28
     AAudioStream_getContentType; # introduced=28
     AAudioStream_getInputPreset; # introduced=28
+    AAudioStream_getAllowedCapturePolicy; # introduced=29
     AAudioStream_getFramesWritten;
     AAudioStream_getFramesRead;
     AAudioStream_getSessionId;   # introduced=28
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
index 3d1bc9b..a987fab 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
@@ -62,6 +62,8 @@
     if (status != NO_ERROR) goto error;
     status = parcel->writeInt32((int32_t) getInputPreset());
     if (status != NO_ERROR) goto error;
+    status = parcel->writeInt32((int32_t) getAllowedCapturePolicy());
+    if (status != NO_ERROR) goto error;
     status = parcel->writeInt32(getSessionId());
     if (status != NO_ERROR) goto error;
     return NO_ERROR;
@@ -105,6 +107,9 @@
     setInputPreset((aaudio_input_preset_t) value);
     status = parcel->readInt32(&value);
     if (status != NO_ERROR) goto error;
+    setAllowedCapturePolicy((aaudio_allowed_capture_policy_t) value);
+    status = parcel->readInt32(&value);
+    if (status != NO_ERROR) goto error;
     setSessionId(value);
 
     return NO_ERROR;
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 0d71efc..8ae2644 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -204,6 +204,12 @@
     streamBuilder->setBufferCapacity(frames);
 }
 
+AAUDIO_API void AAudioStreamBuilder_setAllowedCapturePolicy(
+        AAudioStreamBuilder* builder, aaudio_allowed_capture_policy_t policy) {
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+    streamBuilder->setAllowedCapturePolicy(policy);
+}
+
 AAUDIO_API void AAudioStreamBuilder_setSessionId(AAudioStreamBuilder* builder,
                                                  aaudio_session_id_t sessionId)
 {
@@ -494,6 +500,13 @@
     return audioStream->getInputPreset();
 }
 
+AAUDIO_API aaudio_allowed_capture_policy_t AAudioStream_getAllowedCapturePolicy(
+        AAudioStream* stream)
+{
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getAllowedCapturePolicy();
+}
+
 AAUDIO_API int32_t AAudioStream_getSessionId(AAudioStream* stream)
 {
     AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.cpp b/media/libaaudio/src/core/AAudioStreamParameters.cpp
index 88da53a..e5bda30 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.cpp
+++ b/media/libaaudio/src/core/AAudioStreamParameters.cpp
@@ -35,17 +35,18 @@
 AAudioStreamParameters::~AAudioStreamParameters() {}
 
 void AAudioStreamParameters::copyFrom(const AAudioStreamParameters &other) {
-    mSamplesPerFrame = other.mSamplesPerFrame;
-    mSampleRate      = other.mSampleRate;
-    mDeviceId        = other.mDeviceId;
-    mSessionId       = other.mSessionId;
-    mSharingMode     = other.mSharingMode;
-    mAudioFormat     = other.mAudioFormat;
-    mDirection       = other.mDirection;
-    mBufferCapacity  = other.mBufferCapacity;
-    mUsage           = other.mUsage;
-    mContentType     = other.mContentType;
-    mInputPreset     = other.mInputPreset;
+    mSamplesPerFrame      = other.mSamplesPerFrame;
+    mSampleRate           = other.mSampleRate;
+    mDeviceId             = other.mDeviceId;
+    mSessionId            = other.mSessionId;
+    mSharingMode          = other.mSharingMode;
+    mAudioFormat          = other.mAudioFormat;
+    mDirection            = other.mDirection;
+    mBufferCapacity       = other.mBufferCapacity;
+    mUsage                = other.mUsage;
+    mContentType          = other.mContentType;
+    mInputPreset          = other.mInputPreset;
+    mAllowedCapturePolicy = other.mAllowedCapturePolicy;
 }
 
 static aaudio_result_t isFormatValid(audio_format_t format) {
@@ -166,19 +167,32 @@
             // break;
     }
 
+    switch (mAllowedCapturePolicy) {
+        case AAUDIO_UNSPECIFIED:
+        case AAUDIO_ALLOW_CAPTURE_BY_ALL:
+        case AAUDIO_ALLOW_CAPTURE_BY_SYSTEM:
+        case AAUDIO_ALLOW_CAPTURE_BY_NONE:
+            break; // valid
+        default:
+            ALOGE("allowed capture policy not valid = %d", mAllowedCapturePolicy);
+            return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+            // break;
+    }
+
     return AAUDIO_OK;
 }
 
 void AAudioStreamParameters::dump() const {
-    ALOGD("mDeviceId        = %6d", mDeviceId);
-    ALOGD("mSessionId       = %6d", mSessionId);
-    ALOGD("mSampleRate      = %6d", mSampleRate);
-    ALOGD("mSamplesPerFrame = %6d", mSamplesPerFrame);
-    ALOGD("mSharingMode     = %6d", (int)mSharingMode);
-    ALOGD("mAudioFormat     = %6d", (int)mAudioFormat);
-    ALOGD("mDirection       = %6d", mDirection);
-    ALOGD("mBufferCapacity  = %6d", mBufferCapacity);
-    ALOGD("mUsage           = %6d", mUsage);
-    ALOGD("mContentType     = %6d", mContentType);
-    ALOGD("mInputPreset     = %6d", mInputPreset);
+    ALOGD("mDeviceId             = %6d", mDeviceId);
+    ALOGD("mSessionId            = %6d", mSessionId);
+    ALOGD("mSampleRate           = %6d", mSampleRate);
+    ALOGD("mSamplesPerFrame      = %6d", mSamplesPerFrame);
+    ALOGD("mSharingMode          = %6d", (int)mSharingMode);
+    ALOGD("mAudioFormat          = %6d", (int)mAudioFormat);
+    ALOGD("mDirection            = %6d", mDirection);
+    ALOGD("mBufferCapacity       = %6d", mBufferCapacity);
+    ALOGD("mUsage                = %6d", mUsage);
+    ALOGD("mContentType          = %6d", mContentType);
+    ALOGD("mInputPreset          = %6d", mInputPreset);
+    ALOGD("mAllowedCapturePolicy = %6d", mAllowedCapturePolicy);
 }
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.h b/media/libaaudio/src/core/AAudioStreamParameters.h
index 6beb4b2..2e21a8d 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.h
+++ b/media/libaaudio/src/core/AAudioStreamParameters.h
@@ -112,6 +112,14 @@
         mInputPreset = inputPreset;
     }
 
+    aaudio_allowed_capture_policy_t getAllowedCapturePolicy() const {
+        return mAllowedCapturePolicy;
+    }
+
+    void setAllowedCapturePolicy(aaudio_allowed_capture_policy_t policy) {
+        mAllowedCapturePolicy = policy;
+    }
+
     aaudio_session_id_t getSessionId() const {
         return mSessionId;
     }
@@ -138,17 +146,18 @@
     void dump() const;
 
 private:
-    int32_t                    mSamplesPerFrame = AAUDIO_UNSPECIFIED;
-    int32_t                    mSampleRate      = AAUDIO_UNSPECIFIED;
-    int32_t                    mDeviceId        = AAUDIO_UNSPECIFIED;
-    aaudio_sharing_mode_t      mSharingMode     = AAUDIO_SHARING_MODE_SHARED;
-    audio_format_t             mAudioFormat     = AUDIO_FORMAT_DEFAULT;
-    aaudio_direction_t         mDirection       = AAUDIO_DIRECTION_OUTPUT;
-    aaudio_usage_t             mUsage           = AAUDIO_UNSPECIFIED;
-    aaudio_content_type_t      mContentType     = AAUDIO_UNSPECIFIED;
-    aaudio_input_preset_t      mInputPreset     = AAUDIO_UNSPECIFIED;
-    int32_t                    mBufferCapacity  = AAUDIO_UNSPECIFIED;
-    aaudio_session_id_t        mSessionId       = AAUDIO_SESSION_ID_NONE;
+    int32_t                         mSamplesPerFrame      = AAUDIO_UNSPECIFIED;
+    int32_t                         mSampleRate           = AAUDIO_UNSPECIFIED;
+    int32_t                         mDeviceId             = AAUDIO_UNSPECIFIED;
+    aaudio_sharing_mode_t           mSharingMode          = AAUDIO_SHARING_MODE_SHARED;
+    audio_format_t                  mAudioFormat          = AUDIO_FORMAT_DEFAULT;
+    aaudio_direction_t              mDirection            = AAUDIO_DIRECTION_OUTPUT;
+    aaudio_usage_t                  mUsage                = AAUDIO_UNSPECIFIED;
+    aaudio_content_type_t           mContentType          = AAUDIO_UNSPECIFIED;
+    aaudio_input_preset_t           mInputPreset          = AAUDIO_UNSPECIFIED;
+    int32_t                         mBufferCapacity       = AAUDIO_UNSPECIFIED;
+    aaudio_allowed_capture_policy_t mAllowedCapturePolicy = AAUDIO_UNSPECIFIED;
+    aaudio_session_id_t             mSessionId            = AAUDIO_SESSION_ID_NONE;
 };
 
 } /* namespace aaudio */
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index e39a075..732d45c 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -94,6 +94,10 @@
     if (mInputPreset == AAUDIO_UNSPECIFIED) {
         mInputPreset = AAUDIO_INPUT_PRESET_VOICE_RECOGNITION;
     }
+    mAllowedCapturePolicy = builder.getAllowedCapturePolicy();
+    if (mAllowedCapturePolicy == AAUDIO_UNSPECIFIED) {
+        mAllowedCapturePolicy = AAUDIO_ALLOW_CAPTURE_BY_ALL;
+    }
 
     // callbacks
     mFramesPerDataCallback = builder.getFramesPerDataCallback();
@@ -113,8 +117,8 @@
           mPerformanceMode,
           (isDataCallbackSet() ? "ON" : "OFF"),
           mFramesPerDataCallback);
-    ALOGI("open() usage  = %d, contentType = %d, inputPreset = %d",
-          mUsage, mContentType, mInputPreset);
+    ALOGI("open() usage  = %d, contentType = %d, inputPreset = %d, allowedCapturePolicy = %d",
+          mUsage, mContentType, mInputPreset, mAllowedCapturePolicy);
 
     return AAUDIO_OK;
 }
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 46951f5..32713b1 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -219,6 +219,10 @@
         return mInputPreset;
     }
 
+    aaudio_allowed_capture_policy_t getAllowedCapturePolicy() const {
+        return mAllowedCapturePolicy;
+    }
+
     int32_t getSessionId() const {
         return mSessionId;
     }
@@ -525,6 +529,13 @@
         mInputPreset = inputPreset;
     }
 
+    /**
+     * This should not be called after the open() call.
+     */
+    void setAllowedCapturePolicy(aaudio_allowed_capture_policy_t policy) {
+        mAllowedCapturePolicy = policy;
+    }
+
 private:
 
     aaudio_result_t safeStop();
@@ -546,6 +557,7 @@
     aaudio_usage_t              mUsage           = AAUDIO_UNSPECIFIED;
     aaudio_content_type_t       mContentType     = AAUDIO_UNSPECIFIED;
     aaudio_input_preset_t       mInputPreset     = AAUDIO_UNSPECIFIED;
+    aaudio_allowed_capture_policy_t mAllowedCapturePolicy = AAUDIO_ALLOW_CAPTURE_BY_ALL;
 
     int32_t                     mSessionId = AAUDIO_UNSPECIFIED;
 
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index ff95aed..d628bf7 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -134,12 +134,14 @@
             AAudioConvert_contentTypeToInternal(builder.getContentType());
     const audio_usage_t usage =
             AAudioConvert_usageToInternal(builder.getUsage());
+    const audio_flags_mask_t attributesFlags =
+        AAudioConvert_allowCapturePolicyToAudioFlagsMask(builder.getAllowedCapturePolicy());
 
     const audio_attributes_t attributes = {
             .content_type = contentType,
             .usage = usage,
             .source = AUDIO_SOURCE_DEFAULT, // only used for recording
-            .flags = AUDIO_FLAG_NONE, // Different than the AUDIO_OUTPUT_FLAGS
+            .flags = attributesFlags,
             .tags = ""
     };
 
@@ -420,6 +422,10 @@
 
 aaudio_result_t AudioStreamTrack::setBufferSize(int32_t requestedFrames)
 {
+    // Do not ask for less than one burst.
+    if (requestedFrames < getFramesPerBurst()) {
+        requestedFrames = getFramesPerBurst();
+    }
     ssize_t result = mAudioTrack->setBufferSizeInFrames(requestedFrames);
     if (result < 0) {
         return AAudioConvert_androidToAAudioResult(result);
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index 723cbf1..96ed56a 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -217,6 +217,22 @@
     return (audio_source_t) preset; // same value
 }
 
+audio_flags_mask_t AAudioConvert_allowCapturePolicyToAudioFlagsMask(
+        aaudio_allowed_capture_policy_t policy) {
+    switch (policy) {
+        case AAUDIO_UNSPECIFIED:
+        case AAUDIO_ALLOW_CAPTURE_BY_ALL:
+            return AUDIO_FLAG_NONE;
+        case AAUDIO_ALLOW_CAPTURE_BY_SYSTEM:
+            return AUDIO_FLAG_NO_MEDIA_PROJECTION;
+        case AAUDIO_ALLOW_CAPTURE_BY_NONE:
+            return AUDIO_FLAG_NO_MEDIA_PROJECTION | AUDIO_FLAG_NO_SYSTEM_CAPTURE;
+        default:
+            ALOGE("%s() 0x%08X unrecognized", __func__, policy);
+            return AUDIO_FLAG_NONE;
+    }
+}
+
 int32_t AAudioConvert_framesToBytes(int32_t numFrames,
                                     int32_t bytesPerFrame,
                                     int32_t *sizeInBytes) {
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index dc2b198..76d0457 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -84,6 +84,14 @@
  */
 audio_source_t AAudioConvert_inputPresetToAudioSource(aaudio_input_preset_t preset);
 
+/**
+ * Note that this function does not validate the passed in value.
+ * That is done somewhere else.
+ * @return internal audio flags mask
+ */
+audio_flags_mask_t AAudioConvert_allowCapturePolicyToAudioFlagsMask(
+        aaudio_allowed_capture_policy_t policy);
+
 // Note that this code may be replaced by Settings or by some other system configuration tool.
 
 #define AAUDIO_PROP_MMAP_POLICY           "aaudio.mmap_policy"
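
A small spot-check of the new policy-to-flags conversion, matching the switch in AAudioUtilities.cpp (the include path for AAudioUtilities.h depends on the build setup and is an assumption here):

    #include <cassert>

    #include <system/audio.h>
    #include <aaudio/AAudio.h>
    #include "utility/AAudioUtilities.h"   // assumed include path

    // Spot-check the mapping: stricter policies add more AUDIO_FLAG_NO_* bits.
    static void checkCapturePolicyFlags() {
        assert(AAudioConvert_allowCapturePolicyToAudioFlagsMask(AAUDIO_ALLOW_CAPTURE_BY_ALL)
                == AUDIO_FLAG_NONE);
        assert((AAudioConvert_allowCapturePolicyToAudioFlagsMask(AAUDIO_ALLOW_CAPTURE_BY_SYSTEM)
                & AUDIO_FLAG_NO_MEDIA_PROJECTION) != 0);
        assert((AAudioConvert_allowCapturePolicyToAudioFlagsMask(AAUDIO_ALLOW_CAPTURE_BY_NONE)
                & AUDIO_FLAG_NO_SYSTEM_CAPTURE) != 0);
    }
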
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index cb243a0..958bb2e 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -196,3 +196,10 @@
         "libutils",
     ],
 }
+
+cc_test {
+    name: "test_full_queue",
+    defaults: ["libaaudio_tests_defaults"],
+    srcs: ["test_full_queue.cpp"],
+    shared_libs: ["libaaudio"],
+}
diff --git a/media/libaaudio/tests/test_attributes.cpp b/media/libaaudio/tests/test_attributes.cpp
index dbf8712..32ee2a3 100644
--- a/media/libaaudio/tests/test_attributes.cpp
+++ b/media/libaaudio/tests/test_attributes.cpp
@@ -32,6 +32,7 @@
                             aaudio_usage_t usage,
                             aaudio_content_type_t contentType,
                             aaudio_input_preset_t preset = DONT_SET,
+                            aaudio_allowed_capture_policy_t capturePolicy = DONT_SET,
                             aaudio_direction_t direction = AAUDIO_DIRECTION_OUTPUT) {
 
     float *buffer = new float[kNumFrames * kChannelCount];
@@ -56,6 +57,9 @@
     if (preset != DONT_SET) {
         AAudioStreamBuilder_setInputPreset(aaudioBuilder, preset);
     }
+    if (capturePolicy != DONT_SET) {
+        AAudioStreamBuilder_setAllowedCapturePolicy(aaudioBuilder, capturePolicy);
+    }
 
     // Create an AAudioStream using the Builder.
     ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream));
@@ -80,6 +84,12 @@
             : preset;
     EXPECT_EQ(expectedPreset, AAudioStream_getInputPreset(aaudioStream));
 
+    aaudio_allowed_capture_policy_t expectedCapturePolicy =
+            (capturePolicy == DONT_SET || capturePolicy == AAUDIO_UNSPECIFIED)
+            ? AAUDIO_ALLOW_CAPTURE_BY_ALL // default
+            : capturePolicy;
+    EXPECT_EQ(expectedCapturePolicy, AAudioStream_getAllowedCapturePolicy(aaudioStream));
+
     EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStart(aaudioStream));
 
     if (direction == AAUDIO_DIRECTION_INPUT) {
@@ -133,13 +143,21 @@
     AAUDIO_INPUT_PRESET_VOICE_PERFORMANCE,
 };
 
+static const aaudio_allowed_capture_policy_t sAllowCapturePolicies[] = {
+    DONT_SET,
+    AAUDIO_UNSPECIFIED,
+    AAUDIO_ALLOW_CAPTURE_BY_ALL,
+    AAUDIO_ALLOW_CAPTURE_BY_SYSTEM,
+    AAUDIO_ALLOW_CAPTURE_BY_NONE,
+};
+
 static void checkAttributesUsage(aaudio_performance_mode_t perfMode) {
     for (aaudio_usage_t usage : sUsages) {
         checkAttributes(perfMode, usage, DONT_SET);
     }
 }
 
-static void checkAttributesContentType(aaudio_input_preset_t perfMode) {
+static void checkAttributesContentType(aaudio_performance_mode_t perfMode) {
     for (aaudio_content_type_t contentType : sContentypes) {
         checkAttributes(perfMode, DONT_SET, contentType);
     }
@@ -151,6 +169,18 @@
                         DONT_SET,
                         DONT_SET,
                         inputPreset,
+                        DONT_SET,
+                        AAUDIO_DIRECTION_INPUT);
+    }
+}
+
+static void checkAttributesAllowedCapturePolicy(aaudio_performance_mode_t perfMode) {
+    for (aaudio_allowed_capture_policy_t policy : sAllowCapturePolicies) {
+        checkAttributes(perfMode,
+                        DONT_SET,
+                        DONT_SET,
+                        DONT_SET,
+                        policy,
                         AAUDIO_DIRECTION_INPUT);
     }
 }
@@ -167,6 +197,10 @@
     checkAttributesInputPreset(AAUDIO_PERFORMANCE_MODE_NONE);
 }
 
+TEST(test_attributes, aaudio_allowed_capture_policy_perfnone) {
+    checkAttributesAllowedCapturePolicy(AAUDIO_PERFORMANCE_MODE_NONE);
+}
+
 TEST(test_attributes, aaudio_usage_lowlat) {
     checkAttributesUsage(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
 }
@@ -178,3 +212,7 @@
 TEST(test_attributes, aaudio_input_preset_lowlat) {
     checkAttributesInputPreset(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
 }
+
+TEST(test_attributes, aaudio_allowed_capture_policy_lowlat) {
+    checkAttributesAllowedCapturePolicy(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+}
diff --git a/media/libaaudio/tests/test_full_queue.cpp b/media/libaaudio/tests/test_full_queue.cpp
new file mode 100644
index 0000000..12d4fa3
--- /dev/null
+++ b/media/libaaudio/tests/test_full_queue.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Test whether a stream dies if it is written to after a delay.
+// Maybe because the message queue from the AAudio service fills up.
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <aaudio/AAudio.h>
+#include <gtest/gtest.h>
+
+constexpr int64_t kNanosPerSecond = 1000000000;
+constexpr int64_t kTimeoutNanos = kNanosPerSecond / 2;
+constexpr int kNumFrames = 256;
+constexpr int kChannelCount = 2;
+
+static void checkFullQueue(aaudio_performance_mode_t perfMode,
+                           int32_t sleepMillis) {
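+    // std::make_unique<float[]> value-initializes the buffer, so the writes below send silence.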
+    std::unique_ptr<float[]> buffer = std::make_unique<float[]>(
+            kNumFrames * kChannelCount);
+
+    AAudioStreamBuilder *aaudioBuilder = nullptr;
+
+    // Use an AAudioStreamBuilder to contain requested parameters.
+    ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));
+
+    AAudioStreamBuilder_setChannelCount(aaudioBuilder, kChannelCount);
+
+    // Request stream properties.
+    AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, perfMode);
+
+    // Create an AAudioStream using the Builder.
+    AAudioStream *aaudioStream = nullptr;
+    ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder,
+            &aaudioStream));
+    AAudioStreamBuilder_delete(aaudioBuilder);
+
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStart(aaudioStream));
+
+    // Sleep for a while. This might kill the stream.
+    usleep(sleepMillis * 1000); // convert milliseconds to microseconds
+
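+    // The stream should still accept data: each write of kNumFrames must succeed within kTimeoutNanos.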
+    for (int i = 0; i < 10; i++) {
+        const aaudio_result_t result = AAudioStream_write(aaudioStream,
+                buffer.get(),
+                kNumFrames,
+                kTimeoutNanos);
+        EXPECT_EQ(kNumFrames, result);
+        if (kNumFrames != result) break;
+    }
+
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStop(aaudioStream));
+
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream));
+}
+
+TEST(test_full_queue, aaudio_full_queue_perf_none_50) {
+    checkFullQueue(AAUDIO_PERFORMANCE_MODE_NONE, 50 /* sleepMillis */);
+}
+
+TEST(test_full_queue, aaudio_full_queue_perf_none_200) {
+    checkFullQueue(AAUDIO_PERFORMANCE_MODE_NONE, 200 /* sleepMillis */);
+}
+
+TEST(test_full_queue, aaudio_full_queue_perf_none_1000) {
+    checkFullQueue(AAUDIO_PERFORMANCE_MODE_NONE, 1000 /* sleepMillis */);
+}
+
+TEST(test_full_queue, aaudio_full_queue_low_latency_50) {
+    checkFullQueue(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY, 50 /* sleepMillis */);
+}
+
+TEST(test_full_queue, aaudio_full_queue_low_latency_200) {
+    checkFullQueue(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY, 200 /* sleepMillis */);
+}
+
+TEST(test_full_queue, aaudio_full_queue_low_latency_1000) {
+    checkFullQueue(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY, 1000 /* sleepMillis */);
+}
diff --git a/media/libaudioclient/AudioPolicy.cpp b/media/libaudioclient/AudioPolicy.cpp
index 3ab38cd..65e797f 100644
--- a/media/libaudioclient/AudioPolicy.cpp
+++ b/media/libaudioclient/AudioPolicy.cpp
@@ -97,6 +97,7 @@
     mDeviceType = (audio_devices_t) parcel->readInt32();
     mDeviceAddress = parcel->readString8();
     mCbFlags = (uint32_t)parcel->readInt32();
+    mAllowPrivilegedPlaybackCapture = parcel->readBool();
     size_t size = (size_t)parcel->readInt32();
     if (size > MAX_CRITERIA_PER_MIX) {
         size = MAX_CRITERIA_PER_MIX;
@@ -120,6 +121,7 @@
     parcel->writeInt32(mDeviceType);
     parcel->writeString8(mDeviceAddress);
     parcel->writeInt32(mCbFlags);
+    parcel->writeBool(mAllowPrivilegedPlaybackCapture);
     size_t size = mCriteria.size();
     if (size > MAX_CRITERIA_PER_MIX) {
         size = MAX_CRITERIA_PER_MIX;
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 0ce2513..f324669 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -1133,6 +1133,12 @@
     }
 }
 
+status_t AudioSystem::setAllowedCapturePolicy(uid_t uid, audio_flags_mask_t flags) {
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == nullptr) return PERMISSION_DENIED;
+    return aps->setAllowedCapturePolicy(uid, flags);
+}
+
 bool AudioSystem::isOffloadSupported(const audio_offload_info_t& info)
 {
     ALOGV("isOffloadSupported()");
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index 3bac44f..bf98c60 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -101,7 +101,8 @@
     LIST_AUDIO_PRODUCT_STRATEGIES,
     GET_STRATEGY_FOR_ATTRIBUTES,
     LIST_AUDIO_VOLUME_GROUPS,
-    GET_VOLUME_GROUP_FOR_ATTRIBUTES
+    GET_VOLUME_GROUP_FOR_ATTRIBUTES,
+    SET_ALLOWED_CAPTURE_POLICY,
 };
 
 #define MAX_ITEMS_PER_LIST 1024
@@ -603,6 +604,15 @@
         return status;
     }
 
+    status_t setAllowedCapturePolicy(uid_t uid, audio_flags_mask_t flags) override {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        data.writeInt32(uid);
+        data.writeInt32(flags);
+        remote()->transact(SET_ALLOWED_CAPTURE_POLICY, data, &reply);
+        return reply.readInt32();
+    }
+
     virtual bool isOffloadSupported(const audio_offload_info_t& info)
     {
         Parcel data, reply;
@@ -2168,7 +2178,7 @@
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             bool isSupported = isHapticPlaybackSupported();
             reply->writeBool(isSupported);
-            return NO_ERROR;    
+            return NO_ERROR;
         }
 
         case SET_UID_DEVICE_AFFINITY: {
@@ -2285,6 +2295,15 @@
             return NO_ERROR;
         }
 
+        case SET_ALLOWED_CAPTURE_POLICY: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            uid_t uid = data.readInt32();
+            audio_flags_mask_t flags = data.readInt32();
+            status_t status = setAllowedCapturePolicy(uid, flags);
+            reply->writeInt32(status);
+            return NO_ERROR;
+        }
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libaudioclient/include/media/AudioPolicy.h b/media/libaudioclient/include/media/AudioPolicy.h
index bf8d627..4b94c12 100644
--- a/media/libaudioclient/include/media/AudioPolicy.h
+++ b/media/libaudioclient/include/media/AudioPolicy.h
@@ -114,6 +114,8 @@
     audio_devices_t mDeviceType;
     String8         mDeviceAddress;
     uint32_t        mCbFlags; // flags indicating which callbacks to use, see kCbFlag*
+    /** Ignore the AUDIO_FLAG_NO_MEDIA_PROJECTION */
+    bool            mAllowPrivilegedPlaybackCapture = false;
 };
 
 
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index e64f285..05a1d56 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -299,6 +299,8 @@
 
     static status_t setLowRamDevice(bool isLowRamDevice, int64_t totalMemory);
 
+    static status_t setAllowedCapturePolicy(uid_t uid, audio_flags_mask_t flags);
+
     // Check if hw offload is possible for given format, stream type, sample rate,
     // bit rate, duration, video and streaming or offload property is enabled
     static bool isOffloadSupported(const audio_offload_info_t& info);
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index 35540f0..95530ac 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -135,6 +135,7 @@
                                             audio_unique_id_t* id) = 0;
     virtual status_t removeSourceDefaultEffect(audio_unique_id_t id) = 0;
     virtual status_t removeStreamDefaultEffect(audio_unique_id_t id) = 0;
+    virtual status_t setAllowedCapturePolicy(uid_t uid, audio_flags_mask_t flags) = 0;
    // Check if offload is possible for given format, stream type, sample rate,
     // bit rate, duration, video and streaming or offload property is enabled
     virtual bool isOffloadSupported(const audio_offload_info_t& info) = 0;
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index 6c8e6a4..c08dddb 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -49,7 +49,7 @@
 
     // Instrument audio signal power logging.
     // Note: This assumes channel mask, format, and sample rate do not change after creation.
-    if (mStream != nullptr && mStreamPowerLog.isUserDebugOrEngBuild()) {
+    if (mStream != nullptr /* && mStreamPowerLog.isUserDebugOrEngBuild() */) {
         // Obtain audio properties (see StreamHalHidl::getAudioProperties() below).
         Return<void> ret = mStream->getAudioProperties(
                 [&](auto sr, auto m, auto f) {
diff --git a/media/libaudiohal/impl/StreamHalLocal.cpp b/media/libaudiohal/impl/StreamHalLocal.cpp
index 7d5ce05..4818fd8 100644
--- a/media/libaudiohal/impl/StreamHalLocal.cpp
+++ b/media/libaudiohal/impl/StreamHalLocal.cpp
@@ -33,7 +33,7 @@
           mStream(stream) {
     // Instrument audio signal power logging.
     // Note: This assumes channel mask, format, and sample rate do not change after creation.
-    if (mStream != nullptr && mStreamPowerLog.isUserDebugOrEngBuild()) {
+    if (mStream != nullptr /* && mStreamPowerLog.isUserDebugOrEngBuild() */) {
         mStreamPowerLog.init(mStream->get_sample_rate(mStream),
                 mStream->get_channels(mStream),
                 mStream->get_format(mStream));
diff --git a/media/libaudioprocessing/AudioResamplerDyn.cpp b/media/libaudioprocessing/AudioResamplerDyn.cpp
index eeeecce..52936cc 100644
--- a/media/libaudioprocessing/AudioResamplerDyn.cpp
+++ b/media/libaudioprocessing/AudioResamplerDyn.cpp
@@ -243,7 +243,7 @@
 {
     // compute the normalized transition bandwidth
     const double tbw = firKaiserTbw(c.mHalfNumCoefs, stopBandAtten);
-    const double halfbw = tbw / 2.;
+    const double halfbw = tbw * 0.5;
 
     double fcr; // compute fcr, the 3 dB amplitude cut-off.
     if (inSampleRate < outSampleRate) { // upsample
@@ -290,7 +290,7 @@
 
 #if 0
     // Keep this debug code in case an app causes resampler design issues.
-    const double halfbw = tbw / 2.;
+    const double halfbw = tbw * 0.5;
     // print basic filter stats
     ALOGD("L:%d  hnc:%d  stopBandAtten:%lf  fcr:%lf  atten:%lf  tbw:%lf\n",
             c.mL, c.mHalfNumCoefs, stopBandAtten, fcr, attenuation, tbw);
@@ -305,7 +305,7 @@
 
     const int32_t passSteps = 1000;
 
-    testFir(coefs, c.mL, c.mHalfNumCoefs, fp, fs, passSteps, passSteps * c.ML /*stopSteps*/,
+    testFir(coefs, c.mL, c.mHalfNumCoefs, fp, fs, passSteps, passSteps * c.mL /*stopSteps*/,
             passMin, passMax, passRipple, stopMax, stopRipple);
     ALOGD("passband(%lf, %lf): %.8lf %.8lf %.8lf\n", 0., fp, passMin, passMax, passRipple);
     ALOGD("stopband(%lf, %lf): %.8lf %.3lf\n", fs, 0.5, stopMax, stopRipple);
diff --git a/media/libaudioprocessing/AudioResamplerFirGen.h b/media/libaudioprocessing/AudioResamplerFirGen.h
index 39cafeb..100baa3 100644
--- a/media/libaudioprocessing/AudioResamplerFirGen.h
+++ b/media/libaudioprocessing/AudioResamplerFirGen.h
@@ -547,8 +547,8 @@
         wstart += wstep;
     }
     // renormalize - this is needed for integer filter types, use 1 for float or double.
-    constexpr int64_t integralShift = std::is_integral<T>::value ? (sizeof(T) * 8 - 1) : 0;
-    const double norm = 1. / (L << integralShift);
+    constexpr int integralShift = std::is_integral<T>::value ? (sizeof(T) * CHAR_BIT - 1) : 0;
+    const double norm = 1. / (int64_t{L} << integralShift);
 
     firMin = fmin * norm;
     firMax = fmax * norm;
diff --git a/media/libaudioprocessing/tests/Android.bp b/media/libaudioprocessing/tests/Android.bp
index 0c8e5bb..d990111 100644
--- a/media/libaudioprocessing/tests/Android.bp
+++ b/media/libaudioprocessing/tests/Android.bp
@@ -5,8 +5,8 @@
 
     header_libs: ["libbase_headers"],
     shared_libs: [
-        "libaudioutils",
         "libaudioprocessing",
+        "libaudioutils",
         "libcutils",
         "liblog",
         "libutils",
diff --git a/media/libaudioprocessing/tests/resampler_tests.cpp b/media/libaudioprocessing/tests/resampler_tests.cpp
index e1623f7..8292291 100644
--- a/media/libaudioprocessing/tests/resampler_tests.cpp
+++ b/media/libaudioprocessing/tests/resampler_tests.cpp
@@ -246,7 +246,8 @@
 }
 
 void testFilterResponse(
-        size_t channels, unsigned inputFreq, unsigned outputFreq)
+        size_t channels, unsigned inputFreq, unsigned outputFreq,
+        android::AudioResampler::src_quality quality = android::AudioResampler::DYN_HIGH_QUALITY)
 {
     // create resampler
     using ResamplerType = android::AudioResamplerDyn<float, float, float>;
@@ -256,7 +257,7 @@
                             AUDIO_FORMAT_PCM_FLOAT,
                             channels,
                             outputFreq,
-                            android::AudioResampler::DYN_HIGH_QUALITY)));
+                            quality)));
     rdyn->setSampleRate(inputFreq);
 
     // get design parameters
@@ -268,17 +269,20 @@
     const double attenuation = rdyn->getFilterAttenuation();
     const double stopbandDb = rdyn->getStopbandAttenuationDb();
     const double passbandDb = rdyn->getPassbandRippleDb();
-    const double fp = fcr - tbw / 2;
-    const double fs = fcr + tbw / 2;
+    const double fp = fcr - tbw * 0.5;
+    const double fs = fcr + tbw * 0.5;
+    const double idealfs = inputFreq <= outputFreq
+        ? 0.5                            // upsample
+        : 0.5 * outputFreq  / inputFreq; // downsample
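+    // fcrp in the printout below expresses fcr as a percentage of idealfs.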
 
-    printf("inputFreq:%d outputFreq:%d design"
+    printf("inputFreq:%d outputFreq:%d design quality %d"
             " phases:%d halfLength:%d"
-            " fcr:%lf fp:%lf fs:%lf tbw:%lf"
+            " fcr:%lf fp:%lf fs:%lf tbw:%lf fcrp:%lf"
             " attenuation:%lf stopRipple:%.lf passRipple:%lf"
             "\n",
-            inputFreq, outputFreq,
+            inputFreq, outputFreq, quality,
             phases, halfLength,
-            fcr, fp, fs, tbw,
+            fcr, fp, fs, tbw, fcr * 100. / idealfs,
             attenuation, stopbandDb, passbandDb);
 
     // verify design parameters
@@ -541,8 +545,36 @@
     }
 }
 
-TEST(audioflinger_resampler, filterresponse) {
-    std::vector<int> inSampleRates{
+// Selected downsampling responses for various frequencies relating to hearing aid.
+TEST(audioflinger_resampler, downsamplingresponse) {
+    static constexpr android::AudioResampler::src_quality qualities[] = {
+        android::AudioResampler::DYN_LOW_QUALITY,
+        android::AudioResampler::DYN_MED_QUALITY,
+        android::AudioResampler::DYN_HIGH_QUALITY,
+    };
+    static constexpr int inSampleRates[] = {
+        32000,
+        44100,
+        48000,
+    };
+    static constexpr int outSampleRates[] = {
+        16000,
+        24000,
+    };
+
+    for (auto quality : qualities) {
+        for (int outSampleRate : outSampleRates) {
+            for (int inSampleRate : inSampleRates) {
+                testFilterResponse(2 /* channels */, inSampleRate, outSampleRate, quality);
+            }
+        }
+    }
+}
+
+// General responses for typical output device scenarios - 44.1, 48, 96 kHz
+// (48, 96 are part of the same resampler generation family).
+TEST(audioflinger_resampler, generalresponse) {
+    static constexpr int inSampleRates[] = {
         8000,
         11025,
         12000,
@@ -557,7 +589,8 @@
         176400,
         192000,
     };
-    std::vector<int> outSampleRates{
+    static constexpr int outSampleRates[] = {
+        44100,
         48000,
         96000,
     };
diff --git a/media/libeffects/dynamicsproc/EffectDynamicsProcessing.cpp b/media/libeffects/dynamicsproc/EffectDynamicsProcessing.cpp
index 0b883f1..c03c6ed 100644
--- a/media/libeffects/dynamicsproc/EffectDynamicsProcessing.cpp
+++ b/media/libeffects/dynamicsproc/EffectDynamicsProcessing.cpp
@@ -25,6 +25,7 @@
 #include <new>
 
 #include <log/log.h>
+#include <sys/param.h>
 
 #include <audio_effects/effect_dynamicsprocessing.h>
 #include <dsp/DPBase.h>
@@ -225,10 +226,6 @@
     } //switch
 }
 
-static inline bool isPowerOf2(unsigned long n) {
-    return (n & (n - 1)) == 0;
-}
-
 void DP_configureVariant(DynamicsProcessingContext *pContext, int newVariant) {
     ALOGV("DP_configureVariant %d", newVariant);
     switch(newVariant) {
@@ -242,7 +239,7 @@
                 desiredBlock);
         if (desiredBlock < minBlockSize) {
             currentBlock = minBlockSize;
-        } else if (!isPowerOf2(desiredBlock)) {
+        } else if (!powerof2(desiredBlock)) {
             //find next highest power of 2.
             currentBlock = 1 << (32 - __builtin_clz(desiredBlock));
         }
@@ -1297,4 +1294,3 @@
 };
 
 }; // extern "C"
-
diff --git a/media/libeffects/dynamicsproc/dsp/DPFrequency.cpp b/media/libeffects/dynamicsproc/dsp/DPFrequency.cpp
index d06fd70..1f53978 100644
--- a/media/libeffects/dynamicsproc/dsp/DPFrequency.cpp
+++ b/media/libeffects/dynamicsproc/dsp/DPFrequency.cpp
@@ -20,6 +20,7 @@
 #include <log/log.h>
 #include "DPFrequency.h"
 #include <algorithm>
+#include <sys/param.h>
 
 namespace dp_fx {
 
@@ -30,10 +31,6 @@
 #define CIRCULAR_BUFFER_UPSAMPLE 4  //4 times buffer size
 
 static constexpr float MIN_ENVELOPE = 1e-6f; //-120 dB
-//helper functionS
-static inline bool isPowerOf2(unsigned long n) {
-    return (n & (n - 1)) == 0;
-}
 static constexpr float EPSILON = 0.0000001f;
 
 static inline bool isZero(float f) {
@@ -151,7 +148,7 @@
     } else if (mBlockSize < MIN_BLOCKSIZE) {
         mBlockSize = MIN_BLOCKSIZE;
     } else {
-        if (!isPowerOf2(blockSize)) {
+        if (!powerof2(blockSize)) {
             //find next highest power of 2.
             mBlockSize = 1 << (32 - __builtin_clz(blockSize));
         }
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c b/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c
index 1d95342..1b27cb4 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c
@@ -793,6 +793,15 @@
         {
             EQNB_Params.SourceFormat = LVEQNB_STEREO;
         }
+#ifdef SUPPORT_MC
+        /* Note: Currently the SourceFormat field of EQNB is not being
+         *       used by the module.
+         */
+        else if (LocalParams.SourceFormat == LVM_MULTICHANNEL)
+        {
+            EQNB_Params.SourceFormat = LVEQNB_MULTICHANNEL;
+        }
+#endif
         else
         {
             EQNB_Params.SourceFormat = LVEQNB_MONOINSTEREO;     /* Force to Mono-in-Stereo mode */
@@ -862,7 +871,16 @@
             CS_Params.SpeakerType  = LVCS_HEADPHONES;
         }
 
+#ifdef SUPPORT_MC
+        /* The Concert Sound module processes only the left and right channel
+         * data, so the SourceFormat is set to LVCS_STEREO for multichannel
+         * input as well.
+         */
+        if (LocalParams.SourceFormat == LVM_STEREO ||
+            LocalParams.SourceFormat == LVM_MULTICHANNEL)
+#else
         if (LocalParams.SourceFormat == LVM_STEREO)    /* Mono format not supported */
+#endif
         {
             CS_Params.SourceFormat = LVCS_STEREO;
         }
diff --git a/media/libeffects/lvm/lib/Common/lib/LVM_Types.h b/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
index 59586e0..fbfdd4d 100644
--- a/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
+++ b/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
@@ -94,6 +94,7 @@
 
 typedef     int32_t             LVM_INT32;          /* Signed 32-bit word */
 typedef     uint32_t            LVM_UINT32;         /* Unsigned 32-bit word */
+typedef     int64_t             LVM_INT64;          /* Signed 64-bit word */
 
 #ifdef BUILD_FLOAT
 
diff --git a/media/libeffects/lvm/lib/Eq/lib/LVEQNB.h b/media/libeffects/lvm/lib/Eq/lib/LVEQNB.h
index 385dbcf..804f1bf 100644
--- a/media/libeffects/lvm/lib/Eq/lib/LVEQNB.h
+++ b/media/libeffects/lvm/lib/Eq/lib/LVEQNB.h
@@ -184,6 +184,9 @@
 {
     LVEQNB_STEREO       = 0,
     LVEQNB_MONOINSTEREO = 1,
+#ifdef SUPPORT_MC
+    LVEQNB_MULTICHANNEL = 2,
+#endif
     LVEQNB_SOURCE_MAX   = LVM_MAXINT_32
 } LVEQNB_SourceFormat_en;
 
diff --git a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Process.c b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Process.c
index ea5f74a..61899fe 100644
--- a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Process.c
+++ b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Process.c
@@ -22,6 +22,17 @@
 
 #define LVM_MININT_32   0x80000000
 
+static LVM_INT32 mult32x32in32_shiftr(LVM_INT32 a, LVM_INT32 b, LVM_INT32 c) {
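+    // Multiply a and b into a 64-bit intermediate, shift right by c, and clamp the
+    // result to the 32-bit range (a saturating replacement for the MUL32x32INTO32 macro).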
+    LVM_INT64 result = ((LVM_INT64)a * b) >> c;
+
+    if (result >= INT32_MAX) {
+        return INT32_MAX;
+    } else if (result <= INT32_MIN) {
+        return INT32_MIN;
+    } else {
+        return (LVM_INT32)result;
+    }
+}
 
 /************************************************************************************/
 /*                                                                                  */
@@ -123,10 +134,10 @@
 
     if(pLVPSA_Inst->pSpectralDataBufferWritePointer != pWrite_Save)
     {
-        MUL32x32INTO32((AudioTime + (LVM_INT32)((LVM_INT32)pLVPSA_Inst->LocalSamplesCount*1000)),
-                        (LVM_INT32)LVPSA_SampleRateInvTab[pLVPSA_Inst->CurrentParams.Fs],
-                        AudioTimeInc,
-                        LVPSA_FsInvertShift)
+        AudioTimeInc = mult32x32in32_shiftr(
+                (AudioTime + ((LVM_INT32)pLVPSA_Inst->LocalSamplesCount * 1000)),
+                (LVM_INT32)LVPSA_SampleRateInvTab[pLVPSA_Inst->CurrentParams.Fs],
+                LVPSA_FsInvertShift);
         pLVPSA_Inst->SpectralDataBufferAudioTime = AudioTime + AudioTimeInc;
     }
 
diff --git a/media/libmedia/IMediaSource.cpp b/media/libmedia/IMediaSource.cpp
index 4dece96..50826c5 100644
--- a/media/libmedia/IMediaSource.cpp
+++ b/media/libmedia/IMediaSource.cpp
@@ -368,13 +368,13 @@
                         ALOGV("Use shared memory: %zu", length);
                         transferBuf = buf;
                     } else {
-                        ALOGD("Large buffer %zu without IMemory!", length);
+                        ALOGV("Large buffer %zu without IMemory!", length);
                         ret = mGroup->acquire_buffer(
                                 (MediaBufferBase **)&transferBuf, false /* nonBlocking */, length);
                         if (ret != OK
                                 || transferBuf == nullptr
                                 || transferBuf->mMemory == nullptr) {
-                            ALOGW("Failed to acquire shared memory, size %zu, ret %d",
+                            ALOGV("Failed to acquire shared memory, size %zu, ret %d",
                                     length, ret);
                             if (transferBuf != nullptr) {
                                 transferBuf->release();
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
index 469c5b6..5be78d1 100644
--- a/media/libmedia/TypeConverter.cpp
+++ b/media/libmedia/TypeConverter.cpp
@@ -392,7 +392,8 @@
     MAKE_STRING_FROM_ENUM(AUDIO_FLAG_BYPASS_MUTE),
     MAKE_STRING_FROM_ENUM(AUDIO_FLAG_LOW_LATENCY),
     MAKE_STRING_FROM_ENUM(AUDIO_FLAG_DEEP_BUFFER),
-    MAKE_STRING_FROM_ENUM(AUDIO_FLAG_NO_CAPTURE),
+    MAKE_STRING_FROM_ENUM(AUDIO_FLAG_NO_MEDIA_PROJECTION),
+    MAKE_STRING_FROM_ENUM(AUDIO_FLAG_NO_SYSTEM_CAPTURE),
     TERMINATOR
 };
 
diff --git a/media/libmedia/include/media/IMediaSource.h b/media/libmedia/include/media/IMediaSource.h
index 5ab6e37..381df24 100644
--- a/media/libmedia/include/media/IMediaSource.h
+++ b/media/libmedia/include/media/IMediaSource.h
@@ -124,7 +124,8 @@
         return false;
     }
 
-    static const size_t kBinderMediaBuffers = 4; // buffers managed by BnMediaSource
+    // align buffer count with video request size in NuMediaExtractor::selectTrack()
+    static const size_t kBinderMediaBuffers = 8; // buffers managed by BnMediaSource
     static const size_t kTransferSharedAsSharedThreshold = 4 * 1024;  // if >= shared, else inline
     static const size_t kTransferInlineAsSharedThreshold = 8 * 1024; // if >= shared, else inline
     static const size_t kInlineMaxTransfer = 64 * 1024; // Binder size limited to BINDER_VM_SIZE.
diff --git a/media/libmedia/xsd/vts/Android.bp b/media/libmedia/xsd/vts/Android.bp
new file mode 100644
index 0000000..b590a12
--- /dev/null
+++ b/media/libmedia/xsd/vts/Android.bp
@@ -0,0 +1,33 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_test {
+    name: "vts_mediaProfiles_validate_test",
+    srcs: [
+        "ValidateMediaProfiles.cpp"
+    ],
+    static_libs: [
+        "android.hardware.audio.common.test.utility",
+        "libxml2",
+    ],
+    shared_libs: [
+        "liblog",
+    ],
+    cflags: [
+        "-Wall",
+        "-Werror",
+    ],
+}
diff --git a/media/libmedia/xsd/vts/Android.mk b/media/libmedia/xsd/vts/Android.mk
new file mode 100644
index 0000000..52c3779
--- /dev/null
+++ b/media/libmedia/xsd/vts/Android.mk
@@ -0,0 +1,22 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := VtsValidateMediaProfiles
+include test/vts/tools/build/Android.host_config.mk
diff --git a/media/libmedia/xsd/vts/AndroidTest.xml b/media/libmedia/xsd/vts/AndroidTest.xml
new file mode 100644
index 0000000..e68721b
--- /dev/null
+++ b/media/libmedia/xsd/vts/AndroidTest.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<configuration description="Config for VTS VtsValidateMediaProfiles.">
+    <option name="config-descriptor:metadata" key="plan" value="vts-treble" />
+    <target_preparer class="com.android.compatibility.common.tradefed.targetprep.VtsFilePusher">
+        <option name="abort-on-push-failure" value="false"/>
+        <option name="push-group" value="HostDrivenTest.push"/>
+        <option name="push" value="DATA/etc/media_profiles.xsd->/data/local/tmp/media_profiles.xsd"/>
+    </target_preparer>
+    <test class="com.android.tradefed.testtype.VtsMultiDeviceTest">
+        <option name="test-module-name" value="VtsValidateMediaProfiles"/>
+        <option name="binary-test-source" value="_32bit::DATA/nativetest/vts_mediaProfiles_validate_test/vts_mediaProfiles_validate_test" />
+        <option name="binary-test-source" value="_64bit::DATA/nativetest64/vts_mediaProfiles_validate_test/vts_mediaProfiles_validate_test" />
+        <option name="binary-test-type" value="gtest"/>
+        <option name="test-timeout" value="30s"/>
+    </test>
+</configuration>
diff --git a/media/libmedia/xsd/vts/ValidateMediaProfiles.cpp b/media/libmedia/xsd/vts/ValidateMediaProfiles.cpp
new file mode 100644
index 0000000..ff9b060
--- /dev/null
+++ b/media/libmedia/xsd/vts/ValidateMediaProfiles.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "utility/ValidateXml.h"
+
+TEST(CheckConfig, mediaProfilesValidation) {
+    RecordProperty("description",
+                   "Verify that the media profiles file "
+                   "is valid according to the schema");
+
+    const char* location = "/vendor/etc";
+
+    EXPECT_ONE_VALID_XML_MULTIPLE_LOCATIONS("media_profiles_V1_0.xml", {location},
+                                            "/data/local/tmp/media_profiles.xsd");
+}
diff --git a/media/libmediaplayerservice/Android.bp b/media/libmediaplayerservice/Android.bp
index 46a1c24..0776172 100644
--- a/media/libmediaplayerservice/Android.bp
+++ b/media/libmediaplayerservice/Android.bp
@@ -11,6 +11,7 @@
     ],
 
     shared_libs: [
+        "android.hardware.media.c2@1.0",
         "android.hardware.media.omx@1.0",
         "libbase",
         "libaudioclient",
@@ -21,7 +22,6 @@
         "libdl",
         "libgui",
         "libhidlbase",
-        "libhidlmemory",
         "liblog",
         "libmedia",
         "libmedia_omx",
@@ -70,5 +70,3 @@
 
 }
 
-subdirs = ["*"]
-
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index da95817..5061024 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -34,6 +34,8 @@
 
 #include <utils/misc.h>
 
+#include <android/hardware/media/omx/1.0/IOmxStore.h>
+#include <android/hardware/media/c2/1.0/IComponentStore.h>
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
 #include <binder/MemoryHeapBase.h>
@@ -650,17 +652,17 @@
         const sp<MediaPlayerBase>& listener,
         int which) {
     mService = service;
-    mOmx = nullptr;
+    mHService = nullptr;
     mListener = listener;
     mWhich = which;
 }
 
 MediaPlayerService::Client::ServiceDeathNotifier::ServiceDeathNotifier(
-        const sp<IOmx>& omx,
+        const sp<android::hidl::base::V1_0::IBase>& hService,
         const sp<MediaPlayerBase>& listener,
         int which) {
     mService = nullptr;
-    mOmx = omx;
+    mHService = hService;
     mListener = listener;
     mWhich = which;
 }
@@ -692,9 +694,9 @@
     if (mService != nullptr) {
         mService->unlinkToDeath(this);
         mService = nullptr;
-    } else if (mOmx != nullptr) {
-        mOmx->unlinkToDeath(this);
-        mOmx = nullptr;
+    } else if (mHService != nullptr) {
+        mHService->unlinkToDeath(this);
+        mHService = nullptr;
     }
 }
 
@@ -714,10 +716,12 @@
         mExtractorDeathListener->unlinkToDeath();
         mExtractorDeathListener = nullptr;
     }
-    if (mCodecDeathListener != nullptr) {
-        mCodecDeathListener->unlinkToDeath();
-        mCodecDeathListener = nullptr;
+    for (const sp<ServiceDeathNotifier>& codecDeathListener : mCodecDeathListeners) {
+        if (codecDeathListener != nullptr) {
+            codecDeathListener->unlinkToDeath();
+        }
     }
+    mCodecDeathListeners.clear();
 }
 
 sp<MediaPlayerBase> MediaPlayerService::Client::setDataSource_pre(
@@ -741,20 +745,56 @@
             new ServiceDeathNotifier(binder, p, MEDIAEXTRACTOR_PROCESS_DEATH);
     binder->linkToDeath(extractorDeathListener);
 
-    sp<IOmx> omx = IOmx::getService();
-    if (omx == nullptr) {
-        ALOGE("IOmx service is not available");
-        return NULL;
+    std::vector<sp<ServiceDeathNotifier>> codecDeathListeners;
+    {
+        using ::android::hidl::base::V1_0::IBase;
+
+        // Listen to OMX's IOmxStore/default
+        {
+            sp<IBase> store = ::android::hardware::media::omx::V1_0::
+                    IOmxStore::getService();
+            if (store == nullptr) {
+                ALOGD("OMX service is not available");
+            } else {
+                sp<ServiceDeathNotifier> codecDeathListener =
+                        new ServiceDeathNotifier(store, p, MEDIACODEC_PROCESS_DEATH);
+                store->linkToDeath(codecDeathListener, 0);
+                codecDeathListeners.emplace_back(codecDeathListener);
+            }
+        }
+
+        // Listen to Codec2's IComponentStore/software
+        // TODO: Listen to all Codec2 services.
+        {
+            sp<IBase> store = ::android::hardware::media::c2::V1_0::
+                    IComponentStore::getService();
+            if (store == nullptr) {
+                ALOGD("Codec2 system service is not available");
+            } else {
+                sp<ServiceDeathNotifier> codecDeathListener =
+                        new ServiceDeathNotifier(store, p, MEDIACODEC_PROCESS_DEATH);
+                store->linkToDeath(codecDeathListener, 0);
+                codecDeathListeners.emplace_back(codecDeathListener);
+            }
+
+            store = ::android::hardware::media::c2::V1_0::
+                    IComponentStore::getService("software");
+            if (store == nullptr) {
+                ALOGD("Codec2 swcodec service is not available");
+            } else {
+                sp<ServiceDeathNotifier> codecDeathListener =
+                        new ServiceDeathNotifier(store, p, MEDIACODEC_PROCESS_DEATH);
+                store->linkToDeath(codecDeathListener, 0);
+                codecDeathListeners.emplace_back(codecDeathListener);
+            }
+        }
     }
-    sp<ServiceDeathNotifier> codecDeathListener =
-            new ServiceDeathNotifier(omx, p, MEDIACODEC_PROCESS_DEATH);
-    omx->linkToDeath(codecDeathListener, 0);
 
     Mutex::Autolock lock(mLock);
 
     clearDeathNotifiers_l();
     mExtractorDeathListener = extractorDeathListener;
-    mCodecDeathListener = codecDeathListener;
+    mCodecDeathListeners.swap(codecDeathListeners);
     mAudioDeviceUpdatedListener = new AudioDeviceUpdatedNotifier(p);
 
     if (!p->hardwareOutput()) {
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index bfb7cc2..26bfa71 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -30,7 +30,7 @@
 #include <media/Metadata.h>
 #include <media/stagefright/foundation/ABase.h>
 
-#include <android/hardware/media/omx/1.0/IOmx.h>
+#include <hidl/HidlSupport.h>
 
 #include <system/audio.h>
 
@@ -42,7 +42,6 @@
 class IDataSource;
 class IMediaRecorder;
 class IMediaMetadataRetriever;
-class IOMX;
 class IRemoteDisplay;
 class IRemoteDisplayClient;
 class MediaRecorderClient;
@@ -70,7 +69,6 @@
 class MediaPlayerService : public BnMediaPlayerService
 {
     class Client;
-    typedef ::android::hardware::media::omx::V1_0::IOmx IOmx;
 
     class AudioOutput : public MediaPlayerBase::AudioSink
     {
@@ -400,7 +398,7 @@
                     const sp<MediaPlayerBase>& listener,
                     int which);
             ServiceDeathNotifier(
-                    const sp<IOmx>& omx,
+                    const sp<android::hidl::base::V1_0::IBase>& hService,
                     const sp<MediaPlayerBase>& listener,
                     int which);
             virtual ~ServiceDeathNotifier();
@@ -413,7 +411,7 @@
         private:
             int mWhich;
             sp<IBinder> mService;
-            sp<IOmx> mOmx;
+            sp<android::hidl::base::V1_0::IBase> mHService; // HIDL service
             wp<MediaPlayerBase> mListener;
         };
 
@@ -509,7 +507,7 @@
         media::Metadata::Filter mMetadataUpdated;  // protected by mLock
 
         sp<ServiceDeathNotifier> mExtractorDeathListener;
-        sp<ServiceDeathNotifier> mCodecDeathListener;
+        std::vector<sp<ServiceDeathNotifier>> mCodecDeathListeners;
         sp<AudioDeviceUpdatedNotifier> mAudioDeviceUpdatedListener;
 #if CALLBACK_ANTAGONIZER
                     Antagonizer*                  mAntagonizer;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
index 0156ad2..a2cc13e 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
@@ -328,6 +328,11 @@
     const size_t *userData = (size_t *)mpegUserData->data();
 
     for (size_t i = 0; i < mpegUserData->size() / sizeof(size_t); ++i) {
+        if (accessUnit->size() < userData[i]) {
+            ALOGW("b/129068792, skip invalid offset for user data");
+            android_errorWriteLog(0x534e4554, "129068792");
+            continue;
+        }
         trackAdded |= parseMPEGUserDataUnit(
                 timeUs, accessUnit->data() + userData[i], accessUnit->size() - userData[i]);
     }
@@ -337,6 +342,12 @@
 
 // returns true if a new CC track is found
 bool NuPlayer::CCDecoder::parseMPEGUserDataUnit(int64_t timeUs, const uint8_t *data, size_t size) {
+    if (size < 9) {
+        ALOGW("b/129068792, MPEG user data size too small %zu", size);
+        android_errorWriteLog(0x534e4554, "129068792");
+        return false;
+    }
+
     ABitReader br(data + 4, 5);
 
     uint32_t user_identifier = br.getBits(32);
@@ -389,8 +400,14 @@
                 mDTVCCPacket->setRange(0, mDTVCCPacket->size() + 2);
                 br.skipBits(16);
             } else if (mDTVCCPacket->size() > 0 && cc_type == 2) {
-                memcpy(mDTVCCPacket->data() + mDTVCCPacket->size(), br.data(), 2);
-                mDTVCCPacket->setRange(0, mDTVCCPacket->size() + 2);
+                if (mDTVCCPacket->capacity() - mDTVCCPacket->size() >= 2) {
+                    memcpy(mDTVCCPacket->data() + mDTVCCPacket->size(), br.data(), 2);
+                    mDTVCCPacket->setRange(0, mDTVCCPacket->size() + 2);
+                } else {
+                    ALOGW("b/129068792, skip CC due to too much data(%zu, %zu)",
+                          mDTVCCPacket->capacity(), mDTVCCPacket->size());
+                    android_errorWriteLog(0x534e4554, "129068792");
+                }
                 br.skipBits(16);
             } else if (cc_type == 0 || cc_type == 1) {
                 uint8_t cc_data_1 = br.getBits(8) & 0x7f;
@@ -477,6 +494,11 @@
             size_t trackIndex = getTrackIndex(kTrackTypeCEA708, service_number, &trackAdded);
             if (mSelectedTrack == (ssize_t)trackIndex) {
                 sp<ABuffer> ccPacket = new ABuffer(block_size);
+                if (ccPacket->capacity() == 0) {
+                    ALOGW("b/129068792, no memory available, %zu", block_size);
+                    android_errorWriteLog(0x534e4554, "129068792");
+                    return false;
+                }
                 memcpy(ccPacket->data(), br.data(), block_size);
                 mCCMap.add(timeUs, ccPacket);
             }
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index f00c895..cf1a6f1 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -569,7 +569,6 @@
       mFps(-1.0),
       mCaptureFps(-1.0),
       mCreateInputBuffersSuspended(false),
-      mLatency(0),
       mTunneled(false),
       mDescribeColorAspectsIndex((OMX_INDEXTYPE)0),
       mDescribeHDRStaticInfoIndex((OMX_INDEXTYPE)0),
@@ -4425,12 +4424,13 @@
             h264type.eProfile == OMX_VIDEO_AVCProfileHigh) {
         h264type.nSliceHeaderSpacing = 0;
         h264type.bUseHadamard = OMX_TRUE;
-        h264type.nRefFrames = 2;
-        h264type.nBFrames = mLatency == 0 ? 1 : std::min(1U, mLatency - 1);
-
-        // disable B-frames until we have explicit settings for enabling the feature.
-        h264type.nRefFrames = 1;
-        h264type.nBFrames = 0;
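+        // Take the B-frame count from KEY_MAX_B_FRAMES, cap it at mLatency when that
+        // is set, and use two reference frames only when B-frames are enabled.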
+        int32_t maxBframes = 0;
+        (void)msg->findInt32(KEY_MAX_B_FRAMES, &maxBframes);
+        h264type.nBFrames = uint32_t(maxBframes);
+        if (mLatency && h264type.nBFrames > *mLatency) {
+            h264type.nBFrames = *mLatency;
+        }
+        h264type.nRefFrames = h264type.nBFrames == 0 ? 1 : 2;
 
         h264type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate, h264type.nBFrames);
         h264type.nAllowedPictureTypes =
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index dc51b16..b05718c 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -181,6 +181,7 @@
         "libcamera_client",
         "libcutils",
         "libdl",
+        "libdl_android",
         "libdrmframework",
         "libgui",
         "liblog",
@@ -226,6 +227,7 @@
 
     export_shared_lib_headers: [
         "libgui",
+        "libhidlmemory",
         "libmedia",
         "android.hidl.allocator@1.0",
     ],
@@ -318,21 +320,3 @@
     },
 }
 
-subdirs = [
-    "codec2",
-    "codecs/*",
-    "colorconversion",
-    "filters",
-    "flac/dec",
-    "foundation",
-    "http",
-    "httplive",
-    "id3",
-    "mpeg2ts",
-    "omx",
-    "rtsp",
-    "tests",
-    "timedtext",
-    "webm",
-    "xmlparser",
-]
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 5194e03..d78d729 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -385,11 +385,11 @@
     }
     mLastFrameTimestampUs = timeUs;
 
-    size_t numLostBytes = 0;
+    uint64_t numLostBytes = 0; // AudioRecord::getInputFramesLost() returns uint32_t
     if (mNumFramesReceived > 0) {  // Ignore earlier frame lost
         // getInputFramesLost() returns the number of lost frames.
         // Convert number of frames lost to number of bytes lost.
-        numLostBytes = mRecord->getInputFramesLost() * mRecord->frameSize();
+        numLostBytes = (uint64_t)mRecord->getInputFramesLost() * mRecord->frameSize();
     }
 
     CHECK_EQ(numLostBytes & 1, 0u);
@@ -397,11 +397,11 @@
     if (numLostBytes > 0) {
         // Loss of audio frames should happen rarely; thus the LOGW should
         // not cause a logging spam
-        ALOGW("Lost audio record data: %zu bytes", numLostBytes);
+        ALOGW("Lost audio record data: %" PRIu64 " bytes", numLostBytes);
     }
 
     while (numLostBytes > 0) {
-        size_t bufferSize = numLostBytes;
+        uint64_t bufferSize = numLostBytes;
         if (numLostBytes > kMaxBufferSize) {
             numLostBytes -= kMaxBufferSize;
             bufferSize = kMaxBufferSize;
@@ -431,19 +431,17 @@
 void AudioSource::queueInputBuffer_l(MediaBuffer *buffer, int64_t timeUs) {
     const size_t bufferSize = buffer->range_length();
     const size_t frameSize = mRecord->frameSize();
-    const int64_t timestampUs =
-                mPrevSampleTimeUs +
-                    ((1000000LL * (bufferSize / frameSize)) +
-                        (mSampleRate >> 1)) / mSampleRate;
-
     if (mNumFramesReceived == 0) {
         buffer->meta_data().setInt64(kKeyAnchorTime, mStartTimeUs);
     }
-
+    mNumFramesReceived += bufferSize / frameSize;
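+    // Derive the timestamp from the total frames received and the start time,
+    // instead of accumulating per-buffer increments onto the previous timestamp.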
+    const int64_t timestampUs =
+                mStartTimeUs +
+                    ((1000000LL * mNumFramesReceived) +
+                        (mSampleRate >> 1)) / mSampleRate;
     buffer->meta_data().setInt64(kKeyTime, mPrevSampleTimeUs);
     buffer->meta_data().setInt64(kKeyDriftTime, timeUs - mInitialReadTimeUs);
     mPrevSampleTimeUs = timestampUs;
-    mNumFramesReceived += bufferSize / frameSize;
     mBuffersReceived.push_back(buffer);
     mFrameAvailableCondition.signal();
 }
diff --git a/media/libstagefright/DataURISource.cpp b/media/libstagefright/DataURISource.cpp
index 3dc345f..b975b38 100644
--- a/media/libstagefright/DataURISource.cpp
+++ b/media/libstagefright/DataURISource.cpp
@@ -13,7 +13,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 #include <media/stagefright/DataURISource.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
@@ -39,15 +38,27 @@
     AString tmp(&uri[5], commaPos - &uri[5]);
 
     if (tmp.endsWith(";base64")) {
-        AString encoded(commaPos + 1);
 
-        // Strip CR and LF...
-        for (size_t i = encoded.size(); i > 0;) {
-            i--;
-            if (encoded.c_str()[i] == '\r' || encoded.c_str()[i] == '\n') {
-                encoded.erase(i, 1);
+        // strip all CR and LF characters.
+        const char *src = commaPos+1;
+        int len = strlen(src) + 1;
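+        // len includes the terminating NUL, so cleansed is always NUL-terminated
+        // after the copy and memset below.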
+        char *cleansed = (char *) malloc(len);
+        if (cleansed == NULL) return NULL;
+        char *keeping = cleansed;
+        int left = len;
+        for (int i = 0; i < len ; i++)
+        {
+            const char c = *src++;
+            if (c == '\r' || c == '\n') {
+                continue;
             }
+            *keeping++ = c;
+            left--;
         }
+        memset(keeping, 0, left);
+
+        AString encoded(cleansed);
+        free(cleansed);
 
         buffer = decodeBase64(encoded);
 
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index a52da45..2f13dc9 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -71,6 +71,8 @@
 static const uint8_t kNalUnitTypePicParamSet = 0x08;
 static const int64_t kInitialDelayTimeUs     = 700000LL;
 static const int64_t kMaxMetadataSize = 0x4000000LL;   // 64MB max per-frame metadata size
+static const int64_t kMaxCttsOffsetTimeUs = 30 * 60 * 1000000LL;  // 30 minutes
+static const size_t kESDSScratchBufferSize = 10;  // kMaxAtomSize in Mpeg4Extractor 64MB
 
 static const char kMetaKey_Version[]    = "com.android.version";
 static const char kMetaKey_Manufacturer[]      = "com.android.manufacturer";
@@ -136,13 +138,6 @@
     void resetInternal();
 
 private:
-    enum {
-        // TODO: need to increase this considering the bug
-        // about camera app not sending video frames continuously?
-        kMaxCttsOffsetTimeUs = 1000000LL,  // 1 second
-        kSampleArraySize = 1000,
-    };
-
     // A helper class to handle faster write box with table entries
     template<class TYPE, unsigned ENTRY_SIZE>
     // ENTRY_SIZE: # of values in each entry
@@ -2920,14 +2915,18 @@
 }
 
 void MPEG4Writer::Track::dumpTimeStamps() {
-    ALOGE("Dumping %s track's last 10 frames timestamp and frame type ", getTrackType());
-    std::string timeStampString;
-    for (std::list<TimestampDebugHelperEntry>::iterator entry = mTimestampDebugHelper.begin();
-            entry != mTimestampDebugHelper.end(); ++entry) {
-        timeStampString += "(" + std::to_string(entry->pts)+
-                "us, " + std::to_string(entry->dts) + "us " + entry->frameType + ") ";
+    if (!mTimestampDebugHelper.empty()) {
+        std::string timeStampString = "Dumping " + std::string(getTrackType()) + " track's last " +
+                                      std::to_string(mTimestampDebugHelper.size()) +
+                                      " frames' timestamps (pts, dts) and frame types: ";
+        for (const TimestampDebugHelperEntry& entry : mTimestampDebugHelper) {
+            timeStampString += "\n(" + std::to_string(entry.pts) + "us, " +
+                               std::to_string(entry.dts) + "us " + entry.frameType + ") ";
+        }
+        ALOGE("%s", timeStampString.c_str());
+    } else {
+        ALOGE("0 frames to dump timeStamps in %s track ", getTrackType());
     }
-    ALOGE("%s", timeStampString.c_str());
 }
 
 status_t MPEG4Writer::Track::threadEntry() {
@@ -3884,22 +3883,52 @@
     mOwner->endBox();
 }
 
+static void generateEsdsSize(size_t dataLength, size_t* sizeGenerated, uint8_t* buffer) {
+    size_t offset = 0, cur = 0;
+    size_t more = 0x00;
+    *sizeGenerated = 0;
+    /* Start with the LSB (7 bits) of dataLength and build the byte sequence up to the MSB.
+     * The continuation flag (most significant bit) is set on the first N-1 bytes.
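+     * For example, dataLength = 300 (0x12C) is emitted as the two bytes 0x82 0x2C.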
+     */
+    do {
+        buffer[cur++] = (dataLength & 0x7f) | more;
+        dataLength >>= 7;
+        more = 0x80;
+        ++(*sizeGenerated);
+    } while (dataLength > 0u);
+    --cur;
+    // Reverse the newly formed byte sequence.
+    while (cur > offset) {
+        uint8_t tmp = buffer[cur];
+        buffer[cur--] = buffer[offset];
+        buffer[offset++] = tmp;
+    }
+}
+
 void MPEG4Writer::Track::writeMp4aEsdsBox() {
-    mOwner->beginBox("esds");
     CHECK(mCodecSpecificData);
     CHECK_GT(mCodecSpecificDataSize, 0u);
 
-    // Make sure all sizes encode to a single byte.
-    CHECK_LT(mCodecSpecificDataSize + 23, 128u);
+    uint8_t sizeESDBuffer[kESDSScratchBufferSize];
+    uint8_t sizeDCDBuffer[kESDSScratchBufferSize];
+    uint8_t sizeDSIBuffer[kESDSScratchBufferSize];
+    size_t sizeESD = 0;
+    size_t sizeDCD = 0;
+    size_t sizeDSI = 0;
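+    // The +14 and +21 match the former fixed sizes (15 + mCodecSpecificDataSize and
+    // 23 + mCodecSpecificDataSize) minus the one-byte DSI/DCD size fields, which are
+    // now emitted with the variable-length encoding above.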
+    generateEsdsSize(mCodecSpecificDataSize, &sizeDSI, sizeDSIBuffer);
+    generateEsdsSize(mCodecSpecificDataSize + sizeDSI + 14, &sizeDCD, sizeDCDBuffer);
+    generateEsdsSize(mCodecSpecificDataSize + sizeDSI + sizeDCD + 21, &sizeESD, sizeESDBuffer);
+
+    mOwner->beginBox("esds");
 
     mOwner->writeInt32(0);     // version=0, flags=0
     mOwner->writeInt8(0x03);   // ES_DescrTag
-    mOwner->writeInt8(23 + mCodecSpecificDataSize);
+    mOwner->write(sizeESDBuffer, sizeESD);
     mOwner->writeInt16(0x0000);// ES_ID
     mOwner->writeInt8(0x00);
 
     mOwner->writeInt8(0x04);   // DecoderConfigDescrTag
-    mOwner->writeInt8(15 + mCodecSpecificDataSize);
+    mOwner->write(sizeDCDBuffer, sizeDCD);
     mOwner->writeInt8(0x40);   // objectTypeIndication ISO/IEC 14492-2
     mOwner->writeInt8(0x15);   // streamType AudioStream
 
@@ -3914,7 +3943,7 @@
     mOwner->writeInt32(avgBitrate);
 
     mOwner->writeInt8(0x05);   // DecoderSpecificInfoTag
-    mOwner->writeInt8(mCodecSpecificDataSize);
+    mOwner->write(sizeDSIBuffer, sizeDSI);
     mOwner->write(mCodecSpecificData, mCodecSpecificDataSize);
 
     static const uint8_t kData2[] = {
@@ -3931,20 +3960,27 @@
     CHECK(mCodecSpecificData);
     CHECK_GT(mCodecSpecificDataSize, 0u);
 
-    // Make sure all sizes encode to a single byte.
-    CHECK_LT(23 + mCodecSpecificDataSize, 128u);
+    uint8_t sizeESDBuffer[kESDSScratchBufferSize];
+    uint8_t sizeDCDBuffer[kESDSScratchBufferSize];
+    uint8_t sizeDSIBuffer[kESDSScratchBufferSize];
+    size_t sizeESD = 0;
+    size_t sizeDCD = 0;
+    size_t sizeDSI = 0;
+    generateEsdsSize(mCodecSpecificDataSize, &sizeDSI, sizeDSIBuffer);
+    generateEsdsSize(mCodecSpecificDataSize + sizeDSI + 14, &sizeDCD, sizeDCDBuffer);
+    generateEsdsSize(mCodecSpecificDataSize + sizeDSI + sizeDCD + 21, &sizeESD, sizeESDBuffer);
 
     mOwner->beginBox("esds");
 
     mOwner->writeInt32(0);    // version=0, flags=0
 
     mOwner->writeInt8(0x03);  // ES_DescrTag
-    mOwner->writeInt8(23 + mCodecSpecificDataSize);
+    mOwner->write(sizeESDBuffer, sizeESD);
     mOwner->writeInt16(0x0000);  // ES_ID
     mOwner->writeInt8(0x1f);
 
     mOwner->writeInt8(0x04);  // DecoderConfigDescrTag
-    mOwner->writeInt8(15 + mCodecSpecificDataSize);
+    mOwner->write(sizeDCDBuffer, sizeDCD);
     mOwner->writeInt8(0x20);  // objectTypeIndication ISO/IEC 14492-2
     mOwner->writeInt8(0x11);  // streamType VisualStream
 
@@ -3962,7 +3998,7 @@
 
     mOwner->writeInt8(0x05);  // DecoderSpecificInfoTag
 
-    mOwner->writeInt8(mCodecSpecificDataSize);
+    mOwner->write(sizeDSIBuffer, sizeDSI);
     mOwner->write(mCodecSpecificData, mCodecSpecificDataSize);
 
     static const uint8_t kData2[] = {
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index d4e4000..b6b7784 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -1052,8 +1052,9 @@
         }
 
         // Prevent possible integer overflow in downstream code.
-        if ((uint64_t)mVideoWidth * mVideoHeight > (uint64_t)INT32_MAX / 4) {
-            ALOGE("buffer size is too big, width=%d, height=%d", mVideoWidth, mVideoHeight);
+        if (mVideoWidth < 0 || mVideoHeight < 0 ||
+               (uint64_t)mVideoWidth * mVideoHeight > (uint64_t)INT32_MAX / 4) {
+            ALOGE("Invalid size(s), width=%d, height=%d", mVideoWidth, mVideoHeight);
             return BAD_VALUE;
         }
     }
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index c3d85ee..50e454c 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -831,7 +831,9 @@
 }
 
 void MediaCodecSource::onPause(int64_t pauseStartTimeUs) {
-    if ((mFlags & FLAG_USE_SURFACE_INPUT) && (mEncoder != NULL)) {
+    if (mStopping || mOutput.lock()->mEncoderReachedEOS) {
+        // Nothing to do
+    } else if ((mFlags & FLAG_USE_SURFACE_INPUT) && (mEncoder != NULL)) {
         sp<AMessage> params = new AMessage;
         params->setInt32(PARAMETER_KEY_SUSPEND, true);
         params->setInt64(PARAMETER_KEY_SUSPEND_TIME, pauseStartTimeUs);
diff --git a/media/libstagefright/codecs/aacdec/Android.bp b/media/libstagefright/codecs/aacdec/Android.bp
index 25628a2..e0bb5cd 100644
--- a/media/libstagefright/codecs/aacdec/Android.bp
+++ b/media/libstagefright/codecs/aacdec/Android.bp
@@ -29,12 +29,10 @@
 
     static_libs: ["libFraunhoferAAC"],
 
+    defaults: ["omx_soft_libs"],
+
     shared_libs: [
-        "libstagefright_omx",
-        "libstagefright_foundation",
-        "libutils",
         "libcutils",
-        "liblog",
     ],
     compile_multilib: "32",
 }
diff --git a/media/libstagefright/codecs/aacenc/Android.bp b/media/libstagefright/codecs/aacenc/Android.bp
index ec1151b..0d677fe 100644
--- a/media/libstagefright/codecs/aacenc/Android.bp
+++ b/media/libstagefright/codecs/aacenc/Android.bp
@@ -26,11 +26,7 @@
 
     static_libs: ["libFraunhoferAAC"],
 
-    shared_libs: [
-        "libstagefright_omx",
-        "libstagefright_foundation",
-        "libutils",
-        "liblog",
-    ],
+    defaults: ["omx_soft_libs"],
+
     compile_multilib: "32",
 }
diff --git a/media/libstagefright/codecs/amrnb/Android.bp b/media/libstagefright/codecs/amrnb/Android.bp
deleted file mode 100644
index b44c296..0000000
--- a/media/libstagefright/codecs/amrnb/Android.bp
+++ /dev/null
@@ -1 +0,0 @@
-subdirs = ["*"]
diff --git a/media/libstagefright/codecs/amrnb/dec/Android.bp b/media/libstagefright/codecs/amrnb/dec/Android.bp
index 880f161..f3b272b 100644
--- a/media/libstagefright/codecs/amrnb/dec/Android.bp
+++ b/media/libstagefright/codecs/amrnb/dec/Android.bp
@@ -101,11 +101,9 @@
         "libstagefright_amrwbdec",
     ],
 
+    defaults: ["omx_soft_libs"],
+
     shared_libs: [
-        "libstagefright_omx",
-        "libstagefright_foundation",
-        "libutils",
-        "liblog",
         "libstagefright_amrnb_common",
     ],
     compile_multilib: "32",
diff --git a/media/libstagefright/codecs/amrnb/dec/test/amrnbdec_test.cpp b/media/libstagefright/codecs/amrnb/dec/test/amrnbdec_test.cpp
index 41a9e98..621fda8 100644
--- a/media/libstagefright/codecs/amrnb/dec/test/amrnbdec_test.cpp
+++ b/media/libstagefright/codecs/amrnb/dec/test/amrnbdec_test.cpp
@@ -67,6 +67,7 @@
     int bytesRead = fread(header, 1, kFileHeaderSize, fpInput);
     if (bytesRead != kFileHeaderSize || memcmp(header, "#!AMR\n", kFileHeaderSize)) {
         fprintf(stderr, "Invalid AMR-NB file\n");
+        fclose(fpInput);
         return 1;
     }
 
@@ -79,6 +80,7 @@
     SNDFILE *handle = sf_open(argv[2], SFM_WRITE, &sfInfo);
     if(!handle){
         fprintf(stderr, "Could not create %s\n", argv[2]);
+        fclose(fpInput);
         return 1;
     }
 
@@ -87,6 +89,8 @@
     int err = GSMInitDecode(&amrHandle, (Word8*)"AMRNBDecoder");
     if(err != 0){
         fprintf(stderr, "Error creating AMR-NB decoder instance\n");
+        fclose(fpInput);
+        sf_close(handle);
         return 1;
     }
 
diff --git a/media/libstagefright/codecs/amrnb/enc/Android.bp b/media/libstagefright/codecs/amrnb/enc/Android.bp
index 19fd4a8..1c8b511 100644
--- a/media/libstagefright/codecs/amrnb/enc/Android.bp
+++ b/media/libstagefright/codecs/amrnb/enc/Android.bp
@@ -110,11 +110,9 @@
 
     static_libs: ["libstagefright_amrnbenc"],
 
+    defaults: ["omx_soft_libs"],
+
     shared_libs: [
-        "libstagefright_omx",
-        "libstagefright_foundation",
-        "libutils",
-        "liblog",
         "libstagefright_amrnb_common",
     ],
     compile_multilib: "32",
diff --git a/media/libstagefright/codecs/amrwbenc/Android.bp b/media/libstagefright/codecs/amrwbenc/Android.bp
index b9d45c1..262962f 100644
--- a/media/libstagefright/codecs/amrwbenc/Android.bp
+++ b/media/libstagefright/codecs/amrwbenc/Android.bp
@@ -167,16 +167,11 @@
 
     static_libs: ["libstagefright_amrwbenc"],
 
+    defaults: ["omx_soft_libs"],
+
     shared_libs: [
-        "libstagefright_omx",
-        "libstagefright_foundation",
-        "libutils",
-        "liblog",
         "libstagefright_enc_common",
     ],
     compile_multilib: "32",
 }
 
-//###############################################################################
-
-subdirs = ["SampleCode"]
diff --git a/media/libstagefright/codecs/amrwbenc/SampleCode/AMRWB_E_SAMPLE.c b/media/libstagefright/codecs/amrwbenc/SampleCode/AMRWB_E_SAMPLE.c
index 7c094f3..7282de4 100644
--- a/media/libstagefright/codecs/amrwbenc/SampleCode/AMRWB_E_SAMPLE.c
+++ b/media/libstagefright/codecs/amrwbenc/SampleCode/AMRWB_E_SAMPLE.c
@@ -134,14 +134,16 @@
 	if(handle == 0)
 	{
 		printf("open dll error......");
-		return -1;
+		ret = -1;
+		goto safe_exit;
 	}
 
 	pfunc = dlsym(handle, "voGetAMRWBEncAPI");
 	if(pfunc == 0)
 	{
 		printf("open function error......");
-		return -1;
+		ret = -1;
+		goto safe_exit;
 	}
 
 	pGetAPI = (VOGETAUDIOENCAPI)pfunc;
@@ -150,7 +152,8 @@
 	if(returnCode)
 	{
 		printf("get APIs error......");
-		return -1;
+		ret = -1;
+		goto safe_exit;
 	}
 #else
 	ret = voGetAMRWBEncAPI(&AudioAPI);
@@ -253,7 +256,8 @@
 		fclose(fdst);
 
 #ifdef LINUX
-	dlclose(handle);
+	if (handle)
+		dlclose(handle);
 #endif
 
 	return ret;
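
The sample-code fix above routes early failures through a safe_exit path instead of returning directly, so already-opened resources are released, and dlclose() is only called when the handle was actually opened. In C++ the same guarantee is normally expressed with RAII; a minimal sketch with custom deleters (illustrative only, not how the C sample is structured):

    #include <dlfcn.h>
    #include <cstdio>
    #include <memory>

    // RAII wrappers: the library handle and the input file are closed on
    // every return path, which the goto-based cleanup does by hand in C.
    using DlHandle = std::unique_ptr<void, int (*)(void*)>;
    using File     = std::unique_ptr<FILE, int (*)(FILE*)>;

    int encodeWith(const char* libPath, const char* inputPath) {
        File fpInput(fopen(inputPath, "rb"), fclose);
        if (!fpInput) return -1;

        DlHandle handle(dlopen(libPath, RTLD_LAZY), dlclose);
        if (!handle) return -1;                   // fpInput auto-closes here

        void* pfunc = dlsym(handle.get(), "voGetAMRWBEncAPI");
        if (pfunc == nullptr) return -1;          // handle and fpInput auto-close
        // ... query the encoder API through pfunc and run the encode loop ...
        return 0;
    }
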
diff --git a/media/libstagefright/codecs/avcdec/Android.bp b/media/libstagefright/codecs/avcdec/Android.bp
index 8a34845..567bcca 100644
--- a/media/libstagefright/codecs/avcdec/Android.bp
+++ b/media/libstagefright/codecs/avcdec/Android.bp
@@ -22,12 +22,7 @@
         "frameworks/native/include/media/openmax",
     ],
 
-    shared_libs: [
-        "libstagefright_omx",
-        "libstagefright_foundation",
-        "libutils",
-        "liblog",
-    ],
+    defaults: ["omx_soft_libs"],
 
     sanitize: {
         misc_undefined: [
diff --git a/media/libstagefright/codecs/avcenc/Android.bp b/media/libstagefright/codecs/avcenc/Android.bp
index 6371828..0cd39e1 100644
--- a/media/libstagefright/codecs/avcenc/Android.bp
+++ b/media/libstagefright/codecs/avcenc/Android.bp
@@ -16,12 +16,7 @@
         "frameworks/native/include/media/openmax",
     ],
 
-    shared_libs: [
-        "libstagefright_foundation",
-        "libstagefright_omx",
-        "libutils",
-        "liblog",
-    ],
+    defaults: ["omx_soft_libs"],
 
     sanitize: {
         misc_undefined: [
diff --git a/media/libstagefright/codecs/flac/Android.bp b/media/libstagefright/codecs/flac/Android.bp
deleted file mode 100644
index b44c296..0000000
--- a/media/libstagefright/codecs/flac/Android.bp
+++ /dev/null
@@ -1 +0,0 @@
-subdirs = ["*"]
diff --git a/media/libstagefright/codecs/flac/dec/Android.bp b/media/libstagefright/codecs/flac/dec/Android.bp
index 3d4a44f..18a3f6b 100644
--- a/media/libstagefright/codecs/flac/dec/Android.bp
+++ b/media/libstagefright/codecs/flac/dec/Android.bp
@@ -28,12 +28,10 @@
         cfi: true,
     },
 
+    defaults: ["omx_soft_libs"],
+
     shared_libs: [
-        "liblog",
         "libstagefright_flacdec",
-        "libstagefright_omx",
-        "libstagefright_foundation",
-        "libutils",
     ],
     compile_multilib: "32",
 }
diff --git a/media/libstagefright/codecs/flac/enc/Android.bp b/media/libstagefright/codecs/flac/enc/Android.bp
index b32ab08..4149ccd 100644
--- a/media/libstagefright/codecs/flac/enc/Android.bp
+++ b/media/libstagefright/codecs/flac/enc/Android.bp
@@ -19,13 +19,7 @@
         ],
         cfi: true,
     },
-
-    shared_libs: [
-        "libstagefright_omx",
-        "libstagefright_foundation",
-        "libutils",
-        "liblog",
-    ],
+    defaults: ["omx_soft_libs"],
 
     header_libs: ["libbase_headers"],
     static_libs: [
diff --git a/media/libstagefright/codecs/g711/Android.bp b/media/libstagefright/codecs/g711/Android.bp
deleted file mode 100644
index b44c296..0000000
--- a/media/libstagefright/codecs/g711/Android.bp
+++ /dev/null
@@ -1 +0,0 @@
-subdirs = ["*"]
diff --git a/media/libstagefright/codecs/g711/dec/Android.bp b/media/libstagefright/codecs/g711/dec/Android.bp
index 7097688..c273179 100644
--- a/media/libstagefright/codecs/g711/dec/Android.bp
+++ b/media/libstagefright/codecs/g711/dec/Android.bp
@@ -12,12 +12,7 @@
         "frameworks/native/include/media/openmax",
     ],
 
-    shared_libs: [
-        "libstagefright_foundation",
-        "libstagefright_omx",
-        "libutils",
-        "liblog",
-    ],
+    defaults: ["omx_soft_libs"],
 
     cflags: ["-Werror"],
 
diff --git a/media/libstagefright/codecs/gsm/Android.bp b/media/libstagefright/codecs/gsm/Android.bp
deleted file mode 100644
index b44c296..0000000
--- a/media/libstagefright/codecs/gsm/Android.bp
+++ /dev/null
@@ -1 +0,0 @@
-subdirs = ["*"]
diff --git a/media/libstagefright/codecs/gsm/dec/Android.bp b/media/libstagefright/codecs/gsm/dec/Android.bp
index a973f70..3c5ebfe 100644
--- a/media/libstagefright/codecs/gsm/dec/Android.bp
+++ b/media/libstagefright/codecs/gsm/dec/Android.bp
@@ -25,12 +25,7 @@
         cfi: true,
     },
 
-    shared_libs: [
-        "libstagefright_foundation",
-        "libstagefright_omx",
-        "libutils",
-        "liblog",
-    ],
+    defaults: ["omx_soft_libs"],
 
     static_libs: ["libgsm"],
     compile_multilib: "32",
diff --git a/media/libstagefright/codecs/hevcdec/Android.bp b/media/libstagefright/codecs/hevcdec/Android.bp
index 60fc446..cc91d53 100644
--- a/media/libstagefright/codecs/hevcdec/Android.bp
+++ b/media/libstagefright/codecs/hevcdec/Android.bp
@@ -30,12 +30,7 @@
         cfi: true,
     },
 
-    shared_libs: [
-        "libstagefright_omx",
-        "libstagefright_foundation",
-        "libutils",
-        "liblog",
-    ],
+    defaults: ["omx_soft_libs"],
 
     // We need this because the current asm generates the following link error:
     // requires unsupported dynamic reloc R_ARM_REL32; recompile with -fPIC
diff --git a/media/libstagefright/codecs/m4v_h263/Android.bp b/media/libstagefright/codecs/m4v_h263/Android.bp
deleted file mode 100644
index b44c296..0000000
--- a/media/libstagefright/codecs/m4v_h263/Android.bp
+++ /dev/null
@@ -1 +0,0 @@
-subdirs = ["*"]
diff --git a/media/libstagefright/codecs/m4v_h263/dec/Android.bp b/media/libstagefright/codecs/m4v_h263/dec/Android.bp
index 41141b1..0523143 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/Android.bp
+++ b/media/libstagefright/codecs/m4v_h263/dec/Android.bp
@@ -91,12 +91,7 @@
 
     static_libs: ["libstagefright_m4vh263dec"],
 
-    shared_libs: [
-        "libstagefright_omx",
-        "libstagefright_foundation",
-        "libutils",
-        "liblog",
-    ],
+    defaults: ["omx_soft_libs"],
 
     sanitize: {
         misc_undefined: [
diff --git a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
index a8fcdd1..60750d9 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
@@ -24,7 +24,6 @@
 #include <media/stagefright/foundation/AUtils.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
-#include <media/IOMX.h>
 
 #include "mp4dec_api.h"
 
@@ -118,9 +117,14 @@
                 outHeader->nFlags = OMX_BUFFERFLAG_EOS;
 
                 List<BufferInfo *>::iterator it = outQueue.begin();
-                while ((*it)->mHeader != outHeader) {
+                while (it != outQueue.end() && (*it)->mHeader != outHeader) {
                     ++it;
                 }
+                if (it == outQueue.end()) {
+                    ALOGE("couldn't find port buffer %d in outQueue: b/109891727", mNumSamplesOutput & 1);
+                    android_errorWriteLog(0x534e4554, "109891727");
+                    return;
+                }
 
                 BufferInfo *outInfo = *it;
                 outInfo->mOwnedByUs = false;
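
The SoftMPEG4 change above is the usual guard against walking off the end of a container while searching for a matching buffer: the loop now stops at outQueue.end() and bails out with a log instead of dereferencing an invalid iterator. The same search can be written with std::find_if; a small illustrative sketch (using std::list and a void* header, whereas the real code uses android's List<BufferInfo*> and OMX buffer headers):

    #include <algorithm>
    #include <list>

    struct BufferInfo { void* mHeader; };

    // Returns the matching entry, or end() if outHeader is not queued;
    // callers must check against end() before dereferencing, exactly as
    // the patched loop does.
    std::list<BufferInfo*>::iterator findByHeader(
            std::list<BufferInfo*>& outQueue, void* outHeader) {
        return std::find_if(outQueue.begin(), outQueue.end(),
                            [outHeader](BufferInfo* info) {
                                return info->mHeader == outHeader;
                            });
    }
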
diff --git a/media/libstagefright/codecs/m4v_h263/enc/Android.bp b/media/libstagefright/codecs/m4v_h263/enc/Android.bp
index d4f7d50..d38f4b1 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/Android.bp
+++ b/media/libstagefright/codecs/m4v_h263/enc/Android.bp
@@ -77,12 +77,7 @@
 
     static_libs: ["libstagefright_m4vh263enc"],
 
-    shared_libs: [
-        "libstagefright_foundation",
-        "libstagefright_omx",
-        "libutils",
-        "liblog",
-    ],
+    defaults: ["omx_soft_libs"],
 
     sanitize: {
         misc_undefined: [
diff --git a/media/libstagefright/codecs/mp3dec/Android.bp b/media/libstagefright/codecs/mp3dec/Android.bp
index 2154f84..9173ed6 100644
--- a/media/libstagefright/codecs/mp3dec/Android.bp
+++ b/media/libstagefright/codecs/mp3dec/Android.bp
@@ -105,12 +105,7 @@
         cfi: true,
     },
 
-    shared_libs: [
-        "libstagefright_omx",
-        "libstagefright_foundation",
-        "libutils",
-        "liblog",
-    ],
+    defaults: ["omx_soft_libs"],
 
     static_libs: ["libstagefright_mp3dec"],
     compile_multilib: "32",
diff --git a/media/libstagefright/codecs/mpeg2dec/Android.bp b/media/libstagefright/codecs/mpeg2dec/Android.bp
index c655544..26e786e 100644
--- a/media/libstagefright/codecs/mpeg2dec/Android.bp
+++ b/media/libstagefright/codecs/mpeg2dec/Android.bp
@@ -20,12 +20,7 @@
         "frameworks/native/include/media/openmax",
     ],
 
-    shared_libs: [
-        "libstagefright_omx",
-        "libstagefright_foundation",
-        "libutils",
-        "liblog",
-    ],
+    defaults: ["omx_soft_libs"],
 
     ldflags: ["-Wl,-Bsymbolic"],
 
diff --git a/media/libstagefright/codecs/on2/Android.bp b/media/libstagefright/codecs/on2/Android.bp
deleted file mode 100644
index b44c296..0000000
--- a/media/libstagefright/codecs/on2/Android.bp
+++ /dev/null
@@ -1 +0,0 @@
-subdirs = ["*"]
diff --git a/media/libstagefright/codecs/on2/dec/Android.bp b/media/libstagefright/codecs/on2/dec/Android.bp
index 174f183..abd21d7 100644
--- a/media/libstagefright/codecs/on2/dec/Android.bp
+++ b/media/libstagefright/codecs/on2/dec/Android.bp
@@ -14,12 +14,7 @@
 
     static_libs: ["libvpx"],
 
-    shared_libs: [
-        "libstagefright_omx",
-        "libstagefright_foundation",
-        "libutils",
-        "liblog",
-    ],
+    defaults: ["omx_soft_libs"],
 
     cflags: ["-Werror"],
 
diff --git a/media/libstagefright/codecs/on2/enc/Android.bp b/media/libstagefright/codecs/on2/enc/Android.bp
index 891a771..ea46bad 100644
--- a/media/libstagefright/codecs/on2/enc/Android.bp
+++ b/media/libstagefright/codecs/on2/enc/Android.bp
@@ -30,11 +30,7 @@
 
     static_libs: ["libvpx"],
 
-    shared_libs: [
-        "libstagefright_omx",
-        "libstagefright_foundation",
-        "libutils",
-        "liblog",
-    ],
+    defaults: ["omx_soft_libs"],
+
     compile_multilib: "32",
 }
diff --git a/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.h b/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.h
index b4904bf..c5c2abf 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.h
@@ -23,8 +23,6 @@
 #include <OMX_VideoExt.h>
 #include <OMX_IndexExt.h>
 
-#include <hardware/gralloc.h>
-
 #include "vpx/vpx_encoder.h"
 #include "vpx/vpx_codec.h"
 #include "vpx/vp8cx.h"
diff --git a/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.h b/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.h
index 85df69a..308a9ac 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.h
@@ -23,8 +23,6 @@
 #include <OMX_VideoExt.h>
 #include <OMX_IndexExt.h>
 
-#include <hardware/gralloc.h>
-
 #include "vpx/vpx_encoder.h"
 #include "vpx/vpx_codec.h"
 #include "vpx/vp8cx.h"
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
index 263d134..7208d69 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
@@ -23,8 +23,6 @@
 #include <OMX_VideoExt.h>
 #include <OMX_IndexExt.h>
 
-#include <hardware/gralloc.h>
-
 #include "vpx/vpx_encoder.h"
 #include "vpx/vpx_codec.h"
 #include "vpx/vp8cx.h"
diff --git a/media/libstagefright/codecs/opus/Android.bp b/media/libstagefright/codecs/opus/Android.bp
deleted file mode 100644
index b44c296..0000000
--- a/media/libstagefright/codecs/opus/Android.bp
+++ /dev/null
@@ -1 +0,0 @@
-subdirs = ["*"]
diff --git a/media/libstagefright/codecs/opus/dec/Android.bp b/media/libstagefright/codecs/opus/dec/Android.bp
index afe459d..bfcae07 100644
--- a/media/libstagefright/codecs/opus/dec/Android.bp
+++ b/media/libstagefright/codecs/opus/dec/Android.bp
@@ -12,12 +12,10 @@
         "frameworks/native/include/media/openmax",
     ],
 
+    defaults: ["omx_soft_libs"],
+
     shared_libs: [
         "libopus",
-        "libstagefright_omx",
-        "libstagefright_foundation",
-        "libutils",
-        "liblog",
     ],
 
     cflags: ["-Werror"],
diff --git a/media/libstagefright/codecs/raw/Android.bp b/media/libstagefright/codecs/raw/Android.bp
index f822445..1c23bad 100644
--- a/media/libstagefright/codecs/raw/Android.bp
+++ b/media/libstagefright/codecs/raw/Android.bp
@@ -24,11 +24,7 @@
         cfi: true,
     },
 
-    shared_libs: [
-        "libstagefright_omx",
-        "libstagefright_foundation",
-        "libutils",
-        "liblog",
-    ],
+    defaults: ["omx_soft_libs"],
+
     compile_multilib: "32",
 }
diff --git a/media/libstagefright/codecs/vorbis/Android.bp b/media/libstagefright/codecs/vorbis/Android.bp
deleted file mode 100644
index b44c296..0000000
--- a/media/libstagefright/codecs/vorbis/Android.bp
+++ /dev/null
@@ -1 +0,0 @@
-subdirs = ["*"]
diff --git a/media/libstagefright/codecs/vorbis/dec/Android.bp b/media/libstagefright/codecs/vorbis/dec/Android.bp
index a9265cb..2d1a922 100644
--- a/media/libstagefright/codecs/vorbis/dec/Android.bp
+++ b/media/libstagefright/codecs/vorbis/dec/Android.bp
@@ -12,12 +12,10 @@
         "frameworks/native/include/media/openmax",
     ],
 
+    defaults: ["omx_soft_libs"],
+
     shared_libs: [
         "libvorbisidec",
-        "libstagefright_omx",
-        "libstagefright_foundation",
-        "libutils",
-        "liblog",
     ],
 
     cflags: ["-Werror"],
diff --git a/media/libstagefright/codecs/xaacdec/Android.bp b/media/libstagefright/codecs/xaacdec/Android.bp
index 7392f1e..e49eb8f 100644
--- a/media/libstagefright/codecs/xaacdec/Android.bp
+++ b/media/libstagefright/codecs/xaacdec/Android.bp
@@ -24,12 +24,10 @@
 
     static_libs: ["libxaacdec"],
 
+    defaults: ["omx_soft_libs"],
+
     shared_libs: [
-        "libstagefright_omx",
-        "libstagefright_foundation",
-        "libutils",
         "libcutils",
-        "liblog",
     ],
 
     compile_multilib: "32",
diff --git a/media/libstagefright/data/media_codecs_google_audio.xml b/media/libstagefright/data/media_codecs_google_audio.xml
index 632088a..8899adc 100644
--- a/media/libstagefright/data/media_codecs_google_audio.xml
+++ b/media/libstagefright/data/media_codecs_google_audio.xml
@@ -58,7 +58,7 @@
         </MediaCodec>
         <MediaCodec name="OMX.google.raw.decoder" type="audio/raw">
             <Limit name="channel-count" max="8" />
-            <Limit name="sample-rate" ranges="8000-96000" />
+            <Limit name="sample-rate" ranges="8000-192000" />
             <Limit name="bitrate" range="1-10000000" />
         </MediaCodec>
         <MediaCodec name="OMX.google.flac.decoder" type="audio/flac">
diff --git a/media/libstagefright/data/media_codecs_google_c2_audio.xml b/media/libstagefright/data/media_codecs_google_c2_audio.xml
index 47a9715..be2404d 100644
--- a/media/libstagefright/data/media_codecs_google_c2_audio.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_audio.xml
@@ -67,7 +67,7 @@
         <MediaCodec name="c2.android.raw.decoder" type="audio/raw">
             <Alias name="OMX.google.raw.decoder" />
             <Limit name="channel-count" max="8" />
-            <Limit name="sample-rate" ranges="8000-96000" />
+            <Limit name="sample-rate" ranges="8000-192000" />
             <Limit name="bitrate" range="1-10000000" />
         </MediaCodec>
         <MediaCodec name="c2.android.flac.decoder" type="audio/flac">
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index 9d46d2d..784fd36 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -286,7 +286,7 @@
     double mFps;
     double mCaptureFps;
     bool mCreateInputBuffersSuspended;
-    uint32_t mLatency;
+    std::optional<uint32_t> mLatency;
 
     bool mTunneled;
 
diff --git a/media/libstagefright/include/media/stagefright/MPEG4Writer.h b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
index 803155d..6f19023 100644
--- a/media/libstagefright/include/media/stagefright/MPEG4Writer.h
+++ b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
@@ -82,10 +82,6 @@
         kWhatSwitch                          = 'swch',
     };
 
-    enum {
-        kMaxCttsOffsetTimeUs = 1000000LL,  // 1 second
-    };
-
     int  mFd;
     int mNextFd;
     sp<MetaData> mStartMeta;
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
index 2dca5c3..8b6944b 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
@@ -772,6 +772,7 @@
 constexpr char KEY_LANGUAGE[] = "language";
 constexpr char KEY_LATENCY[] = "latency";
 constexpr char KEY_LEVEL[] = "level";
+constexpr char KEY_MAX_B_FRAMES[] = "max-bframes";
 constexpr char KEY_MAX_BIT_RATE[] = "max-bitrate";
 constexpr char KEY_MAX_FPS_TO_ENCODER[] = "max-fps-to-encoder";
 constexpr char KEY_MAX_HEIGHT[] = "max-height";
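
KEY_MAX_B_FRAMES ("max-bframes") is the native-side constant for the format key an application can set to ask an encoder for B-frames. A hypothetical configure-time use through the NDK AMediaFormat API (illustrative; the key string comes from the constant added above, and whether a given encoder honours it is implementation dependent):

    #include <media/NdkMediaFormat.h>

    // Ask for at most one B-frame between reference frames when building
    // the encoder's configure format (hypothetical helper).
    void requestBFrames(AMediaFormat* format) {
        AMediaFormat_setInt32(format, "max-bframes", 1);
    }
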
diff --git a/media/libstagefright/omx/Android.bp b/media/libstagefright/omx/Android.bp
index 4383004..c06aca5 100644
--- a/media/libstagefright/omx/Android.bp
+++ b/media/libstagefright/omx/Android.bp
@@ -11,11 +11,6 @@
         "OMXNodeInstance.cpp",
         "OMXUtils.cpp",
         "OmxGraphicBufferSource.cpp",
-        "SimpleSoftOMXComponent.cpp",
-        "SoftOMXComponent.cpp",
-        "SoftOMXPlugin.cpp",
-        "SoftVideoDecoderOMXComponent.cpp",
-        "SoftVideoEncoderOMXComponent.cpp",
         "1.0/Omx.cpp",
         "1.0/OmxStore.cpp",
         "1.0/WGraphicBufferSource.cpp",
@@ -56,6 +51,7 @@
         "libvndksupport",
         "android.hardware.media.omx@1.0",
         "android.hardware.graphics.bufferqueue@1.0",
+        "libstagefright_omx_soft",
     ],
 
     export_shared_lib_headers: [
@@ -81,6 +77,64 @@
     },
 }
 
+cc_defaults {
+    name: "omx_soft_libs",
+    shared_libs: [
+        "libutils",
+        "liblog",
+        "libstagefright_foundation",
+        "libstagefright_omx_soft",
+    ],
+}
+
+cc_library_shared {
+    name: "libstagefright_omx_soft",
+    vendor_available: true,
+    vndk: {
+        enabled: true,
+    },
+
+    srcs: [
+        "SimpleSoftOMXComponent.cpp",
+        "SoftOMXComponent.cpp",
+        "SoftOMXPlugin.cpp",
+        "SoftVideoDecoderOMXComponent.cpp",
+        "SoftVideoEncoderOMXComponent.cpp",
+    ],
+
+    export_include_dirs: [
+        "include",
+    ],
+
+    shared_libs: [
+        "libutils",
+        "liblog",
+        "libui",
+        "libstagefright_foundation",
+    ],
+
+    export_shared_lib_headers: [
+        "libstagefright_foundation",
+        "libutils",
+        "liblog",
+    ],
+
+    cflags: [
+        "-Werror",
+        "-Wall",
+        "-Wno-unused-parameter",
+        "-Wno-documentation",
+    ],
+
+    sanitize: {
+        misc_undefined: [
+            "signed-integer-overflow",
+            "unsigned-integer-overflow",
+        ],
+        cfi: true,
+    },
+}
+
 cc_library_shared {
     name: "libstagefright_omx_utils",
     vendor_available: true,
@@ -116,6 +170,3 @@
     cflags: ["-Wall", "-Werror"],
 }
 
-//###############################################################################
-
-subdirs = ["tests"]
diff --git a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
index 2fbbb44..d75acda 100644
--- a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
@@ -559,7 +559,7 @@
         if (nativeMeta.nFenceFd >= 0) {
             sp<Fence> fence = new Fence(nativeMeta.nFenceFd);
             nativeMeta.nFenceFd = -1;
-            status_t err = fence->wait(IOMX::kFenceTimeoutMs);
+            status_t err = fence->wait(kFenceTimeoutMs);
             if (err != OK) {
                 ALOGE("Timed out waiting on input fence");
                 return NULL;
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/SoftOMXComponent.h b/media/libstagefright/omx/include/media/stagefright/omx/SoftOMXComponent.h
index 3ab6f88..79f0c77 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/SoftOMXComponent.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/SoftOMXComponent.h
@@ -21,12 +21,15 @@
 #include <media/stagefright/foundation/ABase.h>
 #include <media/stagefright/foundation/AString.h>
 #include <utils/RefBase.h>
-
+#include <utils/Log.h>
 #include <OMX_Component.h>
 
 namespace android {
 
 struct SoftOMXComponent : public RefBase {
+    enum {
+        kFenceTimeoutMs = 1000
+    };
     SoftOMXComponent(
             const char *name,
             const OMX_CALLBACKTYPE *callbacks,
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoDecoderOMXComponent.h b/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoDecoderOMXComponent.h
index 3b381ce..d7c1658 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoDecoderOMXComponent.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoDecoderOMXComponent.h
@@ -23,7 +23,10 @@
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/AHandlerReflector.h>
 #include <media/stagefright/foundation/ColorUtils.h>
-#include <media/IOMX.h>
+#include <media/openmax/OMX_Core.h>
+#include <media/openmax/OMX_Video.h>
+#include <media/openmax/OMX_VideoExt.h>
+
 #include <media/hardware/HardwareAPI.h>
 
 #include <utils/RefBase.h>
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoEncoderOMXComponent.h b/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoEncoderOMXComponent.h
index 2d6f31b..9cb72dd 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoEncoderOMXComponent.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoEncoderOMXComponent.h
@@ -18,7 +18,9 @@
 
 #define SOFT_VIDEO_ENCODER_OMX_COMPONENT_H_
 
-#include <media/IOMX.h>
+#include <media/openmax/OMX_Core.h>
+#include <media/openmax/OMX_Video.h>
+#include <media/openmax/OMX_VideoExt.h>
 
 #include "SimpleSoftOMXComponent.h"
 
diff --git a/media/libstagefright/xmlparser/vts/Android.bp b/media/libstagefright/xmlparser/vts/Android.bp
new file mode 100644
index 0000000..3f93e9e
--- /dev/null
+++ b/media/libstagefright/xmlparser/vts/Android.bp
@@ -0,0 +1,33 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_test {
+    name: "vts_mediaCodecs_validate_test",
+    srcs: [
+        "ValidateMediaCodecs.cpp"
+    ],
+    static_libs: [
+        "android.hardware.audio.common.test.utility",
+        "libxml2",
+    ],
+    shared_libs: [
+        "liblog",
+    ],
+    cflags: [
+        "-Wall",
+        "-Werror",
+    ],
+}
diff --git a/media/libstagefright/xmlparser/vts/Android.mk b/media/libstagefright/xmlparser/vts/Android.mk
new file mode 100644
index 0000000..d5290ba
--- /dev/null
+++ b/media/libstagefright/xmlparser/vts/Android.mk
@@ -0,0 +1,22 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := VtsValidateMediaCodecs
+include test/vts/tools/build/Android.host_config.mk
diff --git a/media/libstagefright/xmlparser/vts/AndroidTest.xml b/media/libstagefright/xmlparser/vts/AndroidTest.xml
new file mode 100644
index 0000000..97ee107
--- /dev/null
+++ b/media/libstagefright/xmlparser/vts/AndroidTest.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<configuration description="Config for VTS VtsValidateMediaCodecs.">
+    <option name="config-descriptor:metadata" key="plan" value="vts-treble" />
+    <target_preparer class="com.android.compatibility.common.tradefed.targetprep.VtsFilePusher">
+        <option name="abort-on-push-failure" value="false"/>
+        <option name="push-group" value="HostDrivenTest.push"/>
+        <option name="push" value="DATA/etc/media_codecs.xsd->/data/local/tmp/media_codecs.xsd"/>
+    </target_preparer>
+    <test class="com.android.tradefed.testtype.VtsMultiDeviceTest">
+        <option name="test-module-name" value="VtsValidateMediaCodecs"/>
+        <option name="binary-test-source" value="_32bit::DATA/nativetest/vts_mediaCodecs_validate_test/vts_mediaCodecs_validate_test" />
+        <option name="binary-test-source" value="_64bit::DATA/nativetest64/vts_mediaCodecs_validate_test/vts_mediaCodecs_validate_test" />
+        <option name="binary-test-type" value="gtest"/>
+        <option name="test-timeout" value="30s"/>
+    </test>
+</configuration>
diff --git a/media/libstagefright/xmlparser/vts/ValidateMediaCodecs.cpp b/media/libstagefright/xmlparser/vts/ValidateMediaCodecs.cpp
new file mode 100644
index 0000000..b07833e
--- /dev/null
+++ b/media/libstagefright/xmlparser/vts/ValidateMediaCodecs.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <dirent.h>
+#include <regex>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <string>
+
+#include "utility/ValidateXml.h"
+
+static void get_files_in_dirs(const char* dir_path, std::vector<std::string>& files) {
+    DIR* d;
+    struct dirent* de;
+
+    d = opendir(dir_path);
+    if (d == nullptr) {
+        return;
+    }
+
+    while ((de = readdir(d))) {
+        if (de->d_type != DT_REG) {
+            continue;
+        }
+        if (std::regex_match(de->d_name, std::regex("(media_codecs)(.*)(.xml)"))) {
+            files.push_back(de->d_name);
+        }
+    }
+    closedir(d);
+}
+
+TEST(CheckConfig, mediaCodecsValidation) {
+    RecordProperty("description",
+                   "Verify that the media codecs file "
+                   "is valid according to the schema");
+
+    const char* location = "/vendor/etc";
+
+    std::vector<std::string> files;
+    get_files_in_dirs(location, files);
+
+    for (std::string file_name : files) {
+        EXPECT_ONE_VALID_XML_MULTIPLE_LOCATIONS(file_name.c_str(), {location},
+                                                "/data/local/tmp/media_codecs.xsd");
+    }
+}
diff --git a/media/mediaserver/mediaserver.rc b/media/mediaserver/mediaserver.rc
index 8cfcd79..f6c325c 100644
--- a/media/mediaserver/mediaserver.rc
+++ b/media/mediaserver/mediaserver.rc
@@ -2,7 +2,5 @@
     class main
     user media
     group audio camera inet net_bt net_bt_admin net_bw_acct drmrpc mediadrm
-    # TODO(b/123275379): Remove updatable when http://aosp/878198 has landed
-    updatable
     ioprio rt 4
     writepid /dev/cpuset/foreground/tasks /dev/stune/foreground/tasks
diff --git a/media/mtp/Android.bp b/media/mtp/Android.bp
index 2cf9b82..66a3139 100644
--- a/media/mtp/Android.bp
+++ b/media/mtp/Android.bp
@@ -54,6 +54,3 @@
     ],
 }
 
-subdirs = [
-    "tests",
-]
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index f4cc704..a4f5730 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -73,6 +73,7 @@
     shared_libs: [
         "android.hardware.graphics.bufferqueue@1.0",
         "android.hidl.token@1.0-utils",
+        "libandroid_runtime_lazy",
         "libbinder",
         "libmedia",
         "libmedia_omx",
@@ -93,12 +94,6 @@
         "libmediandk_utils",
     ],
 
-    required: [
-        // libmediandk may be used by Java and non-Java things. When lower-level things use it,
-        // they shouldn't have to take on the cost of loading libandroid_runtime.
-        "libandroid_runtime",
-    ],
-
     export_include_dirs: ["include"],
 
     export_shared_lib_headers: [
diff --git a/media/ndk/NdkMediaDataSource.cpp b/media/ndk/NdkMediaDataSource.cpp
index 0891f2a..7979c2f 100644
--- a/media/ndk/NdkMediaDataSource.cpp
+++ b/media/ndk/NdkMediaDataSource.cpp
@@ -23,7 +23,8 @@
 #include <jni.h>
 #include <unistd.h>
 
-#include <binder/IBinder.h>
+#include <android_runtime/AndroidRuntime.h>
+#include <android_util_Binder.h>
 #include <cutils/properties.h>
 #include <utils/Log.h>
 #include <utils/StrongPointer.h>
@@ -39,67 +40,9 @@
 #include "../../libstagefright/include/NuCachedSource2.h"
 #include "NdkMediaDataSourceCallbacksPriv.h"
 
-#include <mutex> // std::call_once,once_flag
-#include <dlfcn.h> // dlopen
 
 using namespace android;
 
-// load libandroid_runtime.so lazily.
-// A vendor process may use libmediandk but should not depend on libandroid_runtime.
-// TODO(jooyung): remove duplicate (b/125550121)
-// frameworks/native/libs/binder/ndk/ibinder_jni.cpp
-namespace {
-
-typedef JNIEnv* (*getJNIEnv_t)();
-typedef sp<IBinder> (*ibinderForJavaObject_t)(JNIEnv* env, jobject obj);
-
-getJNIEnv_t getJNIEnv_;
-ibinderForJavaObject_t ibinderForJavaObject_;
-
-std::once_flag mLoadFlag;
-
-void load() {
-    std::call_once(mLoadFlag, []() {
-        void* handle = dlopen("libandroid_runtime.so", RTLD_LAZY);
-        if (handle == nullptr) {
-            ALOGE("Could not open libandroid_runtime.");
-            return;
-        }
-
-        getJNIEnv_ = reinterpret_cast<getJNIEnv_t>(
-                dlsym(handle, "_ZN7android14AndroidRuntime9getJNIEnvEv"));
-        if (getJNIEnv_ == nullptr) {
-            ALOGE("Could not find AndroidRuntime::getJNIEnv.");
-            // no return
-        }
-
-        ibinderForJavaObject_ = reinterpret_cast<ibinderForJavaObject_t>(
-                dlsym(handle, "_ZN7android20ibinderForJavaObjectEP7_JNIEnvP8_jobject"));
-        if (ibinderForJavaObject_ == nullptr) {
-            ALOGE("Could not find ibinderForJavaObject.");
-            // no return
-        }
-    });
-}
-
-JNIEnv* getJNIEnv() {
-    load();
-    if (getJNIEnv_ == nullptr) {
-        return nullptr;
-    }
-    return (getJNIEnv_)();
-}
-
-sp<IBinder> ibinderForJavaObject(JNIEnv* env, jobject obj) {
-    load();
-    if (ibinderForJavaObject_ == nullptr) {
-        return nullptr;
-    }
-    return (ibinderForJavaObject_)(env, obj);
-}
-
-} // namespace
-
 struct AMediaDataSource {
     void *userdata;
     AMediaDataSourceReadAt readAt;
@@ -181,14 +124,9 @@
     if (obj == NULL) {
         return NULL;
     }
-    sp<IBinder> binder;
     switch (version) {
         case 1:
-            binder = ibinderForJavaObject(env, obj);
-            if (binder == NULL) {
-                return NULL;
-            }
-            return interface_cast<IMediaHTTPService>(binder);
+            return interface_cast<IMediaHTTPService>(ibinderForJavaObject(env, obj));
         case 2:
             return new JMedia2HTTPService(env, obj);
         default:
@@ -241,7 +179,7 @@
 
     switch (version) {
         case 1:
-            env = getJNIEnv();
+            env = AndroidRuntime::getJNIEnv();
             clazz = "android/media/MediaHTTPService";
             method = "createHttpServiceBinderIfNecessary";
             signature = "(Ljava/lang/String;)Landroid/os/IBinder;";
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 2fb24f5..768cd1e 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -130,14 +130,22 @@
     return ok;
 }
 
-bool captureHotwordAllowed(pid_t pid, uid_t uid) {
+bool captureMediaOutputAllowed(pid_t pid, uid_t uid) {
+    if (isAudioServerOrRootUid(uid)) return true;
+    static const String16 sCaptureMediaOutput("android.permission.CAPTURE_MEDIA_OUTPUT");
+    bool ok = PermissionCache::checkPermission(sCaptureMediaOutput, pid, uid);
+    if (!ok) ALOGE("Request requires android.permission.CAPTURE_MEDIA_OUTPUT");
+    return ok;
+}
+
+bool captureHotwordAllowed(const String16& opPackageName, pid_t pid, uid_t uid) {
     // CAPTURE_AUDIO_HOTWORD permission implies RECORD_AUDIO permission
-    bool ok = recordingAllowed(String16(""), pid, uid);
+    bool ok = recordingAllowed(opPackageName, pid, uid);
 
     if (ok) {
         static const String16 sCaptureHotwordAllowed("android.permission.CAPTURE_AUDIO_HOTWORD");
         // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
-        ok = PermissionCache::checkCallingPermission(sCaptureHotwordAllowed);
+        ok = PermissionCache::checkPermission(sCaptureHotwordAllowed, pid, uid);
     }
     if (!ok) ALOGE("android.permission.CAPTURE_AUDIO_HOTWORD");
     return ok;
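
captureMediaOutputAllowed() follows the same shape as the neighbouring checks: short-circuit for the audioserver/root UID, then consult PermissionCache for android.permission.CAPTURE_MEDIA_OUTPUT. A hypothetical caller-side use (only captureMediaOutputAllowed() comes from this patch; the surrounding names are made up for illustration):

    #include <sys/types.h>
    #include <mediautils/ServiceUtilities.h>
    #include <utils/Errors.h>

    namespace android {

    // Hypothetical service entry point that gates a capture request on the
    // new permission check.
    status_t handleCaptureRequest(pid_t callingPid, uid_t callingUid) {
        if (!captureMediaOutputAllowed(callingPid, callingUid)) {
            return PERMISSION_DENIED;  // caller lacks CAPTURE_MEDIA_OUTPUT
        }
        // ... proceed with wiring up the capture ...
        return OK;
    }

    }  // namespace android
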
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
index 94370ee..c5fe05f 100644
--- a/media/utils/include/mediautils/ServiceUtilities.h
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -74,7 +74,8 @@
 bool startRecording(const String16& opPackageName, pid_t pid, uid_t uid);
 void finishRecording(const String16& opPackageName, uid_t uid);
 bool captureAudioOutputAllowed(pid_t pid, uid_t uid);
-bool captureHotwordAllowed(pid_t pid, uid_t uid);
+bool captureMediaOutputAllowed(pid_t pid, uid_t uid);
+bool captureHotwordAllowed(const String16& opPackageName, pid_t pid, uid_t uid);
 bool settingsAllowed();
 bool modifyAudioRoutingAllowed();
 bool modifyDefaultAudioEffectsAllowed();
diff --git a/services/audioflinger/Android.bp b/services/audioflinger/Android.bp
new file mode 100644
index 0000000..96ad54b
--- /dev/null
+++ b/services/audioflinger/Android.bp
@@ -0,0 +1,73 @@
+
+
+cc_library_shared {
+    name: "libaudioflinger",
+
+    srcs: [
+        "AudioFlinger.cpp",
+        "AudioHwDevice.cpp",
+        "AudioStreamOut.cpp",
+        "AudioWatchdog.cpp",
+        "BufLog.cpp",
+        "Effects.cpp",
+        "FastCapture.cpp",
+        "FastCaptureDumpState.cpp",
+        "FastCaptureState.cpp",
+        "FastMixer.cpp",
+        "FastMixerDumpState.cpp",
+        "FastMixerState.cpp",
+        "FastThread.cpp",
+        "FastThreadDumpState.cpp",
+        "FastThreadState.cpp",
+        "NBAIO_Tee.cpp",
+        "PatchPanel.cpp",
+        "SpdifStreamOut.cpp",
+        "StateQueue.cpp",
+        "Threads.cpp",
+        "Tracks.cpp",
+        "TypedLogger.cpp",
+    ],
+
+    include_dirs: [
+        "frameworks/av/services/audiopolicy",
+        "frameworks/av/services/medialog",
+    ],
+
+    shared_libs: [
+        "libaudiohal",
+        "libaudioprocessing",
+        "libaudiospdif",
+        "libaudioutils",
+        "libcutils",
+        "libutils",
+        "liblog",
+        "libbinder",
+        "libaudioclient",
+        "libmedialogservice",
+        "libmediametrics",
+        "libmediautils",
+        "libnbaio",
+        "libnblog",
+        "libpowermanager",
+        "libmediautils",
+        "libmemunreachable",
+        "libmedia_helper",
+        "libvibrator",
+    ],
+
+    static_libs: [
+        "libcpustats",
+        "libsndfile",
+    ],
+
+    cflags: [
+        "-DSTATE_QUEUE_INSTANTIATIONS=\"StateQueueInstantiations.cpp\"",
+        "-fvisibility=hidden",
+        "-Werror",
+        "-Wall",
+    ],
+    sanitize: {
+        integer_overflow: true,
+    },
+
+}
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
deleted file mode 100644
index 40980a6..0000000
--- a/services/audioflinger/Android.mk
+++ /dev/null
@@ -1,74 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:=               \
-    AudioFlinger.cpp            \
-    Threads.cpp                 \
-    Tracks.cpp                  \
-    AudioHwDevice.cpp           \
-    AudioStreamOut.cpp          \
-    SpdifStreamOut.cpp          \
-    Effects.cpp                 \
-    PatchPanel.cpp              \
-    StateQueue.cpp              \
-    BufLog.cpp                  \
-    TypedLogger.cpp             \
-    NBAIO_Tee.cpp               \
-
-LOCAL_C_INCLUDES := \
-    frameworks/av/services/audiopolicy \
-    frameworks/av/services/medialog \
-    $(call include-path-for, audio-utils)
-
-LOCAL_SHARED_LIBRARIES := \
-    libaudiohal \
-    libaudioprocessing \
-    libaudiospdif \
-    libaudioutils \
-    libcutils \
-    libutils \
-    liblog \
-    libbinder \
-    libaudioclient \
-    libmedialogservice \
-    libmediametrics \
-    libmediautils \
-    libnbaio \
-    libnblog \
-    libpowermanager \
-    libmediautils \
-    libmemunreachable \
-    libmedia_helper \
-    libvibrator
-
-LOCAL_STATIC_LIBRARIES := \
-    libcpustats \
-    libsndfile \
-
-LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
-
-LOCAL_MODULE:= libaudioflinger
-
-LOCAL_SRC_FILES += \
-    AudioWatchdog.cpp        \
-    FastCapture.cpp          \
-    FastCaptureDumpState.cpp \
-    FastCaptureState.cpp     \
-    FastMixer.cpp            \
-    FastMixerDumpState.cpp   \
-    FastMixerState.cpp       \
-    FastThread.cpp           \
-    FastThreadDumpState.cpp  \
-    FastThreadState.cpp
-
-LOCAL_CFLAGS += -DSTATE_QUEUE_INSTANTIATIONS='"StateQueueInstantiations.cpp"'
-
-LOCAL_CFLAGS += -fvisibility=hidden
-
-LOCAL_CFLAGS += -Werror -Wall
-LOCAL_SANITIZE := integer_overflow
-
-include $(BUILD_SHARED_LIBRARY)
-
-include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 43260c2..0825cb4 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -466,15 +466,8 @@
 
 bool AudioFlinger::dumpTryLock(Mutex& mutex)
 {
-    bool locked = false;
-    for (int i = 0; i < kDumpLockRetries; ++i) {
-        if (mutex.tryLock() == NO_ERROR) {
-            locked = true;
-            break;
-        }
-        usleep(kDumpLockSleepUs);
-    }
-    return locked;
+    status_t err = mutex.timedLock(kDumpLockTimeoutNs);
+    return err == NO_ERROR;
 }
 
 status_t AudioFlinger::dump(int fd, const Vector<String16>& args)
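
dumpTryLock() above now bounds the wait with Mutex::timedLock() and kDumpLockTimeoutNs (one second, see the header change below) instead of polling tryLock() fifty times with 20 ms sleeps. The same idea in standard C++, as a sketch with std::timed_mutex rather than android::Mutex:

    #include <chrono>
    #include <mutex>

    // Returns true if the lock was acquired within the timeout; the caller
    // unlocks only when this returned true.
    bool dumpTryLock(std::timed_mutex& mutex) {
        return mutex.try_lock_for(std::chrono::seconds(1));
    }
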
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 3804418..9960f0e 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -433,8 +433,7 @@
     static uint32_t         mScreenState;
 
     // Internal dump utilities.
-    static const int kDumpLockRetries = 50;
-    static const int kDumpLockSleepUs = 20000;
+    static const int kDumpLockTimeoutNs = 1 * NANOS_PER_SECOND;
     static bool dumpTryLock(Mutex& mutex);
     void dumpPermissionDenial(int fd, const Vector<String16>& args);
     void dumpClients(int fd, const Vector<String16>& args);
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index ecaeb52..2b34267 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -2335,13 +2335,10 @@
 
 void AudioFlinger::EffectChain::dump(int fd, const Vector<String16>& args)
 {
-    const size_t SIZE = 256;
-    char buffer[SIZE];
     String8 result;
 
-    size_t numEffects = mEffects.size();
-    snprintf(buffer, SIZE, "    %zu effects for session %d\n", numEffects, mSessionId);
-    result.append(buffer);
+    const size_t numEffects = mEffects.size();
+    result.appendFormat("    %zu effects for session %d\n", numEffects, mSessionId);
 
     if (numEffects) {
         bool locked = AudioFlinger::dumpTryLock(mLock);
@@ -2369,6 +2366,8 @@
         if (locked) {
             mLock.unlock();
         }
+    } else {
+        write(fd, result.string(), result.size());
     }
 }
 
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 0e1e97f..04d62fa 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -786,12 +786,8 @@
     }
 }
 
-void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args __unused)
+void AudioFlinger::ThreadBase::dump(int fd, const Vector<String16>& args)
 {
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
     dprintf(fd, "\n%s thread %p, name %s, tid %d, type %d (%s):\n", isOutput() ? "Output" : "Input",
             this, mThreadName, getTid(), type(), threadTypeToString(type()));
 
@@ -800,6 +796,21 @@
         dprintf(fd, "  Thread may be deadlocked\n");
     }
 
+    dumpBase_l(fd, args);
+    dumpInternals_l(fd, args);
+    dumpTracks_l(fd, args);
+    dumpEffectChains_l(fd, args);
+
+    if (locked) {
+        mLock.unlock();
+    }
+
+    dprintf(fd, "  Local log:\n");
+    mLocalLog.dump(fd, "   " /* prefix */, 40 /* lines */);
+}
+
+void AudioFlinger::ThreadBase::dumpBase_l(int fd, const Vector<String16>& args __unused)
+{
     dprintf(fd, "  I/O handle: %d\n", mId);
     dprintf(fd, "  Standby: %s\n", mStandby ? "yes" : "no");
     dprintf(fd, "  Sample rate: %u Hz\n", mSampleRate);
@@ -814,6 +825,8 @@
     dprintf(fd, "  Pending config events:");
     size_t numConfig = mConfigEvents.size();
     if (numConfig) {
+        const size_t SIZE = 256;
+        char buffer[SIZE];
         for (size_t i = 0; i < numConfig; i++) {
             mConfigEvents[i]->dump(buffer, SIZE);
             dprintf(fd, "\n    %s", buffer);
@@ -858,17 +871,12 @@
                 isOutput() ? "write" : "read",
                 mLatencyMs.toString().c_str());
     }
-
-    if (locked) {
-        mLock.unlock();
-    }
 }
 
-void AudioFlinger::ThreadBase::dumpEffectChains(int fd, const Vector<String16>& args)
+void AudioFlinger::ThreadBase::dumpEffectChains_l(int fd, const Vector<String16>& args)
 {
     const size_t SIZE = 256;
     char buffer[SIZE];
-    String8 result;
 
     size_t numEffectChains = mEffectChains.size();
     snprintf(buffer, SIZE, "  %zu Effect Chains\n", numEffectChains);
@@ -1819,16 +1827,24 @@
     free(mEffectBuffer);
 }
 
-void AudioFlinger::PlaybackThread::dump(int fd, const Vector<String16>& args)
+// Thread virtuals
+
+void AudioFlinger::PlaybackThread::onFirstRef()
 {
-    dumpInternals(fd, args);
-    dumpTracks(fd, args);
-    dumpEffectChains(fd, args);
-    dprintf(fd, "  Local log:\n");
-    mLocalLog.dump(fd, "   " /* prefix */, 40 /* lines */);
+    run(mThreadName, ANDROID_PRIORITY_URGENT_AUDIO);
 }
 
-void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& args __unused)
+// ThreadBase virtuals
+void AudioFlinger::PlaybackThread::preExit()
+{
+    ALOGV("  preExit()");
+    // FIXME this is using hard-coded strings but in the future, this functionality will be
+    //       converted to use audio HAL extensions required to support tunneling
+    status_t result = mOutput->stream->setParameters(String8("exiting=1"));
+    ALOGE_IF(result != OK, "Error when setting parameters on exit: %d", result);
+}
+
+void AudioFlinger::PlaybackThread::dumpTracks_l(int fd, const Vector<String16>& args __unused)
 {
     String8 result;
 
@@ -1893,10 +1909,8 @@
     write(fd, result.string(), result.size());
 }
 
-void AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
+void AudioFlinger::PlaybackThread::dumpInternals_l(int fd, const Vector<String16>& args __unused)
 {
-    dumpBase(fd, args);
-
     dprintf(fd, "  Master mute: %s\n", mMasterMute ? "on" : "off");
     if (mHapticChannelMask != AUDIO_CHANNEL_NONE) {
         dprintf(fd, "  Haptic channel mask: %#x (%s)\n", mHapticChannelMask,
@@ -1927,23 +1941,6 @@
     }
 }
 
-// Thread virtuals
-
-void AudioFlinger::PlaybackThread::onFirstRef()
-{
-    run(mThreadName, ANDROID_PRIORITY_URGENT_AUDIO);
-}
-
-// ThreadBase virtuals
-void AudioFlinger::PlaybackThread::preExit()
-{
-    ALOGV("  preExit()");
-    // FIXME this is using hard-coded strings but in the future, this functionality will be
-    //       converted to use audio HAL extensions required to support tunneling
-    status_t result = mOutput->stream->setParameters(String8("exiting=1"));
-    ALOGE_IF(result != OK, "Error when setting parameters on exit: %d", result);
-}
-
 // PlaybackThread::createTrack_l() must be called with AudioFlinger::mLock held
 sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l(
         const sp<AudioFlinger::Client>& client,
@@ -5356,9 +5353,9 @@
 }
 
 
-void AudioFlinger::MixerThread::dumpInternals(int fd, const Vector<String16>& args)
+void AudioFlinger::MixerThread::dumpInternals_l(int fd, const Vector<String16>& args)
 {
-    PlaybackThread::dumpInternals(fd, args);
+    PlaybackThread::dumpInternals_l(fd, args);
     dprintf(fd, "  Thread throttle time (msecs): %u\n", mThreadThrottleTimeMs);
     dprintf(fd, "  AudioMixer tracks: %s\n", mAudioMixer->trackNames().c_str());
     dprintf(fd, "  Master mono: %s\n", mMasterMono ? "on" : "off");
@@ -5432,9 +5429,9 @@
 {
 }
 
-void AudioFlinger::DirectOutputThread::dumpInternals(int fd, const Vector<String16>& args)
+void AudioFlinger::DirectOutputThread::dumpInternals_l(int fd, const Vector<String16>& args)
 {
-    PlaybackThread::dumpInternals(fd, args);
+    PlaybackThread::dumpInternals_l(fd, args);
     dprintf(fd, "  Master balance: %f  Left: %f  Right: %f\n",
             mMasterBalance.load(), mMasterBalanceLeft, mMasterBalanceRight);
 }
@@ -6449,9 +6446,9 @@
     }
 }
 
-void AudioFlinger::DuplicatingThread::dumpInternals(int fd, const Vector<String16>& args __unused)
+void AudioFlinger::DuplicatingThread::dumpInternals_l(int fd, const Vector<String16>& args __unused)
 {
-    MixerThread::dumpInternals(fd, args);
+    MixerThread::dumpInternals_l(fd, args);
 
     std::stringstream ss;
     const size_t numTracks = mOutputTracks.size();
@@ -7781,19 +7778,8 @@
     }
 }
 
-void AudioFlinger::RecordThread::dump(int fd, const Vector<String16>& args)
+void AudioFlinger::RecordThread::dumpInternals_l(int fd, const Vector<String16>& args __unused)
 {
-    dumpInternals(fd, args);
-    dumpTracks(fd, args);
-    dumpEffectChains(fd, args);
-    dprintf(fd, "  Local log:\n");
-    mLocalLog.dump(fd, "   " /* prefix */, 40 /* lines */);
-}
-
-void AudioFlinger::RecordThread::dumpInternals(int fd, const Vector<String16>& args)
-{
-    dumpBase(fd, args);
-
     AudioStreamIn *input = mInput;
     audio_input_flags_t flags = input != NULL ? input->flags : AUDIO_INPUT_FLAG_NONE;
     dprintf(fd, "  AudioStreamIn: %p flags %#x (%s)\n",
@@ -7820,7 +7806,7 @@
     copy->dump(fd);
 }
 
-void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args __unused)
+void AudioFlinger::RecordThread::dumpTracks_l(int fd, const Vector<String16>& args __unused)
 {
     String8 result;
     size_t numtracks = mTracks.size();
@@ -9085,19 +9071,8 @@
     }
 }
 
-void AudioFlinger::MmapThread::dump(int fd, const Vector<String16>& args)
+void AudioFlinger::MmapThread::dumpInternals_l(int fd, const Vector<String16>& args __unused)
 {
-    dumpInternals(fd, args);
-    dumpTracks(fd, args);
-    dumpEffectChains(fd, args);
-    dprintf(fd, "  Local log:\n");
-    mLocalLog.dump(fd, "   " /* prefix */, 40 /* lines */);
-}
-
-void AudioFlinger::MmapThread::dumpInternals(int fd, const Vector<String16>& args)
-{
-    dumpBase(fd, args);
-
     dprintf(fd, "  Attributes: content type %d usage %d source %d\n",
             mAttr.content_type, mAttr.usage, mAttr.source);
     dprintf(fd, "  Session: %d port Id: %d\n", mSessionId, mPortId);
@@ -9106,7 +9081,7 @@
     }
 }
 
-void AudioFlinger::MmapThread::dumpTracks(int fd, const Vector<String16>& args __unused)
+void AudioFlinger::MmapThread::dumpTracks_l(int fd, const Vector<String16>& args __unused)
 {
     String8 result;
     size_t numtracks = mActiveTracks.size();
@@ -9329,9 +9304,9 @@
     }
 }
 
-void AudioFlinger::MmapPlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
+void AudioFlinger::MmapPlaybackThread::dumpInternals_l(int fd, const Vector<String16>& args)
 {
-    MmapThread::dumpInternals(fd, args);
+    MmapThread::dumpInternals_l(fd, args);
 
     dprintf(fd, "  Stream type: %d Stream volume: %f HAL volume: %f Stream mute %d\n",
             mStreamType, mStreamVolume, mHalVolFloat, mStreamMute);
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index e5abce7..18cb361 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -43,9 +43,6 @@
 
     virtual status_t    readyToRun();
 
-    void dumpBase(int fd, const Vector<String16>& args);
-    void dumpEffectChains(int fd, const Vector<String16>& args);
-
     void clearPowerManager();
 
     // base for record and playback
@@ -418,7 +415,7 @@
 
                 bool                isMsdDevice() const { return mIsMsdDevice; }
 
-    virtual     void                dump(int fd, const Vector<String16>& args) = 0;
+                void                dump(int fd, const Vector<String16>& args);
 
                 // deliver stats to mediametrics.
                 void                sendStatistics(bool force);
@@ -470,6 +467,11 @@
                                 return INVALID_OPERATION;
                             }
 
+    virtual     void        dumpInternals_l(int fd __unused, const Vector<String16>& args __unused)
+                            { }
+    virtual     void        dumpTracks_l(int fd __unused, const Vector<String16>& args __unused) { }
+
+
     friend class AudioFlinger;      // for mEffectChains
 
                 const type_t            mType;
@@ -657,6 +659,10 @@
                 };
 
                 SimpleLog mLocalLog;
+
+private:
+                void dumpBase_l(int fd, const Vector<String16>& args);
+                void dumpEffectChains_l(int fd, const Vector<String16>& args);
 };
 
 class VolumeInterface {
@@ -709,8 +715,6 @@
                    audio_io_handle_t id, audio_devices_t device, type_t type, bool systemReady);
     virtual             ~PlaybackThread();
 
-                void        dump(int fd, const Vector<String16>& args) override;
-
     // Thread virtuals
     virtual     bool        threadLoop();
 
@@ -760,6 +764,9 @@
                                 mActiveTracks.updatePowerState(this, true /* force */);
                             }
 
+                void        dumpInternals_l(int fd, const Vector<String16>& args) override;
+                void        dumpTracks_l(int fd, const Vector<String16>& args) override;
+
 public:
 
     virtual     status_t    initCheck() const { return (mOutput == NULL) ? NO_INIT : NO_ERROR; }
@@ -1009,9 +1016,6 @@
     void        updateMetadata_l() final;
     virtual void sendMetadataToBackend_l(const StreamOutHalInterface::SourceMetadata& metadata);
 
-    virtual void dumpInternals(int fd, const Vector<String16>& args);
-    void        dumpTracks(int fd, const Vector<String16>& args);
-
     // The Tracks class manages tracks added and removed from the Thread.
     template <typename T>
     class Tracks {
@@ -1166,7 +1170,6 @@
 
     virtual     bool        checkForNewParameter_l(const String8& keyValuePair,
                                                    status_t& status);
-    virtual     void        dumpInternals(int fd, const Vector<String16>& args);
 
     virtual     bool        isTrackAllowed_l(
                                     audio_channel_mask_t channelMask, audio_format_t format,
@@ -1185,6 +1188,8 @@
         }
     }
 
+                void        dumpInternals_l(int fd, const Vector<String16>& args) override;
+
     // threadLoop snippets
     virtual     ssize_t     threadLoop_write();
     virtual     void        threadLoop_standby();
@@ -1266,8 +1271,6 @@
     virtual     bool        checkForNewParameter_l(const String8& keyValuePair,
                                                    status_t& status);
 
-                void        dumpInternals(int fd, const Vector<String16>& args) override;
-
     virtual     void        flushHw_l();
 
                 void        setMasterBalance(float balance) override;
@@ -1278,6 +1281,8 @@
     virtual     uint32_t    suspendSleepTimeUs() const;
     virtual     void        cacheParameters_l();
 
+                void        dumpInternals_l(int fd, const Vector<String16>& args) override;
+
     // threadLoop snippets
     virtual     mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove);
     virtual     void        threadLoop_mix();
@@ -1397,8 +1402,6 @@
     virtual                 ~DuplicatingThread();
 
     // Thread virtuals
-    virtual     void        dumpInternals(int fd, const Vector<String16>& args) override;
-
                 void        addOutputTrack(MixerThread* thread);
                 void        removeOutputTrack(MixerThread* thread);
                 uint32_t    waitTimeMs() const { return mWaitTimeMs; }
@@ -1407,6 +1410,7 @@
                         const StreamOutHalInterface::SourceMetadata& metadata) override;
 protected:
     virtual     uint32_t    activeSleepTimeUs() const;
+                void        dumpInternals_l(int fd, const Vector<String16>& args) override;
 
 private:
                 bool        outputsReady(const SortedVector< sp<OutputTrack> > &outputTracks);
@@ -1512,9 +1516,6 @@
     void        destroyTrack_l(const sp<RecordTrack>& track);
     void        removeTrack_l(const sp<RecordTrack>& track);
 
-    void        dumpInternals(int fd, const Vector<String16>& args);
-    void        dumpTracks(int fd, const Vector<String16>& args);
-
     // Thread virtuals
     virtual bool        threadLoop();
     virtual void        preExit();
@@ -1551,7 +1552,6 @@
             // return true if the caller should then do it's part of the stopping process
             bool        stop(RecordTrack* recordTrack);
 
-            void        dump(int fd, const Vector<String16>& args) override;
             AudioStreamIn* clearInput();
             virtual sp<StreamHalInterface> stream() const;
 
@@ -1619,6 +1619,11 @@
                             return audio_is_input_device(
                                     mInDevice & mTimestampCorrectedDevices);
                         }
+
+protected:
+            void        dumpInternals_l(int fd, const Vector<String16>& args) override;
+            void        dumpTracks_l(int fd, const Vector<String16>& args) override;
+
 private:
             // Enter standby if not already in standby, and set mStandby flag
             void    standbyIfNotAlreadyInStandby();
@@ -1768,11 +1773,9 @@
                 // Sets the UID records silence
     virtual     void        setRecordSilenced(uid_t uid __unused, bool silenced __unused) {}
 
-                void        dump(int fd, const Vector<String16>& args) override;
-    virtual     void        dumpInternals(int fd, const Vector<String16>& args);
-                void        dumpTracks(int fd, const Vector<String16>& args);
-
  protected:
+                void        dumpInternals_l(int fd, const Vector<String16>& args) override;
+                void        dumpTracks_l(int fd, const Vector<String16>& args) override;
 
                 audio_attributes_t      mAttr;
                 audio_session_t         mSessionId;
@@ -1822,8 +1825,6 @@
     virtual     void        checkSilentMode_l();
                 void        processVolume_l() override;
 
-    virtual     void        dumpInternals(int fd, const Vector<String16>& args);
-
     virtual     bool        isOutput() const override { return true; }
 
                 void        updateMetadata_l() override;
@@ -1831,6 +1832,7 @@
     virtual     void        toAudioPortConfig(struct audio_port_config *config);
 
 protected:
+                void        dumpInternals_l(int fd, const Vector<String16>& args) override;
 
                 audio_stream_type_t         mStreamType;
                 float                       mMasterVolume;
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index acbfc9e..a2cf7aa 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -198,6 +198,7 @@
     //dump state
     virtual status_t    dump(int fd) = 0;
 
+    virtual status_t setAllowedCapturePolicy(uid_t uid, audio_flags_mask_t flags) = 0;
     virtual bool isOffloadSupported(const audio_offload_info_t& offloadInfo) = 0;
     virtual bool isDirectOutputSupported(const audio_config_base_t& config,
                                          const audio_attributes_t& attributes) = 0;
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index cc43fe6..33e506f 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -135,11 +135,18 @@
     /**
      * @brief filter the devices supported by this collection against another collection
      * @param devices to filter against
-     * @return
+     * @return a filtered DeviceVector
      */
     DeviceVector filter(const DeviceVector &devices) const;
 
     /**
+     * @brief filter the devices supported by this collection before sending
+     * them to the Engine via the AudioPolicyManagerObserver interface
+     * @return a filtered DeviceVector
+     */
+    DeviceVector filterForEngine() const;
+
+    /**
      * @brief merge two vectors. As SortedVector Implementation is buggy (it does not check the size
      * of the destination vector, only of the source, it provides a safe implementation
      * @param devices source device vector to merge with
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index f7289ca..1b812c0 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -47,32 +47,29 @@
 
     int indexCriterion = 0;
     for (const auto &criterion : mCriteria) {
-        dst->appendFormat("%*s- Criterion %d:\n", spaces + 2, "", indexCriterion++);
+        dst->appendFormat("%*s- Criterion %d: ", spaces + 2, "", indexCriterion++);
 
-        std::string usageLiteral;
-        if (!UsageTypeConverter::toString(criterion.mValue.mUsage, usageLiteral)) {
-            ALOGE("%s: failed to convert usage %d", __FUNCTION__, criterion.mValue.mUsage);
-            return;
+        std::string ruleType, ruleValue;
+        bool unknownRule = !RuleTypeConverter::toString(criterion.mRule, ruleType);
+        switch (criterion.mRule & ~RULE_EXCLUSION_MASK) { // no need to match RULE_EXCLUDE_...
+        case RULE_MATCH_ATTRIBUTE_USAGE:
+            UsageTypeConverter::toString(criterion.mValue.mUsage, ruleValue);
+            break;
+        case RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET:
+            SourceTypeConverter::toString(criterion.mValue.mSource, ruleValue);
+            break;
+        case RULE_MATCH_UID:
+            ruleValue = std::to_string(criterion.mValue.mUid);
+            break;
+        default:
+            unknownRule = true;
         }
-        dst->appendFormat("%*s- Usage:%s\n", spaces + 4, "", usageLiteral.c_str());
 
-        if (mMixType == MIX_TYPE_RECORDERS) {
-            std::string sourceLiteral;
-            if (!SourceTypeConverter::toString(criterion.mValue.mSource, sourceLiteral)) {
-                ALOGE("%s: failed to convert source %d", __FUNCTION__, criterion.mValue.mSource);
-                return;
-            }
-            dst->appendFormat("%*s- Source:%s\n", spaces + 4, "", sourceLiteral.c_str());
-
+        if (!unknownRule) {
+            dst->appendFormat("%s %s\n", ruleType.c_str(), ruleValue.c_str());
+        } else {
+            dst->appendFormat("Unknown rule type value 0x%x\n", criterion.mRule);
         }
-        dst->appendFormat("%*s- Uid:%d\n", spaces + 4, "", criterion.mValue.mUid);
-
-        std::string ruleLiteral;
-        if (!RuleTypeConverter::toString(criterion.mRule, ruleLiteral)) {
-            ALOGE("%s: failed to convert source %d", __FUNCTION__,criterion.mRule);
-            return;
-        }
-        dst->appendFormat("%*s- Rule:%s\n", spaces + 4, "", ruleLiteral.c_str());
     }
 }
 
@@ -150,7 +147,10 @@
         }
 
         switch (mixMatch(policyMix.get(), i, attributes, uid)) {
-            case MixMatchStatus::INVALID_MIX: return BAD_VALUE; // TODO: Do we really want to abort?
+            case MixMatchStatus::INVALID_MIX:
+                // The mix has contradictory rules, ignore it
+                // TODO: reject invalid mix at registration
+                continue;
             case MixMatchStatus::NO_MATCH:
                 ALOGV("%s: Mix %zu: does not match", __func__, i);
                 continue; // skip the mix
@@ -180,7 +180,12 @@
         // Loopback render mixes are created from a public API and thus restricted
         // to non sensible audio that have not opted out.
         if (is_mix_loopback_render(mix->mRouteFlags)) {
-            if ((attributes.flags & AUDIO_FLAG_NO_CAPTURE) == AUDIO_FLAG_NO_CAPTURE) {
+            auto hasFlag = [](auto flags, auto flag) { return (flags & flag) == flag; };
+            if (hasFlag(attributes.flags, AUDIO_FLAG_NO_SYSTEM_CAPTURE)) {
+                return MixMatchStatus::NO_MATCH;
+            }
+            if (!mix->mAllowPrivilegedPlaybackCapture &&
+                hasFlag(attributes.flags, AUDIO_FLAG_NO_MEDIA_PROJECTION)) {
                 return MixMatchStatus::NO_MATCH;
             }
             if (!(attributes.usage == AUDIO_USAGE_UNKNOWN ||
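The two flag checks above replace the single AUDIO_FLAG_NO_CAPTURE test: a client can now opt out of all system capture and of MediaProjection capture independently, and only a mix registered with mAllowPrivilegedPlaybackCapture may capture clients that opted out of projection only. A minimal standalone model of that decision, using illustrative flag bits rather than the real audio_flags_mask_t constants:

    #include <cstdint>

    // Illustrative stand-ins for the framework capture opt-out flags.
    enum : uint32_t {
        FLAG_NO_MEDIA_PROJECTION = 1u << 0,
        FLAG_NO_SYSTEM_CAPTURE   = 1u << 1,
    };

    // Returns true if a loopback-render mix may capture a client with these flags.
    bool loopbackMixMayCapture(uint32_t clientFlags, bool mixAllowsPrivilegedCapture) {
        auto hasFlag = [](uint32_t flags, uint32_t flag) { return (flags & flag) == flag; };
        if (hasFlag(clientFlags, FLAG_NO_SYSTEM_CAPTURE)) {
            return false;                      // opted out of any system capture
        }
        if (!mixAllowsPrivilegedCapture &&
                hasFlag(clientFlags, FLAG_NO_MEDIA_PROJECTION)) {
            return false;                      // opted out of MediaProjection capture
        }
        return true;                           // usage checks in mixMatch() still apply
    }
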
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 91961d0..ecd5b34 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -40,7 +40,7 @@
     mTagName(tagName), mDeviceType(type), mEncodedFormats(encodedFormats)
 {
     mCurrentEncodedFormat = AUDIO_FORMAT_DEFAULT;
-    if (type == AUDIO_DEVICE_IN_REMOTE_SUBMIX || type == AUDIO_DEVICE_OUT_REMOTE_SUBMIX ) {
+    if (audio_is_remote_submix_device(type)) {
         mAddress = String8("0");
     }
     /* If framework runs against a pre 5.0 Audio HAL, encoded formats are absent from the config.
@@ -399,6 +399,18 @@
     return filter(devices).size() == devices.size();
 }
 
+DeviceVector DeviceVector::filterForEngine() const
+{
+    DeviceVector filteredDevices;
+    for (const auto &device : *this) {
+        if (audio_is_remote_submix_device(device->type()) && device->address() != "0") {
+            continue;
+        }
+        filteredDevices.add(device);
+    }
+    return filteredDevices;
+}
+
 void DeviceDescriptor::log() const
 {
     ALOGI("Device id:%d type:0x%08X:%s, addr:%s", mId,  mDeviceType,
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index ec7ff57..96a8337 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -398,6 +398,13 @@
         if (!moduleDevices.contains(device)) {
             continue;
         }
+
+        // removal of remote submix devices associated with a dynamic policy is
+        // handled by removeOutputProfile() and removeInputProfile()
+        if (audio_is_remote_submix_device(device->type()) && device->address() != "0") {
+            continue;
+        }
+
         device->detach();
         // Only remove from dynamic list, not from declared list!!!
         if (!hwModule->getDynamicDevices().contains(device)) {
diff --git a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
index 7c76d8a..2b5455e 100644
--- a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
@@ -52,7 +52,6 @@
 
 template <>
 const RuleTypeConverter::Table RuleTypeConverter::mTable[] = {
-    MAKE_STRING_FROM_ENUM(RULE_EXCLUSION_MASK),
     MAKE_STRING_FROM_ENUM(RULE_MATCH_ATTRIBUTE_USAGE),
     MAKE_STRING_FROM_ENUM(RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET),
     MAKE_STRING_FROM_ENUM(RULE_MATCH_UID),
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
index 43ba625..ebd82a7 100644
--- a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
+++ b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
@@ -46,9 +46,9 @@
 
     virtual const AudioInputCollection &getInputs() const = 0;
 
-    virtual const DeviceVector &getAvailableOutputDevices() const = 0;
+    virtual const DeviceVector getAvailableOutputDevices() const = 0;
 
-    virtual const DeviceVector &getAvailableInputDevices() const = 0;
+    virtual const DeviceVector getAvailableInputDevices() const = 0;
 
     virtual const sp<DeviceDescriptor> &getDefaultOutputDevice() const = 0;
 
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.cpp b/services/audiopolicy/engineconfigurable/src/Engine.cpp
index 89aaa84..e59d983 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Engine.cpp
@@ -207,7 +207,7 @@
         ALOGE("%s: Trying to get device on invalid strategy %d", __FUNCTION__, ps);
         return {};
     }
-    const DeviceVector &availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
+    const DeviceVector availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
     const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
     uint32_t availableOutputDevicesType = availableOutputDevices.types();
 
@@ -272,7 +272,7 @@
         return DeviceVector(preferredDevice);
     }
     product_strategy_t strategy = getProductStrategyForAttributes(attributes);
-    const DeviceVector &availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
+    const DeviceVector availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
     const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
     //
     // @TODO: what is the priority of explicit routing? Shall it be considered first as it used to
@@ -298,7 +298,7 @@
                                                          sp<AudioPolicyMix> *mix) const
 {
     const auto &policyMixes = getApmObserver()->getAudioPolicyMixCollection();
-    const auto &availableInputDevices = getApmObserver()->getAvailableInputDevices();
+    const auto availableInputDevices = getApmObserver()->getAvailableInputDevices();
     const auto &inputs = getApmObserver()->getInputs();
     std::string address;
     //
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 66a6965..592a0b9 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -502,8 +502,8 @@
 
 audio_devices_t Engine::getDeviceForInputSource(audio_source_t inputSource) const
 {
-    const DeviceVector &availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
-    const DeviceVector &availableInputDevices = getApmObserver()->getAvailableInputDevices();
+    const DeviceVector availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
+    const DeviceVector availableInputDevices = getApmObserver()->getAvailableInputDevices();
     const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
     audio_devices_t availableDeviceTypes = availableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
 
@@ -704,7 +704,7 @@
         return DeviceVector(preferredDevice);
     }
     product_strategy_t strategy = getProductStrategyForAttributes(attributes);
-    const DeviceVector &availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
+    const DeviceVector availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
     const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
     //
     // @TODO: what is the priority of explicit routing? Shall it be considered first as it used to
@@ -730,7 +730,7 @@
                                                          sp<AudioPolicyMix> *mix) const
 {
     const auto &policyMixes = getApmObserver()->getAudioPolicyMixCollection();
-    const auto &availableInputDevices = getApmObserver()->getAvailableInputDevices();
+    const auto availableInputDevices = getApmObserver()->getAvailableInputDevices();
     const auto &inputs = getApmObserver()->getInputs();
     std::string address;
 
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 651017d..61c47a1 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -169,8 +169,6 @@
                 broadcastDeviceConnectionState(device, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE);
                 return INVALID_OPERATION;
             }
-            // Propagate device availability to Engine
-            mEngine->setDeviceConnectionState(device, state);
 
             // outputs should never be empty here
             ALOG_ASSERT(outputs.size() != 0, "setDeviceConnectionState():"
@@ -200,8 +198,6 @@
             // Reset active device codec
             device->setEncodedFormat(AUDIO_FORMAT_DEFAULT);
 
-            // Propagate device availability to Engine
-            mEngine->setDeviceConnectionState(device, state);
             } break;
 
         default:
@@ -209,6 +205,9 @@
             return BAD_VALUE;
         }
 
+        // Propagate device availability to Engine
+        setEngineDeviceConnectionState(device, state);
+
         // No need to evaluate playback routing when connecting a remote submix
         // output device used by a dynamic policy of type recorder as no
         // playback use case is affected.
@@ -318,9 +317,6 @@
             if (mAvailableInputDevices.add(device) < 0) {
                 return NO_MEMORY;
             }
-
-            // Propagate device availability to Engine
-            mEngine->setDeviceConnectionState(device, state);
         } break;
 
         // handle input device disconnection
@@ -337,9 +333,6 @@
 
             checkInputsForDevice(device, state, inputs);
             mAvailableInputDevices.remove(device);
-
-            // Propagate device availability to Engine
-            mEngine->setDeviceConnectionState(device, state);
         } break;
 
         default:
@@ -347,6 +340,9 @@
             return BAD_VALUE;
         }
 
+        // Propagate device availability to Engine
+        setEngineDeviceConnectionState(device, state);
+
         closeAllInputs();
         // As the input device list can impact the output device selection, update
         // getDeviceForStrategy() cache
@@ -369,6 +365,17 @@
     return BAD_VALUE;
 }
 
+void AudioPolicyManager::setEngineDeviceConnectionState(const sp<DeviceDescriptor> device,
+                                      audio_policy_dev_state_t state) {
+
+    // the Engine does not have to know about remote submix devices used by dynamic audio policies
+    if (audio_is_remote_submix_device(device->type()) && device->address() != "0") {
+        return;
+    }
+    mEngine->setDeviceConnectionState(device, state);
+}
+
+
 audio_policy_dev_state_t AudioPolicyManager::getDeviceConnectionState(audio_devices_t device,
                                                                       const char *device_address)
 {
@@ -416,8 +423,7 @@
     if (!audio_is_output_device(device) && !audio_is_input_device(device)) return BAD_VALUE;
 
     // Check if the device is currently connected
-    DeviceVector availableDevices = getAvailableOutputDevices();
-    DeviceVector deviceList = availableDevices.getDevicesFromTypeMask(device);
+    DeviceVector deviceList = mAvailableOutputDevices.getDevicesFromTypeMask(device);
     if (deviceList.empty()) {
         // Nothing to do: device is not connected
         return NO_ERROR;
@@ -945,6 +951,9 @@
     if (status != NO_ERROR) {
         return status;
     }
+    if (auto it = mAllowedCapturePolicies.find(uid); it != end(mAllowedCapturePolicies)) {
+        resultAttr->flags |= it->second;
+    }
     *stream = mEngine->getStreamTypeForAttributes(*resultAttr);
 
     ALOGV("%s() attributes=%s stream=%s session %d selectedDeviceId %d", __func__,
@@ -3022,6 +3031,11 @@
     mPolicyMixes.dump(dst);
     mAudioSources.dump(dst);
 
+    dst->appendFormat(" AllowedCapturePolicies:\n");
+    for (auto& policy : mAllowedCapturePolicies) {
+        dst->appendFormat("   - uid=%d flag_mask=%#x\n", policy.first, policy.second);
+    }
+
     dst->appendFormat("\nPolicy Engine dump:\n");
     mEngine->dump(dst);
 }
@@ -3034,6 +3048,12 @@
     return NO_ERROR;
 }
 
+status_t AudioPolicyManager::setAllowedCapturePolicy(uid_t uid, audio_flags_mask_t capturePolicy)
+{
+    mAllowedCapturePolicies[uid] = capturePolicy;
+    return NO_ERROR;
+}
+
 // This function checks for the parameters which can be offloaded.
 // This can be enhanced depending on the capability of the DSP and policy
 // of the system.
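The per-UID policy stored by setAllowedCapturePolicy() is folded into a client's attributes during getOutputForAttrInt() (the mAllowedCapturePolicies lookup earlier in this file). A minimal model of that lookup-and-OR step with standard containers, assuming plain uint32_t masks in place of uid_t and audio_flags_mask_t:

    #include <cstdint>
    #include <unordered_map>

    using UidModel   = uint32_t;   // stand-in for uid_t
    using FlagsModel = uint32_t;   // stand-in for audio_flags_mask_t

    // Merge any capture policy registered for this uid into the request flags.
    FlagsModel applyAllowedCapturePolicy(
            const std::unordered_map<UidModel, FlagsModel>& policies,
            UidModel uid, FlagsModel requestFlags) {
        if (auto it = policies.find(uid); it != policies.end()) {
            requestFlags |= it->second;   // e.g. a capture opt-out mask for this uid
        }
        return requestFlags;
    }
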
@@ -4385,7 +4405,7 @@
                 continue;
             }
             // Device is now validated and can be appended to the available devices of the engine
-            mEngine->setDeviceConnectionState(device, AUDIO_POLICY_DEVICE_STATE_AVAILABLE);
+            setEngineDeviceConnectionState(device, AUDIO_POLICY_DEVICE_STATE_AVAILABLE);
             i++;
         }
     };
@@ -4399,7 +4419,7 @@
         status = NO_INIT;
     }
     // If microphones address is empty, set it according to device type
-    for (size_t i = 0; i  < mAvailableInputDevices.size(); i++) {
+    for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
         if (mAvailableInputDevices[i]->address().isEmpty()) {
             if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_BUILTIN_MIC) {
                 mAvailableInputDevices[i]->setAddress(String8(AUDIO_BOTTOM_MICROPHONE_ADDRESS));
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 1c98684..a700868 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -218,6 +218,7 @@
 
         status_t dump(int fd) override;
 
+        status_t setAllowedCapturePolicy(uid_t uid, audio_flags_mask_t capturePolicy) override;
         virtual bool isOffloadSupported(const audio_offload_info_t& offloadInfo);
 
         virtual bool isDirectOutputSupported(const audio_config_base_t& config,
@@ -341,13 +342,13 @@
         {
             return mInputs;
         }
-        virtual const DeviceVector &getAvailableOutputDevices() const
+        virtual const DeviceVector getAvailableOutputDevices() const
         {
-            return mAvailableOutputDevices;
+            return mAvailableOutputDevices.filterForEngine();
         }
-        virtual const DeviceVector &getAvailableInputDevices() const
+        virtual const DeviceVector getAvailableInputDevices() const
         {
-            return mAvailableInputDevices;
+            return mAvailableInputDevices.filterForEngine();
         }
         virtual const sp<DeviceDescriptor> &getDefaultOutputDevice() const
         {
@@ -754,6 +755,8 @@
         // Surround formats that are enabled manually. Taken into account when
         // "encoded surround" is forced into "manual" mode.
         std::unordered_set<audio_format_t> mManualSurroundFormats;
+
+        std::unordered_map<uid_t, audio_flags_mask_t> mAllowedCapturePolicies;
 private:
         // Add or remove AC3 DTS encodings based on user preferences.
         void modifySurroundFormats(const sp<DeviceDescriptor>& devDesc, FormatVector *formatsPtr);
@@ -844,6 +847,10 @@
                                              const char *device_address,
                                              const char *device_name,
                                              audio_format_t encodedFormat);
+
+        void setEngineDeviceConnectionState(const sp<DeviceDescriptor> device,
+                                      audio_policy_dev_state_t state);
+
         void updateMono(audio_io_handle_t output) {
             AudioParameter param;
             param.addInt(String8(AudioParameter::keyMonoOutput), (int)mMasterMono);
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 93e3c44..b8036bb 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -192,7 +192,7 @@
     }
     audio_attributes_t attr = *originalAttr;
     if (!mPackageManager.allowPlaybackCapture(uid)) {
-        attr.flags |= AUDIO_FLAG_NO_CAPTURE;
+        attr.flags |= AUDIO_FLAG_NO_MEDIA_PROJECTION;
     }
     audio_output_flags_t originalFlags = flags;
     AutoCallerClear acc;
@@ -322,7 +322,7 @@
         return;
     }
     sp<AudioPlaybackClient> client = mAudioPlaybackClients.valueAt(index);
-    mAudioRecordClients.removeItem(portId);
+    mAudioPlaybackClients.removeItem(portId);
 
     // called from internal thread: no need to clear caller identity
     mAudioPolicyManager->releaseOutput(portId);
@@ -385,7 +385,7 @@
         return PERMISSION_DENIED;
     }
 
-    bool canCaptureHotword = captureHotwordAllowed(pid, uid);
+    bool canCaptureHotword = captureHotwordAllowed(opPackageName, pid, uid);
     if ((attr->source == AUDIO_SOURCE_HOTWORD) && !canCaptureHotword) {
         return BAD_VALUE;
     }
@@ -948,6 +948,20 @@
     return audioPolicyEffects->removeStreamDefaultEffect(id);
 }
 
+status_t AudioPolicyService::setAllowedCapturePolicy(uid_t uid, audio_flags_mask_t capturePolicy) {
+    Mutex::Autolock _l(mLock);
+    if (mAudioPolicyManager == NULL) {
+        ALOGV("%s() mAudioPolicyManager == NULL", __func__);
+        return NO_INIT;
+    }
+    uid_t callingUid = IPCThreadState::self()->getCallingUid();
+    if (uid != callingUid) {
+        ALOGD("%s() uid invalid %d != %d", __func__, uid, callingUid);
+        return PERMISSION_DENIED;
+    }
+    return mAudioPolicyManager->setAllowedCapturePolicy(uid, capturePolicy);
+}
+
 bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info)
 {
     if (mAudioPolicyManager == NULL) {
@@ -1083,6 +1097,14 @@
         return PERMISSION_DENIED;
     }
 
+    bool needCaptureMediaOutput = std::any_of(mixes.begin(), mixes.end(), [](auto& mix) {
+            return mix.mAllowPrivilegedPlaybackCapture; });
+    const uid_t callingUid = IPCThreadState::self()->getCallingUid();
+    const pid_t callingPid = IPCThreadState::self()->getCallingPid();
+    if (needCaptureMediaOutput && !captureMediaOutputAllowed(callingPid, callingUid)) {
+        return PERMISSION_DENIED;
+    }
+
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index e858e8d..f63fa81 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -22,8 +22,9 @@
 #define __STDINT_LIMITS
 #define __STDC_LIMIT_MACROS
 #include <stdint.h>
-
 #include <sys/time.h>
+
+#include <audio_utils/clock.h>
 #include <binder/IServiceManager.h>
 #include <utils/Log.h>
 #include <cutils/properties.h>
@@ -48,8 +49,7 @@
 static const char kDeadlockedString[] = "AudioPolicyService may be deadlocked\n";
 static const char kCmdDeadlockedString[] = "AudioPolicyService command thread may be deadlocked\n";
 
-static const int kDumpLockRetries = 50;
-static const int kDumpLockSleepUs = 20000;
+static const int kDumpLockTimeoutNs = 1 * NANOS_PER_SECOND;
 
 static const nsecs_t kAudioCommandTimeoutNs = seconds(3); // 3 seconds
 
@@ -376,17 +376,10 @@
             IPCThreadState::self()->getCallingPid());
 }
 
-static bool tryLock(Mutex& mutex)
+static bool dumpTryLock(Mutex& mutex)
 {
-    bool locked = false;
-    for (int i = 0; i < kDumpLockRetries; ++i) {
-        if (mutex.tryLock() == NO_ERROR) {
-            locked = true;
-            break;
-        }
-        usleep(kDumpLockSleepUs);
-    }
-    return locked;
+    status_t err = mutex.timedLock(kDumpLockTimeoutNs);
+    return err == NO_ERROR;
 }
 
 status_t AudioPolicyService::dumpInternals(int fd)
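Here and in SoundTriggerHwService the polling tryLock()/usleep() loop is replaced by a single bounded timedLock(), so a dump caller waits at most one second before reporting a possible deadlock. A minimal sketch of the same pattern using std::timed_mutex instead of android::Mutex:

    #include <chrono>
    #include <mutex>

    // Try to take the lock for dumping, giving up after a bounded wait
    // instead of spinning on tryLock().
    bool dumpTryLock(std::timed_mutex& mutex) {
        return mutex.try_lock_for(std::chrono::seconds(1));
    }

    // Caller side: if the lock could not be taken, the dump proceeds unlocked,
    // the output is prefixed with a "may be deadlocked" warning, and unlock()
    // is skipped at the end.
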
@@ -627,7 +620,7 @@
     if (!dumpAllowed()) {
         dumpPermissionDenial(fd);
     } else {
-        bool locked = tryLock(mLock);
+        bool locked = dumpTryLock(mLock);
         if (!locked) {
             String8 result(kDeadlockedString);
             write(fd, result.string(), result.size());
@@ -1260,7 +1253,7 @@
     result.append(buffer);
     write(fd, result.string(), result.size());
 
-    bool locked = tryLock(mLock);
+    bool locked = dumpTryLock(mLock);
     if (!locked) {
         String8 result2(kCmdDeadlockedString);
         write(fd, result2.string(), result2.size());
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 160f70f..efdba56 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -182,6 +182,7 @@
                                      audio_io_handle_t output,
                                      int delayMs = 0);
     virtual status_t setVoiceVolume(float volume, int delayMs = 0);
+    status_t setAllowedCapturePolicy(uid_t uid, audio_flags_mask_t capturePolicy) override;
     virtual bool isOffloadSupported(const audio_offload_info_t &config);
     virtual bool isDirectOutputSupported(const audio_config_base_t& config,
                                          const audio_attributes_t& attributes);
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 51d0682..8113c3f 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -972,8 +972,9 @@
     userid_t clientUserId = multiuser_get_user_id(clientUid);
 
     // Only allow clients who are being used by the current foreground device user, unless calling
-    // from our own process.
-    if (callingPid != getpid() && (mAllowedUsers.find(clientUserId) == mAllowedUsers.end())) {
+    // from our own process OR the caller is using the cameraserver's HIDL interface.
+    if (!hardware::IPCThreadState::self()->isServingCall() && callingPid != getpid() &&
+            (mAllowedUsers.find(clientUserId) == mAllowedUsers.end())) {
         ALOGE("CameraService::connect X (PID %d) rejected (cannot connect from "
                 "device user %d, currently allowed device users: %s)", callingPid, clientUserId,
                 toString(mAllowedUsers).string());
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 22e09e4..bc0dafe 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -1062,14 +1062,18 @@
             nsecs_t waitDuration = kBaseGetBufferWait + getExpectedInFlightDuration();
             status_t res = outputStream->getBuffer(&sb, waitDuration);
             if (res != OK) {
-                ALOGE("%s: Can't get output buffer for stream %d: %s (%d)",
-                        __FUNCTION__, streamId, strerror(-res), res);
                 if (res == NO_INIT || res == DEAD_OBJECT) {
+                    ALOGV("%s: Can't get output buffer for stream %d: %s (%d)",
+                            __FUNCTION__, streamId, strerror(-res), res);
                     bufRet.val.error(StreamBufferRequestError::STREAM_DISCONNECTED);
-                } else if (res == TIMED_OUT || res == NO_MEMORY) {
-                    bufRet.val.error(StreamBufferRequestError::NO_BUFFER_AVAILABLE);
                 } else {
-                    bufRet.val.error(StreamBufferRequestError::UNKNOWN_ERROR);
+                    ALOGE("%s: Can't get output buffer for stream %d: %s (%d)",
+                            __FUNCTION__, streamId, strerror(-res), res);
+                    if (res == TIMED_OUT || res == NO_MEMORY) {
+                        bufRet.val.error(StreamBufferRequestError::NO_BUFFER_AVAILABLE);
+                    } else {
+                        bufRet.val.error(StreamBufferRequestError::UNKNOWN_ERROR);
+                    }
                 }
                 currentReqSucceeds = false;
                 break;
@@ -3154,9 +3158,10 @@
 
         // Note: stream may be deallocated at this point, if this buffer was
         // the last reference to it.
-        if (res != OK) {
-            ALOGE("Can't return buffer to its stream: %s (%d)",
-                strerror(-res), res);
+        if (res == NO_INIT || res == DEAD_OBJECT) {
+            ALOGV("Can't return buffer to its stream: %s (%d)", strerror(-res), res);
+        } else if (res != OK) {
+            ALOGE("Can't return buffer to its stream: %s (%d)", strerror(-res), res);
         }
 
         // Long processing consumers can cause returnBuffer timeout for shared stream
@@ -3777,10 +3782,12 @@
             hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_INVALID_ERROR;
 
     int streamId = 0;
+    String16 physicalCameraId;
     if (msg.error_stream != NULL) {
         Camera3Stream *stream =
                 Camera3Stream::cast(msg.error_stream);
         streamId = stream->getId();
+        physicalCameraId = String16(stream->physicalCameraId());
     }
     ALOGV("Camera %s: %s: HAL error, frame %d, stream %d: %d",
             mId.string(), __FUNCTION__, msg.frame_number,
@@ -3802,13 +3809,29 @@
                     InFlightRequest &r = mInFlightMap.editValueAt(idx);
                     r.requestStatus = msg.error_code;
                     resultExtras = r.resultExtras;
-                    if (hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT == errorCode
+                    bool logicalDeviceResultError = false;
+                    if (hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT ==
+                            errorCode) {
+                        if (physicalCameraId.size() > 0) {
+                            String8 cameraId(physicalCameraId);
+                            if (r.physicalCameraIds.find(cameraId) == r.physicalCameraIds.end()) {
+                                ALOGE("%s: Reported result failure for physical camera device: %s "
+                                        " which is not part of the respective request!",
+                                        __FUNCTION__, cameraId.string());
+                                break;
+                            }
+                            resultExtras.errorPhysicalCameraId = physicalCameraId;
+                        } else {
+                            logicalDeviceResultError = true;
+                        }
+                    }
+
+                    if (logicalDeviceResultError
                             ||  hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST ==
                             errorCode) {
                         r.skipResultMetadata = true;
                     }
-                    if (hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT ==
-                            errorCode) {
+                    if (logicalDeviceResultError) {
                         // In case of missing result check whether the buffers
                         // returned. If they returned, then remove inflight
                         // request.
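With this change an ERROR_CAMERA_RESULT notification that names a physical sub-camera is attached to the result extras only when that camera is actually part of the in-flight request; an unknown physical id is dropped, and a result error without a physical id is still treated as an error on the logical device. A compact sketch of that classification, using standard containers rather than the framework's String8/String16 types:

    #include <optional>
    #include <set>
    #include <string>

    enum class ResultErrorAction { TAG_PHYSICAL_CAMERA, TREAT_AS_LOGICAL, DROP };

    // Decide how to handle an ERROR_CAMERA_RESULT notification.
    ResultErrorAction classifyResultError(
            const std::optional<std::string>& errorPhysicalCameraId,
            const std::set<std::string>& requestPhysicalCameraIds) {
        if (!errorPhysicalCameraId) {
            return ResultErrorAction::TREAT_AS_LOGICAL;   // logical device result error
        }
        if (requestPhysicalCameraIds.count(*errorPhysicalCameraId) == 0) {
            return ResultErrorAction::DROP;   // HAL named a camera not in this request
        }
        return ResultErrorAction::TAG_PHYSICAL_CAMERA;    // set errorPhysicalCameraId
    }
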
@@ -5580,7 +5603,7 @@
 
             if (mUseHalBufManager) {
                 if (outputStream->isAbandoned()) {
-                    ALOGE("%s: stream %d is abandoned.", __FUNCTION__, streamId);
+                    ALOGV("%s: stream %d is abandoned, skipping request", __FUNCTION__, streamId);
                     return TIMED_OUT;
                 }
                 // HAL will request buffer through requestStreamBuffer API
@@ -5598,7 +5621,7 @@
                     // Can't get output buffer from gralloc queue - this could be due to
                     // abandoned queue or other consumer misbehavior, so not a fatal
                     // error
-                    ALOGE("RequestThread: Can't get output buffer, skipping request:"
+                    ALOGV("RequestThread: Can't get output buffer, skipping request:"
                             " %s (%d)", strerror(-res), res);
 
                     return TIMED_OUT;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index baba856..1c77581 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -233,6 +233,7 @@
      * queueBuffer
      */
     sp<ANativeWindow> currentConsumer = mConsumer;
+    StreamState state = mState;
     mLock.unlock();
 
     ANativeWindowBuffer *anwBuffer = container_of(buffer.buffer, ANativeWindowBuffer, handle);
@@ -244,7 +245,7 @@
         if (mDropBuffers) {
             ALOGV("%s: Dropping a frame for stream %d.", __FUNCTION__, mId);
         } else if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) {
-            ALOGW("%s: A frame is dropped for stream %d due to buffer error.", __FUNCTION__, mId);
+            ALOGV("%s: A frame is dropped for stream %d due to buffer error.", __FUNCTION__, mId);
         } else {
             ALOGE("%s: Stream %d: timestamp shouldn't be 0", __FUNCTION__, mId);
         }
@@ -252,7 +253,7 @@
         res = currentConsumer->cancelBuffer(currentConsumer.get(),
                 anwBuffer,
                 anwReleaseFence);
-        if (res != OK) {
+        if (shouldLogError(res, state)) {
             ALOGE("%s: Stream %d: Error cancelling buffer to native window:"
                   " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
         }
@@ -284,9 +285,9 @@
         }
 
         res = queueBufferToConsumer(currentConsumer, anwBuffer, anwReleaseFence, surface_ids);
-        if (res != OK) {
-            ALOGE("%s: Stream %d: Error queueing buffer to native window: "
-                  "%s (%d)", __FUNCTION__, mId, strerror(-res), res);
+        if (shouldLogError(res, state)) {
+            ALOGE("%s: Stream %d: Error queueing buffer to native window:"
+                  " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
         }
     }
     mLock.lock();
@@ -534,10 +535,11 @@
             // successful return.
             *anb = gb.get();
             res = mConsumer->attachBuffer(*anb);
-            if (res != OK) {
+            if (shouldLogError(res, mState)) {
                 ALOGE("%s: Stream %d: Can't attach the output buffer to this surface: %s (%d)",
                         __FUNCTION__, mId, strerror(-res), res);
-
+            }
+            if (res != OK) {
                 checkRetAndSetAbandonedLocked(res);
                 return res;
             }
@@ -592,9 +594,10 @@
                 ALOGV("Stream %d: Attached new buffer", getId());
 
                 if (res != OK) {
-                    ALOGE("%s: Stream %d: Can't attach the output buffer to this surface: %s (%d)",
-                            __FUNCTION__, mId, strerror(-res), res);
-
+                    if (shouldLogError(res, mState)) {
+                        ALOGE("%s: Stream %d: Can't attach the output buffer to this surface:"
+                                " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
+                    }
                     checkRetAndSetAbandonedLocked(res);
                     return res;
                 }
@@ -604,9 +607,10 @@
                 return res;
             }
         } else if (res != OK) {
-            ALOGE("%s: Stream %d: Can't dequeue next output buffer: %s (%d)",
-                    __FUNCTION__, mId, strerror(-res), res);
-
+            if (shouldLogError(res, mState)) {
+                ALOGE("%s: Stream %d: Can't dequeue next output buffer: %s (%d)",
+                        __FUNCTION__, mId, strerror(-res), res);
+            }
             checkRetAndSetAbandonedLocked(res);
             return res;
         }
@@ -639,6 +643,16 @@
     }
 }
 
+bool Camera3OutputStream::shouldLogError(status_t res, StreamState state) {
+    if (res == OK) {
+        return false;
+    }
+    if ((res == DEAD_OBJECT || res == NO_INIT) && state == STATE_ABANDONED) {
+        return false;
+    }
+    return true;
+}
+
 status_t Camera3OutputStream::disconnectLocked() {
     status_t res;
 
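shouldLogError() downgrades buffer failures that are expected once the consumer has gone away: DEAD_OBJECT or NO_INIT on a stream already marked STATE_ABANDONED is routine teardown, while every other failure still logs at error level. A compact model of the same decision with stand-in enums (the real status_t codes and StreamState values live in the framework headers):

    enum class Status { OK, DEAD_OBJECT, NO_INIT, OTHER_ERROR };
    enum class StreamState { CONFIGURED, ABANDONED };

    // Log at error level only for failures that are unexpected on an abandoned stream.
    bool shouldLogError(Status res, StreamState state) {
        if (res == Status::OK) {
            return false;                     // success: nothing to log
        }
        if ((res == Status::DEAD_OBJECT || res == Status::NO_INIT) &&
                state == StreamState::ABANDONED) {
            return false;                     // expected while the stream is torn down
        }
        return true;
    }
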
@@ -838,7 +852,9 @@
         ALOGW("%s: the released buffer has already been freed by the buffer queue!", __FUNCTION__);
     } else if (res != OK) {
         // Treat other errors as abandonment
-        ALOGE("%s: detach next buffer failed: %s (%d).", __FUNCTION__, strerror(-res), res);
+        if (shouldLogError(res, mState)) {
+            ALOGE("%s: detach next buffer failed: %s (%d).", __FUNCTION__, strerror(-res), res);
+        }
         mState = STATE_ABANDONED;
         return res;
     }
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 30fc2f7..729c655 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -317,6 +317,10 @@
     // Check return status of IGBP calls and set abandoned state accordingly
     void checkRetAndSetAbandonedLocked(status_t res);
 
+    // If the status indicates an abandoned stream, only log when the state hasn't been updated to
+    // STATE_ABANDONED
+    static bool shouldLogError(status_t res, StreamState state);
+
     static const int32_t kDequeueLatencyBinSize = 5; // in ms
     CameraLatencyHistogram mDequeueBufferLatency;
 
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 5eb6a23..3d21029 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -458,7 +458,7 @@
     // Zero for formats with fixed buffer size for given dimensions.
     const size_t mMaxSize;
 
-    enum {
+    enum StreamState {
         STATE_ERROR,
         STATE_CONSTRUCTED,
         STATE_IN_CONFIG,
diff --git a/services/camera/libcameraservice/hidl/Convert.cpp b/services/camera/libcameraservice/hidl/Convert.cpp
index c2ed23a..866c3b5 100644
--- a/services/camera/libcameraservice/hidl/Convert.cpp
+++ b/services/camera/libcameraservice/hidl/Convert.cpp
@@ -157,6 +157,8 @@
     hCaptureResultExtras.frameNumber = captureResultExtras.frameNumber;
     hCaptureResultExtras.partialResultCount = captureResultExtras.partialResultCount;
     hCaptureResultExtras.errorStreamId = captureResultExtras.errorStreamId;
+    hCaptureResultExtras.errorPhysicalCameraId = hidl_string(String8(
+            captureResultExtras.errorPhysicalCameraId).string());
     return hCaptureResultExtras;
 }
 
diff --git a/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy b/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy
index 78ecaf5..4faf8b2 100644
--- a/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy
+++ b/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy
@@ -28,7 +28,6 @@
 fstatfs: 1
 memfd_create: 1
 ftruncate: 1
-ftruncate64: 1
 
 # mremap: Ensure |flags| are (MREMAP_MAYMOVE | MREMAP_FIXED) TODO: Once minijail
 # parser support for '<' is in this needs to be modified to also prevent
@@ -62,3 +61,21 @@
 getrandom: 1
 madvise: 1
 
+# crash dump policy additions
+clock_gettime: 1
+getpid: 1
+gettid: 1
+pipe2: 1
+recvmsg: 1
+process_vm_readv: 1
+tgkill: 1
+rt_sigaction: 1
+rt_tgsigqueueinfo: 1
+#mprotect: arg2 in 0x1|0x2
+munmap: 1
+#mmap: arg2 in 0x1|0x2
+geteuid: 1
+getgid: 1
+getegid: 1
+getgroups: 1
+
diff --git a/services/mediaextractor/mediaextractor.rc b/services/mediaextractor/mediaextractor.rc
index 6b2d0a5..5fc2941 100644
--- a/services/mediaextractor/mediaextractor.rc
+++ b/services/mediaextractor/mediaextractor.rc
@@ -2,7 +2,5 @@
     class main
     user mediaex
     group drmrpc mediadrm
-    # TODO(b/123275379): Remove updatable when http://aosp/878198 has landed
-    updatable
     ioprio rt 4
     writepid /dev/cpuset/foreground/tasks
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index 6c28083..fbf7d10 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -93,12 +93,14 @@
     const audio_source_t source = (direction == AAUDIO_DIRECTION_INPUT)
             ? AAudioConvert_inputPresetToAudioSource(getInputPreset())
             : AUDIO_SOURCE_DEFAULT;
+    const audio_flags_mask_t flags = AUDIO_FLAG_LOW_LATENCY |
+            AAudioConvert_allowCapturePolicyToAudioFlagsMask(getAllowedCapturePolicy());
 
     const audio_attributes_t attributes = {
             .content_type = contentType,
             .usage = usage,
             .source = source,
-            .flags = AUDIO_FLAG_LOW_LATENCY,
+            .flags = flags,
             .tags = ""
     };
 
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index b16b5dc..880a3d7 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -343,6 +343,20 @@
     return writeUpMessageQueue(&command);
 }
 
+bool AAudioServiceStreamBase::isUpMessageQueueBusy() {
+    std::lock_guard<std::mutex> lock(mUpMessageQueueLock);
+    if (mUpMessageQueue == nullptr) {
+        ALOGE("%s(): mUpMessageQueue null! - stream not open", __func__);
+        return true;
+    }
+    int32_t framesAvailable = mUpMessageQueue->getFifoBuffer()
+        ->getFullFramesAvailable();
+    int32_t capacity = mUpMessageQueue->getFifoBuffer()
+        ->getBufferCapacityInFrames();
+    // Consider the queue busy when it is half full or more.
+    return framesAvailable >= (capacity / 2);
+}
+
 aaudio_result_t AAudioServiceStreamBase::writeUpMessageQueue(AAudioServiceMessage *command) {
     std::lock_guard<std::mutex> lock(mUpMessageQueueLock);
     if (mUpMessageQueue == nullptr) {
@@ -366,6 +380,13 @@
 
 aaudio_result_t AAudioServiceStreamBase::sendCurrentTimestamp() {
     AAudioServiceMessage command;
+    // It is not worth filling up the queue with timestamps.
+    // That can cause the stream to get suspended.
+    // So just drop the timestamp if the queue is getting full.
+    if (isUpMessageQueueBusy()) {
+        return AAUDIO_OK;
+    }
+
     // Send a timestamp for the clock model.
     aaudio_result_t result = getFreeRunningPosition(&command.timestamp.position,
                                                     &command.timestamp.timestamp);
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index ffc768b..097bc64 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -312,6 +312,12 @@
     android::wp<AAudioServiceEndpoint> mServiceEndpointWeak;
 
 private:
+
+    /**
+     * @return true if the queue is getting full.
+     */
+    bool isUpMessageQueueBusy();
+
     aaudio_handle_t         mHandle = -1;
     bool                    mFlowing = false;
 
diff --git a/services/soundtrigger/Android.bp b/services/soundtrigger/Android.bp
index 1f2283a..3f02f48 100644
--- a/services/soundtrigger/Android.bp
+++ b/services/soundtrigger/Android.bp
@@ -28,6 +28,7 @@
         "libhardware",
         "libsoundtrigger",
         "libaudioclient",
+        "libaudioutils",
         "libmediautils",
 
         "libhwbinder",
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index fe2ccf2..377d30b 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -22,6 +22,7 @@
 #include <sys/types.h>
 #include <pthread.h>
 
+#include <audio_utils/clock.h>
 #include <system/sound_trigger.h>
 #include <cutils/atomic.h>
 #include <cutils/properties.h>
@@ -81,11 +82,13 @@
     }
 }
 
-status_t SoundTriggerHwService::listModules(struct sound_trigger_module_descriptor *modules,
+status_t SoundTriggerHwService::listModules(const String16& opPackageName,
+                             struct sound_trigger_module_descriptor *modules,
                              uint32_t *numModules)
 {
     ALOGV("listModules");
-    if (!captureHotwordAllowed(IPCThreadState::self()->getCallingPid(),
+    if (!captureHotwordAllowed(opPackageName,
+                               IPCThreadState::self()->getCallingPid(),
                                IPCThreadState::self()->getCallingUid())) {
         return PERMISSION_DENIED;
     }
@@ -102,12 +105,14 @@
     return NO_ERROR;
 }
 
-status_t SoundTriggerHwService::attach(const sound_trigger_module_handle_t handle,
+status_t SoundTriggerHwService::attach(const String16& opPackageName,
+                        const sound_trigger_module_handle_t handle,
                         const sp<ISoundTriggerClient>& client,
                         sp<ISoundTrigger>& moduleInterface)
 {
     ALOGV("attach module %d", handle);
-    if (!captureHotwordAllowed(IPCThreadState::self()->getCallingPid(),
+    if (!captureHotwordAllowed(opPackageName,
+                               IPCThreadState::self()->getCallingPid(),
                                IPCThreadState::self()->getCallingUid())) {
         return PERMISSION_DENIED;
     }
@@ -123,7 +128,7 @@
     }
     sp<Module> module = mModules.valueAt(index);
 
-    sp<ModuleClient> moduleClient = module->addClient(client);
+    sp<ModuleClient> moduleClient = module->addClient(client, opPackageName);
     if (moduleClient == 0) {
         return NO_INIT;
     }
@@ -146,20 +151,12 @@
 }
 
 
-static const int kDumpLockRetries = 50;
-static const int kDumpLockSleep = 60000;
+static const int kDumpLockTimeoutNs = 1 * NANOS_PER_SECOND;
 
-static bool tryLock(Mutex& mutex)
+static bool dumpTryLock(Mutex& mutex)
 {
-    bool locked = false;
-    for (int i = 0; i < kDumpLockRetries; ++i) {
-        if (mutex.tryLock() == NO_ERROR) {
-            locked = true;
-            break;
-        }
-        usleep(kDumpLockSleep);
-    }
-    return locked;
+    status_t err = mutex.timedLock(kDumpLockTimeoutNs);
+    return err == NO_ERROR;
 }
 
 status_t SoundTriggerHwService::dump(int fd, const Vector<String16>& args __unused) {
@@ -168,7 +165,7 @@
         result.appendFormat("Permission Denial: can't dump SoundTriggerHwService");
         write(fd, result.string(), result.size());
     } else {
-        bool locked = tryLock(mServiceLock);
+        bool locked = dumpTryLock(mServiceLock);
         // failed to lock - SoundTriggerHwService is probably deadlocked
         if (!locked) {
             result.append("SoundTriggerHwService may be deadlocked\n");
@@ -486,7 +483,8 @@
 }
 
 sp<SoundTriggerHwService::ModuleClient>
-SoundTriggerHwService::Module::addClient(const sp<ISoundTriggerClient>& client)
+SoundTriggerHwService::Module::addClient(const sp<ISoundTriggerClient>& client,
+                                         const String16& opPackageName)
 {
     AutoMutex lock(mLock);
     sp<ModuleClient> moduleClient;
@@ -497,7 +495,7 @@
             return moduleClient;
         }
     }
-    moduleClient = new ModuleClient(this, client);
+    moduleClient = new ModuleClient(this, client, opPackageName);
 
     ALOGV("addClient() client %p", moduleClient.get());
     mModuleClients.add(moduleClient);
@@ -920,8 +918,9 @@
 #define LOG_TAG "SoundTriggerHwService::ModuleClient"
 
 SoundTriggerHwService::ModuleClient::ModuleClient(const sp<Module>& module,
-                                                  const sp<ISoundTriggerClient>& client)
- : mModule(module), mClient(client)
+                                                  const sp<ISoundTriggerClient>& client,
+                                                  const String16& opPackageName)
+ : mModule(module), mClient(client), mOpPackageName(opPackageName)
 {
 }
 
@@ -945,7 +944,8 @@
 
 void SoundTriggerHwService::ModuleClient::detach() {
     ALOGV("detach()");
-    if (!captureHotwordAllowed(IPCThreadState::self()->getCallingPid(),
+    if (!captureHotwordAllowed(mOpPackageName,
+                               IPCThreadState::self()->getCallingPid(),
                                IPCThreadState::self()->getCallingUid())) {
         return;
     }
@@ -969,7 +969,8 @@
                                 sound_model_handle_t *handle)
 {
     ALOGV("loadSoundModel() handle");
-    if (!captureHotwordAllowed(IPCThreadState::self()->getCallingPid(),
+    if (!captureHotwordAllowed(mOpPackageName,
+                               IPCThreadState::self()->getCallingPid(),
                                IPCThreadState::self()->getCallingUid())) {
         return PERMISSION_DENIED;
     }
@@ -987,7 +988,8 @@
 status_t SoundTriggerHwService::ModuleClient::unloadSoundModel(sound_model_handle_t handle)
 {
     ALOGV("unloadSoundModel() model handle %d", handle);
-    if (!captureHotwordAllowed(IPCThreadState::self()->getCallingPid(),
+    if (!captureHotwordAllowed(mOpPackageName,
+                               IPCThreadState::self()->getCallingPid(),
                                IPCThreadState::self()->getCallingUid())) {
         return PERMISSION_DENIED;
     }
@@ -1003,7 +1005,8 @@
                                  const sp<IMemory>& dataMemory)
 {
     ALOGV("startRecognition() model handle %d", handle);
-    if (!captureHotwordAllowed(IPCThreadState::self()->getCallingPid(),
+    if (!captureHotwordAllowed(mOpPackageName,
+                               IPCThreadState::self()->getCallingPid(),
                                IPCThreadState::self()->getCallingUid())) {
         return PERMISSION_DENIED;
     }
@@ -1021,7 +1024,8 @@
 status_t SoundTriggerHwService::ModuleClient::stopRecognition(sound_model_handle_t handle)
 {
     ALOGV("stopRecognition() model handle %d", handle);
-    if (!captureHotwordAllowed(IPCThreadState::self()->getCallingPid(),
+    if (!captureHotwordAllowed(mOpPackageName,
+                               IPCThreadState::self()->getCallingPid(),
                                IPCThreadState::self()->getCallingUid())) {
         return PERMISSION_DENIED;
     }
@@ -1036,7 +1040,8 @@
 status_t SoundTriggerHwService::ModuleClient::getModelState(sound_model_handle_t handle)
 {
     ALOGV("getModelState() model handle %d", handle);
-    if (!captureHotwordAllowed(IPCThreadState::self()->getCallingPid(),
+    if (!captureHotwordAllowed(mOpPackageName,
+                               IPCThreadState::self()->getCallingPid(),
                                IPCThreadState::self()->getCallingUid())) {
         return PERMISSION_DENIED;
     }
diff --git a/services/soundtrigger/SoundTriggerHwService.h b/services/soundtrigger/SoundTriggerHwService.h
index 4258ec0..43ad611 100644
--- a/services/soundtrigger/SoundTriggerHwService.h
+++ b/services/soundtrigger/SoundTriggerHwService.h
@@ -47,10 +47,12 @@
     virtual             ~SoundTriggerHwService();
 
     // ISoundTriggerHwService
-    virtual status_t listModules(struct sound_trigger_module_descriptor *modules,
+    virtual status_t listModules(const String16& opPackageName,
+                                 struct sound_trigger_module_descriptor *modules,
                                  uint32_t *numModules);
 
-    virtual status_t attach(const sound_trigger_module_handle_t handle,
+    virtual status_t attach(const String16& opPackageName,
+                            const sound_trigger_module_handle_t handle,
                             const sp<ISoundTriggerClient>& client,
                             sp<ISoundTrigger>& module);
 
@@ -133,7 +135,8 @@
 
        void setCaptureState_l(bool active);
 
-       sp<ModuleClient> addClient(const sp<ISoundTriggerClient>& client);
+       sp<ModuleClient> addClient(const sp<ISoundTriggerClient>& client,
+                                  const String16& opPackageName);
 
        void detach(const sp<ModuleClient>& moduleClient);
 
@@ -156,7 +159,8 @@
     public:
 
        ModuleClient(const sp<Module>& module,
-              const sp<ISoundTriggerClient>& client);
+              const sp<ISoundTriggerClient>& client,
+              const String16& opPackageName);
 
        virtual ~ModuleClient();
 
@@ -190,6 +194,7 @@
         mutable Mutex               mLock;
         wp<Module>                  mModule;
         sp<ISoundTriggerClient>     mClient;
+        const String16              mOpPackageName;
     }; // class ModuleClient
 
     class CallbackThread : public Thread {
diff --git a/soundtrigger/ISoundTriggerHwService.cpp b/soundtrigger/ISoundTriggerHwService.cpp
index d44f5cb..bd107b4 100644
--- a/soundtrigger/ISoundTriggerHwService.cpp
+++ b/soundtrigger/ISoundTriggerHwService.cpp
@@ -50,7 +50,8 @@
     {
     }
 
-    virtual status_t listModules(struct sound_trigger_module_descriptor *modules,
+    virtual status_t listModules(const String16& opPackageName,
+                                 struct sound_trigger_module_descriptor *modules,
                                  uint32_t *numModules)
     {
         if (numModules == NULL || (*numModules != 0 && modules == NULL)) {
@@ -58,6 +59,7 @@
         }
         Parcel data, reply;
         data.writeInterfaceToken(ISoundTriggerHwService::getInterfaceDescriptor());
+        data.writeString16(opPackageName);
         unsigned int numModulesReq = (modules == NULL) ? 0 : *numModules;
         data.writeInt32(numModulesReq);
         status_t status = remote()->transact(LIST_MODULES, data, &reply);
@@ -77,12 +79,14 @@
         return status;
     }
 
-    virtual status_t attach(const sound_trigger_module_handle_t handle,
+    virtual status_t attach(const String16& opPackageName,
+                            const sound_trigger_module_handle_t handle,
                             const sp<ISoundTriggerClient>& client,
                             sp<ISoundTrigger>& module)
     {
         Parcel data, reply;
         data.writeInterfaceToken(ISoundTriggerHwService::getInterfaceDescriptor());
+        data.writeString16(opPackageName);
         data.write(&handle, sizeof(sound_trigger_module_handle_t));
         data.writeStrongBinder(IInterface::asBinder(client));
         status_t status = remote()->transact(ATTACH, data, &reply);
@@ -120,6 +124,11 @@
     switch(code) {
         case LIST_MODULES: {
             CHECK_INTERFACE(ISoundTriggerHwService, data, reply);
+            String16 opPackageName;
+            status_t status = data.readString16(&opPackageName);
+            if (status != NO_ERROR) {
+                return status;
+            }
             unsigned int numModulesReq = data.readInt32();
             if (numModulesReq > MAX_ITEMS_PER_LIST) {
                 numModulesReq = MAX_ITEMS_PER_LIST;
@@ -133,7 +142,7 @@
                 reply->writeInt32(0);
                 return NO_ERROR;
             }
-            status_t status = listModules(modules, &numModules);
+            status = listModules(opPackageName, modules, &numModules);
             reply->writeInt32(status);
             reply->writeInt32(numModules);
             ALOGV("LIST_MODULES status %d got numModules %d", status, numModules);
@@ -151,12 +160,17 @@
 
         case ATTACH: {
             CHECK_INTERFACE(ISoundTriggerHwService, data, reply);
+            String16 opPackageName;
+            status_t status = data.readString16(&opPackageName);
+            if (status != NO_ERROR) {
+                return status;
+            }
             sound_trigger_module_handle_t handle;
             data.read(&handle, sizeof(sound_trigger_module_handle_t));
             sp<ISoundTriggerClient> client =
                     interface_cast<ISoundTriggerClient>(data.readStrongBinder());
             sp<ISoundTrigger> module;
-            status_t status = attach(handle, client, module);
+            status = attach(opPackageName, handle, client, module);
             reply->writeInt32(status);
             if (module != 0) {
                 reply->writeInt32(1);
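
The proxy and stub above have to agree on parcel layout: the new String16 is written immediately after the interface token and read back in the same position, ahead of the pre-existing fields. A self-contained sketch of that ordering for the ATTACH transaction (the free-standing helper and include paths are assumptions, not code from this patch):

    #include <binder/Parcel.h>
    #include <soundtrigger/ISoundTriggerHwService.h>

    using namespace android;

    // Illustrative helper, not part of the real proxy: serializes an ATTACH
    // request in the same field order the BpSoundTriggerHwService code above
    // uses, so the server-side onTransact() can read it back symmetrically.
    static void writeAttachRequest(Parcel* data,
                                   const String16& opPackageName,
                                   sound_trigger_module_handle_t handle,
                                   const sp<ISoundTriggerClient>& client) {
        data->writeInterfaceToken(ISoundTriggerHwService::getInterfaceDescriptor());
        data->writeString16(opPackageName);    // new field: first after the token
        data->write(&handle, sizeof(handle));  // existing fields keep their order
        data->writeStrongBinder(IInterface::asBinder(client));
    }
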
diff --git a/soundtrigger/SoundTrigger.cpp b/soundtrigger/SoundTrigger.cpp
index d1eb367..9708ea7 100644
--- a/soundtrigger/SoundTrigger.cpp
+++ b/soundtrigger/SoundTrigger.cpp
@@ -80,19 +80,21 @@
 }
 
 // Static methods
-status_t SoundTrigger::listModules(struct sound_trigger_module_descriptor *modules,
-                                 uint32_t *numModules)
+status_t SoundTrigger::listModules(const String16& opPackageName,
+                                   struct sound_trigger_module_descriptor *modules,
+                                   uint32_t *numModules)
 {
     ALOGV("listModules()");
     const sp<ISoundTriggerHwService> service = getSoundTriggerHwService();
     if (service == 0) {
         return NO_INIT;
     }
-    return service->listModules(modules, numModules);
+    return service->listModules(opPackageName, modules, numModules);
 }
 
-sp<SoundTrigger> SoundTrigger::attach(const sound_trigger_module_handle_t module,
-                                            const sp<SoundTriggerCallback>& callback)
+sp<SoundTrigger> SoundTrigger::attach(const String16& opPackageName,
+                                      const sound_trigger_module_handle_t module,
+                                      const sp<SoundTriggerCallback>& callback)
 {
     ALOGV("attach()");
     sp<SoundTrigger> soundTrigger;
@@ -101,7 +103,8 @@
         return soundTrigger;
     }
     soundTrigger = new SoundTrigger(module, callback);
-    status_t status = service->attach(module, soundTrigger, soundTrigger->mISoundTrigger);
+    status_t status = service->attach(opPackageName, module, soundTrigger,
+                                      soundTrigger->mISoundTrigger);
 
     if (status == NO_ERROR && soundTrigger->mISoundTrigger != 0) {
         IInterface::asBinder(soundTrigger->mISoundTrigger)->linkToDeath(soundTrigger);