Merge "Fix hardware protected path for AImageReader."
diff --git a/apex/Android.bp b/apex/Android.bp
index 51e4c23..39997d2 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -15,6 +15,7 @@
 apex {
     name: "com.android.media",
     manifest: "manifest.json",
+    java_libs: ["updatable-media"],
     native_shared_libs: [
         // Extractor plugins
         "libaacextractor",
@@ -28,14 +29,13 @@
         "liboggextractor",
         "libwavextractor",
         // MediaPlayer2
-        "libmediaplayer2_jni",
+        "libmedia2_jni",
     ],
     key: "com.android.media.key",
 }
 
 apex {
     name: "com.android.media.swcodec",
-    compile_multilib: "32",
     manifest: "manifest_codec.json",
     native_shared_libs: [
         "libmedia_codecserviceregistrant",
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 4bb74cb..641816f 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -5561,12 +5561,12 @@
      *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
      * </ul></p>
      *
-     * <p>For a logical camera, this is concatenation of all underlying physical camera ids.
-     * The null terminator for physical camera id must be preserved so that the whole string
-     * can be tokenized using '\0' to generate list of physical camera ids.</p>
-     * <p>For example, if the physical camera ids of the logical camera are "2" and "3", the
+     * <p>For a logical camera, this is the concatenation of all underlying physical camera IDs.
+     * The null terminator for each physical camera ID must be preserved so that the whole string
+     * can be tokenized using '\0' to generate a list of physical camera IDs.</p>
+     * <p>For example, if the physical camera IDs of the logical camera are "2" and "3", the
      * value of this tag will be ['2', '\0', '3', '\0'].</p>
-     * <p>The number of physical camera ids must be no less than 2.</p>
+     * <p>The number of physical camera IDs must be no less than 2.</p>
      */
     ACAMERA_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS =                 // byte[n]
             ACAMERA_LOGICAL_MULTI_CAMERA_START,
@@ -5591,6 +5591,28 @@
      */
     ACAMERA_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE =             // byte (acamera_metadata_enum_android_logical_multi_camera_sensor_sync_type_t)
             ACAMERA_LOGICAL_MULTI_CAMERA_START + 1,
+    /**
+     * <p>String containing the ID of the underlying active physical camera.</p>
+     *
+     * <p>Type: byte</p>
+     *
+     * <p>This tag may appear in:
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul></p>
+     *
+     * <p>The ID of the active physical camera that is backing the logical camera. All camera
+     * streams and metadata that are not physical-camera specific originate from this
+     * physical camera. This must be one of the valid physical IDs advertised in the physicalIds
+     * static tag.</p>
+     * <p>For a logical camera made up of physical cameras whose lenses have
+     * different characteristics, the camera device may choose to switch between the physical
+     * cameras when the application changes FOCAL_LENGTH or SCALER_CROP_REGION.
+     * At the time of the lens switch, this result metadata reflects the new active physical
+     * camera ID.</p>
+     */
+    ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID =           // byte
+            ACAMERA_LOGICAL_MULTI_CAMERA_START + 2,
     ACAMERA_LOGICAL_MULTI_CAMERA_END,
 
     /**
@@ -7162,6 +7184,10 @@
      * <p>If this is supported, android.scaler.streamConfigurationMap will
      * additionally return a min frame duration that is greater than
      * zero for each supported size-format combination.</p>
+     * <p>For camera devices with LOGICAL_MULTI_CAMERA capability, when the underlying active
+     * physical camera switches, exposureTime, sensitivity, and lens properties may change
+     * even if AE/AF is locked. However, the overall auto exposure and auto focus experience
+     * for users will be consistent. Refer to LOGICAL_MULTI_CAMERA capability for details.</p>
      *
      * @see ACAMERA_BLACK_LEVEL_LOCK
      * @see ACAMERA_CONTROL_AE_LOCK
@@ -7217,6 +7243,10 @@
      * will accurately report the values applied by AWB in the result.</p>
      * <p>A given camera device may also support additional post-processing
      * controls, but this capability only covers the above list of controls.</p>
+     * <p>For camera devices with LOGICAL_MULTI_CAMERA capability, when the underlying active
+     * physical camera switches, the tonemap, white balance, and shading map may change even if
+     * AWB is locked. However, the overall post-processing experience for users will be
+     * consistent. Refer to LOGICAL_MULTI_CAMERA capability for details.</p>
      *
      * @see ACAMERA_COLOR_CORRECTION_ABERRATION_MODE
      * @see ACAMERA_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES
@@ -7396,7 +7426,7 @@
      * </li>
      * <li>The SENSOR_INFO_TIMESTAMP_SOURCE of the logical device and physical devices must be
      *   the same.</li>
-     * <li>The logical camera device must be LIMITED or higher device.</li>
+     * <li>The logical camera must be a LIMITED or higher device.</li>
      * </ul>
      * <p>Both the logical camera device and its underlying physical devices support the
      * mandatory stream combinations required for their device levels.</p>
@@ -7416,13 +7446,84 @@
      * <p>Using physical streams in place of a logical stream of the same size and format will
      * not slow down the frame rate of the capture, as long as the minimum frame duration
      * of the physical and logical streams are the same.</p>
+     * <p>A logical camera device's dynamic metadata may contain
+     * ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID to notify the application of the current
+     * active physical camera ID. The active physical camera is the physical camera from which
+     * the logical camera's main image data outputs (YUV or RAW) and metadata come.
+     * In addition, this serves as an indication of which physical camera is used to output to
+     * a RAW stream, or, in case only physical cameras support RAW, which physical RAW stream
+     * the application should request.</p>
+     * <p>The logical camera's static metadata tags below describe the default active physical
+     * camera. An active physical camera is the default if it is used when the application
+     * directly uses requests built from a template. All templates will default to the same
+     * active physical camera.</p>
+     * <ul>
+     * <li>ACAMERA_SENSOR_INFO_SENSITIVITY_RANGE</li>
+     * <li>ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT</li>
+     * <li>ACAMERA_SENSOR_INFO_EXPOSURE_TIME_RANGE</li>
+     * <li>ACAMERA_SENSOR_INFO_MAX_FRAME_DURATION</li>
+     * <li>ACAMERA_SENSOR_INFO_PHYSICAL_SIZE</li>
+     * <li>ACAMERA_SENSOR_INFO_WHITE_LEVEL</li>
+     * <li>ACAMERA_SENSOR_INFO_LENS_SHADING_APPLIED</li>
+     * <li>ACAMERA_SENSOR_REFERENCE_ILLUMINANT1</li>
+     * <li>ACAMERA_SENSOR_REFERENCE_ILLUMINANT2</li>
+     * <li>ACAMERA_SENSOR_CALIBRATION_TRANSFORM1</li>
+     * <li>ACAMERA_SENSOR_CALIBRATION_TRANSFORM2</li>
+     * <li>ACAMERA_SENSOR_COLOR_TRANSFORM1</li>
+     * <li>ACAMERA_SENSOR_COLOR_TRANSFORM2</li>
+     * <li>ACAMERA_SENSOR_FORWARD_MATRIX1</li>
+     * <li>ACAMERA_SENSOR_FORWARD_MATRIX2</li>
+     * <li>ACAMERA_SENSOR_BLACK_LEVEL_PATTERN</li>
+     * <li>ACAMERA_SENSOR_MAX_ANALOG_SENSITIVITY</li>
+     * <li>ACAMERA_SENSOR_OPTICAL_BLACK_REGIONS</li>
+     * <li>ACAMERA_SENSOR_AVAILABLE_TEST_PATTERN_MODES</li>
+     * <li>ACAMERA_LENS_INFO_HYPERFOCAL_DISTANCE</li>
+     * <li>ACAMERA_LENS_INFO_MINIMUM_FOCUS_DISTANCE</li>
+     * <li>ACAMERA_LENS_INFO_FOCUS_DISTANCE_CALIBRATION</li>
+     * <li>ACAMERA_LENS_POSE_ROTATION</li>
+     * <li>ACAMERA_LENS_POSE_TRANSLATION</li>
+     * <li>ACAMERA_LENS_INTRINSIC_CALIBRATION</li>
+     * <li>ACAMERA_LENS_POSE_REFERENCE</li>
+     * <li>ACAMERA_LENS_DISTORTION</li>
+     * </ul>
+     * <p>To maintain backward compatibility, the capture request and result metadata tags
+     * required for basic camera functionality will be based solely on the
+     * logical camera capability. Other request and result metadata tags, on the other
+     * hand, will be based on the current active physical camera. For example, the physical
+     * cameras' sensor sensitivity and lens capabilities may differ from each other.
+     * So when the application manually controls sensor exposure time/gain, or does manual
+     * focus control, it must check the current active physical camera's exposure, gain,
+     * and focus distance range.</p>
      *
      * @see ACAMERA_LENS_DISTORTION
+     * @see ACAMERA_LENS_INFO_FOCUS_DISTANCE_CALIBRATION
+     * @see ACAMERA_LENS_INFO_HYPERFOCAL_DISTANCE
+     * @see ACAMERA_LENS_INFO_MINIMUM_FOCUS_DISTANCE
      * @see ACAMERA_LENS_INTRINSIC_CALIBRATION
      * @see ACAMERA_LENS_POSE_REFERENCE
      * @see ACAMERA_LENS_POSE_ROTATION
      * @see ACAMERA_LENS_POSE_TRANSLATION
+     * @see ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID
      * @see ACAMERA_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE
+     * @see ACAMERA_SENSOR_AVAILABLE_TEST_PATTERN_MODES
+     * @see ACAMERA_SENSOR_BLACK_LEVEL_PATTERN
+     * @see ACAMERA_SENSOR_CALIBRATION_TRANSFORM1
+     * @see ACAMERA_SENSOR_CALIBRATION_TRANSFORM2
+     * @see ACAMERA_SENSOR_COLOR_TRANSFORM1
+     * @see ACAMERA_SENSOR_COLOR_TRANSFORM2
+     * @see ACAMERA_SENSOR_FORWARD_MATRIX1
+     * @see ACAMERA_SENSOR_FORWARD_MATRIX2
+     * @see ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT
+     * @see ACAMERA_SENSOR_INFO_EXPOSURE_TIME_RANGE
+     * @see ACAMERA_SENSOR_INFO_LENS_SHADING_APPLIED
+     * @see ACAMERA_SENSOR_INFO_MAX_FRAME_DURATION
+     * @see ACAMERA_SENSOR_INFO_PHYSICAL_SIZE
+     * @see ACAMERA_SENSOR_INFO_SENSITIVITY_RANGE
+     * @see ACAMERA_SENSOR_INFO_WHITE_LEVEL
+     * @see ACAMERA_SENSOR_MAX_ANALOG_SENSITIVITY
+     * @see ACAMERA_SENSOR_OPTICAL_BLACK_REGIONS
+     * @see ACAMERA_SENSOR_REFERENCE_ILLUMINANT1
+     * @see ACAMERA_SENSOR_REFERENCE_ILLUMINANT2
      */
     ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA      = 11,
 
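The tokenization rule for ACAMERA_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS and the new per-result
ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID tag documented above can be exercised with the
standard NDK metadata accessors. The following minimal sketch is not part of this change; it
assumes only the public NdkCameraMetadata API and abbreviates error handling.

#include <string>
#include <vector>
#include <camera/NdkCameraMetadata.h>
#include <camera/NdkCameraMetadataTags.h>

// Split the '\0'-separated byte array advertised by a logical camera into
// individual physical camera ID strings.
static std::vector<std::string> physicalIdsFromChars(const ACameraMetadata* chars) {
    std::vector<std::string> ids;
    ACameraMetadata_const_entry entry = {};
    if (ACameraMetadata_getConstEntry(chars,
            ACAMERA_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS, &entry) != ACAMERA_OK) {
        return ids;  // not a logical multi-camera
    }
    const char* p = reinterpret_cast<const char*>(entry.data.u8);
    const char* end = p + entry.count;
    while (p < end) {
        std::string id(p);            // stops at the embedded '\0'
        if (!id.empty()) ids.push_back(id);
        p += id.size() + 1;           // skip the terminator as well
    }
    return ids;
}

// Inside an ACameraCaptureSession_captureCallback_result callback, read the
// per-frame active physical camera ID (present only if the HAL reports it).
static std::string activePhysicalId(const ACameraMetadata* result) {
    ACameraMetadata_const_entry entry = {};
    if (ACameraMetadata_getConstEntry(result,
            ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID, &entry) != ACAMERA_OK) {
        return {};
    }
    // The value is documented as a null-terminated string.
    return std::string(reinterpret_cast<const char*>(entry.data.u8));
}
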
diff --git a/camera/ndk/ndk_vendor/impl/utils.cpp b/camera/ndk/ndk_vendor/impl/utils.cpp
index 7193006..5d2d47c 100644
--- a/camera/ndk/ndk_vendor/impl/utils.cpp
+++ b/camera/ndk/ndk_vendor/impl/utils.cpp
@@ -70,7 +70,6 @@
         return;
     }
     size_t size = get_camera_metadata_size(src);
-    ALOGE("Converting metadata size: %d", (int)size);
     dst->setToExternal((uint8_t *) src, size);
     return;
 }
diff --git a/drm/drmserver/DrmManagerService.cpp b/drm/drmserver/DrmManagerService.cpp
index 2532275..2600a2c 100644
--- a/drm/drmserver/DrmManagerService.cpp
+++ b/drm/drmserver/DrmManagerService.cpp
@@ -58,22 +58,26 @@
     return drm_perm_labels[index];
 }
 
-bool DrmManagerService::selinuxIsProtectedCallAllowed(pid_t spid, drm_perm_t perm) {
+bool DrmManagerService::selinuxIsProtectedCallAllowed(pid_t spid, const char* ssid, drm_perm_t perm) {
     if (selinux_enabled <= 0) {
         return true;
     }
 
-    char *sctx;
+    char *sctx = NULL;
     const char *selinux_class = "drmservice";
     const char *str_perm = get_perm_label(perm);
 
-    if (getpidcon(spid, &sctx) != 0) {
-        ALOGE("SELinux: getpidcon(pid=%d) failed.\n", spid);
-        return false;
+    if (ssid == NULL) {
+        android_errorWriteLog(0x534e4554, "121035042");
+
+        if (getpidcon(spid, &sctx) != 0) {
+            ALOGE("SELinux: getpidcon(pid=%d) failed.\n", spid);
+            return false;
+        }
     }
 
-    bool allowed = (selinux_check_access(sctx, drmserver_context, selinux_class,
-            str_perm, NULL) == 0);
+    bool allowed = (selinux_check_access(ssid ? ssid : sctx, drmserver_context,
+            selinux_class, str_perm, NULL) == 0);
     freecon(sctx);
 
     return allowed;
@@ -86,10 +90,11 @@
     IPCThreadState* ipcState = IPCThreadState::self();
     uid_t uid = ipcState->getCallingUid();
     pid_t spid = ipcState->getCallingPid();
+    const char* ssid = ipcState->getCallingSid();
 
     for (unsigned int i = 0; i < trustedUids.size(); ++i) {
         if (trustedUids[i] == uid) {
-            return selinuxIsProtectedCallAllowed(spid, perm);
+            return selinuxIsProtectedCallAllowed(spid, ssid, perm);
         }
     }
     return false;
@@ -97,7 +102,9 @@
 
 void DrmManagerService::instantiate() {
     ALOGV("instantiate");
-    defaultServiceManager()->addService(String16("drm.drmManager"), new DrmManagerService());
+    sp<DrmManagerService> service = new DrmManagerService();
+    service->setRequestingSid(true);
+    defaultServiceManager()->addService(String16("drm.drmManager"), service);
 
     if (0 >= trustedUids.size()) {
         // TODO
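For context on the DrmManagerService change above: the service now opts in to receiving the
caller's SELinux context with each binder transaction (setRequestingSid(true)), so the
permission check can use that context directly and only fall back to the racier getpidcon()
lookup when no SID was delivered. A condensed, hedged sketch of that pattern follows; the
target context parameter stands in for the context the service actually resolves.

#include <selinux/selinux.h>
#include <sys/types.h>

// Prefer the SELinux context supplied with the binder call (ssid); fall back
// to a PID-based lookup only when it is absent. 'targetCtx' and 'perm' stand
// in for the drmserver context and the drmservice permission label.
static bool isDrmCallAllowed(pid_t spid, const char* ssid,
                             const char* targetCtx, const char* perm) {
    char* pidcon = nullptr;
    const char* scon = ssid;
    if (scon == nullptr) {
        // Fallback: racy if the caller has already exited or its PID was
        // recycled, which is why the transaction-supplied SID is preferred.
        if (getpidcon(spid, &pidcon) != 0) {
            return false;
        }
        scon = pidcon;
    }
    bool allowed = selinux_check_access(scon, targetCtx, "drmservice",
                                        perm, nullptr) == 0;
    freecon(pidcon);  // freecon(nullptr) is a no-op
    return allowed;
}
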
diff --git a/drm/drmserver/DrmManagerService.h b/drm/drmserver/DrmManagerService.h
index 7aaeab5..2e27a3c 100644
--- a/drm/drmserver/DrmManagerService.h
+++ b/drm/drmserver/DrmManagerService.h
@@ -60,7 +60,7 @@
 
     static const char *get_perm_label(drm_perm_t perm);
 
-    static bool selinuxIsProtectedCallAllowed(pid_t spid, drm_perm_t perm);
+    static bool selinuxIsProtectedCallAllowed(pid_t spid, const char* ssid, drm_perm_t perm);
 
     static bool isProtectedCallAllowed(drm_perm_t perm);
 
diff --git a/drm/libmediadrm/DrmHal.cpp b/drm/libmediadrm/DrmHal.cpp
index f4c0341..480c7cd 100644
--- a/drm/libmediadrm/DrmHal.cpp
+++ b/drm/libmediadrm/DrmHal.cpp
@@ -63,6 +63,7 @@
 
 typedef drm::V1_1::KeyRequestType KeyRequestType_V1_1;
 typedef drm::V1_2::Status Status_V1_2;
+typedef drm::V1_2::HdcpLevel HdcpLevel_V1_2;
 
 namespace {
 
@@ -144,6 +145,23 @@
     }
 }
 
+static SecurityLevel toHidlSecurityLevel(DrmPlugin::SecurityLevel level) {
+    switch(level) {
+    case DrmPlugin::kSecurityLevelSwSecureCrypto:
+        return SecurityLevel::SW_SECURE_CRYPTO;
+    case DrmPlugin::kSecurityLevelSwSecureDecode:
+        return SecurityLevel::SW_SECURE_DECODE;
+    case DrmPlugin::kSecurityLevelHwSecureCrypto:
+        return SecurityLevel::HW_SECURE_CRYPTO;
+    case DrmPlugin::kSecurityLevelHwSecureDecode:
+        return SecurityLevel::HW_SECURE_DECODE;
+    case DrmPlugin::kSecurityLevelHwSecureAll:
+        return SecurityLevel::HW_SECURE_ALL;
+    default:
+        return SecurityLevel::UNKNOWN;
+    }
+}
+
 static DrmPlugin::OfflineLicenseState toOfflineLicenseState(
         OfflineLicenseState licenseState) {
     switch(licenseState) {
@@ -156,26 +174,26 @@
     }
 }
 
-static DrmPlugin::HdcpLevel toHdcpLevel(HdcpLevel level) {
+static DrmPlugin::HdcpLevel toHdcpLevel(HdcpLevel_V1_2 level) {
     switch(level) {
-    case HdcpLevel::HDCP_NONE:
+    case HdcpLevel_V1_2::HDCP_NONE:
         return DrmPlugin::kHdcpNone;
-    case HdcpLevel::HDCP_V1:
+    case HdcpLevel_V1_2::HDCP_V1:
         return DrmPlugin::kHdcpV1;
-    case HdcpLevel::HDCP_V2:
+    case HdcpLevel_V1_2::HDCP_V2:
         return DrmPlugin::kHdcpV2;
-    case HdcpLevel::HDCP_V2_1:
+    case HdcpLevel_V1_2::HDCP_V2_1:
         return DrmPlugin::kHdcpV2_1;
-    case HdcpLevel::HDCP_V2_2:
+    case HdcpLevel_V1_2::HDCP_V2_2:
         return DrmPlugin::kHdcpV2_2;
-    case HdcpLevel::HDCP_NO_OUTPUT:
+    case HdcpLevel_V1_2::HDCP_V2_3:
+        return DrmPlugin::kHdcpV2_3;
+    case HdcpLevel_V1_2::HDCP_NO_OUTPUT:
         return DrmPlugin::kHdcpNoOutput;
     default:
         return DrmPlugin::kHdcpLevelUnknown;
     }
 }
-
-
 static ::KeyedVector toHidlKeyedVector(const KeyedVector<String8, String8>&
         keyedVector) {
     std::vector<KeyValue> stdKeyedVector;
@@ -568,16 +586,39 @@
     return Void();
 }
 
-bool DrmHal::isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType) {
+bool DrmHal::matchMimeTypeAndSecurityLevel(sp<IDrmFactory> &factory,
+                                           const uint8_t uuid[16],
+                                           const String8 &mimeType,
+                                           DrmPlugin::SecurityLevel level) {
+    if (mimeType == "") {
+        return true;
+    } else if (!factory->isContentTypeSupported(mimeType.string())) {
+        return false;
+    }
+
+    if (level == DrmPlugin::kSecurityLevelUnknown) {
+        return true;
+    } else {
+        sp<drm::V1_2::IDrmFactory> factoryV1_2 = drm::V1_2::IDrmFactory::castFrom(factory);
+        if (factoryV1_2 == NULL) {
+            return true;
+        } else if (factoryV1_2->isCryptoSchemeSupported_1_2(uuid,
+                        mimeType.string(), toHidlSecurityLevel(level))) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool DrmHal::isCryptoSchemeSupported(const uint8_t uuid[16],
+                                     const String8 &mimeType,
+                                     DrmPlugin::SecurityLevel level) {
     Mutex::Autolock autoLock(mLock);
 
     for (size_t i = 0; i < mFactories.size(); i++) {
-        if (mFactories[i]->isCryptoSchemeSupported(uuid)) {
-            if (mimeType != "") {
-                if (mFactories[i]->isContentTypeSupported(mimeType.string())) {
-                    return true;
-                }
-            } else {
+        sp<IDrmFactory> factory = mFactories[i];
+        if (factory->isCryptoSchemeSupported(uuid)) {
+            if (matchMimeTypeAndSecurityLevel(factory, uuid, mimeType, level)) {
                 return true;
             }
         }
@@ -633,30 +674,15 @@
     Mutex::Autolock autoLock(mLock);
     INIT_CHECK();
 
-    SecurityLevel hSecurityLevel;
+    SecurityLevel hSecurityLevel = toHidlSecurityLevel(level);
     bool setSecurityLevel = true;
 
-    switch(level) {
-    case DrmPlugin::kSecurityLevelSwSecureCrypto:
-        hSecurityLevel = SecurityLevel::SW_SECURE_CRYPTO;
-        break;
-    case DrmPlugin::kSecurityLevelSwSecureDecode:
-        hSecurityLevel = SecurityLevel::SW_SECURE_DECODE;
-        break;
-    case DrmPlugin::kSecurityLevelHwSecureCrypto:
-        hSecurityLevel = SecurityLevel::HW_SECURE_CRYPTO;
-        break;
-    case DrmPlugin::kSecurityLevelHwSecureDecode:
-        hSecurityLevel = SecurityLevel::HW_SECURE_DECODE;
-        break;
-    case DrmPlugin::kSecurityLevelHwSecureAll:
-        hSecurityLevel = SecurityLevel::HW_SECURE_ALL;
-        break;
-    case DrmPlugin::kSecurityLevelMax:
+    if (level == DrmPlugin::kSecurityLevelMax) {
         setSecurityLevel = false;
-        break;
-    default:
-        return ERROR_DRM_CANNOT_HANDLE;
+    } else {
+        if (hSecurityLevel == SecurityLevel::UNKNOWN) {
+            return ERROR_DRM_CANNOT_HANDLE;
+        }
     }
 
     status_t  err = UNKNOWN_ERROR;
@@ -1093,22 +1119,31 @@
     }
     status_t err = UNKNOWN_ERROR;
 
-    if (mPluginV1_1 == NULL) {
-        return ERROR_DRM_CANNOT_HANDLE;
-    }
-
     *connected = DrmPlugin::kHdcpLevelUnknown;
     *max = DrmPlugin::kHdcpLevelUnknown;
 
-    Return<void> hResult = mPluginV1_1->getHdcpLevels(
-            [&](Status status, const HdcpLevel& hConnected, const HdcpLevel& hMax) {
-                if (status == Status::OK) {
-                    *connected = toHdcpLevel(hConnected);
-                    *max = toHdcpLevel(hMax);
-                }
-                err = toStatusT(status);
-            }
-    );
+    Return<void> hResult;
+    if (mPluginV1_2 != NULL) {
+        hResult = mPluginV1_2->getHdcpLevels_1_2(
+                [&](Status_V1_2 status, const HdcpLevel_V1_2& hConnected, const HdcpLevel_V1_2& hMax) {
+                    if (status == Status_V1_2::OK) {
+                        *connected = toHdcpLevel(hConnected);
+                        *max = toHdcpLevel(hMax);
+                    }
+                    err = toStatusT_1_2(status);
+                });
+    } else if (mPluginV1_1 != NULL) {
+        hResult = mPluginV1_1->getHdcpLevels(
+                [&](Status status, const HdcpLevel& hConnected, const HdcpLevel& hMax) {
+                    if (status == Status::OK) {
+                        *connected = toHdcpLevel(static_cast<HdcpLevel_V1_2>(hConnected));
+                        *max = toHdcpLevel(static_cast<HdcpLevel_V1_2>(hMax));
+                    }
+                    err = toStatusT(status);
+                });
+    } else {
+        return ERROR_DRM_CANNOT_HANDLE;
+    }
 
     return hResult.isOk() ? err : DEAD_OBJECT;
 }
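A hedged usage sketch for the extended isCryptoSchemeSupported() above: callers can now ask
about a specific robustness level, and passing kSecurityLevelUnknown preserves the old
UUID/MIME-only behaviour. The include paths and the Widevine UUID literal below are
illustrative assumptions, not part of this change.

#include <mediadrm/DrmHal.h>      // assumed include path for this branch
#include <utils/String8.h>
#include <utils/StrongPointer.h>

using namespace android;

static bool supportsHwSecureAll(const sp<DrmHal>& drm) {
    // Widevine scheme UUID, shown only as an example input.
    static const uint8_t kUuid[16] = {
        0xED, 0xEF, 0x8B, 0xA9, 0x79, 0xD6, 0x4A, 0xCE,
        0xA3, 0xC8, 0x27, 0xDC, 0xD5, 0x1D, 0x21, 0xED};
    // A concrete level is routed to IDrmFactory::isCryptoSchemeSupported_1_2
    // when the vendor factory implements drm@1.2; otherwise the level check
    // is skipped, matching matchMimeTypeAndSecurityLevel() above.
    return drm->isCryptoSchemeSupported(kUuid, String8("video/mp4"),
                                        DrmPlugin::kSecurityLevelHwSecureAll);
}
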
diff --git a/drm/libmediadrm/IDrm.cpp b/drm/libmediadrm/IDrm.cpp
index 8c26317..0f34315 100644
--- a/drm/libmediadrm/IDrm.cpp
+++ b/drm/libmediadrm/IDrm.cpp
@@ -83,11 +83,14 @@
         return reply.readInt32();
     }
 
-    virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType) {
+    virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType,
+            DrmPlugin::SecurityLevel level) {
         Parcel data, reply;
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
         data.write(uuid, 16);
         data.writeString8(mimeType);
+        data.writeInt32(level);
+
         status_t status = remote()->transact(IS_CRYPTO_SUPPORTED, data, &reply);
         if (status != OK) {
             ALOGE("isCryptoSchemeSupported: binder call failed: %d", status);
@@ -123,11 +126,11 @@
         return reply.readInt32();
     }
 
-    virtual status_t openSession(DrmPlugin::SecurityLevel securityLevel,
+    virtual status_t openSession(DrmPlugin::SecurityLevel level,
             Vector<uint8_t> &sessionId) {
         Parcel data, reply;
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
-        data.writeInt32(securityLevel);
+        data.writeInt32(level);
 
         status_t status = remote()->transact(OPEN_SESSION, data, &reply);
         if (status != OK) {
@@ -768,7 +771,9 @@
             uint8_t uuid[16];
             data.read(uuid, sizeof(uuid));
             String8 mimeType = data.readString8();
-            reply->writeInt32(isCryptoSchemeSupported(uuid, mimeType));
+            DrmPlugin::SecurityLevel level =
+                    static_cast<DrmPlugin::SecurityLevel>(data.readInt32());
+            reply->writeInt32(isCryptoSchemeSupported(uuid, mimeType, level));
             return OK;
         }
 
diff --git a/drm/mediadrm/plugins/clearkey/hidl/DrmFactory.cpp b/drm/mediadrm/plugins/clearkey/hidl/DrmFactory.cpp
index 9d040a8..9fb5bbe 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/DrmFactory.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/DrmFactory.cpp
@@ -34,6 +34,7 @@
 namespace clearkey {
 
 using ::android::hardware::drm::V1_0::Status;
+using ::android::hardware::drm::V1_1::SecurityLevel;
 using ::android::hardware::Void;
 
 Return<bool> DrmFactory::isCryptoSchemeSupported(
@@ -41,6 +42,13 @@
     return clearkeydrm::isClearKeyUUID(uuid.data());
 }
 
+Return<bool> DrmFactory::isCryptoSchemeSupported_1_2(const hidl_array<uint8_t, 16>& uuid,
+                                                     const hidl_string &mimeType,
+                                                     SecurityLevel level) {
+    return isCryptoSchemeSupported(uuid) && isContentTypeSupported(mimeType) &&
+            level == SecurityLevel::SW_SECURE_CRYPTO;
+}
+
 Return<bool> DrmFactory::isContentTypeSupported(const hidl_string &mimeType) {
     // This should match the mimeTypes handed by InitDataParser.
     return mimeType == kIsoBmffVideoMimeType ||
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyTypes.h b/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyTypes.h
index 2dafa36..03c434e 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyTypes.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyTypes.h
@@ -28,6 +28,7 @@
 namespace clearkey {
 
 using ::android::hardware::drm::V1_0::KeyValue;
+using ::android::hardware::drm::V1_1::SecurityLevel;
 using ::android::hardware::hidl_vec;
 
 const uint8_t kBlockSize = 16; //AES_BLOCK_SIZE;
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/DrmFactory.h b/drm/mediadrm/plugins/clearkey/hidl/include/DrmFactory.h
index ff715ea..4ca856d 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/DrmFactory.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/DrmFactory.h
@@ -39,6 +39,10 @@
     Return<bool> isCryptoSchemeSupported(const hidl_array<uint8_t, 16>& uuid)
             override;
 
+    Return<bool> isCryptoSchemeSupported_1_2(const hidl_array<uint8_t, 16>& uuid,
+                                             const hidl_string& mimeType,
+                                             SecurityLevel level) override;
+
     Return<bool> isContentTypeSupported(const hidl_string &mimeType)
             override;
 
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
index a9b897b..ba5fa65 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
@@ -63,6 +63,7 @@
 typedef drm::V1_1::KeyRequestType KeyRequestType_V1_1;
 typedef drm::V1_2::IDrmPluginListener IDrmPluginListener_V1_2;
 typedef drm::V1_2::Status Status_V1_2;
+typedef drm::V1_2::HdcpLevel HdcpLevel_V1_2;
 
 struct DrmPlugin : public IDrmPlugin {
     explicit DrmPlugin(SessionLibrary* sessionLibrary);
@@ -162,6 +163,13 @@
         return Void();
     }
 
+    Return<void> getHdcpLevels_1_2(getHdcpLevels_1_2_cb _hidl_cb) {
+        HdcpLevel_V1_2 connectedLevel = HdcpLevel_V1_2::HDCP_NONE;
+        HdcpLevel_V1_2 maxLevel = HdcpLevel_V1_2::HDCP_NO_OUTPUT;
+        _hidl_cb(Status_V1_2::OK, connectedLevel, maxLevel);
+        return Void();
+    }
+
     Return<void> getNumberOfSessions(getNumberOfSessions_cb _hidl_cb) override;
 
     Return<void> getSecurityLevel(const hidl_vec<uint8_t>& sessionId,
diff --git a/include/media/MediaExtractorPluginHelper.h b/include/media/MediaExtractorPluginHelper.h
index f4d4da6..b86f177 100644
--- a/include/media/MediaExtractorPluginHelper.h
+++ b/include/media/MediaExtractorPluginHelper.h
@@ -171,6 +171,9 @@
 };
 
 inline CMediaTrack *wrap(MediaTrackHelper *track) {
+    if (track == nullptr) {
+        return nullptr;
+    }
     CMediaTrack *wrapper = (CMediaTrack*) malloc(sizeof(CMediaTrack));
     wrapper->data = track;
     wrapper->free = [](void *data) -> void {
diff --git a/include/media/MediaTrack.h b/include/media/MediaTrack.h
index e828a7f..493eba3 100644
--- a/include/media/MediaTrack.h
+++ b/include/media/MediaTrack.h
@@ -142,7 +142,7 @@
 
 class MediaTrackCUnwrapper : public MediaTrack {
 public:
-    explicit MediaTrackCUnwrapper(CMediaTrack *wrapper);
+    static MediaTrackCUnwrapper *create(CMediaTrack *wrapper);
 
     virtual status_t start();
     virtual status_t stop();
@@ -155,6 +155,7 @@
     virtual ~MediaTrackCUnwrapper();
 
 private:
+    explicit MediaTrackCUnwrapper(CMediaTrack *wrapper);
     CMediaTrack *wrapper;
     MediaBufferGroup *bufferGroup;
 };
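The MediaTrackCUnwrapper constructor is made private in favour of a static create(), which
pairs with the nullptr guard added to wrap() in MediaExtractorPluginHelper.h. The
corresponding .cpp is not part of this section; a plausible minimal implementation of the
factory, under that assumption, would be:

// Sketch only: assumed to mirror the wrap() guard by refusing to build an
// unwrapper around a null CMediaTrack. Callers must check for nullptr.
MediaTrackCUnwrapper* MediaTrackCUnwrapper::create(CMediaTrack* wrapper) {
    if (wrapper == nullptr) {
        return nullptr;
    }
    return new MediaTrackCUnwrapper(wrapper);
}
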
diff --git a/media/codec2/components/opus/Android.bp b/media/codec2/components/opus/Android.bp
index 240cdb9..0ed141b 100644
--- a/media/codec2/components/opus/Android.bp
+++ b/media/codec2/components/opus/Android.bp
@@ -9,3 +9,14 @@
 
     shared_libs: ["libopus"],
 }
+cc_library_shared {
+    name: "libcodec2_soft_opusenc",
+    defaults: [
+        "libcodec2_soft-defaults",
+        "libcodec2_soft_sanitize_all-defaults",
+    ],
+
+    srcs: ["C2SoftOpusEnc.cpp"],
+
+    shared_libs: ["libopus"],
+}
diff --git a/media/codec2/components/opus/C2SoftOpusDec.cpp b/media/codec2/components/opus/C2SoftOpusDec.cpp
index 2439c3c..3ce1fd6 100644
--- a/media/codec2/components/opus/C2SoftOpusDec.cpp
+++ b/media/codec2/components/opus/C2SoftOpusDec.cpp
@@ -19,10 +19,9 @@
 #include <log/log.h>
 
 #include <media/stagefright/foundation/MediaDefs.h>
-
+#include <media/stagefright/foundation/OpusHeader.h>
 #include <C2PlatformSupport.h>
 #include <SimpleC2Interface.h>
-
 #include "C2SoftOpusDec.h"
 
 extern "C" {
@@ -188,16 +187,6 @@
     work->workletsProcessed = 1u;
 }
 
-static uint16_t ReadLE16(const uint8_t *data, size_t data_size,
-                         uint32_t read_offset) {
-    if (read_offset + 1 > data_size)
-        return 0;
-    uint16_t val;
-    val = data[read_offset];
-    val |= data[read_offset + 1] << 8;
-    return val;
-}
-
 static const int kRate = 48000;
 
 // Opus uses Vorbis channel mapping, and Vorbis channel mapping specifies
@@ -216,81 +205,6 @@
 static const int kMaxChannelsWithDefaultLayout = 2;
 static const uint8_t kDefaultOpusChannelLayout[kMaxChannelsWithDefaultLayout] = { 0, 1 };
 
-// Parses Opus Header. Header spec: http://wiki.xiph.org/OggOpus#ID_Header
-static bool ParseOpusHeader(const uint8_t *data, size_t data_size,
-                            OpusHeader* header) {
-    // Size of the Opus header excluding optional mapping information.
-    const size_t kOpusHeaderSize = 19;
-
-    // Offset to the channel count byte in the Opus header.
-    const size_t kOpusHeaderChannelsOffset = 9;
-
-    // Offset to the pre-skip value in the Opus header.
-    const size_t kOpusHeaderSkipSamplesOffset = 10;
-
-    // Offset to the gain value in the Opus header.
-    const size_t kOpusHeaderGainOffset = 16;
-
-    // Offset to the channel mapping byte in the Opus header.
-    const size_t kOpusHeaderChannelMappingOffset = 18;
-
-    // Opus Header contains a stream map. The mapping values are in the header
-    // beyond the always present |kOpusHeaderSize| bytes of data. The mapping
-    // data contains stream count, coupling information, and per channel mapping
-    // values:
-    //   - Byte 0: Number of streams.
-    //   - Byte 1: Number coupled.
-    //   - Byte 2: Starting at byte 2 are |header->channels| uint8 mapping
-    //             values.
-    const size_t kOpusHeaderNumStreamsOffset = kOpusHeaderSize;
-    const size_t kOpusHeaderNumCoupledOffset = kOpusHeaderNumStreamsOffset + 1;
-    const size_t kOpusHeaderStreamMapOffset = kOpusHeaderNumStreamsOffset + 2;
-
-    if (data_size < kOpusHeaderSize) {
-        ALOGE("Header size is too small.");
-        return false;
-    }
-    header->channels = *(data + kOpusHeaderChannelsOffset);
-    if (header->channels <= 0 || header->channels > kMaxChannels) {
-        ALOGE("Invalid Header, wrong channel count: %d", header->channels);
-        return false;
-    }
-
-    header->skip_samples = ReadLE16(data,
-                                    data_size,
-                                    kOpusHeaderSkipSamplesOffset);
-
-    header->gain_db = static_cast<int16_t>(ReadLE16(data,
-                                                    data_size,
-                                                    kOpusHeaderGainOffset));
-
-    header->channel_mapping = *(data + kOpusHeaderChannelMappingOffset);
-    if (!header->channel_mapping) {
-        if (header->channels > kMaxChannelsWithDefaultLayout) {
-            ALOGE("Invalid Header, missing stream map.");
-            return false;
-        }
-        header->num_streams = 1;
-        header->num_coupled = header->channels > 1;
-        header->stream_map[0] = 0;
-        header->stream_map[1] = 1;
-        return true;
-    }
-    if (data_size < kOpusHeaderStreamMapOffset + header->channels) {
-        ALOGE("Invalid stream map; insufficient data for current channel "
-              "count: %d", header->channels);
-        return false;
-    }
-    header->num_streams = *(data + kOpusHeaderNumStreamsOffset);
-    header->num_coupled = *(data + kOpusHeaderNumCoupledOffset);
-    if (header->num_streams + header->num_coupled != header->channels) {
-        ALOGE("Inconsistent channel mapping.");
-        return false;
-    }
-    for (int i = 0; i < header->channels; ++i)
-        header->stream_map[i] = *(data + kOpusHeaderStreamMapOffset + i);
-    return true;
-}
 
 // Convert nanoseconds to number of samples.
 static uint64_t ns_to_samples(uint64_t ns, int rate) {
@@ -338,7 +252,19 @@
     const uint8_t *data = rView.data() + inOffset;
     if (mInputBufferCount < 3) {
         if (mInputBufferCount == 0) {
-            if (!ParseOpusHeader(data, inSize, &mHeader)) {
+            size_t opusHeadSize = inSize;
+            size_t codecDelayBufSize = 0;
+            size_t seekPreRollBufSize = 0;
+            void *opusHeadBuf = (void *)data;
+            void *codecDelayBuf = NULL;
+            void *seekPreRollBuf = NULL;
+
+            GetOpusHeaderBuffers(data, inSize, &opusHeadBuf,
+                                &opusHeadSize, &codecDelayBuf,
+                                &codecDelayBufSize, &seekPreRollBuf,
+                                &seekPreRollBufSize);
+
+            if (!ParseOpusHeader((uint8_t *)opusHeadBuf, opusHeadSize, &mHeader)) {
                 ALOGE("Encountered error while Parsing Opus Header.");
                 mSignalledError = true;
                 work->result = C2_CORRUPTED;
@@ -377,6 +303,20 @@
                 work->result = C2_CORRUPTED;
                 return;
             }
+
+            if (codecDelayBuf && codecDelayBufSize == 8) {
+                uint64_t value;
+                memcpy(&value, codecDelayBuf, sizeof(uint64_t));
+                mCodecDelay = ns_to_samples(value, kRate);
+                mSamplesToDiscard = mCodecDelay;
+                ++mInputBufferCount;
+            }
+            if (seekPreRollBuf && seekPreRollBufSize == 8) {
+                uint64_t value;
+                memcpy(&value, seekPreRollBuf, sizeof(uint64_t));
+                mSeekPreRoll = ns_to_samples(value, kRate);
+                ++mInputBufferCount;
+            }
         } else {
             if (inSize < 8) {
                 ALOGE("Input sample size is too small.");
@@ -392,29 +332,30 @@
             }
             else {
                 mSeekPreRoll = samples;
-
-                ALOGI("Configuring decoder: %d Hz, %d channels",
-                       kRate, mHeader.channels);
-                C2StreamSampleRateInfo::output sampleRateInfo(0u, kRate);
-                C2StreamChannelCountInfo::output channelCountInfo(0u, mHeader.channels);
-                std::vector<std::unique_ptr<C2SettingResult>> failures;
-                c2_status_t err = mIntf->config(
-                        { &sampleRateInfo, &channelCountInfo },
-                        C2_MAY_BLOCK,
-                        &failures);
-                if (err == OK) {
-                    work->worklets.front()->output.configUpdate.push_back(C2Param::Copy(sampleRateInfo));
-                    work->worklets.front()->output.configUpdate.push_back(C2Param::Copy(channelCountInfo));
-                } else {
-                    ALOGE("Config Update failed");
-                    mSignalledError = true;
-                    work->result = C2_CORRUPTED;
-                    return;
-                }
             }
         }
 
         ++mInputBufferCount;
+        if (mInputBufferCount == 3) {
+            ALOGI("Configuring decoder: %d Hz, %d channels",
+                   kRate, mHeader.channels);
+            C2StreamSampleRateInfo::output sampleRateInfo(0u, kRate);
+            C2StreamChannelCountInfo::output channelCountInfo(0u, mHeader.channels);
+            std::vector<std::unique_ptr<C2SettingResult>> failures;
+            c2_status_t err = mIntf->config(
+                    { &sampleRateInfo, &channelCountInfo },
+                    C2_MAY_BLOCK,
+                    &failures);
+            if (err == OK) {
+                work->worklets.front()->output.configUpdate.push_back(C2Param::Copy(sampleRateInfo));
+                work->worklets.front()->output.configUpdate.push_back(C2Param::Copy(channelCountInfo));
+            } else {
+                ALOGE("Config Update failed");
+                mSignalledError = true;
+                work->result = C2_CORRUPTED;
+                return;
+            }
+        }
         fillEmptyWork(work);
         if (eos) {
             mSignalledOutputEos = true;
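The decoder change above feeds the 8-byte codec-delay and seek-pre-roll CSD values
(nanoseconds) through ns_to_samples() at the fixed 48 kHz Opus rate. A small self-contained
illustration of that conversion (the 6.5 ms value is just an example; 80 ms matches the seek
pre-roll advertised by the new encoder later in this change):

#include <cstdint>
#include <cstdio>

// Same conversion the decoder uses: nanoseconds -> sample count at 'rate'.
static uint64_t ns_to_samples(uint64_t ns, int rate) {
    return static_cast<uint64_t>(static_cast<double>(ns) * rate / 1000000000);
}

int main() {
    printf("%llu\n", (unsigned long long)ns_to_samples(6500000, 48000));   // 312 samples
    printf("%llu\n", (unsigned long long)ns_to_samples(80000000, 48000));  // 3840 samples
    return 0;
}
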
diff --git a/media/codec2/components/opus/C2SoftOpusDec.h b/media/codec2/components/opus/C2SoftOpusDec.h
index 92b7426..b0715ac 100644
--- a/media/codec2/components/opus/C2SoftOpusDec.h
+++ b/media/codec2/components/opus/C2SoftOpusDec.h
@@ -24,16 +24,6 @@
 
 namespace android {
 
-struct OpusHeader {
-  int channels;
-  int skip_samples;
-  int channel_mapping;
-  int num_streams;
-  int num_coupled;
-  int16_t gain_db;
-  uint8_t stream_map[8];
-};
-
 struct C2SoftOpusDec : public SimpleC2Component {
     class IntfImpl;
 
diff --git a/media/codec2/components/opus/C2SoftOpusEnc.cpp b/media/codec2/components/opus/C2SoftOpusEnc.cpp
new file mode 100644
index 0000000..d6ed5ff
--- /dev/null
+++ b/media/codec2/components/opus/C2SoftOpusEnc.cpp
@@ -0,0 +1,638 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2SoftOpusEnc"
+#include <utils/Log.h>
+
+#include <C2PlatformSupport.h>
+#include <SimpleC2Interface.h>
+#include <media/stagefright/foundation/MediaDefs.h>
+#include <media/stagefright/foundation/OpusHeader.h>
+#include "C2SoftOpusEnc.h"
+
+extern "C" {
+    #include <opus.h>
+    #include <opus_multistream.h>
+}
+
+#define DEFAULT_FRAME_DURATION_MS 20
+namespace android {
+
+constexpr char COMPONENT_NAME[] = "c2.android.opus.encoder";
+
+class C2SoftOpusEnc::IntfImpl : public C2InterfaceHelper {
+public:
+    explicit IntfImpl(const std::shared_ptr<C2ReflectorHelper> &helper)
+        : C2InterfaceHelper(helper) {
+
+        setDerivedInstance(this);
+
+        addParameter(
+                DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
+                .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatAudio))
+                .build());
+
+        addParameter(
+                DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
+                .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+                .build());
+
+        addParameter(
+                DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
+                .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+                        MEDIA_MIMETYPE_AUDIO_RAW))
+                .build());
+
+        addParameter(
+                DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
+                .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+                        MEDIA_MIMETYPE_AUDIO_OPUS))
+                .build());
+
+        addParameter(
+                DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+                .withDefault(new C2StreamSampleRateInfo::input(0u, 48000))
+                .withFields({C2F(mSampleRate, value).oneOf({
+                    8000, 12000, 16000, 24000, 48000})})
+                .withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
+                .build());
+
+        addParameter(
+                DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+                .withDefault(new C2StreamChannelCountInfo::input(0u, 1))
+                .withFields({C2F(mChannelCount, value).inRange(1, 8)})
+                .withSetter((Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps))
+                .build());
+
+        addParameter(
+                DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
+                .withDefault(new C2BitrateTuning::output(0u, 128000))
+                .withFields({C2F(mBitrate, value).inRange(500, 512000)})
+                .withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
+                .build());
+
+        addParameter(
+                DefineParam(mComplexity, C2_PARAMKEY_COMPLEXITY)
+                .withDefault(new C2StreamComplexityTuning::output(0u, 10))
+                .withFields({C2F(mComplexity, value).inRange(1, 10)})
+                .withSetter(Setter<decltype(*mComplexity)>::NonStrictValueWithNoDeps)
+                .build());
+
+        addParameter(
+                DefineParam(mInputMaxBufSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
+                .withConstValue(new C2StreamMaxBufferSizeInfo::input(0u, 3840))
+                .build());
+    }
+
+    uint32_t getSampleRate() const { return mSampleRate->value; }
+    uint32_t getChannelCount() const { return mChannelCount->value; }
+    uint32_t getBitrate() const { return mBitrate->value; }
+    uint32_t getComplexity() const { return mComplexity->value; }
+
+private:
+    std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
+    std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
+    std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
+    std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+    std::shared_ptr<C2StreamSampleRateInfo::input> mSampleRate;
+    std::shared_ptr<C2StreamChannelCountInfo::input> mChannelCount;
+    std::shared_ptr<C2BitrateTuning::output> mBitrate;
+    std::shared_ptr<C2StreamComplexityTuning::output> mComplexity;
+    std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
+};
+
+C2SoftOpusEnc::C2SoftOpusEnc(const char* name, c2_node_id_t id,
+                               const std::shared_ptr<IntfImpl>& intfImpl)
+    : SimpleC2Component(
+          std::make_shared<SimpleInterface<IntfImpl>>(name, id, intfImpl)),
+      mIntf(intfImpl),
+      mOutputBlock(nullptr),
+      mEncoder(nullptr),
+      mInputBufferPcm16(nullptr),
+      mOutIndex(0u) {
+}
+
+C2SoftOpusEnc::~C2SoftOpusEnc() {
+    onRelease();
+}
+
+c2_status_t C2SoftOpusEnc::onInit() {
+    return initEncoder();
+}
+
+c2_status_t C2SoftOpusEnc::configureEncoder() {
+    unsigned char mono_mapping[256] = {0};
+    unsigned char stereo_mapping[256] = {0, 1};
+    unsigned char surround_mapping[256] = {0, 1, 255};
+    mSampleRate = mIntf->getSampleRate();
+    mChannelCount = mIntf->getChannelCount();
+    uint32_t bitrate = mIntf->getBitrate();
+    int complexity = mIntf->getComplexity();
+    mNumSamplesPerFrame = mSampleRate / (1000 / mFrameDurationMs);
+    mNumPcmBytesPerInputFrame =
+        mChannelCount * mNumSamplesPerFrame * sizeof(int16_t);
+    int err = C2_OK;
+
+    unsigned char* mapping;
+    if (mChannelCount < 2) {
+        mapping = mono_mapping;
+    } else if (mChannelCount == 2) {
+        mapping = stereo_mapping;
+    } else {
+        mapping = surround_mapping;
+    }
+
+    if (mEncoder != nullptr) {
+        opus_multistream_encoder_destroy(mEncoder);
+    }
+
+    mEncoder = opus_multistream_encoder_create(mSampleRate, mChannelCount,
+        1, 1, mapping, OPUS_APPLICATION_AUDIO, &err);
+    if (err) {
+        ALOGE("Could not create libopus encoder. Error code: %i", err);
+        return C2_CORRUPTED;
+    }
+
+    // Complexity
+    if (opus_multistream_encoder_ctl(
+            mEncoder, OPUS_SET_COMPLEXITY(complexity)) != OPUS_OK) {
+        ALOGE("failed to set complexity");
+        return C2_BAD_VALUE;
+    }
+
+    // DTX
+    if (opus_multistream_encoder_ctl(mEncoder, OPUS_SET_DTX(0)) != OPUS_OK) {
+        ALOGE("failed to set dtx");
+        return C2_BAD_VALUE;
+    }
+
+    // Application
+    if (opus_multistream_encoder_ctl(mEncoder,
+            OPUS_SET_APPLICATION(OPUS_APPLICATION_AUDIO)) != OPUS_OK) {
+        ALOGE("failed to set application");
+        return C2_BAD_VALUE;
+    }
+
+    // Signal type
+    if (opus_multistream_encoder_ctl(mEncoder, OPUS_SET_SIGNAL(OPUS_AUTO)) !=
+        OPUS_OK) {
+        ALOGE("failed to set signal");
+        return C2_BAD_VALUE;
+    }
+
+    // Unconstrained VBR
+    if (opus_multistream_encoder_ctl(mEncoder, OPUS_SET_VBR(0)) != OPUS_OK) {
+        ALOGE("failed to set vbr type");
+        return C2_BAD_VALUE;
+    }
+    if (opus_multistream_encoder_ctl(mEncoder, OPUS_SET_VBR_CONSTRAINT(0)) !=
+            OPUS_OK) {
+        ALOGE("failed to set vbr constraint");
+        return C2_BAD_VALUE;
+    }
+
+    // Bitrate
+    if (opus_multistream_encoder_ctl(mEncoder, OPUS_SET_BITRATE(bitrate)) !=
+            OPUS_OK) {
+        ALOGE("failed to set bitrate");
+        return C2_BAD_VALUE;
+    }
+
+    // Get codecDelay
+    int32_t lookahead;
+    if (opus_multistream_encoder_ctl(mEncoder, OPUS_GET_LOOKAHEAD(&lookahead)) !=
+            OPUS_OK) {
+        ALOGE("failed to get lookahead");
+        return C2_BAD_VALUE;
+    }
+    mCodecDelay = lookahead * 1000000000ll / mSampleRate;
+
+    // Set seek preroll to 80 ms
+    mSeekPreRoll = 80000000;
+    return C2_OK;
+}
+
+c2_status_t C2SoftOpusEnc::initEncoder() {
+    mSignalledEos = false;
+    mSignalledError = false;
+    mHeaderGenerated = false;
+    mIsFirstFrame = true;
+    mEncoderFlushed = false;
+    mBufferAvailable = false;
+    mAnchorTimeStamp = 0ull;
+    mProcessedSamples = 0;
+    mFilledLen = 0;
+    mFrameDurationMs = DEFAULT_FRAME_DURATION_MS;
+    if (!mInputBufferPcm16) {
+        mInputBufferPcm16 =
+            (int16_t*)malloc(kFrameSize * kMaxNumChannels * sizeof(int16_t));
+    }
+    if (!mInputBufferPcm16) return C2_NO_MEMORY;
+
+    /* Default Configurations */
+    c2_status_t status = configureEncoder();
+    return status;
+}
+
+c2_status_t C2SoftOpusEnc::onStop() {
+    mSignalledEos = false;
+    mSignalledError = false;
+    mIsFirstFrame = true;
+    mEncoderFlushed = false;
+    mBufferAvailable = false;
+    mAnchorTimeStamp = 0ull;
+    mProcessedSamples = 0u;
+    mFilledLen = 0;
+    if (mEncoder) {
+        int status = opus_multistream_encoder_ctl(mEncoder, OPUS_RESET_STATE);
+        if (status != OPUS_OK) {
+            ALOGE("OPUS_RESET_STATE failed status = %s", opus_strerror(status));
+            mSignalledError = true;
+            return C2_CORRUPTED;
+        }
+    }
+    if (mOutputBlock) mOutputBlock.reset();
+    mOutputBlock = nullptr;
+
+    return C2_OK;
+}
+
+void C2SoftOpusEnc::onReset() {
+    (void)onStop();
+}
+
+void C2SoftOpusEnc::onRelease() {
+    (void)onStop();
+    if (mInputBufferPcm16) {
+        free(mInputBufferPcm16);
+        mInputBufferPcm16 = nullptr;
+    }
+    if (mEncoder) {
+        opus_multistream_encoder_destroy(mEncoder);
+        mEncoder = nullptr;
+    }
+}
+
+c2_status_t C2SoftOpusEnc::onFlush_sm() {
+    return onStop();
+}
+
+// Drain the encoder to get last frames (if any)
+int C2SoftOpusEnc::drainEncoder(uint8_t* outPtr) {
+    memset((uint8_t *)mInputBufferPcm16 + mFilledLen, 0,
+        (mNumPcmBytesPerInputFrame - mFilledLen));
+    int encodedBytes = opus_multistream_encode(
+        mEncoder, mInputBufferPcm16, mNumSamplesPerFrame, outPtr, kMaxPayload);
+    if (encodedBytes < 0 || (size_t)encodedBytes > mOutputBlock->capacity()) {
+        ALOGE("not enough space left to write encoded data, dropping %d bytes",
+              encodedBytes);
+        // a fatal error would stop the encoding
+        return -1;
+    }
+    ALOGV("encoded %i Opus bytes from %zu PCM bytes", encodedBytes,
+          mNumPcmBytesPerInputFrame);
+    mEncoderFlushed = true;
+    mFilledLen = 0;
+    return encodedBytes;
+}
+
+void C2SoftOpusEnc::process(const std::unique_ptr<C2Work>& work,
+                            const std::shared_ptr<C2BlockPool>& pool) {
+    // Initialize output work
+    work->result = C2_OK;
+    work->workletsProcessed = 1u;
+    work->worklets.front()->output.flags = work->input.flags;
+
+    if (mSignalledError || mSignalledEos) {
+        work->result = C2_BAD_VALUE;
+        return;
+    }
+
+    bool eos = (work->input.flags & C2FrameData::FLAG_END_OF_STREAM) != 0;
+    C2ReadView rView = mDummyReadView;
+    size_t inOffset = 0u;
+    size_t inSize = 0u;
+    c2_status_t err = C2_OK;
+    if (!work->input.buffers.empty()) {
+        rView =
+            work->input.buffers[0]->data().linearBlocks().front().map().get();
+        inSize = rView.capacity();
+        if (inSize && rView.error()) {
+            ALOGE("read view map failed %d", rView.error());
+            work->result = C2_CORRUPTED;
+            return;
+        }
+    }
+
+    ALOGV("in buffer attr. size %zu timestamp %d frameindex %d, flags %x",
+          inSize, (int)work->input.ordinal.timestamp.peeku(),
+          (int)work->input.ordinal.frameIndex.peeku(), work->input.flags);
+
+    if (!mEncoder) {
+        if ((err = initEncoder()) != C2_OK) {
+            ALOGE("initEncoder failed with status %d", err);
+            work->result = err;
+            mSignalledError = true;
+            return;
+        }
+    }
+    if (mIsFirstFrame) {
+        mAnchorTimeStamp = work->input.ordinal.timestamp.peekull();
+        mIsFirstFrame = false;
+    }
+
+    C2MemoryUsage usage = {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
+    err = pool->fetchLinearBlock(kMaxPayload, usage, &mOutputBlock);
+    if (err != C2_OK) {
+        ALOGE("fetchLinearBlock for Output failed with status %d", err);
+        work->result = C2_NO_MEMORY;
+        return;
+    }
+
+    C2WriteView wView = mOutputBlock->map().get();
+    if (wView.error()) {
+        ALOGE("write view map failed %d", wView.error());
+        work->result = C2_CORRUPTED;
+        mOutputBlock.reset();
+        return;
+    }
+
+    size_t inPos = 0;
+    size_t processSize = 0;
+    mBytesEncoded = 0;
+    uint64_t outTimeStamp = 0u;
+    std::shared_ptr<C2Buffer> buffer;
+    uint64_t inputIndex = work->input.ordinal.frameIndex.peeku();
+    const uint8_t* inPtr = rView.data() + inOffset;
+
+    class FillWork {
+    public:
+        FillWork(uint32_t flags, C2WorkOrdinalStruct ordinal,
+                 const std::shared_ptr<C2Buffer> &buffer)
+            : mFlags(flags), mOrdinal(ordinal), mBuffer(buffer) {
+        }
+        ~FillWork() = default;
+
+        void operator()(const std::unique_ptr<C2Work>& work) {
+            work->worklets.front()->output.flags = (C2FrameData::flags_t)mFlags;
+            work->worklets.front()->output.buffers.clear();
+            work->worklets.front()->output.ordinal = mOrdinal;
+            work->workletsProcessed = 1u;
+            work->result = C2_OK;
+            if (mBuffer) {
+                work->worklets.front()->output.buffers.push_back(mBuffer);
+            }
+            ALOGV("timestamp = %lld, index = %lld, w/%s buffer",
+                  mOrdinal.timestamp.peekll(),
+                  mOrdinal.frameIndex.peekll(),
+                  mBuffer ? "" : "o");
+        }
+
+    private:
+        const uint32_t mFlags;
+        const C2WorkOrdinalStruct mOrdinal;
+        const std::shared_ptr<C2Buffer> mBuffer;
+    };
+
+    C2WorkOrdinalStruct outOrdinal = work->input.ordinal;
+
+    if (!mHeaderGenerated) {
+        uint8_t header[AOPUS_UNIFIED_CSD_MAXSIZE];
+        memset(header, 0, sizeof(header));
+        OpusHeader opusHeader;
+        opusHeader.channels = mChannelCount;
+        opusHeader.num_streams = mChannelCount;
+        opusHeader.num_coupled = 0;
+        opusHeader.channel_mapping = ((mChannelCount > 8) ? 255 : (mChannelCount > 2));
+        opusHeader.gain_db = 0;
+        opusHeader.skip_samples = 0;
+        int headerLen = WriteOpusHeaders(opusHeader, mSampleRate, header,
+            sizeof(header), mCodecDelay, mSeekPreRoll);
+
+        std::unique_ptr<C2StreamCsdInfo::output> csd =
+            C2StreamCsdInfo::output::AllocUnique(headerLen, 0u);
+        if (!csd) {
+            ALOGE("CSD allocation failed");
+            mSignalledError = true;
+            work->result = C2_NO_MEMORY;
+            return;
+        }
+        ALOGV("put csd, %d bytes", headerLen);
+        memcpy(csd->m.value, header, headerLen);
+        work->worklets.front()->output.configUpdate.push_back(std::move(csd));
+        mHeaderGenerated = true;
+    }
+
+    /*
+     * For buffer size which is not a multiple of mNumPcmBytesPerInputFrame, we will
+     * accumulate the input and keep it. Once the input is filled with expected number
+     * of bytes, we will send it to encoder. mFilledLen manages the bytes of input yet
+     * to be processed. The next call will fill mNumPcmBytesPerInputFrame - mFilledLen
+     * bytes to input and send it to the encoder.
+     */
+    while (inPos < inSize) {
+        const uint8_t* pcmBytes = inPtr + inPos;
+        int filledSamples = mFilledLen / sizeof(int16_t);
+        if ((inPos + (mNumPcmBytesPerInputFrame - mFilledLen)) <= inSize) {
+            processSize = mNumPcmBytesPerInputFrame - mFilledLen;
+            mBufferAvailable = true;
+        } else {
+            processSize = inSize - inPos;
+            mBufferAvailable = false;
+            if (eos) {
+                memset(mInputBufferPcm16 + filledSamples, 0,
+                       (mNumPcmBytesPerInputFrame - mFilledLen));
+                mBufferAvailable = true;
+            }
+        }
+        const unsigned nInputSamples = processSize / sizeof(int16_t);
+
+        for (unsigned i = 0; i < nInputSamples; i++) {
+            int32_t data = pcmBytes[2 * i + 1] << 8 | pcmBytes[2 * i];
+            data = ((data & 0xFFFF) ^ 0x8000) - 0x8000;
+            mInputBufferPcm16[i + filledSamples] = data;
+        }
+        inPos += processSize;
+        mFilledLen += processSize;
+        if (!mBufferAvailable) break;
+        uint8_t* outPtr = wView.data() + mBytesEncoded;
+        int encodedBytes =
+            opus_multistream_encode(mEncoder, mInputBufferPcm16,
+                                    mNumSamplesPerFrame, outPtr, kMaxPayload);
+        ALOGV("encoded %i Opus bytes from %zu PCM bytes", encodedBytes,
+              processSize);
+
+        if (encodedBytes < 0 || encodedBytes > kMaxPayload) {
+            ALOGE("opus_encode failed, encodedBytes : %d", encodedBytes);
+            mSignalledError = true;
+            work->result = C2_CORRUPTED;
+            return;
+        }
+        if (buffer) {
+            outOrdinal.frameIndex = mOutIndex++;
+            outOrdinal.timestamp = mAnchorTimeStamp + outTimeStamp;
+            cloneAndSend(
+                inputIndex, work,
+                FillWork(C2FrameData::FLAG_INCOMPLETE, outOrdinal, buffer));
+            buffer.reset();
+        }
+        if (encodedBytes > 0) {
+            buffer =
+                createLinearBuffer(mOutputBlock, mBytesEncoded, encodedBytes);
+        }
+        mBytesEncoded += encodedBytes;
+        mProcessedSamples += (filledSamples + nInputSamples);
+        outTimeStamp =
+            mProcessedSamples * 1000000ll / mChannelCount / mSampleRate;
+        if ((processSize + mFilledLen) < mNumPcmBytesPerInputFrame)
+            mEncoderFlushed = true;
+        mFilledLen = 0;
+    }
+
+    uint32_t flags = 0;
+    if (eos) {
+        ALOGV("signalled eos");
+        mSignalledEos = true;
+        if (!mEncoderFlushed) {
+            if (buffer) {
+                outOrdinal.frameIndex = mOutIndex++;
+                outOrdinal.timestamp = mAnchorTimeStamp + outTimeStamp;
+                cloneAndSend(
+                    inputIndex, work,
+                    FillWork(C2FrameData::FLAG_INCOMPLETE, outOrdinal, buffer));
+                buffer.reset();
+            }
+            // drain the encoder for last buffer
+            drainInternal(pool, work);
+        }
+        flags = C2FrameData::FLAG_END_OF_STREAM;
+    }
+    if (buffer) {
+        outOrdinal.frameIndex = mOutIndex++;
+        outOrdinal.timestamp = mAnchorTimeStamp + outTimeStamp;
+        FillWork((C2FrameData::flags_t)(flags), outOrdinal, buffer)(work);
+        buffer.reset();
+    }
+    mOutputBlock = nullptr;
+}
+
+c2_status_t C2SoftOpusEnc::drainInternal(
+        const std::shared_ptr<C2BlockPool>& pool,
+        const std::unique_ptr<C2Work>& work) {
+    mBytesEncoded = 0;
+    std::shared_ptr<C2Buffer> buffer = nullptr;
+    C2WorkOrdinalStruct outOrdinal = work->input.ordinal;
+    bool eos = (work->input.flags & C2FrameData::FLAG_END_OF_STREAM) != 0;
+
+    C2MemoryUsage usage = {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
+    c2_status_t err = pool->fetchLinearBlock(kMaxPayload, usage, &mOutputBlock);
+    if (err != C2_OK) {
+        ALOGE("fetchLinearBlock for Output failed with status %d", err);
+        return C2_NO_MEMORY;
+    }
+
+    C2WriteView wView = mOutputBlock->map().get();
+    if (wView.error()) {
+        ALOGE("write view map failed %d", wView.error());
+        mOutputBlock.reset();
+        return C2_CORRUPTED;
+    }
+
+    int encBytes = drainEncoder(wView.data());
+    if (encBytes > 0) mBytesEncoded += encBytes;
+    if (mBytesEncoded > 0) {
+        buffer = createLinearBuffer(mOutputBlock, 0, mBytesEncoded);
+        mOutputBlock.reset();
+    }
+    mProcessedSamples += (mNumPcmBytesPerInputFrame / sizeof(int16_t));
+    uint64_t outTimeStamp =
+        mProcessedSamples * 1000000ll / mChannelCount / mSampleRate;
+    outOrdinal.frameIndex = mOutIndex++;
+    outOrdinal.timestamp = mAnchorTimeStamp + outTimeStamp;
+    work->worklets.front()->output.flags =
+        (C2FrameData::flags_t)(eos ? C2FrameData::FLAG_END_OF_STREAM : 0);
+    work->worklets.front()->output.buffers.clear();
+    work->worklets.front()->output.ordinal = outOrdinal;
+    work->workletsProcessed = 1u;
+    work->result = C2_OK;
+    if (buffer) {
+        work->worklets.front()->output.buffers.push_back(buffer);
+    }
+    mOutputBlock = nullptr;
+    return C2_OK;
+}
+
+c2_status_t C2SoftOpusEnc::drain(uint32_t drainMode,
+                                 const std::shared_ptr<C2BlockPool>& pool) {
+    if (drainMode == NO_DRAIN) {
+        ALOGW("drain with NO_DRAIN: no-op");
+        return C2_OK;
+    }
+    if (drainMode == DRAIN_CHAIN) {
+        ALOGW("DRAIN_CHAIN not supported");
+        return C2_OMITTED;
+    }
+    mIsFirstFrame = true;
+    mAnchorTimeStamp = 0ull;
+    mProcessedSamples = 0u;
+    return drainInternal(pool, nullptr);
+}
+
+class C2SoftOpusEncFactory : public C2ComponentFactory {
+public:
+    C2SoftOpusEncFactory()
+        : mHelper(std::static_pointer_cast<C2ReflectorHelper>(
+              GetCodec2PlatformComponentStore()->getParamReflector())) {}
+
+    virtual c2_status_t createComponent(
+        c2_node_id_t id, std::shared_ptr<C2Component>* const component,
+        std::function<void(C2Component*)> deleter) override {
+        *component = std::shared_ptr<C2Component>(
+            new C2SoftOpusEnc(
+                COMPONENT_NAME, id,
+                std::make_shared<C2SoftOpusEnc::IntfImpl>(mHelper)),
+            deleter);
+        return C2_OK;
+    }
+
+    virtual c2_status_t createInterface(
+        c2_node_id_t id, std::shared_ptr<C2ComponentInterface>* const interface,
+        std::function<void(C2ComponentInterface*)> deleter) override {
+        *interface = std::shared_ptr<C2ComponentInterface>(
+            new SimpleInterface<C2SoftOpusEnc::IntfImpl>(
+                COMPONENT_NAME, id,
+                std::make_shared<C2SoftOpusEnc::IntfImpl>(mHelper)),
+            deleter);
+        return C2_OK;
+    }
+
+    virtual ~C2SoftOpusEncFactory() override = default;
+private:
+    std::shared_ptr<C2ReflectorHelper> mHelper;
+};
+
+}  // namespace android
+
+extern "C" ::C2ComponentFactory* CreateCodec2Factory() {
+    ALOGV("in %s", __func__);
+    return new ::android::C2SoftOpusEncFactory();
+}
+
+extern "C" void DestroyCodec2Factory(::C2ComponentFactory* factory) {
+    ALOGV("in %s", __func__);
+    delete factory;
+}
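
Note: the encoder loop above derives every output timestamp from a running count of interleaved PCM samples rather than from the input timestamps (outTimeStamp = mProcessedSamples * 1000000ll / mChannelCount / mSampleRate), which keeps timestamps monotonic across partially filled frames. A small standalone sketch of that arithmetic, assuming 48 kHz stereo and 20 ms frames (values chosen for illustration, not taken from a configured component):

    #include <cstdint>
    #include <cstdio>

    // Mirrors outTimeStamp = processedSamples * 1000000ll / channels / sampleRate.
    // "samples" counts interleaved int16 samples across all channels, as in the encoder.
    static uint64_t opusOutTimestampUs(uint64_t processedSamples,
                                       uint32_t channelCount, uint32_t sampleRate) {
        return processedSamples * 1000000ll / channelCount / sampleRate;
    }

    int main() {
        const uint32_t kSampleRate = 48000;   // Opus always runs at 48 kHz internally
        const uint32_t kChannels = 2;         // illustrative channel count
        const int kFrameSize = 960;           // 20 ms frame at 48 kHz (OPUS_FRAMESIZE_20_MS)
        uint64_t processed = 0;
        for (int frame = 1; frame <= 3; ++frame) {
            processed += (uint64_t)kFrameSize * kChannels;  // interleaved samples per frame
            printf("frame %d -> pts %llu us\n", frame,
                   (unsigned long long)opusOutTimestampUs(processed, kChannels, kSampleRate));
        }
        // Expected: 20000, 40000, 60000 us, i.e. one 20 ms frame per encode call.
        return 0;
    }
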
diff --git a/media/codec2/components/opus/C2SoftOpusEnc.h b/media/codec2/components/opus/C2SoftOpusEnc.h
new file mode 100644
index 0000000..69e5240
--- /dev/null
+++ b/media/codec2/components/opus/C2SoftOpusEnc.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_C2_SOFT_OPUS_ENC_H_
+#define ANDROID_C2_SOFT_OPUS_ENC_H_
+
+#include <atomic>
+#include <SimpleC2Component.h>
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+struct OpusMSEncoder;
+
+namespace android {
+
+struct C2SoftOpusEnc : public SimpleC2Component {
+    class IntfImpl;
+
+    C2SoftOpusEnc(const char *name, c2_node_id_t id,
+               const std::shared_ptr<IntfImpl> &intfImpl);
+    virtual ~C2SoftOpusEnc();
+
+    // From SimpleC2Component
+    c2_status_t onInit() override;
+    c2_status_t onStop() override;
+    void onReset() override;
+    void onRelease() override;
+    c2_status_t onFlush_sm() override;
+    void process(
+            const std::unique_ptr<C2Work> &work,
+            const std::shared_ptr<C2BlockPool> &pool) override;
+    c2_status_t drain(
+            uint32_t drainMode,
+            const std::shared_ptr<C2BlockPool> &pool) override;
+private:
+    /* OPUS_FRAMESIZE_20_MS */
+    const int kFrameSize = 960;
+    const int kMaxPayload = 4000;
+    const int kMaxNumChannels = 8;
+
+    std::shared_ptr<IntfImpl> mIntf;
+    std::shared_ptr<C2LinearBlock> mOutputBlock;
+
+    OpusMSEncoder* mEncoder;
+    int16_t* mInputBufferPcm16;
+
+    bool mHeaderGenerated;
+    bool mIsFirstFrame;
+    bool mEncoderFlushed;
+    bool mBufferAvailable;
+    bool mSignalledEos;
+    bool mSignalledError;
+    uint32_t mSampleRate;
+    uint32_t mChannelCount;
+    uint32_t mFrameDurationMs;
+    uint64_t mAnchorTimeStamp;
+    uint64_t mProcessedSamples;
+    // Codec delay in ns
+    uint64_t mCodecDelay;
+    // Seek pre-roll in ns
+    uint64_t mSeekPreRoll;
+    int mNumSamplesPerFrame;
+    int mBytesEncoded;
+    int32_t mFilledLen;
+    size_t mNumPcmBytesPerInputFrame;
+    std::atomic_uint64_t mOutIndex;
+    c2_status_t initEncoder();
+    c2_status_t configureEncoder();
+    int drainEncoder(uint8_t* outPtr);
+    c2_status_t drainInternal(const std::shared_ptr<C2BlockPool>& pool,
+                              const std::unique_ptr<C2Work>& work);
+
+    C2_DO_NOT_COPY(C2SoftOpusEnc);
+};
+
+}  // namespace android
+
+#endif  // ANDROID_C2_SOFT_OPUS_ENC_H_
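
Note: kFrameSize = 960 above is simply 48 kHz x 20 ms per channel. A hedged standalone sketch of how the per-frame sizes relate (the exact way configureEncoder() fills mNumSamplesPerFrame and mNumPcmBytesPerInputFrame is not shown in this change; the sketch assumes the usual Opus convention of per-channel frame sizes):

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t sampleRate = 48000;      // Opus runs at 48 kHz internally
        const uint32_t channelCount = 2;        // illustrative; bounded by kMaxNumChannels
        const uint32_t frameDurationMs = 20;    // matches OPUS_FRAMESIZE_20_MS

        // 48000 samples/s * 0.020 s = 960 samples per channel, i.e. kFrameSize.
        uint32_t numSamplesPerFrame = sampleRate * frameDurationMs / 1000;   // per channel
        size_t numPcmBytesPerInputFrame =
                numSamplesPerFrame * channelCount * sizeof(int16_t);         // interleaved S16

        printf("samples/frame (per channel) = %u, PCM bytes per input frame = %zu\n",
               numSamplesPerFrame, numPcmBytesPerInputFrame);                // 960 and 3840
        return 0;
    }
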
diff --git a/media/codec2/core/include/C2Component.h b/media/codec2/core/include/C2Component.h
index 8810725..ecf8d2e 100644
--- a/media/codec2/core/include/C2Component.h
+++ b/media/codec2/core/include/C2Component.h
@@ -409,12 +409,13 @@
         kind_t kind; ///< component kind
         rank_t rank; ///< component rank
         C2String mediaType; ///< media type supported by the component
+        C2String owner; ///< name of the component store owning this component
 
         /**
          * name alias(es) for backward compatibility.
          * \note Multiple components can have the same alias as long as their media-type differs.
          */
-        std::vector<C2StringLiteral> aliases; ///< name aliases for backward compatibility
+        std::vector<C2String> aliases; ///< name aliases for backward compatibility
     };
 
     // METHODS AVAILABLE WHEN RUNNING
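
Note: with the two changes above, Traits records which component store published a component and owns its alias strings by value. A hedged sketch of how a store might populate the fields visible in this hunk (the struct below is a simplified stand-in, and the name/kind/rank/alias values are purely illustrative):

    #include <string>
    #include <vector>
    #include <cstdio>

    // Simplified stand-in mirroring only the Traits fields shown in the hunk above;
    // the real header defines concrete types such as kind_t and rank_t.
    struct TraitsSketch {
        std::string name;                  // assumed field, not shown in this hunk
        int kind;                          // component kind
        unsigned rank;                     // component rank
        std::string mediaType;             // media type supported by the component
        std::string owner;                 // component store owning this component
        std::vector<std::string> aliases;  // now stored by value, not as string literals
    };

    int main() {
        TraitsSketch t;
        t.name = "c2.android.opus.encoder";
        t.kind = 1;                                   // placeholder for KIND_ENCODER
        t.rank = 0x200;                               // illustrative rank
        t.mediaType = "audio/opus";
        t.owner = "software";                         // e.g. the default software store
        t.aliases = {"OMX.google.opus.encoder"};      // hypothetical alias for illustration
        printf("%s owned by store '%s', %zu alias(es)\n",
               t.name.c_str(), t.owner.c_str(), t.aliases.size());
        return 0;
    }
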
diff --git a/media/codec2/core/include/C2Config.h b/media/codec2/core/include/C2Config.h
index cf1f6cf..23939b5 100644
--- a/media/codec2/core/include/C2Config.h
+++ b/media/codec2/core/include/C2Config.h
@@ -638,7 +638,7 @@
     LEVEL_VP9_6_1,                              ///< VP9 Level 6.1
     LEVEL_VP9_6_2,                              ///< VP9 Level 6.2
 
-    // Dolby Vision level
+    // Dolby Vision levels
     LEVEL_DV_MAIN_HD_24 = _C2_PL_DV_BASE,       ///< Dolby Vision main tier hd24
     LEVEL_DV_MAIN_HD_30,                        ///< Dolby Vision main tier hd30
     LEVEL_DV_MAIN_FHD_24,                       ///< Dolby Vision main tier fhd24
@@ -659,6 +659,7 @@
     LEVEL_DV_HIGH_UHD_48,                       ///< Dolby Vision high tier uhd48
     LEVEL_DV_HIGH_UHD_60,                       ///< Dolby Vision high tier uhd60
 
+    // AV1 levels
     LEVEL_AV1_2    = _C2_PL_AV1_BASE ,          ///< AV1 Level 2
     LEVEL_AV1_2_1,                              ///< AV1 Level 2.1
     LEVEL_AV1_2_2,                              ///< AV1 Level 2.2
diff --git a/media/codec2/hidl/client/client.cpp b/media/codec2/hidl/client/client.cpp
index ff3e534..f5cc9ff 100644
--- a/media/codec2/hidl/client/client.cpp
+++ b/media/codec2/hidl/client/client.cpp
@@ -557,6 +557,7 @@
                 for (size_t i = 0; i < t.size(); ++i) {
                     c2_status_t status = objcpy(
                             &mTraitsList[i], &mAliasesBuffer[i], t[i]);
+                    mTraitsList[i].owner = mInstanceName;
                     if (status != C2_OK) {
                         ALOGE("listComponents -- corrupted output.");
                         return;
diff --git a/media/codec2/sfplugin/C2OMXNode.cpp b/media/codec2/sfplugin/C2OMXNode.cpp
index 749fd7a..9500aed 100644
--- a/media/codec2/sfplugin/C2OMXNode.cpp
+++ b/media/codec2/sfplugin/C2OMXNode.cpp
@@ -225,14 +225,18 @@
     if (omxBuf.mBufferType == OMXBuffer::kBufferTypeANWBuffer
             && omxBuf.mGraphicBuffer != nullptr) {
         std::shared_ptr<C2GraphicAllocation> alloc;
+        native_handle_t *clonedHandle = native_handle_clone(omxBuf.mGraphicBuffer->handle);
         handle = WrapNativeCodec2GrallocHandle(
-                native_handle_clone(omxBuf.mGraphicBuffer->handle),
+                clonedHandle,
                 omxBuf.mGraphicBuffer->width,
                 omxBuf.mGraphicBuffer->height,
                 omxBuf.mGraphicBuffer->format,
                 omxBuf.mGraphicBuffer->usage,
                 omxBuf.mGraphicBuffer->stride);
         c2_status_t err = mAllocator->priorGraphicAllocation(handle, &alloc);
+        if (clonedHandle) {
+            native_handle_delete(clonedHandle);
+        }
         if (err != OK) {
             return UNKNOWN_ERROR;
         }
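
Note: the clone/delete pair added above avoids leaking the intermediate native_handle_t: presumably WrapNativeCodec2GrallocHandle() builds its own handle around the duplicated fds, so only the clone's container needs freeing (native_handle_delete), not its fds (native_handle_close). A simplified standalone sketch of that ownership pattern, using plain POSIX fds instead of the real cutils native_handle API:

    #include <unistd.h>
    #include <fcntl.h>
    #include <cstdio>
    #include <vector>

    // Stand-in for a native_handle_t-like container: it only refers to fds, and
    // freeing the container does not close them (the native_handle_delete vs
    // native_handle_close distinction the change above relies on).
    struct HandleSketch {
        std::vector<int> fds;
    };

    static HandleSketch *cloneHandle(const HandleSketch &src) {
        auto *c = new HandleSketch;
        for (int fd : src.fds) c->fds.push_back(dup(fd));   // duplicate the fds
        return c;
    }

    static HandleSketch *wrapHandle(const HandleSketch *src) {
        // Assumed behaviour, as hedged above: the wrapper adopts the fd values
        // without duplicating them again.
        return new HandleSketch{src->fds};
    }

    int main() {
        HandleSketch original{{open("/dev/null", O_RDONLY)}};
        HandleSketch *cloned = cloneHandle(original);   // like native_handle_clone()
        HandleSketch *wrapped = wrapHandle(cloned);     // like WrapNativeCodec2GrallocHandle()

        // The clone's container is no longer needed, but its fds now belong to the
        // wrapper, so only the container is freed (native_handle_delete analogue).
        delete cloned;

        // Whoever owns 'wrapped' later closes the fds and frees the container
        // (native_handle_close + native_handle_delete analogue).
        for (int fd : wrapped->fds) close(fd);
        delete wrapped;
        close(original.fds[0]);
        printf("no fd or container leaked in this sketch\n");
        return 0;
    }
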
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 55a97d8..b529cbc 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -271,12 +271,8 @@
 
 namespace {
 
-// TODO: get this info from component
-const static size_t kMinInputBufferArraySize = 4;
-const static size_t kMaxPipelineCapacity = 18;
-const static size_t kChannelOutputDelay = 0;
-const static size_t kMinOutputBufferArraySize = kMaxPipelineCapacity +
-                                                kChannelOutputDelay;
+const static size_t kSmoothnessFactor = 4;
+const static size_t kRenderingDepth = 3;
 const static size_t kLinearBufferSize = 1048576;
 // This can fit 4K RGBA frame, and most likely client won't need more than this.
 const static size_t kMaxLinearBufferSize = 3840 * 2160 * 4;
@@ -829,6 +825,7 @@
             const sp<ICrypto> &crypto,
             int32_t heapSeqNum,
             size_t capacity,
+            size_t numInputSlots,
             const char *componentName, const char *name = "EncryptedInput")
         : LinearInputBuffers(componentName, name),
           mUsage({0, 0}),
@@ -840,7 +837,7 @@
         } else {
             mUsage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
         }
-        for (size_t i = 0; i < kMinInputBufferArraySize; ++i) {
+        for (size_t i = 0; i < numInputSlots; ++i) {
             sp<IMemory> memory = mDealer->allocate(capacity);
             if (memory == nullptr) {
                 ALOGD("[%s] Failed to allocate memory from dealer: only %zu slots allocated", mName, i);
@@ -951,11 +948,12 @@
 
 class GraphicInputBuffers : public CCodecBufferChannel::InputBuffers {
 public:
-    GraphicInputBuffers(const char *componentName, const char *name = "2D-BB-Input")
+    GraphicInputBuffers(
+            size_t numInputSlots, const char *componentName, const char *name = "2D-BB-Input")
         : InputBuffers(componentName, name),
           mImpl(mName),
           mLocalBufferPool(LocalBufferPool::Create(
-                  kMaxLinearBufferSize * kMinInputBufferArraySize)) { }
+                  kMaxLinearBufferSize * numInputSlots)) { }
     ~GraphicInputBuffers() override = default;
 
     bool requestNewBuffer(size_t *index, sp<MediaCodecBuffer> *buffer) override {
@@ -1291,10 +1289,11 @@
 
 class RawGraphicOutputBuffers : public FlexOutputBuffers {
 public:
-    RawGraphicOutputBuffers(const char *componentName, const char *name = "2D-BB-Output")
+    RawGraphicOutputBuffers(
+            size_t numOutputSlots, const char *componentName, const char *name = "2D-BB-Output")
         : FlexOutputBuffers(componentName, name),
           mLocalBufferPool(LocalBufferPool::Create(
-                  kMaxLinearBufferSize * kMinOutputBufferArraySize)) { }
+                  kMaxLinearBufferSize * numOutputSlots)) { }
     ~RawGraphicOutputBuffers() override = default;
 
     sp<Codec2Buffer> wrap(const std::shared_ptr<C2Buffer> &buffer) override {
@@ -1545,6 +1544,8 @@
         const std::shared_ptr<CCodecCallback> &callback)
     : mHeapSeqNum(-1),
       mCCodecCallback(callback),
+      mNumInputSlots(kSmoothnessFactor),
+      mNumOutputSlots(kSmoothnessFactor),
       mFrameIndex(0u),
       mFirstValidFrameIndex(0u),
       mMetaMode(MODE_NONE),
@@ -2006,7 +2007,7 @@
     Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
 
     if (!(*buffers)->isArrayMode()) {
-        *buffers = (*buffers)->toArrayMode(kMinInputBufferArraySize);
+        *buffers = (*buffers)->toArrayMode(mNumInputSlots);
     }
 
     (*buffers)->getArray(array);
@@ -2017,7 +2018,7 @@
     Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
 
     if (!(*buffers)->isArrayMode()) {
-        *buffers = (*buffers)->toArrayMode(kMinOutputBufferArraySize);
+        *buffers = (*buffers)->toArrayMode(mNumOutputSlots);
     }
 
     (*buffers)->getArray(array);
@@ -2029,12 +2030,19 @@
     C2StreamBufferTypeSetting::output oStreamFormat(0u);
     C2PortReorderBufferDepthTuning::output reorderDepth;
     C2PortReorderKeySetting::output reorderKey;
+    C2PortActualDelayTuning::input inputDelay(0);
+    C2PortActualDelayTuning::output outputDelay(0);
+    C2ActualPipelineDelayTuning pipelineDelay(0);
+
     c2_status_t err = mComponent->query(
             {
                 &iStreamFormat,
                 &oStreamFormat,
                 &reorderDepth,
                 &reorderKey,
+                &inputDelay,
+                &pipelineDelay,
+                &outputDelay,
             },
             {},
             C2_DONT_BLOCK,
@@ -2057,6 +2065,13 @@
             reorder->setKey(reorderKey.value);
         }
     }
+
+    mNumInputSlots =
+        (inputDelay ? inputDelay.value : 0) +
+        (pipelineDelay ? pipelineDelay.value : 0) +
+        kSmoothnessFactor;
+    mNumOutputSlots = (outputDelay ? outputDelay.value : 0) + kSmoothnessFactor;
+
     // TODO: get this from input format
     bool secure = mComponent->getName().find(".secure") != std::string::npos;
 
@@ -2127,6 +2142,7 @@
             pools->inputPool = pool;
         }
 
+        bool forceArrayMode = false;
         Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
         if (graphic) {
             if (mInputSurface) {
@@ -2134,7 +2150,7 @@
             } else if (mMetaMode == MODE_ANW) {
                 buffers->reset(new GraphicMetadataInputBuffers(mName));
             } else {
-                buffers->reset(new GraphicInputBuffers(mName));
+                buffers->reset(new GraphicInputBuffers(mNumInputSlots, mName));
             }
         } else {
             if (hasCryptoOrDescrambler()) {
@@ -2147,7 +2163,7 @@
                 if (mDealer == nullptr) {
                     mDealer = new MemoryDealer(
                             align(capacity, MemoryDealer::getAllocationAlignment())
-                                * (kMinInputBufferArraySize + 1),
+                                * (mNumInputSlots + 1),
                             "EncryptedLinearInputBuffers");
                     mDecryptDestination = mDealer->allocate((size_t)capacity);
                 }
@@ -2157,7 +2173,9 @@
                     mHeapSeqNum = -1;
                 }
                 buffers->reset(new EncryptedLinearInputBuffers(
-                        secure, mDealer, mCrypto, mHeapSeqNum, (size_t)capacity, mName));
+                        secure, mDealer, mCrypto, mHeapSeqNum, (size_t)capacity,
+                        mNumInputSlots, mName));
+                forceArrayMode = true;
             } else {
                 buffers->reset(new LinearInputBuffers(mName));
             }
@@ -2169,6 +2187,10 @@
         } else {
             // TODO: error
         }
+
+        if (forceArrayMode) {
+            *buffers = (*buffers)->toArrayMode(mNumInputSlots);
+        }
     }
 
     if (outputFormat != nullptr) {
@@ -2286,7 +2308,7 @@
             if (outputSurface) {
                 buffers->reset(new GraphicOutputBuffers(mName));
             } else {
-                buffers->reset(new RawGraphicOutputBuffers(mName));
+                buffers->reset(new RawGraphicOutputBuffers(mNumOutputSlots, mName));
             }
         } else {
             buffers->reset(new LinearOutputBuffers(mName));
@@ -2307,7 +2329,7 @@
             // WORKAROUND: if we're using early CSD workaround we convert to
             //             array mode, to appease apps assuming the output
             //             buffers to be of the same size.
-            (*buffers) = (*buffers)->toArrayMode(kMinOutputBufferArraySize);
+            (*buffers) = (*buffers)->toArrayMode(mNumOutputSlots);
 
             int32_t channelCount;
             int32_t sampleRate;
@@ -2335,27 +2357,10 @@
     // about buffers from the previous generation do not interfere with the
     // newly initialized pipeline capacity.
 
-    // Query delays
-    C2PortRequestedDelayTuning::input inputDelay;
-    C2PortRequestedDelayTuning::output outputDelay;
-    C2RequestedPipelineDelayTuning pipelineDelay;
-#if 0
-    err = mComponent->query(
-            { &inputDelay, &pipelineDelay, &outputDelay },
-            {},
-            C2_DONT_BLOCK,
-            nullptr);
     mAvailablePipelineCapacity.initialize(
-            inputDelay,
-            inputDelay + pipelineDelay,
-            inputDelay + pipelineDelay + outputDelay,
+            mNumInputSlots,
+            mNumInputSlots + mNumOutputSlots,
             mName);
-#else
-    mAvailablePipelineCapacity.initialize(
-            kMinInputBufferArraySize,
-            kMaxPipelineCapacity,
-            mName);
-#endif
 
     mInputMetEos = false;
     mSync.start();
@@ -2374,7 +2379,7 @@
     }
     std::vector<sp<MediaCodecBuffer>> toBeQueued;
     // TODO: use proper buffer depth instead of this random value
-    for (size_t i = 0; i < kMinInputBufferArraySize; ++i) {
+    for (size_t i = 0; i < mNumInputSlots; ++i) {
         size_t index;
         sp<MediaCodecBuffer> buffer;
         {
@@ -2731,7 +2736,7 @@
     sp<IGraphicBufferProducer> producer;
     if (newSurface) {
         newSurface->setScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
-        newSurface->setMaxDequeuedBufferCount(kMinOutputBufferArraySize);
+        newSurface->setMaxDequeuedBufferCount(mNumOutputSlots + kRenderingDepth);
         producer = newSurface->getIGraphicBufferProducer();
         producer->setGenerationNumber(generation);
     } else {
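
Note: the slot sizing introduced above replaces fixed constants with values derived from the component's reported delays: input slots cover the input plus pipeline delay plus a smoothness factor, output slots cover the output delay plus the same factor, and the output surface additionally reserves the rendering depth. A standalone sketch of that arithmetic (the delay values are illustrative, not queried from a real component):

    #include <cstdio>

    int main() {
        const size_t kSmoothnessFactor = 4;
        const size_t kRenderingDepth = 3;

        // Illustrative values a component might report via C2PortActualDelayTuning /
        // C2ActualPipelineDelayTuning; a real component is queried as in start().
        size_t inputDelay = 2, pipelineDelay = 1, outputDelay = 8;

        size_t numInputSlots = inputDelay + pipelineDelay + kSmoothnessFactor;   // 7
        size_t numOutputSlots = outputDelay + kSmoothnessFactor;                 // 12
        size_t maxDequeuedBuffers = numOutputSlots + kRenderingDepth;            // 15

        printf("input slots = %zu, output slots = %zu, surface max dequeued = %zu\n",
               numInputSlots, numOutputSlots, maxDequeuedBuffers);
        return 0;
    }
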
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.h b/media/codec2/sfplugin/CCodecBufferChannel.h
index 431baaa..fd806b7 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.h
+++ b/media/codec2/sfplugin/CCodecBufferChannel.h
@@ -37,6 +37,8 @@
 
 namespace android {
 
+class MemoryDealer;
+
 class CCodecCallback {
 public:
     virtual ~CCodecCallback() = default;
@@ -233,6 +235,9 @@
     QueueSync mQueueSync;
     std::vector<std::unique_ptr<C2Param>> mParamsToBeSet;
 
+    size_t mNumInputSlots;
+    size_t mNumOutputSlots;
+
     Mutexed<std::unique_ptr<InputBuffers>> mInputBuffers;
     Mutexed<std::list<sp<ABuffer>>> mFlushedConfigs;
     Mutexed<std::unique_ptr<OutputBuffers>> mOutputBuffers;
diff --git a/media/codec2/sfplugin/Codec2InfoBuilder.cpp b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
index 5d0ccd2..a8cc62d 100644
--- a/media/codec2/sfplugin/Codec2InfoBuilder.cpp
+++ b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
@@ -73,10 +73,10 @@
 constexpr OMX_U32 kMaxIndicesToCheck = 32;
 
 status_t queryOmxCapabilities(
-        const char* name, const char* mime, bool isEncoder,
+        const char* name, const char* mediaType, bool isEncoder,
         MediaCodecInfo::CapabilitiesWriter* caps) {
 
-    const char *role = GetComponentRole(isEncoder, mime);
+    const char *role = GetComponentRole(isEncoder, mediaType);
     if (role == nullptr) {
         return BAD_VALUE;
     }
@@ -128,8 +128,8 @@
         return err;
     }
 
-    bool isVideo = hasPrefix(mime, "video/") == 0;
-    bool isImage = hasPrefix(mime, "image/") == 0;
+    bool isVideo = hasPrefix(mediaType, "video/") == 0;
+    bool isImage = hasPrefix(mediaType, "image/") == 0;
 
     if (isVideo || isImage) {
         OMX_VIDEO_PARAM_PROFILELEVELTYPE param;
@@ -149,7 +149,7 @@
             // AVC components may not list the constrained profiles explicitly, but
             // decoders that support a profile also support its constrained version.
             // Encoders must explicitly support constrained profiles.
-            if (!isEncoder && strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC) == 0) {
+            if (!isEncoder && strcasecmp(mediaType, MEDIA_MIMETYPE_VIDEO_AVC) == 0) {
                 if (param.eProfile == OMX_VIDEO_AVCProfileHigh) {
                     caps->addProfileLevel(OMX_VIDEO_AVCProfileConstrainedHigh, param.eLevel);
                 } else if (param.eProfile == OMX_VIDEO_AVCProfileBaseline) {
@@ -193,7 +193,7 @@
                         asString(portFormat.eColorFormat), portFormat.eColorFormat);
             }
         }
-    } else if (strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC) == 0) {
+    } else if (strcasecmp(mediaType, MEDIA_MIMETYPE_AUDIO_AAC) == 0) {
         // More audio codecs if they have profiles.
         OMX_AUDIO_PARAM_ANDROID_PROFILETYPE param;
         InitOMXParams(&param);
@@ -228,14 +228,17 @@
         if (omxNode->configureVideoTunnelMode(
                 kPortIndexOutput, OMX_TRUE, 0, &sidebandHandle) == OK) {
             // tunneled playback includes adaptive playback
-            caps->addFlags(MediaCodecInfo::Capabilities::kFlagSupportsAdaptivePlayback
-                    | MediaCodecInfo::Capabilities::kFlagSupportsTunneledPlayback);
-        } else if (omxNode->setPortMode(
-                kPortIndexOutput, IOMX::kPortModeDynamicANWBuffer) == OK ||
-                omxNode->prepareForAdaptivePlayback(
-                kPortIndexOutput, OMX_TRUE,
-                1280 /* width */, 720 /* height */) == OK) {
-            caps->addFlags(MediaCodecInfo::Capabilities::kFlagSupportsAdaptivePlayback);
+        } else {
+            // tunneled playback is not supported
+            caps->removeDetail(MediaCodecInfo::Capabilities::FEATURE_TUNNELED_PLAYBACK);
+            if (omxNode->setPortMode(
+                    kPortIndexOutput, IOMX::kPortModeDynamicANWBuffer) != OK &&
+                    omxNode->prepareForAdaptivePlayback(
+                            kPortIndexOutput, OMX_TRUE,
+                            1280 /* width */, 720 /* height */) != OK) {
+                // adaptive playback is not supported
+                caps->removeDetail(MediaCodecInfo::Capabilities::FEATURE_ADAPTIVE_PLAYBACK);
+            }
         }
     }
 
@@ -243,11 +246,20 @@
         OMX_VIDEO_CONFIG_ANDROID_INTRAREFRESHTYPE params;
         InitOMXParams(&params);
         params.nPortIndex = kPortIndexOutput;
-        // TODO: should we verify if fallback is supported?
+
+        OMX_VIDEO_PARAM_INTRAREFRESHTYPE fallbackParams;
+        InitOMXParams(&fallbackParams);
+        fallbackParams.nPortIndex = kPortIndexOutput;
+        fallbackParams.eRefreshMode = OMX_VIDEO_IntraRefreshCyclic;
+
         if (omxNode->getConfig(
                 (OMX_INDEXTYPE)OMX_IndexConfigAndroidIntraRefresh,
-                &params, sizeof(params)) == OK) {
-            caps->addFlags(MediaCodecInfo::Capabilities::kFlagSupportsIntraRefresh);
+                &params, sizeof(params)) != OK &&
+                omxNode->getParameter(
+                    OMX_IndexParamVideoIntraRefresh, &fallbackParams,
+                    sizeof(fallbackParams)) != OK) {
+            // intra refresh is not supported
+            caps->removeDetail(MediaCodecInfo::Capabilities::FEATURE_INTRA_REFRESH);
         }
     }
 
@@ -270,12 +282,26 @@
                 writer->addMediaCodecInfo();
         info->setName(name.c_str());
         info->setOwner("default");
-        info->setEncoder(encoder);
+        typename std::underlying_type<MediaCodecInfo::Attributes>::type attrs = 0;
+        if (encoder) {
+            attrs |= MediaCodecInfo::kFlagIsEncoder;
+        }
+        // NOTE: we don't support software-only codecs in OMX
+        if (!hasPrefix(name, "OMX.google.")) {
+            attrs |= MediaCodecInfo::kFlagIsVendor;
+            if (properties.quirkSet.find("attribute::software-codec")
+                    == properties.quirkSet.end()) {
+                attrs |= MediaCodecInfo::kFlagIsHardwareAccelerated;
+            }
+        }
+        info->setAttributes(attrs);
         info->setRank(omxRank);
-        for (const MediaCodecsXmlParser::Type& type : properties.typeMap) {
-            const std::string &mime = type.first;
+        // OMX components don't have aliases
+        for (const MediaCodecsXmlParser::Type &type : properties.typeMap) {
+            const std::string &mediaType = type.first;
+
             std::unique_ptr<MediaCodecInfo::CapabilitiesWriter> caps =
-                    info->addMime(mime.c_str());
+                    info->addMediaType(mediaType.c_str());
             const MediaCodecsXmlParser::AttributeMap &attrMap = type.second;
             for (const MediaCodecsXmlParser::Attribute& attr : attrMap) {
                 const std::string &key = attr.first;
@@ -289,13 +315,13 @@
             }
             status_t err = queryOmxCapabilities(
                     name.c_str(),
-                    mime.c_str(),
+                    mediaType.c_str(),
                     encoder,
                     caps.get());
             if (err != OK) {
-                ALOGE("Failed to query capabilities for %s (mime: %s). Error: %d",
+                ALOGI("Failed to query capabilities for %s (media type: %s). Error: %d",
                         name.c_str(),
-                        mime.c_str(),
+                        mediaType.c_str(),
                         static_cast<int>(err));
             }
         }
@@ -407,20 +433,40 @@
             break;
         }
 
+        ALOGV("canonName = %s", canonName.c_str());
         std::unique_ptr<MediaCodecInfoWriter> codecInfo = writer->addMediaCodecInfo();
         codecInfo->setName(trait.name.c_str());
-        codecInfo->setOwner("codec2");
+        codecInfo->setOwner(("codec2::" + trait.owner).c_str());
+        const MediaCodecsXmlParser::CodecProperties &codec = parser.getCodecMap().at(canonName);
+
         bool encoder = trait.kind == C2Component::KIND_ENCODER;
-        codecInfo->setEncoder(encoder);
+        typename std::underlying_type<MediaCodecInfo::Attributes>::type attrs = 0;
+
+        if (encoder) {
+            attrs |= MediaCodecInfo::kFlagIsEncoder;
+        }
+        if (trait.owner == "software") {
+            attrs |= MediaCodecInfo::kFlagIsSoftwareOnly;
+        } else {
+            attrs |= MediaCodecInfo::kFlagIsVendor;
+            if (trait.owner == "vendor-software") {
+                attrs |= MediaCodecInfo::kFlagIsSoftwareOnly;
+            } else if (codec.quirkSet.find("attribute::software-codec") == codec.quirkSet.end()) {
+                attrs |= MediaCodecInfo::kFlagIsHardwareAccelerated;
+            }
+        }
+        codecInfo->setAttributes(attrs);
         codecInfo->setRank(rank);
-        const MediaCodecsXmlParser::CodecProperties &codec =
-            parser.getCodecMap().at(canonName);
+
+        for (const std::string &alias : codec.aliases) {
+            codecInfo->addAlias(alias.c_str());
+        }
 
         for (auto typeIt = codec.typeMap.begin(); typeIt != codec.typeMap.end(); ++typeIt) {
             const std::string &mediaType = typeIt->first;
             const MediaCodecsXmlParser::AttributeMap &attrMap = typeIt->second;
             std::unique_ptr<MediaCodecInfo::CapabilitiesWriter> caps =
-                codecInfo->addMime(mediaType.c_str());
+                codecInfo->addMediaType(mediaType.c_str());
             for (auto attrIt = attrMap.begin(); attrIt != attrMap.end(); ++attrIt) {
                 std::string key, value;
                 std::tie(key, value) = *attrIt;
@@ -450,6 +496,23 @@
                         asString(err), asString(profileQuery[0].status));
                 if (err == C2_OK && profileQuery[0].status == C2_OK) {
                     if (profileQuery[0].values.type == C2FieldSupportedValues::VALUES) {
+                        std::vector<std::shared_ptr<C2ParamDescriptor>> paramDescs;
+                        c2_status_t err1 = intf->querySupportedParams(&paramDescs);
+                        bool isHdr = false, isHdr10Plus = false;
+                        if (err1 == C2_OK) {
+                            for (const std::shared_ptr<C2ParamDescriptor> &desc : paramDescs) {
+                                if ((uint32_t)desc->index() ==
+                                        C2StreamHdr10PlusInfo::output::PARAM_TYPE) {
+                                    isHdr10Plus = true;
+                                } else if ((uint32_t)desc->index() ==
+                                        C2StreamHdrStaticInfo::output::PARAM_TYPE) {
+                                    isHdr = true;
+                                }
+                            }
+                        }
+                        // For VP9, the static info is always propagated by the framework.
+                        isHdr |= (mediaType == MIMETYPE_VIDEO_VP9);
+
                         for (C2Value::Primitive profile : profileQuery[0].values.values) {
                             pl.profile = (C2Config::profile_t)profile.ref<uint32_t>();
                             std::vector<std::unique_ptr<C2SettingResult>> failures;
@@ -473,6 +536,26 @@
                                         caps->addProfileLevel(
                                                 (uint32_t)sdkProfile, (uint32_t)sdkLevel);
                                         gotProfileLevels = true;
+                                        if (isHdr) {
+                                            auto hdrMapper = C2Mapper::GetHdrProfileLevelMapper(
+                                                    trait.mediaType);
+                                            if (hdrMapper && hdrMapper->mapProfile(
+                                                    pl.profile, &sdkProfile)) {
+                                                caps->addProfileLevel(
+                                                        (uint32_t)sdkProfile,
+                                                        (uint32_t)sdkLevel);
+                                            }
+                                            if (isHdr10Plus) {
+                                                hdrMapper = C2Mapper::GetHdrProfileLevelMapper(
+                                                        trait.mediaType, true /*isHdr10Plus*/);
+                                                if (hdrMapper && hdrMapper->mapProfile(
+                                                        pl.profile, &sdkProfile)) {
+                                                    caps->addProfileLevel(
+                                                            (uint32_t)sdkProfile,
+                                                            (uint32_t)sdkLevel);
+                                                }
+                                            }
+                                        }
                                     } else if (!mapper) {
                                         caps->addProfileLevel(pl.profile, pl.level);
                                         gotProfileLevels = true;
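
Note: the HDR handling above reduces to two flags: a video codec is treated as HDR-capable if it exposes C2StreamHdrStaticInfo::output (or is VP9, where the framework injects the static info), and HDR10+-capable if it also exposes C2StreamHdr10PlusInfo::output; each supported base profile is then re-advertised through the HDR mapper(s). A hedged standalone sketch of that decision flow (parameter names come from the hunk above; the profile strings and lookup are stand-ins, not the real C2Mapper tables):

    #include <cstdio>
    #include <set>
    #include <string>

    int main() {
        // Stand-ins for the queried C2ParamDescriptor indices.
        std::set<std::string> supportedParams = {
            "C2StreamHdrStaticInfo::output", "C2StreamHdr10PlusInfo::output"};
        std::string mediaType = "video/hevc";

        bool isHdr = supportedParams.count("C2StreamHdrStaticInfo::output") > 0;
        bool isHdr10Plus = supportedParams.count("C2StreamHdr10PlusInfo::output") > 0;
        // For VP9 the framework always propagates the static info itself.
        isHdr |= (mediaType == "video/x-vnd.on2.vp9");

        const char *baseProfile = "HEVCProfileMain10";   // illustrative SDK profile
        printf("advertise %s\n", baseProfile);
        if (isHdr) {
            printf("advertise %s\n", "HEVCProfileMain10HDR10");         // via HDR mapper
            if (isHdr10Plus) {
                printf("advertise %s\n", "HEVCProfileMain10HDR10Plus"); // via HDR10+ mapper
            }
        }
        return 0;
    }
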
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.cpp b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
index b1b33e1..c369e16 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.cpp
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
@@ -253,6 +253,14 @@
     { C2Config::PROFILE_HEVC_MAIN_10_INTRA, HEVCProfileMain10 },
 };
 
+ALookup<C2Config::profile_t, int32_t> sHevcHdrProfiles = {
+    { C2Config::PROFILE_HEVC_MAIN_10, HEVCProfileMain10HDR10 },
+};
+
+ALookup<C2Config::profile_t, int32_t> sHevcHdr10PlusProfiles = {
+    { C2Config::PROFILE_HEVC_MAIN_10, HEVCProfileMain10HDR10Plus },
+};
+
 ALookup<C2Config::level_t, int32_t> sMpeg2Levels = {
     { C2Config::LEVEL_MP2V_LOW,         MPEG2LevelLL },
     { C2Config::LEVEL_MP2V_MAIN,        MPEG2LevelML },
@@ -324,6 +332,20 @@
     { C2Config::PROFILE_VP9_1, VP9Profile1 },
     { C2Config::PROFILE_VP9_2, VP9Profile2 },
     { C2Config::PROFILE_VP9_3, VP9Profile3 },
+    { C2Config::PROFILE_VP9_2, VP9Profile2HDR },
+    { C2Config::PROFILE_VP9_3, VP9Profile3HDR },
+    { C2Config::PROFILE_VP9_2, VP9Profile2HDR10Plus },
+    { C2Config::PROFILE_VP9_3, VP9Profile3HDR10Plus },
+};
+
+ALookup<C2Config::profile_t, int32_t> sVp9HdrProfiles = {
+    { C2Config::PROFILE_VP9_2, VP9Profile2HDR },
+    { C2Config::PROFILE_VP9_3, VP9Profile3HDR },
+};
+
+ALookup<C2Config::profile_t, int32_t> sVp9Hdr10PlusProfiles = {
+    { C2Config::PROFILE_VP9_2, VP9Profile2HDR10Plus },
+    { C2Config::PROFILE_VP9_3, VP9Profile3HDR10Plus },
 };
 
 ALookup<C2Config::level_t, int32_t> sAv1Levels = {
@@ -461,6 +483,10 @@
 };
 
 struct HevcProfileLevelMapper : ProfileLevelMapperHelper {
+    HevcProfileLevelMapper(bool isHdr = false, bool isHdr10Plus = false) :
+        ProfileLevelMapperHelper(),
+        mIsHdr(isHdr), mIsHdr10Plus(isHdr10Plus) {}
+
     virtual bool simpleMap(C2Config::level_t from, int32_t *to) {
         return sHevcLevels.map(from, to);
     }
@@ -468,11 +494,19 @@
         return sHevcLevels.map(from, to);
     }
     virtual bool simpleMap(C2Config::profile_t from, int32_t *to) {
-        return sHevcProfiles.map(from, to);
+        return mIsHdr10Plus ? sHevcHdr10PlusProfiles.map(from, to) :
+                     mIsHdr ? sHevcHdrProfiles.map(from, to) :
+                              sHevcProfiles.map(from, to);
     }
     virtual bool simpleMap(int32_t from, C2Config::profile_t *to) {
-        return sHevcProfiles.map(from, to);
+        return mIsHdr10Plus ? sHevcHdr10PlusProfiles.map(from, to) :
+                     mIsHdr ? sHevcHdrProfiles.map(from, to) :
+                              sHevcProfiles.map(from, to);
     }
+
+private:
+    bool mIsHdr;
+    bool mIsHdr10Plus;
 };
 
 struct Mpeg2ProfileLevelMapper : ProfileLevelMapperHelper {
@@ -527,6 +561,10 @@
 };
 
 struct Vp9ProfileLevelMapper : ProfileLevelMapperHelper {
+    Vp9ProfileLevelMapper(bool isHdr = false, bool isHdr10Plus = false) :
+        ProfileLevelMapperHelper(),
+        mIsHdr(isHdr), mIsHdr10Plus(isHdr10Plus) {}
+
     virtual bool simpleMap(C2Config::level_t from, int32_t *to) {
         return sVp9Levels.map(from, to);
     }
@@ -534,11 +572,19 @@
         return sVp9Levels.map(from, to);
     }
     virtual bool simpleMap(C2Config::profile_t from, int32_t *to) {
-        return sVp9Profiles.map(from, to);
+        return mIsHdr10Plus ? sVp9Hdr10PlusProfiles.map(from, to) :
+                     mIsHdr ? sVp9HdrProfiles.map(from, to) :
+                              sVp9Profiles.map(from, to);
     }
     virtual bool simpleMap(int32_t from, C2Config::profile_t *to) {
-        return sVp9Profiles.map(from, to);
+        return mIsHdr10Plus ? sVp9Hdr10PlusProfiles.map(from, to) :
+                     mIsHdr ? sVp9HdrProfiles.map(from, to) :
+                              sVp9Profiles.map(from, to);
     }
+
+private:
+    bool mIsHdr;
+    bool mIsHdr10Plus;
 };
 
 } // namespace
@@ -570,6 +616,18 @@
 }
 
 // static
+std::shared_ptr<C2Mapper::ProfileLevelMapper>
+C2Mapper::GetHdrProfileLevelMapper(std::string mediaType, bool isHdr10Plus) {
+    std::transform(mediaType.begin(), mediaType.end(), mediaType.begin(), ::tolower);
+    if (mediaType == MIMETYPE_VIDEO_HEVC) {
+        return std::make_shared<HevcProfileLevelMapper>(true, isHdr10Plus);
+    } else if (mediaType == MIMETYPE_VIDEO_VP9) {
+        return std::make_shared<Vp9ProfileLevelMapper>(true, isHdr10Plus);
+    }
+    return nullptr;
+}
+
+// static
 bool C2Mapper::map(C2Config::bitrate_mode_t from, int32_t *to) {
     return sBitrateModes.map(from, to);
 }
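
Note: the new tables make HDR-ness a property of the mapper instance rather than of the profile value: the same C2 profile maps to the plain, HDR, or HDR10+ SDK constant depending on which mapper was constructed. A minimal standalone sketch of that selection for VP9 Profile 2 (the class below is a stand-in, not the real ProfileLevelMapperHelper):

    #include <cstdio>
    #include <map>
    #include <string>

    // Stand-ins for the C2 profile constant and the SDK-facing profile names.
    enum class C2Profile { VP9_2 };

    struct Vp9MapperSketch {
        bool isHdr;
        bool isHdr10Plus;
        std::string map(C2Profile p) const {
            static const std::map<C2Profile, std::string> base   = {{C2Profile::VP9_2, "VP9Profile2"}};
            static const std::map<C2Profile, std::string> hdr    = {{C2Profile::VP9_2, "VP9Profile2HDR"}};
            static const std::map<C2Profile, std::string> hdr10p = {{C2Profile::VP9_2, "VP9Profile2HDR10Plus"}};
            const auto &table = isHdr10Plus ? hdr10p : isHdr ? hdr : base;
            return table.at(p);
        }
    };

    int main() {
        printf("%s\n", Vp9MapperSketch{false, false}.map(C2Profile::VP9_2).c_str()); // VP9Profile2
        printf("%s\n", Vp9MapperSketch{true,  false}.map(C2Profile::VP9_2).c_str()); // VP9Profile2HDR
        printf("%s\n", Vp9MapperSketch{true,  true }.map(C2Profile::VP9_2).c_str()); // VP9Profile2HDR10Plus
        return 0;
    }
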
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.h b/media/codec2/sfplugin/utils/Codec2Mapper.h
index 1eeb92e..cec6f07 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.h
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.h
@@ -40,6 +40,9 @@
         static std::shared_ptr<ProfileLevelMapper>
         GetProfileLevelMapper(std::string mediaType);
 
+        static std::shared_ptr<ProfileLevelMapper>
+        GetHdrProfileLevelMapper(std::string mediaType, bool isHdr10Plus = false);
+
         // convert between bitrates
         static bool map(C2Config::bitrate_mode_t, int32_t*);
         static bool map(int32_t, C2Config::bitrate_mode_t*);
diff --git a/media/codec2/vndk/C2AllocatorGralloc.cpp b/media/codec2/vndk/C2AllocatorGralloc.cpp
index 4878974..18f2430 100644
--- a/media/codec2/vndk/C2AllocatorGralloc.cpp
+++ b/media/codec2/vndk/C2AllocatorGralloc.cpp
@@ -304,17 +304,23 @@
 }
 
 C2AllocationGralloc::~C2AllocationGralloc() {
-    if (!mBuffer) {
-        return;
-    }
-    if (mLocked) {
+    if (mBuffer && mLocked) {
         // implementation ignores address and rect
         uint8_t* addr[C2PlanarLayout::MAX_NUM_PLANES] = {};
         unmap(addr, C2Rect(), nullptr);
     }
-    mMapper->freeBuffer(const_cast<native_handle_t *>(mBuffer));
-    native_handle_delete(const_cast<native_handle_t*>(
-            reinterpret_cast<const native_handle_t*>(mHandle)));
+    if (mBuffer) {
+        mMapper->freeBuffer(const_cast<native_handle_t *>(mBuffer));
+    }
+    if (mHandle) {
+        native_handle_delete(
+                const_cast<native_handle_t *>(reinterpret_cast<const native_handle_t *>(mHandle)));
+    }
+    if (mLockedHandle) {
+        native_handle_delete(
+                const_cast<native_handle_t *>(
+                        reinterpret_cast<const native_handle_t *>(mLockedHandle)));
+    }
 }
 
 c2_status_t C2AllocationGralloc::map(
diff --git a/media/codec2/vndk/C2Config.cpp b/media/codec2/vndk/C2Config.cpp
index 782bec5..8a27088 100644
--- a/media/codec2/vndk/C2Config.cpp
+++ b/media/codec2/vndk/C2Config.cpp
@@ -221,6 +221,30 @@
         { "vp9-6", C2Config::LEVEL_VP9_6 },
         { "vp9-6.1", C2Config::LEVEL_VP9_6_1 },
         { "vp9-6.2", C2Config::LEVEL_VP9_6_2 },
+        { "av1-2", C2Config::LEVEL_AV1_2 },
+        { "av1-2.1", C2Config::LEVEL_AV1_2_1 },
+        { "av1-2.2", C2Config::LEVEL_AV1_2_2 },
+        { "av1-2.3", C2Config::LEVEL_AV1_2_3 },
+        { "av1-3", C2Config::LEVEL_AV1_3 },
+        { "av1-3.1", C2Config::LEVEL_AV1_3_1 },
+        { "av1-3.2", C2Config::LEVEL_AV1_3_2 },
+        { "av1-3.3", C2Config::LEVEL_AV1_3_3 },
+        { "av1-4", C2Config::LEVEL_AV1_4 },
+        { "av1-4.1", C2Config::LEVEL_AV1_4_1 },
+        { "av1-4.2", C2Config::LEVEL_AV1_4_2 },
+        { "av1-4.3", C2Config::LEVEL_AV1_4_3 },
+        { "av1-5", C2Config::LEVEL_AV1_5 },
+        { "av1-5.1", C2Config::LEVEL_AV1_5_1 },
+        { "av1-5.2", C2Config::LEVEL_AV1_5_2 },
+        { "av1-5.3", C2Config::LEVEL_AV1_5_3 },
+        { "av1-6", C2Config::LEVEL_AV1_6 },
+        { "av1-6.1", C2Config::LEVEL_AV1_6_1 },
+        { "av1-6.2", C2Config::LEVEL_AV1_6_2 },
+        { "av1-6.3", C2Config::LEVEL_AV1_6_3 },
+        { "av1-7", C2Config::LEVEL_AV1_7 },
+        { "av1-7.1", C2Config::LEVEL_AV1_7_1 },
+        { "av1-7.2", C2Config::LEVEL_AV1_7_2 },
+        { "av1-7.3", C2Config::LEVEL_AV1_7_3 },
 }))
 
 DEFINE_C2_ENUM_VALUE_CUSTOM_HELPER(C2BufferData::type_t, ({
diff --git a/media/codec2/vndk/C2Store.cpp b/media/codec2/vndk/C2Store.cpp
index a5dd203..dc7e89c 100644
--- a/media/codec2/vndk/C2Store.cpp
+++ b/media/codec2/vndk/C2Store.cpp
@@ -817,6 +817,7 @@
     emplace("c2.android.mp3.decoder", "libcodec2_soft_mp3dec.so");
     emplace("c2.android.vorbis.decoder", "libcodec2_soft_vorbisdec.so");
     emplace("c2.android.opus.decoder", "libcodec2_soft_opusdec.so");
+    emplace("c2.android.opus.encoder", "libcodec2_soft_opusenc.so");
     emplace("c2.android.vp8.decoder", "libcodec2_soft_vp8dec.so");
     emplace("c2.android.vp9.decoder", "libcodec2_soft_vp9dec.so");
     emplace("c2.android.vp8.encoder", "libcodec2_soft_vp8enc.so");
diff --git a/media/extractors/flac/FLACExtractor.cpp b/media/extractors/flac/FLACExtractor.cpp
index dcda6bf..84fbcee 100644
--- a/media/extractors/flac/FLACExtractor.cpp
+++ b/media/extractors/flac/FLACExtractor.cpp
@@ -806,7 +806,32 @@
 
 bool SniffFLAC(DataSourceHelper *source, float *confidence)
 {
-    // FLAC header.
+    // Skip ID3 tags
+    off64_t pos = 0;
+    uint8_t header[10];
+    for (;;) {
+        if (source->readAt(pos, header, sizeof(header)) != sizeof(header)) {
+            return false; // not enough data to read the header.
+        }
+
+        // check for ID3 tag
+        if (memcmp("ID3", header, 3) != 0) {
+            break; // not an ID3 tag.
+        }
+
+        // skip the ID3v2 data and check again
+        const unsigned id3Len = 10 +
+                (((header[6] & 0x7f) << 21)
+                 | ((header[7] & 0x7f) << 14)
+                 | ((header[8] & 0x7f) << 7)
+                 | (header[9] & 0x7f));
+        pos += id3Len;
+
+        ALOGV("skipped ID3 tag of len %u new starting offset is %#016llx",
+                id3Len, (long long)pos);
+    }
+
+    // Check FLAC header.
     // https://xiph.org/flac/format.html#stream
     //
     // Note: content stored big endian.
@@ -815,12 +840,8 @@
     // 4            8         metadata type STREAMINFO (0) (note: OR with 0x80 if last metadata)
     // 5            24        size of metadata, for STREAMINFO (0x22).
 
-    // Android is LE, so express header as little endian int64 constant.
-    constexpr int64_t flacHeader = (0x22LL << 56) | 'CaLf';
-    constexpr int64_t flacHeader2 = flacHeader | (0x80LL << 32); // alternate form (last metadata)
-    int64_t header;
-    if (source->readAt(0, &header, sizeof(header)) != sizeof(header)
-            || (header != flacHeader && header != flacHeader2)) {
+    if (memcmp("fLaC\x00\x00\x00\x22", header, 8) != 0 &&
+        memcmp("fLaC\x80\x00\x00\x22", header, 8) != 0) {
         return false;
     }
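
Note: the ID3 skip above relies on the ID3v2 size field being a 28-bit syncsafe integer: four bytes, each contributing only its low 7 bits, plus the 10-byte tag header. A standalone sketch of that decoding (the sample header bytes are made up for illustration):

    #include <cstdint>
    #include <cstdio>

    // Decode the total length of an ID3v2 tag from its 10-byte header: 10 header
    // bytes plus the syncsafe size stored in bytes 6..9 (7 bits per byte).
    static unsigned id3v2TotalLen(const uint8_t header[10]) {
        return 10 +
               (((header[6] & 0x7f) << 21) |
                ((header[7] & 0x7f) << 14) |
                ((header[8] & 0x7f) << 7)  |
                 (header[9] & 0x7f));
    }

    int main() {
        // Hypothetical header: "ID3", version 2.3.0, no flags, syncsafe size 0x00 00 02 01.
        const uint8_t header[10] = {'I', 'D', '3', 3, 0, 0, 0x00, 0x00, 0x02, 0x01};
        printf("skip %u bytes before sniffing for 'fLaC'\n", id3v2TotalLen(header));
        // (0x02 << 7) | 0x01 = 257 payload bytes, so the tag occupies 267 bytes total.
        return 0;
    }
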
 
diff --git a/media/extractors/mp4/ItemTable.cpp b/media/extractors/mp4/ItemTable.cpp
index 55a0c47..eb6602c 100644
--- a/media/extractors/mp4/ItemTable.cpp
+++ b/media/extractors/mp4/ItemTable.cpp
@@ -48,7 +48,7 @@
             offset(0), size(0), nextTileIndex(0) {}
 
     bool isGrid() const {
-        return type == FOURCC('g', 'r', 'i', 'd');
+        return type == FOURCC("grid");
     }
 
     status_t getNextTileItemId(uint32_t *nextTileItemId, bool reset) {
@@ -223,7 +223,7 @@
 
 struct PitmBox : public FullBox {
     PitmBox(DataSourceHelper *source) :
-        FullBox(source, FOURCC('p', 'i', 't', 'm')) {}
+        FullBox(source, FOURCC("pitm")) {}
 
     status_t parse(off64_t offset, size_t size, uint32_t *primaryItemId);
 };
@@ -303,7 +303,7 @@
 
 struct IlocBox : public FullBox {
     IlocBox(DataSourceHelper *source, KeyedVector<uint32_t, ItemLoc> *itemLocs) :
-        FullBox(source, FOURCC('i', 'l', 'o', 'c')),
+        FullBox(source, FOURCC("iloc")),
         mItemLocs(itemLocs), mHasConstructMethod1(false) {}
 
     status_t parse(off64_t offset, size_t size);
@@ -497,7 +497,7 @@
     ALOGV("attach reference type 0x%x to item id %d)", type(), mItemId);
 
     switch(type()) {
-    case FOURCC('d', 'i', 'm', 'g'): {
+    case FOURCC("dimg"): {
         ssize_t itemIndex = itemIdToItemMap.indexOfKey(mItemId);
 
         // ignore non-image items
@@ -525,7 +525,7 @@
         }
         break;
     }
-    case FOURCC('t', 'h', 'm', 'b'): {
+    case FOURCC("thmb"): {
         ssize_t itemIndex = itemIdToItemMap.indexOfKey(mItemId);
 
         // ignore non-image items
@@ -554,7 +554,7 @@
         }
         break;
     }
-    case FOURCC('c', 'd', 's', 'c'): {
+    case FOURCC("cdsc"): {
         ssize_t itemIndex = itemIdToExifMap.indexOfKey(mItemId);
 
         // ignore non-exif block items
@@ -575,7 +575,7 @@
         }
         break;
     }
-    case FOURCC('a', 'u', 'x', 'l'): {
+    case FOURCC("auxl"): {
         ssize_t itemIndex = itemIdToItemMap.indexOfKey(mItemId);
 
         // ignore non-image items
@@ -628,7 +628,7 @@
 
 struct IrefBox : public FullBox {
     IrefBox(DataSourceHelper *source, Vector<sp<ItemReference> > *itemRefs) :
-        FullBox(source, FOURCC('i', 'r', 'e', 'f')), mRefIdSize(0), mItemRefs(itemRefs) {}
+        FullBox(source, FOURCC("iref")), mRefIdSize(0), mItemRefs(itemRefs) {}
 
     status_t parse(off64_t offset, size_t size);
 
@@ -690,7 +690,7 @@
 
 struct IspeBox : public FullBox, public ItemProperty {
     IspeBox(DataSourceHelper *source) :
-        FullBox(source, FOURCC('i', 's', 'p', 'e')), mWidth(0), mHeight(0) {}
+        FullBox(source, FOURCC("ispe")), mWidth(0), mHeight(0) {}
 
     status_t parse(off64_t offset, size_t size) override;
 
@@ -726,7 +726,7 @@
 
 struct HvccBox : public Box, public ItemProperty {
     HvccBox(DataSourceHelper *source) :
-        Box(source, FOURCC('h', 'v', 'c', 'C')) {}
+        Box(source, FOURCC("hvcC")) {}
 
     status_t parse(off64_t offset, size_t size) override;
 
@@ -759,7 +759,7 @@
 
 struct IrotBox : public Box, public ItemProperty {
     IrotBox(DataSourceHelper *source) :
-        Box(source, FOURCC('i', 'r', 'o', 't')), mAngle(0) {}
+        Box(source, FOURCC("irot")), mAngle(0) {}
 
     status_t parse(off64_t offset, size_t size) override;
 
@@ -788,7 +788,7 @@
 
 struct ColrBox : public Box, public ItemProperty {
     ColrBox(DataSourceHelper *source) :
-        Box(source, FOURCC('c', 'o', 'l', 'r')) {}
+        Box(source, FOURCC("colr")) {}
 
     status_t parse(off64_t offset, size_t size) override;
 
@@ -812,11 +812,11 @@
     }
     offset += 4;
     size -= 4;
-    if (colour_type == FOURCC('n', 'c', 'l', 'x')) {
+    if (colour_type == FOURCC("nclx")) {
         return OK;
     }
-    if ((colour_type != FOURCC('r', 'I', 'C', 'C')) &&
-        (colour_type != FOURCC('p', 'r', 'o', 'f'))) {
+    if ((colour_type != FOURCC("rICC")) &&
+        (colour_type != FOURCC("prof"))) {
         return ERROR_MALFORMED;
     }
 
@@ -836,7 +836,7 @@
 
 struct IpmaBox : public FullBox {
     IpmaBox(DataSourceHelper *source, Vector<AssociationEntry> *associations) :
-        FullBox(source, FOURCC('i', 'p', 'm', 'a')), mAssociations(associations) {}
+        FullBox(source, FOURCC("ipma")), mAssociations(associations) {}
 
     status_t parse(off64_t offset, size_t size);
 private:
@@ -910,7 +910,7 @@
 
 struct IpcoBox : public Box {
     IpcoBox(DataSourceHelper *source, Vector<sp<ItemProperty> > *properties) :
-        Box(source, FOURCC('i', 'p', 'c', 'o')), mItemProperties(properties) {}
+        Box(source, FOURCC("ipco")), mItemProperties(properties) {}
 
     status_t parse(off64_t offset, size_t size);
 protected:
@@ -930,22 +930,22 @@
 status_t IpcoBox::onChunkData(uint32_t type, off64_t offset, size_t size) {
     sp<ItemProperty> itemProperty;
     switch(type) {
-        case FOURCC('h', 'v', 'c', 'C'):
+        case FOURCC("hvcC"):
         {
             itemProperty = new HvccBox(source());
             break;
         }
-        case FOURCC('i', 's', 'p', 'e'):
+        case FOURCC("ispe"):
         {
             itemProperty = new IspeBox(source());
             break;
         }
-        case FOURCC('i', 'r', 'o', 't'):
+        case FOURCC("irot"):
         {
             itemProperty = new IrotBox(source());
             break;
         }
-        case FOURCC('c', 'o', 'l', 'r'):
+        case FOURCC("colr"):
         {
             itemProperty = new ColrBox(source());
             break;
@@ -969,7 +969,7 @@
     IprpBox(DataSourceHelper *source,
             Vector<sp<ItemProperty> > *properties,
             Vector<AssociationEntry> *associations) :
-        Box(source, FOURCC('i', 'p', 'r', 'p')),
+        Box(source, FOURCC("iprp")),
         mProperties(properties), mAssociations(associations) {}
 
     status_t parse(off64_t offset, size_t size);
@@ -993,12 +993,12 @@
 
 status_t IprpBox::onChunkData(uint32_t type, off64_t offset, size_t size) {
     switch(type) {
-        case FOURCC('i', 'p', 'c', 'o'):
+        case FOURCC("ipco"):
         {
             IpcoBox ipcoBox(source(), mProperties);
             return ipcoBox.parse(offset, size);
         }
-        case FOURCC('i', 'p', 'm', 'a'):
+        case FOURCC("ipma"):
         {
             IpmaBox ipmaBox(source(), mAssociations);
             return ipmaBox.parse(offset, size);
@@ -1024,7 +1024,7 @@
 
 struct InfeBox : public FullBox {
     InfeBox(DataSourceHelper *source) :
-        FullBox(source, FOURCC('i', 'n', 'f', 'e')) {}
+        FullBox(source, FOURCC("infe")) {}
 
     status_t parse(off64_t offset, size_t size, ItemInfo *itemInfo);
 
@@ -1104,7 +1104,7 @@
         }
         ALOGV("item_name %s", item_name.c_str());
 
-        if (item_type == FOURCC('m', 'i', 'm', 'e')) {
+        if (item_type == FOURCC("mime")) {
             String8 content_type;
             if (!parseNullTerminatedString(&offset, &size, &content_type)) {
                 return ERROR_MALFORMED;
@@ -1117,7 +1117,7 @@
                     return ERROR_MALFORMED;
                 }
             }
-        } else if (item_type == FOURCC('u', 'r', 'i', ' ')) {
+        } else if (item_type == FOURCC("uri ")) {
             String8 item_uri_type;
             if (!parseNullTerminatedString(&offset, &size, &item_uri_type)) {
                 return ERROR_MALFORMED;
@@ -1129,7 +1129,7 @@
 
 struct IinfBox : public FullBox {
     IinfBox(DataSourceHelper *source, Vector<ItemInfo> *itemInfos) :
-        FullBox(source, FOURCC('i', 'i', 'n', 'f')),
+        FullBox(source, FOURCC("iinf")),
         mItemInfos(itemInfos), mHasGrids(false) {}
 
     status_t parse(off64_t offset, size_t size);
@@ -1179,7 +1179,7 @@
 }
 
 status_t IinfBox::onChunkData(uint32_t type, off64_t offset, size_t size) {
-    if (type != FOURCC('i', 'n', 'f', 'e')) {
+    if (type != FOURCC("infe")) {
         return OK;
     }
 
@@ -1188,7 +1188,7 @@
     status_t err = infeBox.parse(offset, size, &itemInfo);
     if (err == OK) {
         mItemInfos->push_back(itemInfo);
-        mHasGrids |= (itemInfo.itemType == FOURCC('g', 'r', 'i', 'd'));
+        mHasGrids |= (itemInfo.itemType == FOURCC("grid"));
     }
     // InfeBox parse returns ERROR_UNSUPPORTED if the box is of an unsupported
     // version. Ignore this error as it's not fatal.
@@ -1214,31 +1214,31 @@
 
 status_t ItemTable::parse(uint32_t type, off64_t data_offset, size_t chunk_data_size) {
     switch(type) {
-        case FOURCC('i', 'l', 'o', 'c'):
+        case FOURCC("iloc"):
         {
             return parseIlocBox(data_offset, chunk_data_size);
         }
-        case FOURCC('i', 'i', 'n', 'f'):
+        case FOURCC("iinf"):
         {
             return parseIinfBox(data_offset, chunk_data_size);
         }
-        case FOURCC('i', 'p', 'r', 'p'):
+        case FOURCC("iprp"):
         {
             return parseIprpBox(data_offset, chunk_data_size);
         }
-        case FOURCC('p', 'i', 't', 'm'):
+        case FOURCC("pitm"):
         {
             return parsePitmBox(data_offset, chunk_data_size);
         }
-        case FOURCC('i', 'd', 'a', 't'):
+        case FOURCC("idat"):
         {
             return parseIdatBox(data_offset, chunk_data_size);
         }
-        case FOURCC('i', 'r', 'e', 'f'):
+        case FOURCC("iref"):
         {
             return parseIrefBox(data_offset, chunk_data_size);
         }
-        case FOURCC('i', 'p', 'r', 'o'):
+        case FOURCC("ipro"):
         {
             ALOGW("ipro box not supported!");
             break;
@@ -1355,9 +1355,9 @@
         //   'grid': derived image from tiles
         //   'hvc1': coded image (or tile)
         //   'Exif': EXIF metadata
-        if (info.itemType != FOURCC('g', 'r', 'i', 'd') &&
-            info.itemType != FOURCC('h', 'v', 'c', '1') &&
-            info.itemType != FOURCC('E', 'x', 'i', 'f')) {
+        if (info.itemType != FOURCC("grid") &&
+            info.itemType != FOURCC("hvc1") &&
+            info.itemType != FOURCC("Exif")) {
             continue;
         }
 
@@ -1380,7 +1380,7 @@
             return ERROR_MALFORMED;
         }
 
-        if (info.itemType == FOURCC('E', 'x', 'i', 'f')) {
+        if (info.itemType == FOURCC("Exif")) {
             // Only add if the Exif data is non-empty. The first 4 bytes contain
             // the offset to TIFF header, which the Exif parser doesn't use.
             if (size > 4) {
@@ -1687,8 +1687,31 @@
     }
 
     // skip the first 4-byte of the offset to TIFF header
-    *offset = mItemIdToExifMap[exifIndex].offset + 4;
-    *size = mItemIdToExifMap[exifIndex].size - 4;
+    uint32_t tiffOffset;
+    if (!mDataSource->readAt(
+            mItemIdToExifMap[exifIndex].offset, &tiffOffset, 4)) {
+        return ERROR_IO;
+    }
+
+    // We need 'Exif\0\0' before the TIFF header
+    tiffOffset = ntohl(tiffOffset);
+    if (tiffOffset < 6) {
+        return ERROR_MALFORMED;
+    }
+    // The first 4 bytes of the item hold the offset of the TIFF header within
+    // the Exif data. The size of the item should be > 4 for a non-empty Exif
+    // blob (this was already checked when the item was added). Also check that
+    // the TIFF header offset is valid.
+    if (mItemIdToExifMap[exifIndex].size <= 4 ||
+            tiffOffset > mItemIdToExifMap[exifIndex].size - 4) {
+        return ERROR_MALFORMED;
+    }
+
+    // Offset of 'Exif\0\0' relative to the beginning of the 'Exif' item
+    // (the first 4 bytes hold the TIFF header offset)
+    uint32_t exifOffset = 4 + tiffOffset - 6;
+    *offset = mItemIdToExifMap[exifIndex].offset + exifOffset;
+    *size = mItemIdToExifMap[exifIndex].size - exifOffset;
     return OK;
 }
 
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 0441359..d0efddd 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -308,42 +308,42 @@
 
 static const char *FourCC2MIME(uint32_t fourcc) {
     switch (fourcc) {
-        case FOURCC('m', 'p', '4', 'a'):
+        case FOURCC("mp4a"):
             return MEDIA_MIMETYPE_AUDIO_AAC;
 
-        case FOURCC('s', 'a', 'm', 'r'):
+        case FOURCC("samr"):
             return MEDIA_MIMETYPE_AUDIO_AMR_NB;
 
-        case FOURCC('s', 'a', 'w', 'b'):
+        case FOURCC("sawb"):
             return MEDIA_MIMETYPE_AUDIO_AMR_WB;
 
-        case FOURCC('e', 'c', '-', '3'):
+        case FOURCC("ec-3"):
             return MEDIA_MIMETYPE_AUDIO_EAC3;
 
-        case FOURCC('m', 'p', '4', 'v'):
+        case FOURCC("mp4v"):
             return MEDIA_MIMETYPE_VIDEO_MPEG4;
 
-        case FOURCC('s', '2', '6', '3'):
-        case FOURCC('h', '2', '6', '3'):
-        case FOURCC('H', '2', '6', '3'):
+        case FOURCC("s263"):
+        case FOURCC("h263"):
+        case FOURCC("H263"):
             return MEDIA_MIMETYPE_VIDEO_H263;
 
-        case FOURCC('a', 'v', 'c', '1'):
+        case FOURCC("avc1"):
             return MEDIA_MIMETYPE_VIDEO_AVC;
 
-        case FOURCC('h', 'v', 'c', '1'):
-        case FOURCC('h', 'e', 'v', '1'):
+        case FOURCC("hvc1"):
+        case FOURCC("hev1"):
             return MEDIA_MIMETYPE_VIDEO_HEVC;
-        case FOURCC('a', 'c', '-', '4'):
+        case FOURCC("ac-4"):
             return MEDIA_MIMETYPE_AUDIO_AC4;
 
-        case FOURCC('t', 'w', 'o', 's'):
-        case FOURCC('s', 'o', 'w', 't'):
+        case FOURCC("twos"):
+        case FOURCC("sowt"):
             return MEDIA_MIMETYPE_AUDIO_RAW;
-        case FOURCC('a', 'l', 'a', 'c'):
+        case FOURCC("alac"):
             return MEDIA_MIMETYPE_AUDIO_ALAC;
 
-        case FOURCC('a', 'v', '0', '1'):
+        case FOURCC("av01"):
             return MEDIA_MIMETYPE_VIDEO_AV1;
         default:
             ALOGW("Unknown fourcc: %c%c%c%c",
@@ -594,7 +594,7 @@
                 }
             } else {
                 uint32_t sampleIndex;
-                uint32_t sampleTime;
+                uint64_t sampleTime;
                 if (track->timescale != 0 &&
                         track->sampleTable->findThumbnailSample(&sampleIndex) == OK
                         && track->sampleTable->getMetaDataForSample(
@@ -749,21 +749,21 @@
 
 static bool underMetaDataPath(const Vector<uint32_t> &path) {
     return path.size() >= 5
-        && path[0] == FOURCC('m', 'o', 'o', 'v')
-        && path[1] == FOURCC('u', 'd', 't', 'a')
-        && path[2] == FOURCC('m', 'e', 't', 'a')
-        && path[3] == FOURCC('i', 'l', 's', 't');
+        && path[0] == FOURCC("moov")
+        && path[1] == FOURCC("udta")
+        && path[2] == FOURCC("meta")
+        && path[3] == FOURCC("ilst");
 }
 
 static bool underQTMetaPath(const Vector<uint32_t> &path, int32_t depth) {
     return path.size() >= 2
-            && path[0] == FOURCC('m', 'o', 'o', 'v')
-            && path[1] == FOURCC('m', 'e', 't', 'a')
+            && path[0] == FOURCC("moov")
+            && path[1] == FOURCC("meta")
             && (depth == 2
             || (depth == 3
-                    && (path[2] == FOURCC('h', 'd', 'l', 'r')
-                    ||  path[2] == FOURCC('i', 'l', 's', 't')
-                    ||  path[2] == FOURCC('k', 'e', 'y', 's'))));
+                    && (path[2] == FOURCC("hdlr")
+                    ||  path[2] == FOURCC("ilst")
+                    ||  path[2] == FOURCC("keys"))));
 }
 
 // Given a time in seconds since Jan 1 1904, produce a human-readable string.
@@ -867,7 +867,7 @@
         ALOGE("b/23540914");
         return ERROR_MALFORMED;
     }
-    if (chunk_type != FOURCC('m', 'd', 'a', 't') && chunk_data_size > kMaxAtomSize) {
+    if (chunk_type != FOURCC("mdat") && chunk_data_size > kMaxAtomSize) {
         char errMsg[100];
         sprintf(errMsg, "%s atom has size %" PRId64, chunk, chunk_data_size);
         ALOGE("%s (b/28615448)", errMsg);
@@ -875,8 +875,8 @@
         return ERROR_MALFORMED;
     }
 
-    if (chunk_type != FOURCC('c', 'p', 'r', 't')
-            && chunk_type != FOURCC('c', 'o', 'v', 'r')
+    if (chunk_type != FOURCC("cprt")
+            && chunk_type != FOURCC("covr")
             && mPath.size() == 5 && underMetaDataPath(mPath)) {
         off64_t stop_offset = *offset + chunk_size;
         *offset = data_offset;
@@ -895,40 +895,40 @@
     }
 
     switch(chunk_type) {
-        case FOURCC('m', 'o', 'o', 'v'):
-        case FOURCC('t', 'r', 'a', 'k'):
-        case FOURCC('m', 'd', 'i', 'a'):
-        case FOURCC('m', 'i', 'n', 'f'):
-        case FOURCC('d', 'i', 'n', 'f'):
-        case FOURCC('s', 't', 'b', 'l'):
-        case FOURCC('m', 'v', 'e', 'x'):
-        case FOURCC('m', 'o', 'o', 'f'):
-        case FOURCC('t', 'r', 'a', 'f'):
-        case FOURCC('m', 'f', 'r', 'a'):
-        case FOURCC('u', 'd', 't', 'a'):
-        case FOURCC('i', 'l', 's', 't'):
-        case FOURCC('s', 'i', 'n', 'f'):
-        case FOURCC('s', 'c', 'h', 'i'):
-        case FOURCC('e', 'd', 't', 's'):
-        case FOURCC('w', 'a', 'v', 'e'):
+        case FOURCC("moov"):
+        case FOURCC("trak"):
+        case FOURCC("mdia"):
+        case FOURCC("minf"):
+        case FOURCC("dinf"):
+        case FOURCC("stbl"):
+        case FOURCC("mvex"):
+        case FOURCC("moof"):
+        case FOURCC("traf"):
+        case FOURCC("mfra"):
+        case FOURCC("udta"):
+        case FOURCC("ilst"):
+        case FOURCC("sinf"):
+        case FOURCC("schi"):
+        case FOURCC("edts"):
+        case FOURCC("wave"):
         {
-            if (chunk_type == FOURCC('m', 'o', 'o', 'v') && depth != 0) {
+            if (chunk_type == FOURCC("moov") && depth != 0) {
                 ALOGE("moov: depth %d", depth);
                 return ERROR_MALFORMED;
             }
 
-            if (chunk_type == FOURCC('m', 'o', 'o', 'v') && mInitCheck == OK) {
+            if (chunk_type == FOURCC("moov") && mInitCheck == OK) {
                 ALOGE("duplicate moov");
                 return ERROR_MALFORMED;
             }
 
-            if (chunk_type == FOURCC('m', 'o', 'o', 'f') && !mMoofFound) {
+            if (chunk_type == FOURCC("moof") && !mMoofFound) {
                 // store the offset of the first segment
                 mMoofFound = true;
                 mMoofOffset = *offset;
             }
 
-            if (chunk_type == FOURCC('s', 't', 'b', 'l')) {
+            if (chunk_type == FOURCC("stbl")) {
                 ALOGV("sampleTable chunk is %" PRIu64 " bytes long.", chunk_size);
 
                 if (mDataSource->flags()
@@ -954,7 +954,7 @@
             }
 
             bool isTrack = false;
-            if (chunk_type == FOURCC('t', 'r', 'a', 'k')) {
+            if (chunk_type == FOURCC("trak")) {
                 if (depth != 1) {
                     ALOGE("trak: depth %d", depth);
                     return ERROR_MALFORMED;
@@ -985,6 +985,22 @@
             off64_t stop_offset = *offset + chunk_size;
             *offset = data_offset;
             while (*offset < stop_offset) {
+
+                // Skip the optional 'udta' terminator.
+                if (mIsQT && stop_offset - *offset == 4 && chunk_type == FOURCC("udta")) {
+                    // A QuickTime 'udta' box may end with an optional 32-bit
+                    // terminator of 0x00000000; consume it if present.
+                    uint32_t terminate_code = 1;
+                    mDataSource->readAt(*offset, &terminate_code, 4);
+                    if (0 == terminate_code) {
+                        *offset += 4;
+                        ALOGD("Terminal code for udta");
+                        continue;
+                    } else {
+                        ALOGW("invalid udta Terminal code");
+                    }
+                }
+
                 status_t err = parseChunk(offset, depth + 1);
                 if (err != OK) {
                     if (isTrack) {
@@ -1033,7 +1049,7 @@
 
                     return OK;
                 }
-            } else if (chunk_type == FOURCC('m', 'o', 'o', 'v')) {
+            } else if (chunk_type == FOURCC("moov")) {
                 mInitCheck = OK;
 
                 return UNKNOWN_ERROR;  // Return a dummy error.
@@ -1041,7 +1057,7 @@
             break;
         }
 
-        case FOURCC('s', 'c', 'h', 'm'):
+        case FOURCC("schm"):
         {
 
             *offset += chunk_size;
@@ -1056,23 +1072,23 @@
             scheme_type = ntohl(scheme_type);
             int32_t mode = kCryptoModeUnencrypted;
             switch(scheme_type) {
-                case FOURCC('c', 'b', 'c', '1'):
+                case FOURCC("cbc1"):
                 {
                     mode = kCryptoModeAesCbc;
                     break;
                 }
-                case FOURCC('c', 'b', 'c', 's'):
+                case FOURCC("cbcs"):
                 {
                     mode = kCryptoModeAesCbc;
                     mLastTrack->subsample_encryption = true;
                     break;
                 }
-                case FOURCC('c', 'e', 'n', 'c'):
+                case FOURCC("cenc"):
                 {
                     mode = kCryptoModeAesCtr;
                     break;
                 }
-                case FOURCC('c', 'e', 'n', 's'):
+                case FOURCC("cens"):
                 {
                     mode = kCryptoModeAesCtr;
                     mLastTrack->subsample_encryption = true;
@@ -1086,7 +1102,7 @@
         }
 
 
-        case FOURCC('e', 'l', 's', 't'):
+        case FOURCC("elst"):
         {
             *offset += chunk_size;
 
@@ -1142,7 +1158,7 @@
             break;
         }
 
-        case FOURCC('f', 'r', 'm', 'a'):
+        case FOURCC("frma"):
         {
             *offset += chunk_size;
 
@@ -1171,7 +1187,7 @@
             // If format type is 'alac', it is necessary to get the parameters
             // from a alac atom spreading behind the frma atom.
             // See 'external/alac/ALACMagicCookieDescription.txt'.
-            if (original_fourcc == FOURCC('a', 'l', 'a', 'c')) {
+            if (original_fourcc == FOURCC("alac")) {
                 // Store ALAC magic cookie (decoder needs it).
                 uint8_t alacInfo[12];
                 data_offset = *offset;
@@ -1181,7 +1197,7 @@
                 }
                 uint32_t size = U32_AT(&alacInfo[0]);
                 if ((size != ALAC_SPECIFIC_INFO_SIZE) ||
-                        (U32_AT(&alacInfo[4]) != FOURCC('a', 'l', 'a', 'c')) ||
+                        (U32_AT(&alacInfo[4]) != FOURCC("alac")) ||
                         (U32_AT(&alacInfo[8]) != 0)) {
                     return ERROR_MALFORMED;
                 }
@@ -1210,7 +1226,7 @@
             break;
         }
 
-        case FOURCC('t', 'e', 'n', 'c'):
+        case FOURCC("tenc"):
         {
             *offset += chunk_size;
 
@@ -1323,7 +1339,7 @@
             break;
         }
 
-        case FOURCC('t', 'k', 'h', 'd'):
+        case FOURCC("tkhd"):
         {
             *offset += chunk_size;
 
@@ -1335,7 +1351,7 @@
             break;
         }
 
-        case FOURCC('t', 'r', 'e', 'f'):
+        case FOURCC("tref"):
         {
             off64_t stop_offset = *offset + chunk_size;
             *offset = data_offset;
@@ -1351,7 +1367,7 @@
             break;
         }
 
-        case FOURCC('t', 'h', 'm', 'b'):
+        case FOURCC("thmb"):
         {
             *offset += chunk_size;
 
@@ -1368,7 +1384,7 @@
             break;
         }
 
-        case FOURCC('p', 's', 's', 'h'):
+        case FOURCC("pssh"):
         {
             *offset += chunk_size;
 
@@ -1404,7 +1420,7 @@
             break;
         }
 
-        case FOURCC('m', 'd', 'h', 'd'):
+        case FOURCC("mdhd"):
         {
             *offset += chunk_size;
 
@@ -1500,7 +1516,7 @@
             break;
         }
 
-        case FOURCC('s', 't', 's', 'd'):
+        case FOURCC("stsd"):
         {
             uint8_t buffer[8];
             if (chunk_data_size < (off64_t)sizeof(buffer)) {
@@ -1552,7 +1568,7 @@
             }
             break;
         }
-        case FOURCC('m', 'e', 't', 't'):
+        case FOURCC("mett"):
         {
             *offset += chunk_size;
 
@@ -1606,16 +1622,16 @@
             break;
         }
 
-        case FOURCC('m', 'p', '4', 'a'):
-        case FOURCC('e', 'n', 'c', 'a'):
-        case FOURCC('s', 'a', 'm', 'r'):
-        case FOURCC('s', 'a', 'w', 'b'):
-        case FOURCC('t', 'w', 'o', 's'):
-        case FOURCC('s', 'o', 'w', 't'):
-        case FOURCC('a', 'l', 'a', 'c'):
+        case FOURCC("mp4a"):
+        case FOURCC("enca"):
+        case FOURCC("samr"):
+        case FOURCC("sawb"):
+        case FOURCC("twos"):
+        case FOURCC("sowt"):
+        case FOURCC("alac"):
         {
-            if (mIsQT && chunk_type == FOURCC('m', 'p', '4', 'a')
-                    && depth >= 1 && mPath[depth - 1] == FOURCC('w', 'a', 'v', 'e')) {
+            if (mIsQT && chunk_type == FOURCC("mp4a")
+                    && depth >= 1 && mPath[depth - 1] == FOURCC("wave")) {
                 // Ignore mp4a embedded in QT wave atom
                 *offset += chunk_size;
                 break;
@@ -1645,7 +1661,7 @@
             off64_t stop_offset = *offset + chunk_size;
             *offset = data_offset + sizeof(buffer);
 
-            if (mIsQT && chunk_type == FOURCC('m', 'p', '4', 'a')) {
+            if (mIsQT && chunk_type == FOURCC("mp4a")) {
                 if (version == 1) {
                     if (mDataSource->readAt(*offset, buffer, 16) < 16) {
                         return ERROR_IO;
@@ -1678,7 +1694,7 @@
                 }
             }
 
-            if (chunk_type != FOURCC('e', 'n', 'c', 'a')) {
+            if (chunk_type != FOURCC("enca")) {
                 // if the chunk type is enca, we'll get the type from the frma box later
                 AMediaFormat_setString(mLastTrack->meta,
                         AMEDIAFORMAT_KEY_MIME, FourCC2MIME(chunk_type));
@@ -1687,7 +1703,7 @@
                 if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_RAW, FourCC2MIME(chunk_type))) {
                     AMediaFormat_setInt32(mLastTrack->meta,
                             AMEDIAFORMAT_KEY_BITS_PER_SAMPLE, sample_size);
-                    if (chunk_type == FOURCC('t', 'w', 'o', 's')) {
+                    if (chunk_type == FOURCC("twos")) {
                         AMediaFormat_setInt32(mLastTrack->meta,
                                 AMEDIAFORMAT_KEY_PCM_BIG_ENDIAN, 1);
                     }
@@ -1698,7 +1714,7 @@
             AMediaFormat_setInt32(mLastTrack->meta, AMEDIAFORMAT_KEY_CHANNEL_COUNT, num_channels);
             AMediaFormat_setInt32(mLastTrack->meta, AMEDIAFORMAT_KEY_SAMPLE_RATE, sample_rate);
 
-            if (chunk_type == FOURCC('a', 'l', 'a', 'c')) {
+            if (chunk_type == FOURCC("alac")) {
 
                 // See 'external/alac/ALACMagicCookieDescription.txt for the detail'.
                 // Store ALAC magic cookie (decoder needs it).
@@ -1710,7 +1726,7 @@
                 }
                 uint32_t size = U32_AT(&alacInfo[0]);
                 if ((size != ALAC_SPECIFIC_INFO_SIZE) ||
-                        (U32_AT(&alacInfo[4]) != FOURCC('a', 'l', 'a', 'c')) ||
+                        (U32_AT(&alacInfo[4]) != FOURCC("alac")) ||
                         (U32_AT(&alacInfo[8]) != 0)) {
                     return ERROR_MALFORMED;
                 }
@@ -1748,15 +1764,15 @@
             break;
         }
 
-        case FOURCC('m', 'p', '4', 'v'):
-        case FOURCC('e', 'n', 'c', 'v'):
-        case FOURCC('s', '2', '6', '3'):
-        case FOURCC('H', '2', '6', '3'):
-        case FOURCC('h', '2', '6', '3'):
-        case FOURCC('a', 'v', 'c', '1'):
-        case FOURCC('h', 'v', 'c', '1'):
-        case FOURCC('h', 'e', 'v', '1'):
-        case FOURCC('a', 'v', '0', '1'):
+        case FOURCC("mp4v"):
+        case FOURCC("encv"):
+        case FOURCC("s263"):
+        case FOURCC("H263"):
+        case FOURCC("h263"):
+        case FOURCC("avc1"):
+        case FOURCC("hvc1"):
+        case FOURCC("hev1"):
+        case FOURCC("av01"):
         {
             uint8_t buffer[78];
             if (chunk_data_size < (ssize_t)sizeof(buffer)) {
@@ -1786,7 +1802,7 @@
             if (mLastTrack == NULL)
                 return ERROR_MALFORMED;
 
-            if (chunk_type != FOURCC('e', 'n', 'c', 'v')) {
+            if (chunk_type != FOURCC("encv")) {
                 // if the chunk type is encv, we'll get the type from the frma box later
                 AMediaFormat_setString(mLastTrack->meta,
                         AMEDIAFORMAT_KEY_MIME, FourCC2MIME(chunk_type));
@@ -1809,8 +1825,8 @@
             break;
         }
 
-        case FOURCC('s', 't', 'c', 'o'):
-        case FOURCC('c', 'o', '6', '4'):
+        case FOURCC("stco"):
+        case FOURCC("co64"):
         {
             if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL)) {
                 return ERROR_MALFORMED;
@@ -1829,7 +1845,7 @@
             break;
         }
 
-        case FOURCC('s', 't', 's', 'c'):
+        case FOURCC("stsc"):
         {
             if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
                 return ERROR_MALFORMED;
@@ -1847,8 +1863,8 @@
             break;
         }
 
-        case FOURCC('s', 't', 's', 'z'):
-        case FOURCC('s', 't', 'z', '2'):
+        case FOURCC("stsz"):
+        case FOURCC("stz2"):
         {
             if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL)) {
                 return ERROR_MALFORMED;
@@ -1967,7 +1983,7 @@
             break;
         }
 
-        case FOURCC('s', 't', 't', 's'):
+        case FOURCC("stts"):
         {
             if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
                 return ERROR_MALFORMED;
@@ -1985,7 +2001,7 @@
             break;
         }
 
-        case FOURCC('c', 't', 't', 's'):
+        case FOURCC("ctts"):
         {
             if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
                 return ERROR_MALFORMED;
@@ -2003,7 +2019,7 @@
             break;
         }
 
-        case FOURCC('s', 't', 's', 's'):
+        case FOURCC("stss"):
         {
             if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
                 return ERROR_MALFORMED;
@@ -2022,7 +2038,7 @@
         }
 
         // \xA9xyz
-        case FOURCC(0xA9, 'x', 'y', 'z'):
+        case FOURCC("\251xyz"):
         {
             *offset += chunk_size;
 
@@ -2072,7 +2088,7 @@
             break;
         }
 
-        case FOURCC('e', 's', 'd', 's'):
+        case FOURCC("esds"):
         {
             *offset += chunk_size;
 
@@ -2102,7 +2118,7 @@
                     AMEDIAFORMAT_KEY_ESDS, &buffer[4], chunk_data_size - 4);
 
             if (mPath.size() >= 2
-                    && mPath[mPath.size() - 2] == FOURCC('m', 'p', '4', 'a')) {
+                    && mPath[mPath.size() - 2] == FOURCC("mp4a")) {
                 // Information from the ESDS must be relied on for proper
                 // setup of sample rate and channel count for MPEG4 Audio.
                 // The generic header appears to only contain generic
@@ -2116,7 +2132,7 @@
                 }
             }
             if (mPath.size() >= 2
-                    && mPath[mPath.size() - 2] == FOURCC('m', 'p', '4', 'v')) {
+                    && mPath[mPath.size() - 2] == FOURCC("mp4v")) {
                 // Check if the video is MPEG2
                 ESDS esds(&buffer[4], chunk_data_size - 4);
 
@@ -2131,7 +2147,7 @@
             break;
         }
 
-        case FOURCC('b', 't', 'r', 't'):
+        case FOURCC("btrt"):
         {
             *offset += chunk_size;
             if (mLastTrack == NULL) {
@@ -2161,7 +2177,7 @@
             break;
         }
 
-        case FOURCC('a', 'v', 'c', 'C'):
+        case FOURCC("avcC"):
         {
             *offset += chunk_size;
 
@@ -2185,7 +2201,7 @@
 
             break;
         }
-        case FOURCC('h', 'v', 'c', 'C'):
+        case FOURCC("hvcC"):
         {
             auto buffer = heapbuffer<uint8_t>(chunk_data_size);
 
@@ -2209,7 +2225,7 @@
             break;
         }
 
-        case FOURCC('d', '2', '6', '3'):
+        case FOURCC("d263"):
         {
             *offset += chunk_size;
             /*
@@ -2244,7 +2260,7 @@
             break;
         }
 
-        case FOURCC('m', 'e', 't', 'a'):
+        case FOURCC("meta"):
         {
             off64_t stop_offset = *offset + chunk_size;
             *offset = data_offset;
@@ -2288,13 +2304,13 @@
             break;
         }
 
-        case FOURCC('i', 'l', 'o', 'c'):
-        case FOURCC('i', 'i', 'n', 'f'):
-        case FOURCC('i', 'p', 'r', 'p'):
-        case FOURCC('p', 'i', 't', 'm'):
-        case FOURCC('i', 'd', 'a', 't'):
-        case FOURCC('i', 'r', 'e', 'f'):
-        case FOURCC('i', 'p', 'r', 'o'):
+        case FOURCC("iloc"):
+        case FOURCC("iinf"):
+        case FOURCC("iprp"):
+        case FOURCC("pitm"):
+        case FOURCC("idat"):
+        case FOURCC("iref"):
+        case FOURCC("ipro"):
         {
             if (mIsHeif) {
                 if (mItemTable == NULL) {
@@ -2310,9 +2326,9 @@
             break;
         }
 
-        case FOURCC('m', 'e', 'a', 'n'):
-        case FOURCC('n', 'a', 'm', 'e'):
-        case FOURCC('d', 'a', 't', 'a'):
+        case FOURCC("mean"):
+        case FOURCC("name"):
+        case FOURCC("data"):
         {
             *offset += chunk_size;
 
@@ -2327,7 +2343,7 @@
             break;
         }
 
-        case FOURCC('m', 'v', 'h', 'd'):
+        case FOURCC("mvhd"):
         {
             *offset += chunk_size;
 
@@ -2379,7 +2395,7 @@
             break;
         }
 
-        case FOURCC('m', 'e', 'h', 'd'):
+        case FOURCC("mehd"):
         {
             *offset += chunk_size;
 
@@ -2424,7 +2440,7 @@
             break;
         }
 
-        case FOURCC('m', 'd', 'a', 't'):
+        case FOURCC("mdat"):
         {
             mMdatFound = true;
 
@@ -2432,7 +2448,7 @@
             break;
         }
 
-        case FOURCC('h', 'd', 'l', 'r'):
+        case FOURCC("hdlr"):
         {
             *offset += chunk_size;
 
@@ -2450,7 +2466,7 @@
             // For the 3GPP file format, the handler-type within the 'hdlr' box
             // shall be 'text'. We also want to support 'sbtl' handler type
             // for a practical reason as various MPEG4 containers use it.
-            if (type == FOURCC('t', 'e', 'x', 't') || type == FOURCC('s', 'b', 't', 'l')) {
+            if (type == FOURCC("text") || type == FOURCC("sbtl")) {
                 if (mLastTrack != NULL) {
                     AMediaFormat_setString(mLastTrack->meta,
                             AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_TEXT_3GPP);
@@ -2460,7 +2476,7 @@
             break;
         }
 
-        case FOURCC('k', 'e', 'y', 's'):
+        case FOURCC("keys"):
         {
             *offset += chunk_size;
 
@@ -2473,7 +2489,7 @@
             break;
         }
 
-        case FOURCC('t', 'r', 'e', 'x'):
+        case FOURCC("trex"):
         {
             *offset += chunk_size;
 
@@ -2492,7 +2508,7 @@
             break;
         }
 
-        case FOURCC('t', 'x', '3', 'g'):
+        case FOURCC("tx3g"):
         {
             if (mLastTrack == NULL)
                 return ERROR_MALFORMED;
@@ -2536,7 +2552,7 @@
             break;
         }
 
-        case FOURCC('c', 'o', 'v', 'r'):
+        case FOURCC("covr"):
         {
             *offset += chunk_size;
 
@@ -2567,12 +2583,12 @@
             break;
         }
 
-        case FOURCC('c', 'o', 'l', 'r'):
+        case FOURCC("colr"):
         {
             *offset += chunk_size;
             // this must be in a VisualSampleEntry box under the Sample Description Box ('stsd')
             // ignore otherwise
-            if (depth >= 2 && mPath[depth - 2] == FOURCC('s', 't', 's', 'd')) {
+            if (depth >= 2 && mPath[depth - 2] == FOURCC("stsd")) {
                 status_t err = parseColorInfo(data_offset, chunk_data_size);
                 if (err != OK) {
                     return err;
@@ -2582,12 +2598,12 @@
             break;
         }
 
-        case FOURCC('t', 'i', 't', 'l'):
-        case FOURCC('p', 'e', 'r', 'f'):
-        case FOURCC('a', 'u', 't', 'h'):
-        case FOURCC('g', 'n', 'r', 'e'):
-        case FOURCC('a', 'l', 'b', 'm'):
-        case FOURCC('y', 'r', 'r', 'c'):
+        case FOURCC("titl"):
+        case FOURCC("perf"):
+        case FOURCC("auth"):
+        case FOURCC("gnre"):
+        case FOURCC("albm"):
+        case FOURCC("yrrc"):
         {
             *offset += chunk_size;
 
@@ -2600,7 +2616,7 @@
             break;
         }
 
-        case FOURCC('I', 'D', '3', '2'):
+        case FOURCC("ID32"):
         {
             *offset += chunk_size;
 
@@ -2613,7 +2629,7 @@
             break;
         }
 
-        case FOURCC('-', '-', '-', '-'):
+        case FOURCC("----"):
         {
             mLastCommentMean.clear();
             mLastCommentName.clear();
@@ -2622,7 +2638,7 @@
             break;
         }
 
-        case FOURCC('s', 'i', 'd', 'x'):
+        case FOURCC("sidx"):
         {
             status_t err = parseSegmentIndex(data_offset, chunk_data_size);
             if (err != OK) {
@@ -2632,25 +2648,25 @@
             return UNKNOWN_ERROR; // stop parsing after sidx
         }
 
-        case FOURCC('a', 'c', '-', '3'):
+        case FOURCC("ac-3"):
         {
             *offset += chunk_size;
             return parseAC3SpecificBox(data_offset);
         }
 
-        case FOURCC('e', 'c', '-', '3'):
+        case FOURCC("ec-3"):
         {
             *offset += chunk_size;
             return parseEAC3SpecificBox(data_offset);
         }
 
-        case FOURCC('a', 'c', '-', '4'):
+        case FOURCC("ac-4"):
         {
             *offset += chunk_size;
             return parseAC4SpecificBox(data_offset);
         }
 
-        case FOURCC('f', 't', 'y', 'p'):
+        case FOURCC("ftyp"):
         {
             if (chunk_data_size < 8 || depth != 0) {
                 return ERROR_MALFORMED;
@@ -2675,16 +2691,16 @@
                 brandSet.insert(brand);
             }
 
-            if (brandSet.count(FOURCC('q', 't', ' ', ' ')) > 0) {
+            if (brandSet.count(FOURCC("qt  ")) > 0) {
                 mIsQT = true;
             } else {
-                if (brandSet.count(FOURCC('m', 'i', 'f', '1')) > 0
-                 && brandSet.count(FOURCC('h', 'e', 'i', 'c')) > 0) {
+                if (brandSet.count(FOURCC("mif1")) > 0
+                 && brandSet.count(FOURCC("heic")) > 0) {
                     ALOGV("identified HEIF image");
 
                     mIsHeif = true;
-                    brandSet.erase(FOURCC('m', 'i', 'f', '1'));
-                    brandSet.erase(FOURCC('h', 'e', 'i', 'c'));
+                    brandSet.erase(FOURCC("mif1"));
+                    brandSet.erase(FOURCC("heic"));
                 }
 
                 if (!brandSet.empty()) {
@@ -2771,7 +2787,7 @@
     // + 4-byte size
     offset += 4;
     uint32_t type;
-    if (!mDataSource->getUInt32(offset, &type) || type != FOURCC('d', 'a', 'c', '4')) {
+    if (!mDataSource->getUInt32(offset, &type) || type != FOURCC("dac4")) {
         ALOGE("MPEG4Extractor: error while reading ac-4 specific block: header not dac4");
         return ERROR_MALFORMED;
     }
@@ -2898,7 +2914,7 @@
 
     offset += 4;
     uint32_t type;
-    if (!mDataSource->getUInt32(offset, &type) || type != FOURCC('d', 'e', 'c', '3')) {
+    if (!mDataSource->getUInt32(offset, &type) || type != FOURCC("dec3")) {
         ALOGE("MPEG4Extractor: error while reading eac-3 specific block: header not dec3");
         return ERROR_MALFORMED;
     }
@@ -3055,7 +3071,7 @@
 
     offset += 4;
     uint32_t type;
-    if (!mDataSource->getUInt32(offset, &type) || type != FOURCC('d', 'a', 'c', '3')) {
+    if (!mDataSource->getUInt32(offset, &type) || type != FOURCC("dac3")) {
         ALOGE("MPEG4Extractor: error while reading ac-3 specific block: header not dac3");
         return ERROR_MALFORMED;
     }
@@ -3257,7 +3273,7 @@
 
         uint32_t type;
         if (!mDataSource->getUInt32(keyOffset + 4, &type)
-                || type != FOURCC('m', 'd', 't', 'a')) {
+                || type != FOURCC("mdta")) {
             return ERROR_MALFORMED;
         }
 
@@ -3299,7 +3315,7 @@
     }
     uint32_t atomFourCC;
     if (!mDataSource->getUInt32(offset + 4, &atomFourCC)
-            || atomFourCC != FOURCC('d', 'a', 't', 'a')) {
+            || atomFourCC != FOURCC("data")) {
         return ERROR_MALFORMED;
     }
     uint32_t dataType;
@@ -3460,48 +3476,48 @@
     MakeFourCCString(mPath[4], chunk);
     ALOGV("meta: %s @ %lld", chunk, (long long)offset);
     switch ((int32_t)mPath[4]) {
-        case FOURCC(0xa9, 'a', 'l', 'b'):
+        case FOURCC("\251alb"):
         {
             metadataKey = "album";
             break;
         }
-        case FOURCC(0xa9, 'A', 'R', 'T'):
+        case FOURCC("\251ART"):
         {
             metadataKey = "artist";
             break;
         }
-        case FOURCC('a', 'A', 'R', 'T'):
+        case FOURCC("aART"):
         {
             metadataKey = "albumartist";
             break;
         }
-        case FOURCC(0xa9, 'd', 'a', 'y'):
+        case FOURCC("\251day"):
         {
             metadataKey = "year";
             break;
         }
-        case FOURCC(0xa9, 'n', 'a', 'm'):
+        case FOURCC("\251nam"):
         {
             metadataKey = "title";
             break;
         }
-        case FOURCC(0xa9, 'w', 'r', 't'):
+        case FOURCC("\251wrt"):
         {
             metadataKey = "writer";
             break;
         }
-        case FOURCC('c', 'o', 'v', 'r'):
+        case FOURCC("covr"):
         {
             metadataKey = "albumart";
             break;
         }
-        case FOURCC('g', 'n', 'r', 'e'):
-        case FOURCC(0xa9, 'g', 'e', 'n'):
+        case FOURCC("gnre"):
+        case FOURCC("\251gen"):
         {
             metadataKey = "genre";
             break;
         }
-        case FOURCC('c', 'p', 'i', 'l'):
+        case FOURCC("cpil"):
         {
             if (size == 9 && flags == 21) {
                 char tmp[16];
@@ -3512,7 +3528,7 @@
             }
             break;
         }
-        case FOURCC('t', 'r', 'k', 'n'):
+        case FOURCC("trkn"):
         {
             if (size == 16 && flags == 0) {
                 char tmp[16];
@@ -3524,7 +3540,7 @@
             }
             break;
         }
-        case FOURCC('d', 'i', 's', 'k'):
+        case FOURCC("disk"):
         {
             if ((size == 14 || size == 16) && flags == 0) {
                 char tmp[16];
@@ -3536,17 +3552,17 @@
             }
             break;
         }
-        case FOURCC('-', '-', '-', '-'):
+        case FOURCC("----"):
         {
             buffer[size] = '\0';
             switch (mPath[5]) {
-                case FOURCC('m', 'e', 'a', 'n'):
+                case FOURCC("mean"):
                     mLastCommentMean.setTo((const char *)buffer + 4);
                     break;
-                case FOURCC('n', 'a', 'm', 'e'):
+                case FOURCC("name"):
                     mLastCommentName.setTo((const char *)buffer + 4);
                     break;
-                case FOURCC('d', 'a', 't', 'a'):
+                case FOURCC("data"):
                     if (size < 8) {
                         delete[] buffer;
                         buffer = NULL;
@@ -3654,8 +3670,8 @@
     }
 
     int32_t type = U32_AT(&buffer[0]);
-    if ((type == FOURCC('n', 'c', 'l', 'x') && size >= 11)
-            || (type == FOURCC('n', 'c', 'l', 'c') && size >= 10)) {
+    if ((type == FOURCC("nclx") && size >= 11)
+            || (type == FOURCC("nclc") && size >= 10)) {
         // only store the first color specification
         int32_t existingColor;
         if (!AMediaFormat_getInt32(mLastTrack->meta,
@@ -3663,7 +3679,7 @@
             int32_t primaries = U16_AT(&buffer[4]);
             int32_t isotransfer = U16_AT(&buffer[6]);
             int32_t coeffs = U16_AT(&buffer[8]);
-            bool fullRange = (type == FOURCC('n', 'c', 'l', 'x')) && (buffer[10] & 128);
+            bool fullRange = (type == FOURCC("nclx")) && (buffer[10] & 128);
 
             int32_t range = 0;
             int32_t standard = 0;
@@ -3709,27 +3725,27 @@
 
     const char *metadataKey = nullptr;
     switch (mPath[depth]) {
-        case FOURCC('t', 'i', 't', 'l'):
+        case FOURCC("titl"):
         {
             metadataKey = "title";
             break;
         }
-        case FOURCC('p', 'e', 'r', 'f'):
+        case FOURCC("perf"):
         {
             metadataKey = "artist";
             break;
         }
-        case FOURCC('a', 'u', 't', 'h'):
+        case FOURCC("auth"):
         {
             metadataKey = "writer";
             break;
         }
-        case FOURCC('g', 'n', 'r', 'e'):
+        case FOURCC("gnre"):
         {
             metadataKey = "genre";
             break;
         }
-        case FOURCC('a', 'l', 'b', 'm'):
+        case FOURCC("albm"):
         {
             if (buffer[size - 1] != '\0') {
               char tmp[4];
@@ -3741,7 +3757,7 @@
             metadataKey = "album";
             break;
         }
-        case FOURCC('y', 'r', 'r', 'c'):
+        case FOURCC("yrrc"):
         {
             if (size < 6) {
                 delete[] buffer;
@@ -4487,7 +4503,7 @@
     }
 
     if (!strncasecmp("video/", mime, 6)) {
-        uint32_t firstSampleCTS = 0;
+        uint64_t firstSampleCTS = 0;
         err = mSampleTable->getMetaDataForSample(0, NULL, NULL, &firstSampleCTS);
         // Start offset should be less or equal to composition time of first sample.
         // Composition time stamp of first sample cannot be negative.
@@ -4594,8 +4610,8 @@
 
     switch(chunk_type) {
 
-        case FOURCC('t', 'r', 'a', 'f'):
-        case FOURCC('m', 'o', 'o', 'f'): {
+        case FOURCC("traf"):
+        case FOURCC("moof"): {
             off64_t stop_offset = *offset + chunk_size;
             *offset = data_offset;
             while (*offset < stop_offset) {
@@ -4604,7 +4620,7 @@
                     return err;
                 }
             }
-            if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
+            if (chunk_type == FOURCC("moof")) {
                 // *offset points to the box following this moof. Find the next moof from there.
 
                 while (true) {
@@ -4633,7 +4649,7 @@
                         return ERROR_MALFORMED;
                     }
 
-                    if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
+                    if (chunk_type == FOURCC("moof")) {
                         mNextMoofOffset = *offset;
                         break;
                     } else if (chunk_size == 0) {
@@ -4645,7 +4661,7 @@
             break;
         }
 
-        case FOURCC('t', 'f', 'h', 'd'): {
+        case FOURCC("tfhd"): {
                 status_t err;
                 if ((err = parseTrackFragmentHeader(data_offset, chunk_data_size)) != OK) {
                     return err;
@@ -4654,7 +4670,7 @@
                 break;
         }
 
-        case FOURCC('t', 'r', 'u', 'n'): {
+        case FOURCC("trun"): {
                 status_t err;
                 if (mLastParsedTrackId == mTrackId) {
                     if ((err = parseTrackFragmentRun(data_offset, chunk_data_size)) != OK) {
@@ -4666,7 +4682,7 @@
                 break;
         }
 
-        case FOURCC('s', 'a', 'i', 'z'): {
+        case FOURCC("saiz"): {
             status_t err;
             if ((err = parseSampleAuxiliaryInformationSizes(data_offset, chunk_data_size)) != OK) {
                 return err;
@@ -4674,7 +4690,7 @@
             *offset += chunk_size;
             break;
         }
-        case FOURCC('s', 'a', 'i', 'o'): {
+        case FOURCC("saio"): {
             status_t err;
             if ((err = parseSampleAuxiliaryInformationOffsets(data_offset, chunk_data_size))
                     != OK) {
@@ -4684,7 +4700,7 @@
             break;
         }
 
-        case FOURCC('s', 'e', 'n', 'c'): {
+        case FOURCC("senc"): {
             status_t err;
             if ((err = parseSampleEncryption(data_offset)) != OK) {
                 return err;
@@ -4693,7 +4709,7 @@
             break;
         }
 
-        case FOURCC('m', 'd', 'a', 't'): {
+        case FOURCC("mdat"): {
             // parse DRM info if present
             ALOGV("MPEG4Source::parseChunk mdat");
             // if saiz/saoi was previously observed, do something with the sampleinfos
@@ -4852,7 +4868,9 @@
         off64_t offset, bool isSubsampleEncryption, uint32_t flags) {
 
     int32_t ivlength;
-    CHECK(AMediaFormat_getInt32(mFormat, AMEDIAFORMAT_KEY_CRYPTO_DEFAULT_IV_SIZE, &ivlength));
+    if (!AMediaFormat_getInt32(mFormat, AMEDIAFORMAT_KEY_CRYPTO_DEFAULT_IV_SIZE, &ivlength)) {
+        return ERROR_MALFORMED;
+    }
 
     // only 0, 8 and 16 byte initialization vectors are supported
     if (ivlength != 0 && ivlength != 8 && ivlength != 16) {
@@ -5142,7 +5160,7 @@
         sampleCtsOffset = 0;
     }
 
-    if (size < (off64_t)(sampleCount * bytesPerSample)) {
+    if (size < (off64_t)sampleCount * bytesPerSample) {
         return -EINVAL;
     }
 
@@ -5333,7 +5351,7 @@
                         sampleIndex, &syncSampleIndex, findFlags);
             }
 
-            uint32_t sampleTime;
+            uint64_t sampleTime;
             if (err == OK) {
                 err = mSampleTable->getMetaDataForSample(
                         sampleIndex, NULL, NULL, &sampleTime);
@@ -5383,7 +5401,7 @@
 
     off64_t offset = 0;
     size_t size = 0;
-    uint32_t cts, stts;
+    uint64_t cts, stts;
     bool isSyncSample;
     bool newBuffer = false;
     if (mBuffer == NULL) {
@@ -6015,28 +6033,29 @@
 
 static bool isCompatibleBrand(uint32_t fourcc) {
     static const uint32_t kCompatibleBrands[] = {
-        FOURCC('i', 's', 'o', 'm'),
-        FOURCC('i', 's', 'o', '2'),
-        FOURCC('a', 'v', 'c', '1'),
-        FOURCC('h', 'v', 'c', '1'),
-        FOURCC('h', 'e', 'v', '1'),
-        FOURCC('a', 'v', '0', '1'),
-        FOURCC('3', 'g', 'p', '4'),
-        FOURCC('m', 'p', '4', '1'),
-        FOURCC('m', 'p', '4', '2'),
-        FOURCC('d', 'a', 's', 'h'),
+        FOURCC("isom"),
+        FOURCC("iso2"),
+        FOURCC("avc1"),
+        FOURCC("hvc1"),
+        FOURCC("hev1"),
+        FOURCC("av01"),
+        FOURCC("3gp4"),
+        FOURCC("mp41"),
+        FOURCC("mp42"),
+        FOURCC("dash"),
 
         // Won't promise that the following file types can be played.
         // Just give these file types a chance.
-        FOURCC('q', 't', ' ', ' '),  // Apple's QuickTime
-        FOURCC('M', 'S', 'N', 'V'),  // Sony's PSP
+        FOURCC("qt  "),  // Apple's QuickTime
+        FOURCC("MSNV"),  // Sony's PSP
+        FOURCC("wmf "),
 
-        FOURCC('3', 'g', '2', 'a'),  // 3GPP2
-        FOURCC('3', 'g', '2', 'b'),
-        FOURCC('m', 'i', 'f', '1'),  // HEIF image
-        FOURCC('h', 'e', 'i', 'c'),  // HEIF image
-        FOURCC('m', 's', 'f', '1'),  // HEIF image sequence
-        FOURCC('h', 'e', 'v', 'c'),  // HEIF image sequence
+        FOURCC("3g2a"),  // 3GPP2
+        FOURCC("3g2b"),
+        FOURCC("mif1"),  // HEIF image
+        FOURCC("heic"),  // HEIF image
+        FOURCC("msf1"),  // HEIF image sequence
+        FOURCC("hevc"),  // HEIF image sequence
     };
 
     for (size_t i = 0;
@@ -6104,7 +6123,7 @@
         ALOGV("saw chunk type %s, size %" PRIu64 " @ %lld",
                 chunkstring, chunkSize, (long long)offset);
         switch (chunkType) {
-            case FOURCC('f', 't', 'y', 'p'):
+            case FOURCC("ftyp"):
             {
                 if (chunkDataSize < 8) {
                     return false;
@@ -6139,7 +6158,7 @@
                 break;
             }
 
-            case FOURCC('m', 'o', 'o', 'v'):
+            case FOURCC("moov"):
             {
                 moovAtomEndOffset = offset + chunkSize;
 
diff --git a/media/extractors/mp4/SampleIterator.cpp b/media/extractors/mp4/SampleIterator.cpp
index 1a6d306..ec12130 100644
--- a/media/extractors/mp4/SampleIterator.cpp
+++ b/media/extractors/mp4/SampleIterator.cpp
@@ -301,7 +301,7 @@
 }
 
 status_t SampleIterator::findSampleTimeAndDuration(
-        uint32_t sampleIndex, uint32_t *time, uint32_t *duration) {
+        uint32_t sampleIndex, uint64_t *time, uint64_t *duration) {
     if (sampleIndex >= mTable->mNumSampleSizes) {
         return ERROR_OUT_OF_RANGE;
     }
@@ -314,8 +314,8 @@
             break;
         }
         if (mTimeToSampleIndex == mTable->mTimeToSampleCount ||
-            (mTTSDuration != 0 && mTTSCount > UINT32_MAX / mTTSDuration) ||
-            mTTSSampleTime > UINT32_MAX - (mTTSCount * mTTSDuration)) {
+            (mTTSDuration != 0 && mTTSCount > UINT64_MAX / mTTSDuration) ||
+            mTTSSampleTime > UINT64_MAX - (mTTSCount * mTTSDuration)) {
             return ERROR_OUT_OF_RANGE;
         }
 
@@ -330,7 +330,7 @@
 
     // below is equivalent to:
     // *time = mTTSSampleTime + mTTSDuration * (sampleIndex - mTTSSampleIndex);
-    uint32_t tmp;
+    uint64_t tmp;
     if (__builtin_sub_overflow(sampleIndex, mTTSSampleIndex, &tmp) ||
             __builtin_mul_overflow(mTTSDuration, tmp, &tmp) ||
             __builtin_add_overflow(mTTSSampleTime, tmp, &tmp)) {
@@ -340,15 +340,15 @@
 
     int32_t offset = mTable->getCompositionTimeOffset(sampleIndex);
     if ((offset < 0 && *time < (offset == INT32_MIN ?
-            INT32_MAX : uint32_t(-offset))) ||
-            (offset > 0 && *time > UINT32_MAX - offset)) {
-        ALOGE("%u + %d would overflow", *time, offset);
+            INT64_MAX : uint64_t(-offset))) ||
+            (offset > 0 && *time > UINT64_MAX - offset)) {
+        ALOGE("%llu + %d would overflow", (unsigned long long) *time, offset);
         return ERROR_OUT_OF_RANGE;
     }
     if (offset > 0) {
         *time += offset;
     } else {
-        *time -= (offset == INT32_MIN ? INT32_MAX : (-offset));
+        *time -= (offset == INT32_MIN ? INT64_MAX : (-offset));
     }
 
     *duration = mTTSDuration;
diff --git a/media/extractors/mp4/SampleIterator.h b/media/extractors/mp4/SampleIterator.h
index 6e4f60e..5a0ea76 100644
--- a/media/extractors/mp4/SampleIterator.h
+++ b/media/extractors/mp4/SampleIterator.h
@@ -33,8 +33,8 @@
     uint32_t getDescIndex() const { return mChunkDesc; }
     off64_t getSampleOffset() const { return mCurrentSampleOffset; }
     size_t getSampleSize() const { return mCurrentSampleSize; }
-    uint32_t getSampleTime() const { return mCurrentSampleTime; }
-    uint32_t getSampleDuration() const { return mCurrentSampleDuration; }
+    uint64_t getSampleTime() const { return mCurrentSampleTime; }
+    uint64_t getSampleDuration() const { return mCurrentSampleDuration; }
 
     uint32_t getLastSampleIndexInChunk() const {
         return mCurrentSampleIndex + mSamplesPerChunk -
@@ -63,20 +63,20 @@
 
     uint32_t mTimeToSampleIndex;
     uint32_t mTTSSampleIndex;
-    uint32_t mTTSSampleTime;
+    uint64_t mTTSSampleTime;
     uint32_t mTTSCount;
-    uint32_t mTTSDuration;
+    uint64_t mTTSDuration;
 
     uint32_t mCurrentSampleIndex;
     off64_t mCurrentSampleOffset;
     size_t mCurrentSampleSize;
-    uint32_t mCurrentSampleTime;
-    uint32_t mCurrentSampleDuration;
+    uint64_t mCurrentSampleTime;
+    uint64_t mCurrentSampleDuration;
 
     void reset();
     status_t findChunkRange(uint32_t sampleIndex);
     status_t getChunkOffset(uint32_t chunk, off64_t *offset);
-    status_t findSampleTimeAndDuration(uint32_t sampleIndex, uint32_t *time, uint32_t *duration);
+    status_t findSampleTimeAndDuration(uint32_t sampleIndex, uint64_t *time, uint64_t *duration);
 
     SampleIterator(const SampleIterator &);
     SampleIterator &operator=(const SampleIterator &);
diff --git a/media/extractors/mp4/SampleTable.cpp b/media/extractors/mp4/SampleTable.cpp
index d242798..bf29bf1 100644
--- a/media/extractors/mp4/SampleTable.cpp
+++ b/media/extractors/mp4/SampleTable.cpp
@@ -37,13 +37,13 @@
 namespace android {
 
 // static
-const uint32_t SampleTable::kChunkOffsetType32 = FOURCC('s', 't', 'c', 'o');
+const uint32_t SampleTable::kChunkOffsetType32 = FOURCC("stco");
 // static
-const uint32_t SampleTable::kChunkOffsetType64 = FOURCC('c', 'o', '6', '4');
+const uint32_t SampleTable::kChunkOffsetType64 = FOURCC("co64");
 // static
-const uint32_t SampleTable::kSampleSizeType32 = FOURCC('s', 't', 's', 'z');
+const uint32_t SampleTable::kSampleSizeType32 = FOURCC("stsz");
 // static
-const uint32_t SampleTable::kSampleSizeTypeCompact = FOURCC('s', 't', 'z', '2');
+const uint32_t SampleTable::kSampleSizeTypeCompact = FOURCC("stz2");
 
 ////////////////////////////////////////////////////////////////////////////////
 
@@ -614,7 +614,7 @@
     return OK;
 }
 
-uint32_t abs_difference(uint32_t time1, uint32_t time2) {
+uint64_t abs_difference(uint64_t time1, uint64_t time2) {
     return time1 > time2 ? time1 - time2 : time2 - time1;
 }
 
@@ -662,7 +662,7 @@
     }
 
     uint32_t sampleIndex = 0;
-    uint32_t sampleTime = 0;
+    uint64_t sampleTime = 0;
 
     for (uint32_t i = 0; i < mTimeToSampleCount; ++i) {
         uint32_t n = mTimeToSample[2 * i];
@@ -684,13 +684,13 @@
                         (compTimeDelta == INT32_MIN ?
                                 INT32_MAX : uint32_t(-compTimeDelta)))
                         || (compTimeDelta > 0 &&
-                                sampleTime > UINT32_MAX - compTimeDelta)) {
-                    ALOGE("%u + %d would overflow, clamping",
-                            sampleTime, compTimeDelta);
+                                sampleTime > UINT64_MAX - compTimeDelta)) {
+                    ALOGE("%llu + %d would overflow, clamping",
+                            (unsigned long long) sampleTime, compTimeDelta);
                     if (compTimeDelta < 0) {
                         sampleTime = 0;
                     } else {
-                        sampleTime = UINT32_MAX;
+                        sampleTime = UINT64_MAX;
                     }
                     compTimeDelta = 0;
                 }
@@ -701,10 +701,10 @@
             }
 
             ++sampleIndex;
-            if (sampleTime > UINT32_MAX - delta) {
-                ALOGE("%u + %u would overflow, clamping",
-                    sampleTime, delta);
-                sampleTime = UINT32_MAX;
+            if (sampleTime > UINT64_MAX - delta) {
+                ALOGE("%llu + %u would overflow, clamping",
+                    (unsigned long long) sampleTime, delta);
+                sampleTime = UINT64_MAX;
             } else {
                 sampleTime += delta;
             }
@@ -870,19 +870,19 @@
             if (err != OK) {
                 return err;
             }
-            uint32_t sample_time = mSampleIterator->getSampleTime();
+            uint64_t sample_time = mSampleIterator->getSampleTime();
 
             err = mSampleIterator->seekTo(mSyncSamples[left]);
             if (err != OK) {
                 return err;
             }
-            uint32_t upper_time = mSampleIterator->getSampleTime();
+            uint64_t upper_time = mSampleIterator->getSampleTime();
 
             err = mSampleIterator->seekTo(mSyncSamples[left - 1]);
             if (err != OK) {
                 return err;
             }
-            uint32_t lower_time = mSampleIterator->getSampleTime();
+            uint64_t lower_time = mSampleIterator->getSampleTime();
 
             // use abs_difference for safety
             if (abs_difference(upper_time, sample_time) >
@@ -955,9 +955,9 @@
         uint32_t sampleIndex,
         off64_t *offset,
         size_t *size,
-        uint32_t *compositionTime,
+        uint64_t *compositionTime,
         bool *isSyncSample,
-        uint32_t *sampleDuration) {
+        uint64_t *sampleDuration) {
     Mutex::Autolock autoLock(mLock);
 
     status_t err;
diff --git a/media/extractors/mp4/SampleTable.h b/media/extractors/mp4/SampleTable.h
index d4b5dc8..57f6e62 100644
--- a/media/extractors/mp4/SampleTable.h
+++ b/media/extractors/mp4/SampleTable.h
@@ -66,9 +66,9 @@
             uint32_t sampleIndex,
             off64_t *offset,
             size_t *size,
-            uint32_t *compositionTime,
+            uint64_t *compositionTime,
             bool *isSyncSample = NULL,
-            uint32_t *sampleDuration = NULL);
+            uint64_t *sampleDuration = NULL);
 
     // call only after getMetaDataForSample has been called successfully.
     uint32_t getLastSampleIndexInChunk();
@@ -124,7 +124,7 @@
 
     struct SampleTimeEntry {
         uint32_t mSampleIndex;
-        uint32_t mCompositionTime;
+        uint64_t mCompositionTime;
     };
     SampleTimeEntry *mSampleTimeEntries;
 
diff --git a/media/extractors/mpeg2/MPEG2TSExtractor.cpp b/media/extractors/mpeg2/MPEG2TSExtractor.cpp
index e1509ee..49dd0b4 100644
--- a/media/extractors/mpeg2/MPEG2TSExtractor.cpp
+++ b/media/extractors/mpeg2/MPEG2TSExtractor.cpp
@@ -302,16 +302,21 @@
     return AMEDIA_ERROR_UNKNOWN;
 }
 
-void MPEG2TSExtractor::addSource(const sp<AnotherPacketSource> &impl) {
-    bool found = false;
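+// Returns the position of |impl| within mSourceImpls so that the parallel
+// mSyncPoints vector can be indexed consistently.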
+status_t MPEG2TSExtractor::findIndexOfSource(const sp<AnotherPacketSource> &impl, size_t *index) {
     for (size_t i = 0; i < mSourceImpls.size(); i++) {
         if (mSourceImpls[i] == impl) {
-            found = true;
-            break;
+            *index = i;
+            return OK;
         }
     }
-    if (!found) {
+    return NAME_NOT_FOUND;
+}
+
+void MPEG2TSExtractor::addSource(const sp<AnotherPacketSource> &impl) {
+    size_t index;
+    if (findIndexOfSource(impl, &index) != OK) {
         mSourceImpls.push(impl);
+        mSyncPoints.push();
     }
 }
 
@@ -319,6 +324,7 @@
     bool haveAudio = false;
     bool haveVideo = false;
     int64_t startTime = ALooper::GetNowUs();
+    size_t index;
 
     status_t err;
     while ((err = feedMore(true /* isInit */)) == OK
@@ -337,8 +343,9 @@
                     haveVideo = true;
                     addSource(impl);
                     if (!isScrambledFormat(*(format.get()))) {
-                        mSyncPoints.push();
-                        mSeekSyncPoints = &mSyncPoints.editTop();
+                        if (findIndexOfSource(impl, &index) == OK) {
+                            mSeekSyncPoints = &mSyncPoints.editItemAt(index);
+                        }
                     }
                 }
             }
@@ -352,10 +359,9 @@
                 if (format != NULL) {
                     haveAudio = true;
                     addSource(impl);
-                    if (!isScrambledFormat(*(format.get()))) {
-                        mSyncPoints.push();
-                        if (!haveVideo) {
-                            mSeekSyncPoints = &mSyncPoints.editTop();
+                    if (!isScrambledFormat(*(format.get())) && !haveVideo) {
+                        if (findIndexOfSource(impl, &index) == OK) {
+                            mSeekSyncPoints = &mSyncPoints.editItemAt(index);
                         }
                     }
                 }
diff --git a/media/extractors/mpeg2/MPEG2TSExtractor.h b/media/extractors/mpeg2/MPEG2TSExtractor.h
index e425d23..2537d3b 100644
--- a/media/extractors/mpeg2/MPEG2TSExtractor.h
+++ b/media/extractors/mpeg2/MPEG2TSExtractor.h
@@ -95,6 +95,7 @@
     status_t seekBeyond(int64_t seekTimeUs);
 
     status_t feedUntilBufferAvailable(const sp<AnotherPacketSource> &impl);
+    status_t findIndexOfSource(const sp<AnotherPacketSource> &impl, size_t *index);
 
     // Add a SynPoint derived from |event|.
     void addSyncPoint_l(const ATSParser::SyncEvent &event);
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
index 2a02b20..3de1514 100644
--- a/media/libaaudio/examples/loopback/src/loopback.cpp
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -105,9 +105,14 @@
         assert(false);
     }
     if (framesRead < 0) {
-        myData->inputError = framesRead;
-        printf("ERROR in read = %d = %s\n", framesRead,
-               AAudio_convertResultToText(framesRead));
+        // Expect INVALID_STATE while the input stream is still STATE_STARTING;
+        // tolerate errors until some data has actually been read.
+        if (myData->framesReadTotal > 0) {
+            myData->inputError = framesRead;
+            printf("ERROR in read = %d = %s\n", framesRead,
+                   AAudio_convertResultToText(framesRead));
+        } else {
+            framesRead = 0;
+        }
     } else {
         myData->framesReadTotal += framesRead;
     }
@@ -149,8 +154,10 @@
         int32_t totalFramesRead = 0;
         do {
             actualFramesRead = readFormattedData(myData, numFrames);
-            if (actualFramesRead) {
+            if (actualFramesRead > 0) {
                 totalFramesRead += actualFramesRead;
+            } else if (actualFramesRead < 0) {
+                result = AAUDIO_CALLBACK_RESULT_STOP;
             }
             // Ignore errors because input stream may not be started yet.
         } while (actualFramesRead > 0);
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index fffcda0..3b03601 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -62,7 +62,7 @@
         , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
         , mInService(inService)
         , mServiceInterface(serviceInterface)
-        , mAtomicTimestamp()
+        , mAtomicInternalTimestamp()
         , mWakeupDelayNanos(AAudioProperty_getWakeupDelayMicros() * AAUDIO_NANOS_PER_MICROSECOND)
         , mMinimumSleepNanos(AAudioProperty_getMinimumSleepMicros() * AAUDIO_NANOS_PER_MICROSECOND)
         {
@@ -349,8 +349,7 @@
     }
 }
 
-aaudio_result_t AudioStreamInternal::requestStop()
-{
+aaudio_result_t AudioStreamInternal::requestStop() {
     aaudio_result_t result = stopCallback();
     if (result != AAUDIO_OK) {
         return result;
@@ -364,7 +363,7 @@
 
     mClockModel.stop(AudioClock::getNanoseconds());
     setState(AAUDIO_STREAM_STATE_STOPPING);
-    mAtomicTimestamp.clear();
+    mAtomicInternalTimestamp.clear();
 
     return mServiceInterface.stopStream(mServiceStreamHandle);
 }
@@ -413,8 +412,8 @@
                            int64_t *framePosition,
                            int64_t *timeNanoseconds) {
     // Generated in server and passed to client. Return latest.
-    if (mAtomicTimestamp.isValid()) {
-        Timestamp timestamp = mAtomicTimestamp.read();
+    if (mAtomicInternalTimestamp.isValid()) {
+        Timestamp timestamp = mAtomicInternalTimestamp.read();
         int64_t position = timestamp.getPosition() + mFramesOffsetFromService;
         if (position >= 0) {
             *framePosition = position;
@@ -461,7 +460,7 @@
 
 aaudio_result_t AudioStreamInternal::onTimestampHardware(AAudioServiceMessage *message) {
     Timestamp timestamp(message->timestamp.position, message->timestamp.timestamp);
-    mAtomicTimestamp.write(timestamp);
+    mAtomicInternalTimestamp.write(timestamp);
     return AAUDIO_OK;
 }
 
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 3bb9e1e..1c88f52 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -163,7 +163,7 @@
 
     AAudioServiceInterface  &mServiceInterface;   // abstract interface to the service
 
-    SimpleDoubleBuffer<Timestamp>  mAtomicTimestamp;
+    SimpleDoubleBuffer<Timestamp>  mAtomicInternalTimestamp;
 
     AtomicRequestor          mNeedCatchUp;   // Ask read() or write() to sync on first timestamp.
 
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 58ef7b1..7dcb620 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -259,6 +259,7 @@
 
         if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
             ALOGD("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
+            result = systemStopFromCallback();
             break;
         }
     }
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 9af47b2..6af8e7d 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -71,7 +71,7 @@
 
     mClockModel.stop(AudioClock::getNanoseconds());
     setState(AAUDIO_STREAM_STATE_PAUSING);
-    mAtomicTimestamp.clear();
+    mAtomicInternalTimestamp.clear();
     return mServiceInterface.pauseStream(mServiceStreamHandle);
 }
 
@@ -294,6 +294,7 @@
             }
         } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
             ALOGD("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
+            result = systemStopFromCallback();
             break;
         }
     }
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 2fb3986..0d71efc 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -316,7 +316,7 @@
 {
     AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
     ALOGD("%s(%p) called", __func__, stream);
-    return audioStream->systemStop();
+    return audioStream->systemStopFromApp();
 }
 
 AAUDIO_API aaudio_result_t AAudioStream_waitForStateChange(AAudioStream* stream,
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 391af29..e39a075 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -119,21 +119,29 @@
     return AAUDIO_OK;
 }
 
-aaudio_result_t AudioStream::safeStart() {
+aaudio_result_t AudioStream::systemStart() {
     std::lock_guard<std::mutex> lock(mStreamLock);
+
     if (collidesWithCallback()) {
         ALOGE("%s cannot be called from a callback!", __func__);
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    return requestStart();
+
+    aaudio_result_t result = requestStart();
+    if (result == AAUDIO_OK) {
+        // We only call this for logging in "dumpsys audio". So ignore return code.
+        (void) mPlayerBase->start();
+    }
+    return result;
 }
 
-aaudio_result_t AudioStream::safePause() {
+aaudio_result_t AudioStream::systemPause() {
+    std::lock_guard<std::mutex> lock(mStreamLock);
+
     if (!isPauseSupported()) {
         return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
-    std::lock_guard<std::mutex> lock(mStreamLock);
     if (collidesWithCallback()) {
         ALOGE("%s cannot be called from a callback!", __func__);
         return AAUDIO_ERROR_INVALID_STATE;
@@ -169,7 +177,12 @@
             return AAUDIO_ERROR_INVALID_STATE;
     }
 
-    return requestPause();
+    aaudio_result_t result = requestPause();
+    if (result == AAUDIO_OK) {
+        // We only call this for logging in "dumpsys audio", so ignore the return code.
+        (void) mPlayerBase->pause();
+    }
+    return result;
 }
 
 aaudio_result_t AudioStream::safeFlush() {
@@ -192,12 +205,31 @@
     return requestFlush();
 }
 
-aaudio_result_t AudioStream::safeStop() {
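+// Called when the app's data callback returns AAUDIO_CALLBACK_RESULT_STOP,
+// so no collidesWithCallback() check is needed here.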
+aaudio_result_t AudioStream::systemStopFromCallback() {
+    std::lock_guard<std::mutex> lock(mStreamLock);
+    aaudio_result_t result = safeStop();
+    if (result == AAUDIO_OK) {
+        // We only call this for logging in "dumpsys audio", so ignore the return code.
+        (void) mPlayerBase->stop();
+    }
+    return result;
+}
+
+aaudio_result_t AudioStream::systemStopFromApp() {
     std::lock_guard<std::mutex> lock(mStreamLock);
     if (collidesWithCallback()) {
-        ALOGE("stream cannot be stopped from a callback!");
+        ALOGE("stream cannot be stopped by calling from a callback!");
         return AAUDIO_ERROR_INVALID_STATE;
     }
+    aaudio_result_t result = safeStop();
+    if (result == AAUDIO_OK) {
+        // We only call this for logging in "dumpsys audio", so ignore the return code.
+        (void) mPlayerBase->stop();
+    }
+    return result;
+}
+
+aaudio_result_t AudioStream::safeStop() {
 
     switch (getState()) {
         // Proceed with stopping.
@@ -224,7 +256,7 @@
         case AAUDIO_STREAM_STATE_CLOSING:
         case AAUDIO_STREAM_STATE_CLOSED:
         default:
-            ALOGW("requestStop() stream not running, state = %s",
+            ALOGW("%s() stream not running, state = %s", __func__,
                   AAudio_convertStreamStateToText(getState()));
             return AAUDIO_ERROR_INVALID_STATE;
     }
@@ -349,21 +381,33 @@
     }
 }
 
-aaudio_result_t AudioStream::joinThread(void** returnArg, int64_t timeoutNanoseconds)
+aaudio_result_t AudioStream::joinThread(void** returnArg, int64_t timeoutNanoseconds __unused)
 {
     if (!mHasThread) {
         ALOGE("joinThread() - but has no thread");
         return AAUDIO_ERROR_INVALID_STATE;
     }
+    aaudio_result_t result = AAUDIO_OK;
+    // If the callback is stopping the stream because the app passed back STOP
+    // then we don't need to join(). The thread is already about to exit.
+    if (pthread_self() != mThread) {
+        // Called from an app thread. Not the callback.
 #if 0
-    // TODO implement equivalent of pthread_timedjoin_np()
-    struct timespec abstime;
-    int err = pthread_timedjoin_np(mThread, returnArg, &abstime);
+        // TODO implement equivalent of pthread_timedjoin_np()
+        struct timespec abstime;
+        int err = pthread_timedjoin_np(mThread, returnArg, &abstime);
 #else
-    int err = pthread_join(mThread, returnArg);
+        int err = pthread_join(mThread, returnArg);
 #endif
+        if (err) {
+            ALOGE("%s() pthread_join() returns err = %d", __func__, err);
+            result = AAudioConvert_androidToAAudioResult(-err);
+        }
+    }
+    // This must be set to false so that the callback thread can be created
+    // when the stream is restarted.
     mHasThread = false;
-    return err ? AAudioConvert_androidToAAudioResult(-errno) : mThreadRegistrationResult;
+    return (result != AAUDIO_OK) ? result : mThreadRegistrationResult;
 }
 
 aaudio_data_callback_result_t AudioStream::maybeCallDataCallback(void *audioData,
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 60200b2..46951f5 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -51,21 +51,6 @@
 
     virtual ~AudioStream();
 
-    /**
-     * Lock a mutex and make sure we are not calling from a callback function.
-     * @return result of requestStart();
-     */
-    aaudio_result_t safeStart();
-
-    aaudio_result_t safePause();
-
-    aaudio_result_t safeFlush();
-
-    aaudio_result_t safeStop();
-
-    aaudio_result_t safeClose();
-
-    // =========== Begin ABSTRACT methods ===========================
 protected:
 
     /* Asynchronous requests.
@@ -74,7 +59,7 @@
     virtual aaudio_result_t requestStart() = 0;
 
     /**
-     * Check the state to see if Pause if currently legal.
+     * Check the state to see if Pause is currently legal.
      *
      * @param result pointer to return code
      * @return true if OK to continue, if false then return result
@@ -356,33 +341,28 @@
         mPlayerBase->unregisterWithAudioManager();
     }
 
-    // Pass start request through PlayerBase for tracking.
-    aaudio_result_t systemStart() {
-        mPlayerBase->start();
-        // Pass aaudio_result_t around the PlayerBase interface, which uses status__t.
-        return mPlayerBase->getResult();
-    }
+    aaudio_result_t systemStart();
 
-    // Pass pause request through PlayerBase for tracking.
-    aaudio_result_t systemPause() {
-        mPlayerBase->pause();
-        return mPlayerBase->getResult();
-    }
+    aaudio_result_t systemPause();
 
-    // Pass stop request through PlayerBase for tracking.
-    aaudio_result_t systemStop() {
-        mPlayerBase->stop();
-        return mPlayerBase->getResult();
-    }
+    aaudio_result_t safeFlush();
+
+    /**
+     * This is called when an app calls AAudioStream_requestStop().
+     * It rejects calls made from a callback.
+     */
+    aaudio_result_t systemStopFromApp();
+
+    /**
+     * This is called internally when an app callback returns AAUDIO_CALLBACK_RESULT_STOP.
+     */
+    aaudio_result_t systemStopFromCallback();
+
+    aaudio_result_t safeClose();
 
 protected:
 
-    // PlayerBase allows the system to control the stream.
-    // Calling through PlayerBase->start() notifies the AudioManager of the player state.
-    // The AudioManager also can start/stop a stream by calling mPlayerBase->playerStart().
-    // systemStart() ==> mPlayerBase->start()   mPlayerBase->playerStart() ==> requestStart()
-    //                        \                           /
-    //                         ------ AudioManager -------
+    // PlayerBase allows the system to control the stream volume.
     class MyPlayerBase : public android::PlayerBase {
     public:
         explicit MyPlayerBase(AudioStream *parent);
@@ -406,20 +386,19 @@
 
         void clearParentReference() { mParent = nullptr; }
 
+        // Just a stub. The ability to start audio through PlayerBase is being deprecated.
         android::status_t playerStart() override {
-            // mParent should NOT be null. So go ahead and crash if it is.
-            mResult = mParent->safeStart();
-            return AAudioConvert_aaudioToAndroidStatus(mResult);
+            return android::NO_ERROR;
         }
 
+        // Just a stub. The ability to pause audio through PlayerBase is being deprecated.
         android::status_t playerPause() override {
-            mResult = mParent->safePause();
-            return AAudioConvert_aaudioToAndroidStatus(mResult);
+            return android::NO_ERROR;
         }
 
+        // Just a stub. The ability to stop audio through PlayerBase is being deprecated.
         android::status_t playerStop() override {
-            mResult = mParent->safeStop();
-            return AAudioConvert_aaudioToAndroidStatus(mResult);
+            return android::NO_ERROR;
         }
 
         android::status_t playerSetVolume() override {
@@ -548,6 +527,8 @@
 
 private:
 
+    aaudio_result_t safeStop();
+
     std::mutex                 mStreamLock;
 
     const android::sp<MyPlayerBase>   mPlayerBase;
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
index a6b9f5d..2edab58 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
@@ -78,8 +78,9 @@
 
 void AudioStreamLegacy::processCallbackCommon(aaudio_callback_operation_t opcode, void *info) {
     aaudio_data_callback_result_t callbackResult;
-    // This illegal size can be used to tell AudioFlinger to stop calling us.
-    // This takes advantage of AudioFlinger killing the stream.
+    // This illegal size can be used to tell AudioRecord or AudioTrack to stop calling us.
+    // It takes advantage of them killing the stream when they see a size that is out of range.
+    // That behavior is undocumented.
     // TODO add to API in AudioRecord and AudioTrack
     const size_t SIZE_STOP_CALLBACKS = SIZE_MAX;
 
@@ -95,7 +96,7 @@
                 ALOGW("processCallbackCommon() data, stream disconnected");
                 audioBuffer->size = SIZE_STOP_CALLBACKS;
             } else if (!mCallbackEnabled.load()) {
-                ALOGW("processCallbackCommon() stopping because callback disabled");
+                ALOGW("processCallbackCommon() no data because callback disabled");
                 audioBuffer->size = SIZE_STOP_CALLBACKS;
             } else {
                 if (audioBuffer->frameCount == 0) {
@@ -115,10 +116,16 @@
                 }
                 if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
                     audioBuffer->size = audioBuffer->frameCount * getBytesPerDeviceFrame();
-                } else { // STOP or invalid result
-                    ALOGW("%s() callback requested stop, fake an error", __func__);
-                    audioBuffer->size = SIZE_STOP_CALLBACKS;
-                    // Disable the callback just in case AudioFlinger keeps trying to call us.
+                } else {
+                    if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
+                        ALOGD("%s() callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
+                    } else {
+                        ALOGW("%s() callback returned invalid result = %d",
+                              __func__, callbackResult);
+                    }
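+                    // Return a zero size and stop the stream here instead of faking an error size.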
+                    audioBuffer->size = 0;
+                    systemStopFromCallback();
+                    // Disable the callback just in case the system keeps trying to call us.
                     mCallbackEnabled.store(false);
                 }
 
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index 40e22ac..f550089 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -486,6 +486,9 @@
                                                int64_t *framePosition,
                                                int64_t *timeNanoseconds) {
     ExtendedTimestamp extendedTimestamp;
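+    // Timestamps are only valid while the stream is started.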
+    if (getState() != AAUDIO_STREAM_STATE_STARTED) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
     status_t status = mAudioRecord->getTimestamp(&extendedTimestamp);
     if (status == WOULD_BLOCK) {
         return AAUDIO_ERROR_INVALID_STATE;
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 1ac2558..c995e99 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -288,7 +288,7 @@
 
 aaudio_result_t AudioStreamTrack::requestPause() {
     if (mAudioTrack.get() == nullptr) {
-        ALOGE("requestPause() no AudioTrack");
+        ALOGE("%s() no AudioTrack", __func__);
         return AAUDIO_ERROR_INVALID_STATE;
     }
 
@@ -304,7 +304,7 @@
 
 aaudio_result_t AudioStreamTrack::requestFlush() {
     if (mAudioTrack.get() == nullptr) {
-        ALOGE("requestFlush() no AudioTrack");
+        ALOGE("%s() no AudioTrack", __func__);
         return AAUDIO_ERROR_INVALID_STATE;
     }
 
@@ -318,7 +318,7 @@
 
 aaudio_result_t AudioStreamTrack::requestStop() {
     if (mAudioTrack.get() == nullptr) {
-        ALOGE("requestStop() no AudioTrack");
+        ALOGE("%s() no AudioTrack", __func__);
         return AAUDIO_ERROR_INVALID_STATE;
     }
 
diff --git a/media/libaaudio/tests/test_timestamps.cpp b/media/libaaudio/tests/test_timestamps.cpp
index dfa7815..7b1dfd3 100644
--- a/media/libaaudio/tests/test_timestamps.cpp
+++ b/media/libaaudio/tests/test_timestamps.cpp
@@ -35,6 +35,7 @@
 
 #define NUM_SECONDS             1
 #define NUM_LOOPS               4
+#define MAX_TESTS               20
 
 typedef struct TimestampInfo {
     int64_t         framesTotal;
@@ -53,6 +54,49 @@
     bool           forceUnderruns = false;
 } TimestampCallbackData_t;
 
+struct TimeStampTestLog {
+    bool                      isMmap;
+    aaudio_sharing_mode_t     sharingMode;
+    aaudio_performance_mode_t performanceMode;
+    aaudio_direction_t        direction;
+    aaudio_result_t           result;
+};
+
+static int s_numTests = 0;
+// Use a plain old array because we reference this from the callback and do not want any
+// automatic memory allocation.
+static TimeStampTestLog s_testLogs[MAX_TESTS]{};
+
+static void logTestResult(bool isMmap,
+                          aaudio_sharing_mode_t sharingMode,
+                          aaudio_performance_mode_t performanceMode,
+                          aaudio_direction_t direction,
+                          aaudio_result_t result) {
+    if (s_numTests >= MAX_TESTS) {
+        printf("ERROR - MAX_TESTS too small = %d\n", MAX_TESTS);
+        return;
+    }
+    s_testLogs[s_numTests].isMmap = isMmap;
+    s_testLogs[s_numTests].sharingMode = sharingMode;
+    s_testLogs[s_numTests].performanceMode = performanceMode;
+    s_testLogs[s_numTests].direction = direction;
+    s_testLogs[s_numTests].result = result;
+    s_numTests++;
+}
+
+static void printTestResults() {
+    for (int i = 0; i < s_numTests; i++) {
+        TimeStampTestLog *log = &s_testLogs[i];
+        printf("%2d: mmap = %3s, sharing = %9s, perf = %11s, dir = %6s ---- %4s\n",
+               i,
+               log->isMmap ? "yes" : "no",
+               getSharingModeText(log->sharingMode),
+               getPerformanceModeText(log->performanceMode),
+               getDirectionText(log->direction),
+               log->result ? "FAIL" : "pass");
+    }
+}
+
 // Callback function that fills the audio output buffer.
 aaudio_data_callback_result_t timestampDataCallbackProc(
         AAudioStream *stream,
@@ -115,6 +159,7 @@
     int32_t originalBufferSize = 0;
     int32_t requestedBufferSize = 0;
     int32_t finalBufferSize = 0;
+    bool    isMmap = false;
     aaudio_format_t actualDataFormat = AAUDIO_FORMAT_PCM_FLOAT;
     aaudio_sharing_mode_t actualSharingMode = AAUDIO_SHARING_MODE_SHARED;
     aaudio_sharing_mode_t actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
@@ -124,7 +169,8 @@
 
     memset(&sTimestampData, 0, sizeof(sTimestampData));
 
-    printf("------------ testTimeStamps(policy = %d, sharing = %s, perf = %s, dir = %s) -----------\n",
+    printf("\n=================================================================================\n");
+    printf("--------- testTimeStamps(policy = %d, sharing = %s, perf = %s, dir = %s) --------\n",
            mmapPolicy,
            getSharingModeText(sharingMode),
            getPerformanceModeText(performanceMode),
@@ -177,8 +223,8 @@
 
     printf("    chans = %3d, rate = %6d format = %d\n",
            actualChannelCount, actualSampleRate, actualDataFormat);
-    printf("    Is MMAP used? %s\n", AAudioStream_isMMapUsed(aaudioStream)
-                                     ? "yes" : "no");
+    isMmap = AAudioStream_isMMapUsed(aaudioStream);
+    printf("    Is MMAP used? %s\n", isMmap ? "yes" : "no");
 
     // This is the number of frames that are read in one chunk by a DMA controller
     // or a DSP or a mixer.
@@ -218,7 +264,7 @@
 
         for (int second = 0; second < NUM_SECONDS; second++) {
             // Give AAudio callback time to run in the background.
-            sleep(1);
+            usleep(200 * 1000);
 
             // Periodically print the progress so we know it hasn't died.
             printf("framesWritten = %d, XRuns = %d\n",
@@ -234,18 +280,25 @@
         }
 
         printf("timestampCount = %d\n", sTimestampData.timestampCount);
-        int printed = 0;
-        for (int i = 0; i < sTimestampData.timestampCount; i++) {
+        int printedGood = 0;
+        int printedBad = 0;
+        for (int i = 1; i < sTimestampData.timestampCount; i++) {
             TimestampInfo *timestamp = &sTimestampData.timestamps[i];
-            bool posChanged = (timestamp->timestampPosition != (timestamp - 1)->timestampPosition);
-            bool timeChanged = (timestamp->timestampNanos != (timestamp - 1)->timestampNanos);
-            if ((printed < 20) && ((i < 10) || posChanged || timeChanged)) {
-                printf("  %3d : frames %8lld, xferd %8lld", i,
-                       (long long) timestamp->framesTotal,
-                       (long long) timestamp->appPosition);
-                if (timestamp->result != AAUDIO_OK) {
-                    printf(", result = %s\n", AAudio_convertResultToText(timestamp->result));
-                } else {
+            if (timestamp->result != AAUDIO_OK) {
+                if (printedBad < 5) {
+                    printf("  %3d : frames %8lld, xferd %8lld, result = %s\n",
+                           i,
+                           (long long) timestamp->framesTotal,
+                           (long long) timestamp->appPosition,
+                           AAudio_convertResultToText(timestamp->result));
+                    printedBad++;
+                }
+            } else {
+                const bool posChanged = (timestamp->timestampPosition !=
+                                   (timestamp - 1)->timestampPosition);
+                const bool timeChanged = (timestamp->timestampNanos
+                        != (timestamp - 1)->timestampNanos);
+                if ((printedGood < 20) && (posChanged || timeChanged)) {
                     bool negative = timestamp->timestampPosition < 0;
                     bool retro = (i > 0 && (timestamp->timestampPosition <
                                             (timestamp - 1)->timestampPosition));
@@ -253,17 +306,39 @@
                                                    : (retro ? "  <= RETROGRADE!" : "");
 
                     double latency = calculateLatencyMillis(timestamp->timestampPosition,
-                                             timestamp->timestampNanos,
-                                             timestamp->appPosition,
-                                             timestamp->appNanoseconds,
-                                             actualSampleRate);
-                    printf(", STAMP: pos = %8lld, nanos = %8lld, lat = %7.1f msec %s\n",
+                                                            timestamp->timestampNanos,
+                                                            timestamp->appPosition,
+                                                            timestamp->appNanoseconds,
+                                                            actualSampleRate);
+                    printf("  %3d : frames %8lld, xferd %8lld",
+                           i,
+                           (long long) timestamp->framesTotal,
+                           (long long) timestamp->appPosition);
+                    printf(" STAMP: pos = %8lld, nanos = %8lld, lat = %7.1f msec %s\n",
                            (long long) timestamp->timestampPosition,
                            (long long) timestamp->timestampNanos,
                            latency,
                            message);
+                    printedGood++;
                 }
-                printed++;
+            }
+        }
+
+        if (printedGood == 0) {
+            printf("ERROR - AAudioStream_getTimestamp() never gave us a valid timestamp\n");
+            result = AAUDIO_ERROR_INTERNAL;
+        } else {
+            // Make sure we do not get timestamps when stopped.
+            int64_t position;
+            int64_t time;
+            aaudio_result_t tempResult = AAudioStream_getTimestamp(aaudioStream,
+                                                                   CLOCK_MONOTONIC,
+                                                                   &position, &time);
+            if (tempResult != AAUDIO_ERROR_INVALID_STATE) {
+                printf("ERROR - AAudioStream_getTimestamp() should return"
+                       " INVALID_STATE when stopped! %s\n",
+                       AAudio_convertResultToText(tempResult));
+                result = AAUDIO_ERROR_INTERNAL;
             }
         }
 
@@ -273,12 +348,14 @@
     }
 
 finish:
+
+    logTestResult(isMmap, sharingMode, performanceMode, direction, result);
+
     if (aaudioStream != nullptr) {
         AAudioStream_close(aaudioStream);
     }
     AAudioStreamBuilder_delete(aaudioBuilder);
     printf("result = %d = %s\n", result, AAudio_convertResultToText(result));
-
     return result;
 }
 
@@ -292,7 +369,7 @@
     // in a buffer if we hang or crash.
     setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
 
-    printf("Test Timestamps V0.1.3\n");
+    printf("Test Timestamps V0.1.4\n");
 
     // Legacy
     aaudio_policy_t policy = AAUDIO_POLICY_NEVER;
@@ -332,5 +409,7 @@
                             AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
                             AAUDIO_DIRECTION_OUTPUT);
 
+    printTestResults();
+
     return (result == AAUDIO_OK) ? EXIT_SUCCESS : EXIT_FAILURE;
 }
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 827df6a..1417aaf 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -50,6 +50,7 @@
         "libmediametrics",
         "libmediautils",
         "libnblog",
+        "libprocessgroup",
         "libutils",
     ],
     export_shared_lib_headers: ["libbinder"],
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 3223647..72a23e3 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -26,6 +26,7 @@
 #include <media/AudioRecord.h>
 #include <utils/Log.h>
 #include <private/media/AudioTrackShared.h>
+#include <processgroup/sched_policy.h>
 #include <media/IAudioFlinger.h>
 #include <media/MediaAnalyticsItem.h>
 #include <media/TypeConverter.h>
@@ -1398,6 +1399,17 @@
     return mAudioRecord->getActiveMicrophones(activeMicrophones).transactionError();
 }
 
+status_t AudioRecord::setMicrophoneDirection(audio_microphone_direction_t direction)
+{
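+    // Forward the request to audioserver via the IAudioRecord binder interface.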
+    AutoMutex lock(mLock);
+    return mAudioRecord->setMicrophoneDirection(direction).transactionError();
+}
+
+status_t AudioRecord::setMicrophoneFieldDimension(float zoom) {
+    AutoMutex lock(mLock);
+    return mAudioRecord->setMicrophoneFieldDimension(zoom).transactionError();
+}
+
 // =========================================================================
 
 void AudioRecord::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index baeae8b..4c762ed 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -784,7 +784,8 @@
 status_t AudioSystem::setDeviceConnectionState(audio_devices_t device,
                                                audio_policy_dev_state_t state,
                                                const char *device_address,
-                                               const char *device_name)
+                                               const char *device_name,
+                                               audio_format_t encodedFormat)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     const char *address = "";
@@ -798,7 +799,7 @@
     if (device_name != NULL) {
         name = device_name;
     }
-    return aps->setDeviceConnectionState(device, state, address, name);
+    return aps->setDeviceConnectionState(device, state, address, name, encodedFormat);
 }
 
 audio_policy_dev_state_t AudioSystem::getDeviceConnectionState(audio_devices_t device,
@@ -812,7 +813,8 @@
 
 status_t AudioSystem::handleDeviceConfigChange(audio_devices_t device,
                                                const char *device_address,
-                                               const char *device_name)
+                                               const char *device_name,
+                                               audio_format_t encodedFormat)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     const char *address = "";
@@ -826,7 +828,7 @@
     if (device_name != NULL) {
         name = device_name;
     }
-    return aps->handleDeviceConfigChange(device, address, name);
+    return aps->handleDeviceConfigChange(device, address, name, encodedFormat);
 }
 
 status_t AudioSystem::setPhoneState(audio_mode_t state)
@@ -1335,6 +1337,13 @@
     return aps->isHapticPlaybackSupported();
 }
 
+status_t AudioSystem::getHwOffloadEncodingFormatsSupportedForA2DP(
+                                std::vector<audio_format_t> *formats)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+    return aps->getHwOffloadEncodingFormatsSupportedForA2DP(formats);
+}
 
 // ---------------------------------------------------------------------------
 
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index b444d2d..e9a0e22 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -29,6 +29,7 @@
 #include <media/AudioTrack.h>
 #include <utils/Log.h>
 #include <private/media/AudioTrackShared.h>
+#include <processgroup/sched_policy.h>
 #include <media/IAudioFlinger.h>
 #include <media/IAudioPolicyService.h>
 #include <media/AudioParameter.h>
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index 272415c..8c7fac5 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -92,6 +92,7 @@
     IS_HAPTIC_PLAYBACK_SUPPORTED,
     SET_UID_DEVICE_AFFINITY,
     REMOVE_UID_DEVICE_AFFINITY,
+    GET_OFFLOAD_FORMATS_A2DP
 };
 
 #define MAX_ITEMS_PER_LIST 1024
@@ -108,7 +109,8 @@
                                     audio_devices_t device,
                                     audio_policy_dev_state_t state,
                                     const char *device_address,
-                                    const char *device_name)
+                                    const char *device_name,
+                                    audio_format_t encodedFormat)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -116,6 +118,7 @@
         data.writeInt32(static_cast <uint32_t>(state));
         data.writeCString(device_address);
         data.writeCString(device_name);
+        data.writeInt32(static_cast <uint32_t>(encodedFormat));
         remote()->transact(SET_DEVICE_CONNECTION_STATE, data, &reply);
         return static_cast <status_t> (reply.readInt32());
     }
@@ -134,13 +137,15 @@
 
     virtual status_t handleDeviceConfigChange(audio_devices_t device,
                                               const char *device_address,
-                                              const char *device_name)
+                                              const char *device_name,
+                                              audio_format_t encodedFormat)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
         data.writeInt32(static_cast <uint32_t>(device));
         data.writeCString(device_address);
         data.writeCString(device_name);
+        data.writeInt32(static_cast <uint32_t>(encodedFormat));
         remote()->transact(HANDLE_DEVICE_CONFIG_CHANGE, data, &reply);
         return static_cast <status_t> (reply.readInt32());
     }
@@ -884,7 +889,30 @@
         return reply.readInt32();
     }
 
-    virtual status_t addStreamDefaultEffect(const effect_uuid_t *type,
+    virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
+                std::vector<audio_format_t> *formats)
+    {
+        if (formats == NULL) {
+            return BAD_VALUE;
+        }
+
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_OFFLOAD_FORMATS_A2DP, data, &reply);
+        if (status != NO_ERROR || (status = (status_t)reply.readInt32()) != NO_ERROR) {
+            return status;
+        }
+
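+        // The reply carries the number of formats followed by each format value as an int32.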
+        size_t list_size = reply.readUint32();
+
+        for (size_t i = 0; i < list_size; i++) {
+            formats->push_back(static_cast<audio_format_t>(reply.readInt32()));
+        }
+        return NO_ERROR;
+    }
+
+    virtual status_t addStreamDefaultEffect(const effect_uuid_t *type,
                                             const String16& opPackageName,
                                             const effect_uuid_t *uuid,
                                             int32_t priority,
@@ -1096,7 +1124,8 @@
         case SET_ASSISTANT_UID:
         case SET_A11Y_SERVICES_UIDS:
         case SET_UID_DEVICE_AFFINITY:
-        case REMOVE_UID_DEVICE_AFFINITY: {
+        case REMOVE_UID_DEVICE_AFFINITY:
+        case GET_OFFLOAD_FORMATS_A2DP: {
             if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
                 ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
                       __func__, code, IPCThreadState::self()->getCallingPid(),
@@ -1121,6 +1150,7 @@
                     static_cast <audio_policy_dev_state_t>(data.readInt32());
             const char *device_address = data.readCString();
             const char *device_name = data.readCString();
+            audio_format_t codecFormat = static_cast <audio_format_t>(data.readInt32());
             if (device_address == nullptr || device_name == nullptr) {
                 ALOGE("Bad Binder transaction: SET_DEVICE_CONNECTION_STATE for device %u", device);
                 reply->writeInt32(static_cast<int32_t> (BAD_VALUE));
@@ -1128,7 +1158,8 @@
                 reply->writeInt32(static_cast<uint32_t> (setDeviceConnectionState(device,
                                                                                   state,
                                                                                   device_address,
-                                                                                  device_name)));
+                                                                                  device_name,
+                                                                                  codecFormat)));
             }
             return NO_ERROR;
         } break;
@@ -1154,13 +1185,16 @@
                     static_cast <audio_devices_t>(data.readInt32());
             const char *device_address = data.readCString();
             const char *device_name = data.readCString();
+            audio_format_t codecFormat =
+                    static_cast <audio_format_t>(data.readInt32());
             if (device_address == nullptr || device_name == nullptr) {
                 ALOGE("Bad Binder transaction: HANDLE_DEVICE_CONFIG_CHANGE for device %u", device);
                 reply->writeInt32(static_cast<int32_t> (BAD_VALUE));
             } else {
                 reply->writeInt32(static_cast<uint32_t> (handleDeviceConfigChange(device,
                                                                                   device_address,
-                                                                                  device_name)));
+                                                                                  device_name,
+                                                                                  codecFormat)));
             }
             return NO_ERROR;
         } break;
@@ -1745,6 +1779,21 @@
             return NO_ERROR;
         }
 
+        case GET_OFFLOAD_FORMATS_A2DP: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            std::vector<audio_format_t> encodingFormats;
+            status_t status = getHwOffloadEncodingFormatsSupportedForA2DP(&encodingFormats);
+            reply->writeInt32(status);
+            if (status != NO_ERROR) {
+                return NO_ERROR;
+            }
+            reply->writeUint32(static_cast<uint32_t>(encodingFormats.size()));
+            for (size_t i = 0; i < encodingFormats.size(); i++)
+                reply->writeInt32(static_cast<int32_t>(encodingFormats[i]));
+            return NO_ERROR;
+        }
+
         case ADD_STREAM_DEFAULT_EFFECT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             effect_uuid_t type;
diff --git a/media/libaudioclient/aidl/android/media/IAudioRecord.aidl b/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
index 01e0a71..cf9c7f4 100644
--- a/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
@@ -36,4 +36,12 @@
   /* Get a list of current active microphones.
    */
   void getActiveMicrophones(out MicrophoneInfo[] activeMicrophones);
+
+  /* Set the microphone direction (for processing).
+   */
+  void setMicrophoneDirection(int /*audio_microphone_direction_t*/ direction);
+
+  /* Set the microphone zoom (for processing).
+   */
+  void setMicrophoneFieldDimension(float zoom);
 }
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index 35a7e05..ebee124 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -534,6 +534,14 @@
      */
             status_t    getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
 
+    /* Set the Microphone direction (for processing purposes).
+     */
+            status_t    setMicrophoneDirection(audio_microphone_direction_t direction);
+
+    /* Set the Microphone zoom factor (for processing purposes).
+     */
+            status_t    setMicrophoneFieldDimension(float zoom);
+
      /* Get the unique port ID assigned to this AudioRecord instance by audio policy manager.
       * The ID is unique across all audioserver clients and can change during the life cycle
       * of a given AudioRecord instance if the connection to audioserver is restored.
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 781e9df..a208602 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -209,12 +209,14 @@
     // IAudioPolicyService interface (see AudioPolicyInterface for method descriptions)
     //
     static status_t setDeviceConnectionState(audio_devices_t device, audio_policy_dev_state_t state,
-                                             const char *device_address, const char *device_name);
+                                             const char *device_address, const char *device_name,
+                                             audio_format_t encodedFormat);
     static audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
                                                                 const char *device_address);
     static status_t handleDeviceConfigChange(audio_devices_t device,
                                              const char *device_address,
-                                             const char *device_name);
+                                             const char *device_name,
+                                             audio_format_t encodedFormat);
     static status_t setPhoneState(audio_mode_t state);
     static status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
     static audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
@@ -342,6 +344,9 @@
 
     static status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
 
+    static status_t getHwOffloadEncodingFormatsSupportedForA2DP(
+                                    std::vector<audio_format_t> *formats);
+
     // numSurroundFormats holds the maximum number of formats and bool value allowed in the array.
     // When numSurroundFormats is 0, surroundFormats and surroundFormatsEnabled will not be
     // populated. The actual number of surround formats should be returned at numSurroundFormats.
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index fb4fe93..177adc2 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -44,12 +44,14 @@
     virtual status_t setDeviceConnectionState(audio_devices_t device,
                                               audio_policy_dev_state_t state,
                                               const char *device_address,
-                                              const char *device_name) = 0;
+                                              const char *device_name,
+                                              audio_format_t encodedFormat) = 0;
     virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
                                                                   const char *device_address) = 0;
     virtual status_t handleDeviceConfigChange(audio_devices_t device,
                                               const char *device_address,
-                                              const char *device_name) = 0;
+                                              const char *device_name,
+                                              audio_format_t encodedFormat) = 0;
     virtual status_t setPhoneState(audio_mode_t state) = 0;
     virtual status_t setForceUse(audio_policy_force_use_t usage,
                                     audio_policy_forced_cfg_t config) = 0;
@@ -186,6 +188,8 @@
                                         audio_format_t *surroundFormats,
                                         bool *surroundFormatsEnabled,
                                         bool reported) = 0;
+    virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
+                                        std::vector<audio_format_t> *formats) = 0;
     virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled) = 0;
 
     virtual status_t setAssistantUid(uid_t uid) = 0;
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index 7a9e843..a1e869f 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -268,6 +268,8 @@
         audio_input_flags_t flags,
         const char *address,
         audio_source_t source,
+        audio_devices_t outputDevice,
+        const char *outputDeviceAddress,
         sp<StreamInHalInterface> *inStream) {
     if (mDevice == 0) return NO_INIT;
     DeviceAddress hidlDevice;
@@ -283,6 +285,17 @@
     //       for now, only send the main source at 1dbfs
     SinkMetadata sinkMetadata = {{{ .source = AudioSource(source), .gain = 1 }}};
 #endif
+#if MAJOR_VERSION < 5
+    (void)outputDevice;
+    (void)outputDeviceAddress;
+#else
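+    // For HAL V5 and later, the associated output device is passed to the HAL
+    // in the sink metadata destination.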
+    if (outputDevice != AUDIO_DEVICE_NONE) {
+        DeviceAddress hidlOutputDevice;
+        status = deviceAddressFromHal(outputDevice, outputDeviceAddress, &hidlOutputDevice);
+        if (status != OK) return status;
+        sinkMetadata.tracks[0].destination.device(std::move(hidlOutputDevice));
+    }
+#endif
     Return<void> ret = mDevice->openInputStream(
             handle,
             hidlDevice,
diff --git a/media/libaudiohal/impl/DeviceHalHidl.h b/media/libaudiohal/impl/DeviceHalHidl.h
index 291c88f..f7d465f 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.h
+++ b/media/libaudiohal/impl/DeviceHalHidl.h
@@ -86,6 +86,8 @@
             audio_input_flags_t flags,
             const char *address,
             audio_source_t source,
+            audio_devices_t outputDevice,
+            const char *outputDeviceAddress,
             sp<StreamInHalInterface> *inStream);
 
     // Returns whether createAudioPatch and releaseAudioPatch operations are supported.
diff --git a/media/libaudiohal/impl/DeviceHalLocal.cpp b/media/libaudiohal/impl/DeviceHalLocal.cpp
index dffe9da..ee68252 100644
--- a/media/libaudiohal/impl/DeviceHalLocal.cpp
+++ b/media/libaudiohal/impl/DeviceHalLocal.cpp
@@ -131,6 +131,8 @@
         audio_input_flags_t flags,
         const char *address,
         audio_source_t source,
+        audio_devices_t /*outputDevice*/,
+        const char */*outputDeviceAddress*/,
         sp<StreamInHalInterface> *inStream) {
     audio_stream_in_t *halStream;
     ALOGV("open_input_stream handle: %d devices: %x flags: %#x "
diff --git a/media/libaudiohal/impl/DeviceHalLocal.h b/media/libaudiohal/impl/DeviceHalLocal.h
index 18bd879..36db72e 100644
--- a/media/libaudiohal/impl/DeviceHalLocal.h
+++ b/media/libaudiohal/impl/DeviceHalLocal.h
@@ -79,6 +79,8 @@
             audio_input_flags_t flags,
             const char *address,
             audio_source_t source,
+            audio_devices_t outputDevice,
+            const char *outputDeviceAddress,
             sp<StreamInHalInterface> *inStream);
 
     // Returns whether createAudioPatch and releaseAudioPatch operations are supported.
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index c12b362..2e35be6 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -854,5 +854,29 @@
 }
 #endif
 
+#if MAJOR_VERSION < 5
+status_t StreamInHalHidl::setMicrophoneDirection(audio_microphone_direction_t direction __unused) {
+    if (mStream == 0) return NO_INIT;
+    return INVALID_OPERATION;
+}
+
+status_t StreamInHalHidl::setMicrophoneFieldDimension(float zoom __unused) {
+    if (mStream == 0) return NO_INIT;
+    return INVALID_OPERATION;
+}
+#else
+status_t StreamInHalHidl::setMicrophoneDirection(audio_microphone_direction_t direction) {
+    if (!mStream) return NO_INIT;
+    return processReturn("setMicrophoneDirection",
+                mStream->setMicrophoneDirection(static_cast<MicrophoneDirection>(direction)));
+}
+
+status_t StreamInHalHidl::setMicrophoneFieldDimension(float zoom) {
+    if (!mStream) return NO_INIT;
+    return processReturn("setMicrophoneFieldDimension",
+                mStream->setMicrophoneFieldDimension(zoom));
+}
+#endif
+
 } // namespace CPP_VERSION
 } // namespace android
diff --git a/media/libaudiohal/impl/StreamHalHidl.h b/media/libaudiohal/impl/StreamHalHidl.h
index f7b507e..9ac1067 100644
--- a/media/libaudiohal/impl/StreamHalHidl.h
+++ b/media/libaudiohal/impl/StreamHalHidl.h
@@ -220,6 +220,12 @@
     // Get active microphones
     virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
 
+    // Set microphone direction (for processing)
+    virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction) override;
+
+    // Set microphone zoom (for processing)
+    virtual status_t setMicrophoneFieldDimension(float zoom) override;
+
     // Called when the metadata of the stream's sink has been changed.
     status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
 
diff --git a/media/libaudiohal/impl/StreamHalLocal.cpp b/media/libaudiohal/impl/StreamHalLocal.cpp
index 26d30d4..fcb809b 100644
--- a/media/libaudiohal/impl/StreamHalLocal.cpp
+++ b/media/libaudiohal/impl/StreamHalLocal.cpp
@@ -368,5 +368,26 @@
 }
 #endif
 
+#if MAJOR_VERSION < 5
+status_t StreamInHalLocal::setMicrophoneDirection(audio_microphone_direction_t direction __unused) {
+    return INVALID_OPERATION;
+}
+
+status_t StreamInHalLocal::setMicrophoneFieldDimension(float zoom __unused) {
+    return INVALID_OPERATION;
+}
+#else
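+// These are optional legacy HAL entry points; a null function pointer means the HAL does not implement them.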
+status_t StreamInHalLocal::setMicrophoneDirection(audio_microphone_direction_t direction) {
+    if (mStream->set_microphone_direction == NULL) return INVALID_OPERATION;
+    return mStream->set_microphone_direction(mStream, direction);
+}
+
+status_t StreamInHalLocal::setMicrophoneFieldDimension(float zoom) {
+    if (mStream->set_microphone_field_dimension == NULL) return INVALID_OPERATION;
+    return mStream->set_microphone_field_dimension(mStream, zoom);
+}
+#endif
+
 } // namespace CPP_VERSION
 } // namespace android
diff --git a/media/libaudiohal/impl/StreamHalLocal.h b/media/libaudiohal/impl/StreamHalLocal.h
index 4fd1960..3d6c50e 100644
--- a/media/libaudiohal/impl/StreamHalLocal.h
+++ b/media/libaudiohal/impl/StreamHalLocal.h
@@ -204,6 +204,12 @@
     // Get active microphones
     virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
 
+    // Sets microphone direction (for processing)
+    virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction);
+
+    // Sets microphone zoom (for processing)
+    virtual status_t setMicrophoneFieldDimension(float zoom);
+
     // Called when the metadata of the stream's sink has been changed.
     status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
 
diff --git a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
index 7de8eb3..e565237 100644
--- a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
@@ -84,6 +84,8 @@
             audio_input_flags_t flags,
             const char *address,
             audio_source_t source,
+            audio_devices_t outputDevice,
+            const char *outputDeviceAddress,
             sp<StreamInHalInterface> *inStream) = 0;
 
     // Returns whether createAudioPatch and releaseAudioPatch operations are supported.
diff --git a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
index bd71dc0..ed8282f 100644
--- a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
@@ -179,6 +179,12 @@
     // Get active microphones
     virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones) = 0;
 
+    // Set direction for capture processing
+    virtual status_t setMicrophoneDirection(audio_microphone_direction_t) = 0;
+
+    // Set zoom factor for capture stream
+    virtual status_t setMicrophoneFieldDimension(float zoom) = 0;
+
     struct SinkMetadata {
         std::vector<record_track_metadata_t> tracks;
     };
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Init.c b/media/libeffects/lvm/lib/Bundle/src/LVM_Init.c
index 0669a81..c57498e 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Init.c
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Init.c
@@ -61,6 +61,72 @@
 /*                                                                                      */
 /****************************************************************************************/
 
+/*
+ * 4 Types of Memory Regions of LVM
+ * TODO: Allocate on the fly.
+ * i)   LVM_MEMREGION_PERSISTENT_SLOW_DATA - For Instance Handles
+ * ii)  LVM_MEMREGION_PERSISTENT_FAST_DATA - Persistent Buffers
+ * iii) LVM_MEMREGION_PERSISTENT_FAST_COEF - For Holding Structure values
+ * iv)  LVM_MEMREGION_TEMPORARY_FAST       - Scratch buffers
+ *
+ * LVM_MEMREGION_PERSISTENT_SLOW_DATA:
+ *   Total Memory size:
+ *     sizeof(LVM_Instance_t) + \
+ *     sizeof(LVM_Buffer_t) + \
+ *     sizeof(LVPSA_InstancePr_t) + \
+ *     sizeof(LVM_Buffer_t) - needed if buffer mode is LVM_MANAGED_BUFFER
+ *
+ * LVM_MEMREGION_PERSISTENT_FAST_DATA:
+ *   Total Memory size:
+ *     sizeof(LVM_TE_Data_t) + \
+ *     2 * pInstParams->EQNB_NumBands * sizeof(LVM_EQNB_BandDef_t) + \
+ *     sizeof(LVCS_Data_t) + \
+ *     sizeof(LVDBE_Data_FLOAT_t) + \
+ *     sizeof(Biquad_2I_Order2_FLOAT_Taps_t) + \
+ *     sizeof(Biquad_2I_Order2_FLOAT_Taps_t) + \
+ *     pInstParams->EQNB_NumBands * sizeof(Biquad_2I_Order2_FLOAT_Taps_t) + \
+ *     pInstParams->EQNB_NumBands * sizeof(LVEQNB_BandDef_t) + \
+ *     pInstParams->EQNB_NumBands * sizeof(LVEQNB_BiquadType_en) + \
+ *     2 * LVM_HEADROOM_MAX_NBANDS * sizeof(LVM_HeadroomBandDef_t) + \
+ *     PSA_InitParams.nBands * sizeof(Biquad_1I_Order2_Taps_t) + \
+ *     PSA_InitParams.nBands * sizeof(QPD_Taps_t)
+ *
+ * LVM_MEMREGION_PERSISTENT_FAST_COEF:
+ *   Total Memory size:
+ *     sizeof(LVM_TE_Coefs_t) + \
+ *     sizeof(LVCS_Coefficient_t) + \
+ *     sizeof(LVDBE_Coef_FLOAT_t) + \
+ *     sizeof(Biquad_FLOAT_Instance_t) + \
+ *     sizeof(Biquad_FLOAT_Instance_t) + \
+ *     pInstParams->EQNB_NumBands * sizeof(Biquad_FLOAT_Instance_t) + \
+ *     PSA_InitParams.nBands * sizeof(Biquad_Instance_t) + \
+ *     PSA_InitParams.nBands * sizeof(QPD_State_t)
+ *
+ * LVM_MEMREGION_TEMPORARY_FAST (Scratch):
+ *   Total Memory Size:
+ *     BundleScratchSize + \
+ *     MAX_INTERNAL_BLOCKSIZE * sizeof(LVM_FLOAT) + \
+ *     MaxScratchOf (CS, EQNB, DBE, PSA)
+ *
+ *     a)BundleScratchSize:
+ *         3 * LVM_MAX_CHANNELS \
+ *         * (MIN_INTERNAL_BLOCKSIZE + InternalBlockSize) * sizeof(LVM_FLOAT)
+ *       This Memory is allocated only when Buffer mode is LVM_MANAGED_BUFFER.
+ *     b)MaxScratchOf (CS, EQNB, DBE, PSA)
+ *       This Memory is needed for scratch usage for CS, EQNB, DBE, PSA.
+ *       CS   = (LVCS_SCRATCHBUFFERS * sizeof(LVM_FLOAT)
+ *               * pCapabilities->MaxBlockSize)
+ *       EQNB = (LVEQNB_SCRATCHBUFFERS * sizeof(LVM_FLOAT)
+ *               * pCapabilities->MaxBlockSize)
+ *       DBE  = (LVDBE_SCRATCHBUFFERS_INPLACE*sizeof(LVM_FLOAT)
+ *               * pCapabilities->MaxBlockSize)
+ *       PSA  = (2 * pInitParams->MaxInputBlockSize * sizeof(LVM_FLOAT))
+ *              one MaxInputBlockSize for input and another for filter output
+ *     c)MAX_INTERNAL_BLOCKSIZE
+ *       This Memory is needed for PSAInput - Temp memory to store output
+ *       from McToMono block and given as input to PSA block
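+ *
+ * Purely illustrative example (actual values are configuration dependent):
+ * with LVM_MAX_CHANNELS = 2, MIN_INTERNAL_BLOCKSIZE = 16 and an
+ * InternalBlockSize of 64, BundleScratchSize would be
+ * 3 * 2 * (16 + 64) * sizeof(LVM_FLOAT) = 1920 bytes for a 4-byte LVM_FLOAT.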
+ */
+
 LVM_ReturnStatus_en LVM_GetMemoryTable(LVM_Handle_t         hInstance,
                                        LVM_MemTab_t         *pMemoryTable,
                                        LVM_InstParams_t     *pInstParams)
@@ -168,7 +234,13 @@
     AlgScratchSize    = 0;
     if (pInstParams->BufferMode == LVM_MANAGED_BUFFERS)
     {
+#ifdef BUILD_FLOAT
+        BundleScratchSize = 3 * LVM_MAX_CHANNELS \
+                            * (MIN_INTERNAL_BLOCKSIZE + InternalBlockSize) \
+                            * sizeof(LVM_FLOAT);
+#else
         BundleScratchSize = 6 * (MIN_INTERNAL_BLOCKSIZE + InternalBlockSize) * sizeof(LVM_INT16);
+#endif
         InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_TEMPORARY_FAST],        /* Scratch buffer */
                             BundleScratchSize);
         InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_SLOW_DATA],
@@ -369,8 +441,13 @@
                 PSA_MemTab.Region[LVM_PERSISTENT_FAST_COEF].Size);
 
             /* Fast Temporary */
+#ifdef BUILD_FLOAT
+            InstAlloc_AddMember(&AllocMem[LVM_TEMPORARY_FAST],
+                                MAX_INTERNAL_BLOCKSIZE * sizeof(LVM_FLOAT));
+#else
             InstAlloc_AddMember(&AllocMem[LVM_TEMPORARY_FAST],
                                 MAX_INTERNAL_BLOCKSIZE * sizeof(LVM_INT16));
+#endif
 
             if (PSA_MemTab.Region[LVM_TEMPORARY_FAST].Size > AlgScratchSize)
             {
@@ -559,13 +636,20 @@
          */
         pInstance->pBufferManagement = InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_SLOW_DATA],
                                                            sizeof(LVM_Buffer_t));
+#ifdef BUILD_FLOAT
+        BundleScratchSize = (LVM_INT32)
+                            (3 * LVM_MAX_CHANNELS \
+                             * (MIN_INTERNAL_BLOCKSIZE + InternalBlockSize) \
+                             * sizeof(LVM_FLOAT));
+#else
         BundleScratchSize = (LVM_INT32)(6 * (MIN_INTERNAL_BLOCKSIZE + InternalBlockSize) * sizeof(LVM_INT16));
+#endif
         pInstance->pBufferManagement->pScratch = InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_TEMPORARY_FAST],   /* Scratch 1 buffer */
                                                                      (LVM_UINT32)BundleScratchSize);
 #ifdef BUILD_FLOAT
         LoadConst_Float(0,                                   /* Clear the input delay buffer */
                         (LVM_FLOAT *)&pInstance->pBufferManagement->InDelayBuffer,
-                        (LVM_INT16)(2 * MIN_INTERNAL_BLOCKSIZE));
+                        (LVM_INT16)(LVM_MAX_CHANNELS * MIN_INTERNAL_BLOCKSIZE));
 #else
         LoadConst_16(0,                                                        /* Clear the input delay buffer */
                      (LVM_INT16 *)&pInstance->pBufferManagement->InDelayBuffer,
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_SetTimeConstant.c b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_SetTimeConstant.c
index 48f5d54..9d3ee88 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_SetTimeConstant.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_SetTimeConstant.c
@@ -51,7 +51,7 @@
                                LVM_INT16           NumChannels)
 {
 #ifdef HIGHER_FS
-    LVM_FLOAT   DeltaTable[11] = {0.500000f,/*8000*/
+    LVM_FLOAT   DeltaTable[13] = {0.500000f,/*8000*/
                                   0.362812f,/*11025*/
                                   0.333333f,/*12000*/
                                   0.250000f,/*16000*/
@@ -60,7 +60,9 @@
                                   0.125000f,/*32000*/
                                   0.090703f,/*44100*/
                                   0.083333f,/*48000*/
+                                  0.045352f,/*88200*/
                                   0.041667f,/*96000*/
+                                  0.022676f,/*176400*/
                                   0.020833f};/*192000*/
 #else
     LVM_FLOAT   DeltaTable[9] = {0.500000f,/*8000*/
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_VarSlope_SetTimeConstant.c b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_VarSlope_SetTimeConstant.c
index 9dc7d21..0e0acf1 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_VarSlope_SetTimeConstant.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_VarSlope_SetTimeConstant.c
@@ -52,7 +52,7 @@
                                          LVM_INT16           NumChannels)
 {
 #ifdef HIGHER_FS
-     LVM_FLOAT   DeltaTable[11] = {0.500000f,/*8000*/
+     LVM_FLOAT   DeltaTable[13] = {0.500000f,/*8000*/
                                    0.362812f,/*11025*/
                                    0.333333f,/*12000*/
                                    0.250000f,/*16000*/
@@ -61,7 +61,9 @@
                                    0.125000f,/*32000*/
                                    0.090703f,/*44100*/
                                    0.083333f,/*48000*/
+                                   0.045352f,/*88200*/
                                    0.041666f,/*96000*/
+                                   0.022676f,/*176400*/
                                    0.020833f};/*192000*/
 #else
     LVM_FLOAT   DeltaTable[9] = {0.500000f,/*8000*/
diff --git a/media/libeffects/lvm/lib/Common/src/LVM_GetOmega.c b/media/libeffects/lvm/lib/Common/src/LVM_GetOmega.c
index 7846ca0..6307e68 100644
--- a/media/libeffects/lvm/lib/Common/src/LVM_GetOmega.c
+++ b/media/libeffects/lvm/lib/Common/src/LVM_GetOmega.c
@@ -53,7 +53,9 @@
 #define LVVDL_2PiBy_48000_f       0.000130900f
 
 #ifdef HIGHER_FS
+#define LVVDL_2PiBy_88200_f       0.000071238f
 #define LVVDL_2PiBy_96000_f       0.000065450f
+#define LVVDL_2PiBy_176400_f      0.000035619f
 #define LVVDL_2PiBy_192000_f      0.000032725f
 #endif
 const LVM_FLOAT     LVVDL_2PiOnFsTable[] =  {LVVDL_2PiBy_8000_f,
@@ -66,7 +68,9 @@
                                              LVVDL_2PiBy_44100_f,
                                              LVVDL_2PiBy_48000_f
 #ifdef HIGHER_FS
+                                            ,LVVDL_2PiBy_88200_f
                                             ,LVVDL_2PiBy_96000_f
+                                            ,LVVDL_2PiBy_176400_f
                                             ,LVVDL_2PiBy_192000_f
 #endif
                                            };
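Note: as the macro names indicate, each LVVDL_2PiBy_<Fs>_f constant is 2*pi divided by the sampling rate; the two additions extend the table to 88.2 kHz and 176.4 kHz. A standalone arithmetic check, not part of the patch:

    #include <stdio.h>

    int main() {
        const double twoPi = 6.283185307179586;
        // Constants added by this patch, compared against 2*pi/Fs.
        printf("88200 Hz:  define=%.9f  2*pi/Fs=%.9f\n", 0.000071238, twoPi / 88200);
        printf("176400 Hz: define=%.9f  2*pi/Fs=%.9f\n", 0.000035619, twoPi / 176400);
        return 0;
    }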
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h
index e45d81f..ba05577 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h
@@ -239,13 +239,12 @@
 #define LVCS_STEREODELAY_CS_24KHZ                   279         /* Sample rate 24kS/s */
 #define LVCS_STEREODELAY_CS_32KHZ                   372         /* Sample rate 32kS/s */
 #define LVCS_STEREODELAY_CS_44KHZ                   512         /* Sample rate 44kS/s */
-// TODO: this should linearly scale by frequency but is limited to 512 frames until
-// we ensure enough buffer size has been allocated.
-#define LVCS_STEREODELAY_CS_48KHZ                   512         /* Sample rate 48kS/s */
-#define LVCS_STEREODELAY_CS_88KHZ                   512         /* Sample rate 88.2kS/s */
-#define LVCS_STEREODELAY_CS_96KHZ                   512         /* Sample rate 96kS/s */
-#define LVCS_STEREODELAY_CS_176KHZ                  512         /* Sample rate 176.4kS/s */
-#define LVCS_STEREODELAY_CS_192KHZ                  512         /* Sample rate 196kS/s */
+#define LVCS_STEREODELAY_CS_48KHZ                   557         /* Sample rate 48kS/s */
+#define LVCS_STEREODELAY_CS_88KHZ                   1024        /* Sample rate 88.2kS/s */
+#define LVCS_STEREODELAY_CS_96KHZ                   1115        /* Sample rate 96kS/s */
+#define LVCS_STEREODELAY_CS_176KHZ                  2048        /* Sample rate 176.4kS/s */
+#define LVCS_STEREODELAY_CS_192KHZ                  2229        /* Sample rate 192kS/s */
+#define LVCS_STEREODELAY_CS_MAX_VAL                 LVCS_STEREODELAY_CS_192KHZ
 
 /* Reverb coefficients for 8000 Hz sample rate, scaled with 1.038030 */
 #define CS_REVERB_8000_A0                          0.667271
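Note: the new delay constants scale the 44.1 kHz reference of 512 samples linearly with sampling rate (roughly 512 * Fs / 44100, rounded), which is what the removed TODO asked for, and LVCS_STEREODELAY_CS_MAX_VAL now sizes the StereoSamples buffer in LVCS_ReverbGenerator.h for the largest case. A standalone check of the arithmetic, not part of the patch:

    #include <stdio.h>

    int main() {
        const int fs[]      = {48000, 88200, 96000, 176400, 192000};
        const int defines[] = {  557,  1024,  1115,   2048,   2229};
        for (int i = 0; i < 5; ++i) {
            // Linear scaling from the 512-sample delay defined for 44.1 kHz.
            printf("%6d Hz: define=%4d  512*Fs/44100=%.1f\n",
                   fs[i], defines[i], 512.0 * fs[i] / 44100.0);
        }
        return 0;
    }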
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.h b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.h
index 69892b6..f94d4e4 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.h
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.h
@@ -65,7 +65,7 @@
     /* Filter */
     void                        (*pBiquadCallBack) (Biquad_Instance_t*, LVM_INT16*, LVM_INT16*, LVM_INT16);
 #else
-    LVM_FLOAT                   StereoSamples[2 * LVCS_STEREODELAY_CS_48KHZ];
+    LVM_FLOAT                   StereoSamples[2 * LVCS_STEREODELAY_CS_MAX_VAL];
     /* Reverb Level */
     LVM_FLOAT                   ReverbLevel;
     /* Filter */
diff --git a/media/libeffects/lvm/tests/Android.bp b/media/libeffects/lvm/tests/Android.bp
index 8ee807c..003ce9e 100644
--- a/media/libeffects/lvm/tests/Android.bp
+++ b/media/libeffects/lvm/tests/Android.bp
@@ -44,3 +44,16 @@
         "-Wextra",
     ],
 }
+
+cc_test {
+    name: "snr",
+    host_supported: false,
+
+    srcs: ["snr.cpp"],
+
+    cflags: [
+        "-Wall",
+        "-Werror",
+        "-Wextra",
+    ],
+}
diff --git a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
index 861ee64..41a4f04 100755
--- a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
+++ b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
@@ -25,16 +25,17 @@
 adb shell mkdir -p $testdir
 adb push $ANDROID_BUILD_TOP/cts/tests/tests/media/res/raw/sinesweepraw.raw $testdir
 adb push $OUT/testcases/lvmtest/arm64/lvmtest $testdir
+adb push $OUT/testcases/snr/arm64/snr $testdir
 
 flags_arr=(
     "-csE"
     "-eqE"
     "-tE"
     "-csE -tE -eqE"
-    "-bE"
+    "-bE -M"
     "-csE -tE"
     "-csE -eqE" "-tE -eqE"
-    "-csE -tE -bE -eqE"
+    "-csE -tE -bE -M -eqE"
 )
 
 fs_arr=(
@@ -79,6 +80,10 @@
             then
                 adb shell cmp $testdir/sinesweep_2_$((fs)).raw \
                     $testdir/sinesweep_$((ch))_$((fs)).raw
+            elif [[ $flags == *"-bE"* ]] && [ "$ch" -gt 2 ]
+            then
+                adb shell $testdir/snr $testdir/sinesweep_2_$((fs)).raw \
+                    $testdir/sinesweep_$((ch))_$((fs)).raw -thr:90.308998
             fi
 
         done
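Note: the 90.308998 dB threshold passed to the new snr binary appears to be 20*log10(2^15), i.e. a full-scale 16-bit signal whose error is confined to the least-significant bit; this reading is inferred from the numeric value rather than stated in the script. A one-line check, not part of the patch:

    #include <math.h>
    #include <stdio.h>

    int main() {
        // 2^15 = 32768 is full scale for signed 16-bit PCM.
        printf("20*log10(32768) = %.6f dB\n", 20.0 * log10(32768.0));
        return 0;
    }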
diff --git a/media/libeffects/lvm/tests/snr.cpp b/media/libeffects/lvm/tests/snr.cpp
new file mode 100644
index 0000000..88110c0
--- /dev/null
+++ b/media/libeffects/lvm/tests/snr.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <assert.h>
+#include <inttypes.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <algorithm>
+#include <limits>
+#include <utility>
+#include <vector>
+
+template <typename T, typename A = float>
+std::pair<A, A> getSignalNoise(FILE *finp, FILE *fref) {
+  constexpr size_t framesize = 256;
+  std::vector<T> in(framesize);
+  std::vector<T> ref(framesize);
+  A signal{};
+  A noise{};
+
+  for (;;) {
+    size_t read_samples_in = fread(&in[0], sizeof(T), framesize, finp);
+    const size_t read_samples_ref = fread(&ref[0], sizeof(T), framesize, fref);
+    if (read_samples_in != read_samples_ref) {
+      printf("file sizes do not match (last %zu %zu)", read_samples_in, read_samples_ref);
+      read_samples_in = std::min(read_samples_in, read_samples_ref);
+    }
+    if (read_samples_in == 0) {
+        return { signal, noise };
+    }
+    for (size_t i = 0; i < read_samples_in; ++i) {
+       const A value(ref[i]);
+       const A diff(A(in[i]) - value);
+       signal += value * value;
+       noise += diff * diff;
+    }
+  }
+}
+
+void printUsage() {
+  printf("\nUsage: ");
+  printf("\n     snr <ref_file> <test_file> [options]\n");
+  printf("\nwhere, \n     <ref_file>  is the reference file name");
+  printf("\n                  on which will be taken as pure signal");
+  printf("\n     <test_file> is test file for snr calculation");
+  printf("\n     and options are mentioned below");
+  printf("\n");
+  printf("\n     -pcm_format:<pcm format of input files>");
+  printf("\n           0 - 16 bit pcm");
+  printf("\n           1 - 32 bit float");
+  printf("\n           default 0");
+  printf("\n     -thr:<threshold value>");
+  printf("\n           default - negative infinity\n\n");
+}
+
+int main(int argc, const char *argv[]) {
+  if (argc < 3) {
+    printUsage();
+    return -1;
+  }
+  int pcm_format = 0;
+  float thr = - std::numeric_limits<float>::infinity();
+  FILE *fref = fopen(argv[1], "rb");
+  FILE *finp = fopen(argv[2], "rb");
+  for (int i = 3; i < argc; i++) {
+    if (!strncmp(argv[i], "-pcm_format:", 12)) {
+      pcm_format = atoi(argv[i] + 12);
+    } else if (!strncmp(argv[i], "-thr:", 5)) {
+      thr = atof(argv[i] + 5);
+    }
+  }
+  if (finp == nullptr || fref == nullptr) {
+    printf("\nError: missing input/reference files\n");
+    return -1;
+  }
+  auto sn = pcm_format == 0
+      ? getSignalNoise<short>(finp, fref)
+      : getSignalNoise<float>(finp, fref);
+  if (sn.first > 0.f && sn.second > 0.f) {
+    float snr = 10.f * log10(sn.first / sn.second);  // SNR in dB
+    // compare the measured snr value with threshold
+    if (snr < thr) {
+      printf("%.6f less than threshold %.6f\n", snr, thr);
+    } else {
+      printf("%.6f\n", snr);
+    }
+  }
+  fclose(finp);
+  fclose(fref);
+
+  return 0;
+}
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 3efb5de..68dae56 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -213,6 +213,7 @@
         "android.hidl.token@1.0-utils",
         "liblog",
         "libcutils",
+        "libprocessgroup",
         "libutils",
         "libbinder",
         "libsonivox",
diff --git a/media/libmedia/IMediaExtractor.cpp b/media/libmedia/IMediaExtractor.cpp
index e9a6230..fb6d3a2 100644
--- a/media/libmedia/IMediaExtractor.cpp
+++ b/media/libmedia/IMediaExtractor.cpp
@@ -19,6 +19,7 @@
 #include <utils/Log.h>
 
 #include <stdint.h>
+#include <time.h>
 #include <sys/types.h>
 
 #include <binder/IPCThreadState.h>
@@ -219,10 +220,16 @@
     Vector<wp<IMediaSource>> tracks;
     Vector<String8> trackDescriptions;
     String8 toString() const;
+    time_t when;
 } ExtractorInstance;
 
 String8 ExtractorInstance::toString() const {
-    String8 str = name;
+    String8 str;
+    char timeString[32];
+    strftime(timeString, sizeof(timeString), "%m-%d %T", localtime(&when));
+    str.append(timeString);
+    str.append(": ");
+    str.append(name);
     str.append(" for mime ");
     str.append(mime);
     str.append(", source ");
@@ -287,6 +294,7 @@
     ex.sourceDescription = source->toString();
     ex.owner = IPCThreadState::self()->getCallingPid();
     ex.extractor = extractor;
+    ex.when = time(NULL);
 
     {
         Mutex::Autolock lock(sExtractorsLock);
diff --git a/media/libmedia/IMediaMetadataRetriever.cpp b/media/libmedia/IMediaMetadataRetriever.cpp
index 590ba1a..f9fa86e 100644
--- a/media/libmedia/IMediaMetadataRetriever.cpp
+++ b/media/libmedia/IMediaMetadataRetriever.cpp
@@ -23,6 +23,7 @@
 #include <media/IDataSource.h>
 #include <media/IMediaHTTPService.h>
 #include <media/IMediaMetadataRetriever.h>
+#include <processgroup/sched_policy.h>
 #include <utils/String8.h>
 #include <utils/KeyedVector.h>
 
diff --git a/media/libmedia/MediaCodecInfo.cpp b/media/libmedia/MediaCodecInfo.cpp
index 5308e1c..86ad997 100644
--- a/media/libmedia/MediaCodecInfo.cpp
+++ b/media/libmedia/MediaCodecInfo.cpp
@@ -28,6 +28,15 @@
 
 namespace android {
 
+/** These redundant out-of-class definitions are required prior to C++17,
+ *  where static constexpr data members are not implicitly inline. */
+constexpr char MediaCodecInfo::Capabilities::FEATURE_ADAPTIVE_PLAYBACK[];
+constexpr char MediaCodecInfo::Capabilities::FEATURE_DYNAMIC_TIMESTAMP[];
+constexpr char MediaCodecInfo::Capabilities::FEATURE_FRAME_PARSING[];
+constexpr char MediaCodecInfo::Capabilities::FEATURE_INTRA_REFRESH[];
+constexpr char MediaCodecInfo::Capabilities::FEATURE_MULTIPLE_FRAMES[];
+constexpr char MediaCodecInfo::Capabilities::FEATURE_SECURE_PLAYBACK[];
+constexpr char MediaCodecInfo::Capabilities::FEATURE_TUNNELED_PLAYBACK[];
+
 void MediaCodecInfo::Capabilities::getSupportedProfileLevels(
         Vector<ProfileLevel> *profileLevels) const {
     profileLevels->clear();
@@ -40,16 +49,11 @@
     colorFormats->appendVector(mColorFormats);
 }
 
-uint32_t MediaCodecInfo::Capabilities::getFlags() const {
-    return mFlags;
-}
-
 const sp<AMessage> MediaCodecInfo::Capabilities::getDetails() const {
     return mDetails;
 }
 
-MediaCodecInfo::Capabilities::Capabilities()
-  : mFlags(0) {
+MediaCodecInfo::Capabilities::Capabilities() {
     mDetails = new AMessage;
 }
 
@@ -73,12 +77,10 @@
             caps->mColorFormats.push_back(color);
         }
     }
-    uint32_t flags = static_cast<uint32_t>(parcel.readInt32());
     sp<AMessage> details = AMessage::FromParcel(parcel);
     if (details == NULL)
         return NULL;
     if (caps != NULL) {
-        caps->mFlags = flags;
         caps->mDetails = details;
     }
     return caps;
@@ -96,7 +98,6 @@
     for (size_t i = 0; i < mColorFormats.size(); i++) {
         parcel->writeInt32(mColorFormats.itemAt(i));
     }
-    parcel->writeInt32(mFlags);
     mDetails->writeToParcel(parcel);
     return OK;
 }
@@ -111,6 +112,14 @@
     mCap->mDetails->setInt32(key, value);
 }
 
+void MediaCodecInfo::CapabilitiesWriter::removeDetail(const char* key) {
+    if (mCap->mDetails->removeEntryAt(mCap->mDetails->findEntryByName(key)) == OK) {
+        ALOGD("successfully removed detail %s", key);
+    } else {
+        ALOGD("detail %s wasn't present to remove", key);
+    }
+}
+
 void MediaCodecInfo::CapabilitiesWriter::addProfileLevel(
         uint32_t profile, uint32_t level) {
     ProfileLevel profileLevel;
@@ -129,32 +138,32 @@
     }
 }
 
-void MediaCodecInfo::CapabilitiesWriter::addFlags(uint32_t flags) {
-    mCap->mFlags |= flags;
-}
-
 MediaCodecInfo::CapabilitiesWriter::CapabilitiesWriter(
         MediaCodecInfo::Capabilities* cap) : mCap(cap) {
 }
 
-bool MediaCodecInfo::isEncoder() const {
-    return mIsEncoder;
+MediaCodecInfo::Attributes MediaCodecInfo::getAttributes() const {
+    return mAttributes;
 }
 
-uint32_t MediaCodecInfo::rank() const {
+uint32_t MediaCodecInfo::getRank() const {
     return mRank;
 }
 
-void MediaCodecInfo::getSupportedMimes(Vector<AString> *mimes) const {
-    mimes->clear();
+void MediaCodecInfo::getAliases(Vector<AString> *aliases) const {
+    *aliases = mAliases;
+}
+
+void MediaCodecInfo::getSupportedMediaTypes(Vector<AString> *mediaTypes) const {
+    mediaTypes->clear();
     for (size_t ix = 0; ix < mCaps.size(); ix++) {
-        mimes->push_back(mCaps.keyAt(ix));
+        mediaTypes->push_back(mCaps.keyAt(ix));
     }
 }
 
 const sp<MediaCodecInfo::Capabilities>
-MediaCodecInfo::getCapabilitiesFor(const char *mime) const {
-    ssize_t ix = getCapabilityIndex(mime);
+MediaCodecInfo::getCapabilitiesFor(const char *mediaType) const {
+    ssize_t ix = getCapabilityIndex(mediaType);
     if (ix >= 0) {
         return mCaps.valueAt(ix);
     }
@@ -173,21 +182,26 @@
 sp<MediaCodecInfo> MediaCodecInfo::FromParcel(const Parcel &parcel) {
     AString name = AString::FromParcel(parcel);
     AString owner = AString::FromParcel(parcel);
-    bool isEncoder = static_cast<bool>(parcel.readInt32());
+    Attributes attributes = static_cast<Attributes>(parcel.readInt32());
     uint32_t rank = parcel.readUint32();
     sp<MediaCodecInfo> info = new MediaCodecInfo;
     info->mName = name;
     info->mOwner = owner;
-    info->mIsEncoder = isEncoder;
+    info->mAttributes = attributes;
     info->mRank = rank;
+    size_t numAliases = static_cast<size_t>(parcel.readInt32());
+    for (size_t i = 0; i < numAliases; i++) {
+        AString alias = AString::FromParcel(parcel);
+        info->mAliases.add(alias);
+    }
     size_t size = static_cast<size_t>(parcel.readInt32());
     for (size_t i = 0; i < size; i++) {
-        AString mime = AString::FromParcel(parcel);
+        AString mediaType = AString::FromParcel(parcel);
         sp<Capabilities> caps = Capabilities::FromParcel(parcel);
         if (caps == NULL)
             return NULL;
         if (info != NULL) {
-            info->mCaps.add(mime, caps);
+            info->mCaps.add(mediaType, caps);
         }
     }
     return info;
@@ -196,8 +210,12 @@
 status_t MediaCodecInfo::writeToParcel(Parcel *parcel) const {
     mName.writeToParcel(parcel);
     mOwner.writeToParcel(parcel);
-    parcel->writeInt32(mIsEncoder);
+    parcel->writeInt32(mAttributes);
     parcel->writeUint32(mRank);
+    parcel->writeInt32(mAliases.size());
+    for (const AString &alias : mAliases) {
+        alias.writeToParcel(parcel);
+    }
     parcel->writeInt32(mCaps.size());
     for (size_t i = 0; i < mCaps.size(); i++) {
         mCaps.keyAt(i).writeToParcel(parcel);
@@ -206,10 +224,10 @@
     return OK;
 }
 
-ssize_t MediaCodecInfo::getCapabilityIndex(const char *mime) const {
-    if (mime) {
+ssize_t MediaCodecInfo::getCapabilityIndex(const char *mediaType) const {
+    if (mediaType) {
         for (size_t ix = 0; ix < mCaps.size(); ix++) {
-            if (mCaps.keyAt(ix).equalsIgnoreCase(mime)) {
+            if (mCaps.keyAt(ix).equalsIgnoreCase(mediaType)) {
                 return ix;
             }
         }
@@ -217,19 +235,26 @@
     return -1;
 }
 
-MediaCodecInfo::MediaCodecInfo() : mRank(0x100) {
+MediaCodecInfo::MediaCodecInfo()
+    : mAttributes((MediaCodecInfo::Attributes)0),
+      mRank(0x100) {
 }
 
 void MediaCodecInfoWriter::setName(const char* name) {
     mInfo->mName = name;
 }
 
+void MediaCodecInfoWriter::addAlias(const char* name) {
+    mInfo->mAliases.add(name);
+}
+
 void MediaCodecInfoWriter::setOwner(const char* owner) {
     mInfo->mOwner = owner;
 }
 
-void MediaCodecInfoWriter::setEncoder(bool isEncoder) {
-    mInfo->mIsEncoder = isEncoder;
+void MediaCodecInfoWriter::setAttributes(
+        typename std::underlying_type<MediaCodecInfo::Attributes>::type attributes) {
+    mInfo->mAttributes = (MediaCodecInfo::Attributes)attributes;
 }
 
 void MediaCodecInfoWriter::setRank(uint32_t rank) {
@@ -237,21 +262,21 @@
 }
 
 std::unique_ptr<MediaCodecInfo::CapabilitiesWriter>
-        MediaCodecInfoWriter::addMime(const char *mime) {
-    ssize_t ix = mInfo->getCapabilityIndex(mime);
+        MediaCodecInfoWriter::addMediaType(const char *mediaType) {
+    ssize_t ix = mInfo->getCapabilityIndex(mediaType);
     if (ix >= 0) {
         return std::unique_ptr<MediaCodecInfo::CapabilitiesWriter>(
                 new MediaCodecInfo::CapabilitiesWriter(
                 mInfo->mCaps.valueAt(ix).get()));
     }
     sp<MediaCodecInfo::Capabilities> caps = new MediaCodecInfo::Capabilities();
-    mInfo->mCaps.add(AString(mime), caps);
+    mInfo->mCaps.add(AString(mediaType), caps);
     return std::unique_ptr<MediaCodecInfo::CapabilitiesWriter>(
             new MediaCodecInfo::CapabilitiesWriter(caps.get()));
 }
 
-bool MediaCodecInfoWriter::removeMime(const char *mime) {
-    ssize_t ix = mInfo->getCapabilityIndex(mime);
+bool MediaCodecInfoWriter::removeMediaType(const char *mediaType) {
+    ssize_t ix = mInfo->getCapabilityIndex(mediaType);
     if (ix >= 0) {
         mInfo->mCaps.removeItemsAt(ix);
         return true;
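Note: taken together, the renames above replace the mime/flag based API with media types, attributes and feature details. A hedged sketch of how a codec-list builder might drive the new writer interface; the codec name, the alias and the way the MediaCodecInfoWriter is obtained are illustrative assumptions, and the snippet only builds inside the framework tree:

    #include <memory>
    #include <media/MediaCodecInfo.h>

    using namespace android;

    // Illustrative only: assumes `writer` was handed out by the surrounding
    // codec-list building code; the component and alias names are examples.
    void describeExampleDecoder(MediaCodecInfoWriter *writer) {
        writer->setName("c2.example.avc.decoder");
        writer->addAlias("OMX.example.h264.decoder");               // new: alternate name
        writer->setAttributes(MediaCodecInfo::kFlagIsSoftwareOnly); // no kFlagIsEncoder bit => decoder
        writer->setRank(0x100);

        // addMediaType() replaces addMime(); features are now carried as details.
        std::unique_ptr<MediaCodecInfo::CapabilitiesWriter> caps =
                writer->addMediaType("video/avc");
        caps->addDetail(MediaCodecInfo::Capabilities::FEATURE_ADAPTIVE_PLAYBACK, 1);
        // ...and withdrawn with removeDetail() instead of the deleted addFlags()/getFlags().
        caps->removeDetail(MediaCodecInfo::Capabilities::FEATURE_TUNNELED_PLAYBACK);
    }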
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
index 0ab0e9b..aa77cd3 100644
--- a/media/libmedia/TypeConverter.cpp
+++ b/media/libmedia/TypeConverter.cpp
@@ -74,6 +74,7 @@
     MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_WIRED_HEADSET),
     MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_AUX_DIGITAL),
     MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_HDMI),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_HDMI_ARC),
     MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_TELEPHONY_RX),
     MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_VOICE_CALL),
     MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BACK_MIC),
@@ -208,6 +209,14 @@
     MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_MAT_1_0),
     MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_MAT_2_0),
     MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_MAT_2_1),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LATM),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LATM_LC),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LATM_HE_V1),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LATM_HE_V2),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_CELT),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_APTX_ADAPTIVE),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_LHDC),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_LHDC_LL),
     TERMINATOR
 };
 
diff --git a/media/libmedia/include/media/DrmHal.h b/media/libmedia/include/media/DrmHal.h
index de0f3c7..7be5cf2 100644
--- a/media/libmedia/include/media/DrmHal.h
+++ b/media/libmedia/include/media/DrmHal.h
@@ -38,6 +38,7 @@
 using drm::V1_0::IDrmPlugin;
 using drm::V1_0::IDrmPluginListener;
 using drm::V1_0::KeyStatus;
+using drm::V1_1::SecurityLevel;
 using drm::V1_2::OfflineLicenseState;
 using ::android::hardware::hidl_vec;
 using ::android::hardware::Return;
@@ -62,7 +63,9 @@
 
     virtual status_t initCheck() const;
 
-    virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType);
+    virtual bool isCryptoSchemeSupported(const uint8_t uuid[16],
+                                         const String8& mimeType,
+                                         DrmPlugin::SecurityLevel level);
 
     virtual status_t createPlugin(const uint8_t uuid[16],
                                   const String8 &appPackageName);
@@ -223,6 +226,10 @@
     status_t getPropertyStringInternal(String8 const &name, String8 &value) const;
     status_t getPropertyByteArrayInternal(String8 const &name,
                                           Vector<uint8_t> &value) const;
+    bool matchMimeTypeAndSecurityLevel(sp<IDrmFactory> &factory,
+                                       const uint8_t uuid[16],
+                                       const String8 &mimeType,
+                                       DrmPlugin::SecurityLevel level);
 
     DISALLOW_EVIL_CONSTRUCTORS(DrmHal);
 };
diff --git a/media/libmedia/include/media/IDrm.h b/media/libmedia/include/media/IDrm.h
index 49166c6..a32756f 100644
--- a/media/libmedia/include/media/IDrm.h
+++ b/media/libmedia/include/media/IDrm.h
@@ -34,7 +34,9 @@
 
     virtual status_t initCheck() const = 0;
 
-    virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType) = 0;
+    virtual bool isCryptoSchemeSupported(const uint8_t uuid[16],
+                                         const String8 &mimeType,
+                                         DrmPlugin::SecurityLevel securityLevel) = 0;
 
     virtual status_t createPlugin(const uint8_t uuid[16],
                                   const String8 &appPackageName) = 0;
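Note: callers of isCryptoSchemeSupported() now have to supply a security level; existing call sites that only care about the UUID pass DrmPlugin::kSecurityLevelUnknown, as the NuPlayerDrm change further down in this patch does. A short hedged sketch (the helper name and the empty mime type are illustrative, mirroring that call site):

    #include <media/IDrm.h>

    // Illustrative helper, not part of the patch: query scheme support with no
    // particular security-level requirement.
    static bool supportsScheme(const android::sp<android::IDrm> &drm,
                               const uint8_t uuid[16]) {
        return drm->isCryptoSchemeSupported(uuid, android::String8(),
                                            android::DrmPlugin::kSecurityLevelUnknown);
    }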
diff --git a/media/libmedia/include/media/MediaCodecInfo.h b/media/libmedia/include/media/MediaCodecInfo.h
index b3777d3..54f565a 100644
--- a/media/libmedia/include/media/MediaCodecInfo.h
+++ b/media/libmedia/include/media/MediaCodecInfo.h
@@ -30,6 +30,8 @@
 #include <utils/Vector.h>
 #include <utils/StrongPointer.h>
 
+#include <type_traits>
+
 namespace android {
 
 struct AMessage;
@@ -51,21 +53,47 @@
 
     struct CapabilitiesWriter;
 
+    enum Attributes : int32_t {
+        // attribute flags
+        kFlagIsEncoder = 1 << 0,
+        kFlagIsVendor = 1 << 1,
+        kFlagIsSoftwareOnly = 1 << 2,
+        kFlagIsHardwareAccelerated = 1 << 3,
+    };
+
     struct Capabilities : public RefBase {
-        enum {
-            // decoder flags
-            kFlagSupportsAdaptivePlayback = 1 << 0,
-            kFlagSupportsSecurePlayback = 1 << 1,
-            kFlagSupportsTunneledPlayback = 1 << 2,
+        constexpr static char FEATURE_ADAPTIVE_PLAYBACK[] = "feature-adaptive-playback";
+        constexpr static char FEATURE_DYNAMIC_TIMESTAMP[] = "feature-dynamic-timestamp";
+        constexpr static char FEATURE_FRAME_PARSING[] = "feature-frame-parsing";
+        constexpr static char FEATURE_INTRA_REFRESH[] = "feature-intra-refresh";
+        constexpr static char FEATURE_MULTIPLE_FRAMES[] = "feature-multiple-frames";
+        constexpr static char FEATURE_SECURE_PLAYBACK[] = "feature-secure-playback";
+        constexpr static char FEATURE_TUNNELED_PLAYBACK[] = "feature-tunneled-playback";
 
-            // encoder flags
-            kFlagSupportsIntraRefresh = 1 << 0,
-
-        };
-
+        /**
+         * Returns the supported levels for each supported profile in a target array.
+         *
+         * @param profileLevels target array for the profile levels.
+         */
         void getSupportedProfileLevels(Vector<ProfileLevel> *profileLevels) const;
+
+        /**
+         * Returns the supported color formats in a target array. Only used for video/image
+         * components.
+         *
+         * @param colorFormats target array for the color formats.
+         */
         void getSupportedColorFormats(Vector<uint32_t> *colorFormats) const;
-        uint32_t getFlags() const;
+
+        /**
+         * Returns metadata associated with this codec capability.
+         *
+         * This contains:
+         * - features,
+         * - performance data.
+         *
+         * TODO: expose this as separate API-s and wrap here.
+         */
         const sp<AMessage> getDetails() const;
 
     protected:
@@ -73,7 +101,6 @@
         SortedVector<ProfileLevel> mProfileLevelsSorted;
         Vector<uint32_t> mColorFormats;
         SortedVector<uint32_t> mColorFormatsSorted;
-        uint32_t mFlags;
         sp<AMessage> mDetails;
 
         Capabilities();
@@ -93,8 +120,7 @@
     /**
      * This class is used for modifying information inside a `Capabilities`
      * object. An object of type `CapabilitiesWriter` can be obtained by calling
-     * `MediaCodecInfoWriter::addMime()` or
-     * `MediaCodecInfoWriter::updateMime()`.
+     * `MediaCodecInfoWriter::addMediaType()`.
      */
     struct CapabilitiesWriter {
         /**
@@ -122,6 +148,13 @@
          */
         void addDetail(const char* key, int32_t value);
         /**
+         * Removes a key-value pair from the list of details. If the key is not
+         * present, this call does nothing.
+         *
+         * @param key The key.
+         */
+        void removeDetail(const char* key);
+        /**
          * Add a profile-level pair. If this profile-level pair already exists,
          * it will be ignored.
          *
@@ -136,13 +169,7 @@
          * @param format The color format.
          */
         void addColorFormat(uint32_t format);
-        /**
-         * Add flags. The underlying operation is bitwise-or. In other words,
-         * bits that have already been set will be ignored.
-         *
-         * @param flags The additional flags.
-         */
-        void addFlags(uint32_t flags);
+
     private:
         /**
          * The associated `Capabilities` object.
@@ -158,19 +185,42 @@
         friend MediaCodecInfoWriter;
     };
 
-    bool isEncoder() const;
-    void getSupportedMimes(Vector<AString> *mimes) const;
-    const sp<Capabilities> getCapabilitiesFor(const char *mime) const;
+    inline bool isEncoder() const {
+        return getAttributes() & kFlagIsEncoder;
+    }
+
+    Attributes getAttributes() const;
+    void getSupportedMediaTypes(Vector<AString> *mediaTypes) const;
+    const sp<Capabilities> getCapabilitiesFor(const char *mediaType) const;
     const char *getCodecName() const;
 
     /**
+     * Returns a vector containing alternate names for the codec.
+     *
+     * @param aliases the destination array for the aliases. This is cleared.
+     *
+     * Multiple codecs may share alternate names as long as their supported media types are
+     * distinct; however, these will result in different aliases for the MediaCodec user as
+     * the canonical codec has to be resolved without knowing the media type in
+     * MediaCodec::CreateByComponentName.
+     */
+    void getAliases(Vector<AString> *aliases) const;
+
+    /**
      * Return the name of the service that hosts the codec. This value is not
      * visible at the Java level.
      *
      * Currently, this is the "instance name" of the IOmx service.
      */
     const char *getOwnerName() const;
-    uint32_t rank() const;
+
+    /**
+     * Returns the rank of the component.
+     *
+     * Technically this is defined to be per media type, but that makes ordering the MediaCodecList
+     * impossible as MediaCodecList is ordered by codec name.
+     */
+    uint32_t getRank() const;
 
     /**
      * Serialization over Binder
@@ -181,11 +231,12 @@
 private:
     AString mName;
     AString mOwner;
-    bool mIsEncoder;
+    Attributes mAttributes;
     KeyedVector<AString, sp<Capabilities> > mCaps;
+    Vector<AString> mAliases;
     uint32_t mRank;
 
-    ssize_t getCapabilityIndex(const char *mime) const;
+    ssize_t getCapabilityIndex(const char *mediaType) const;
 
     /**
      * Construct an `MediaCodecInfo` object. After the construction, its
@@ -219,6 +270,13 @@
      */
     void setName(const char* name);
     /**
+     * Adds an alias (alternate name) for the codec. Multiple codecs can share an alternate name
+     * as long as their supported media types are distinct.
+     *
+     * @param name an alternate name.
+     */
+    void addAlias(const char* name);
+    /**
      * Set the owner name of the codec.
      *
      * This "owner name" is the name of the `IOmx` instance that supports this
@@ -228,32 +286,32 @@
      */
     void setOwner(const char* owner);
     /**
-     * Set whether this codec is an encoder or a decoder.
+     * Sets codec attributes.
      *
-     * @param isEncoder Whether this codec is an encoder or a decoder.
+     * @param attributes Codec attributes.
      */
-    void setEncoder(bool isEncoder = true);
+    void setAttributes(typename std::underlying_type<MediaCodecInfo::Attributes>::type attributes);
     /**
-     * Add a mime to an indexed list and return a `CapabilitiesWriter` object
+     * Add a media type to an indexed list and return a `CapabilitiesWriter` object
      * that can be used for modifying the associated `Capabilities`.
      *
-     * If the mime already exists, this function will return the
-     * `CapabilitiesWriter` associated with the mime.
+     * If the media type already exists, this function will return the
+     * `CapabilitiesWriter` associated with the media type.
      *
-     * @param[in] mime The name of a new mime to add.
+     * @param[in] mediaType The name of a new media type to add.
      * @return writer The `CapabilitiesWriter` object for modifying the
-     * `Capabilities` associated with the mime. `writer` will be valid
-     * regardless of whether `mime` already exists or not.
+     * `Capabilities` associated with the media type. `writer` will be valid
+     * regardless of whether `mediaType` already exists or not.
      */
-    std::unique_ptr<MediaCodecInfo::CapabilitiesWriter> addMime(
-            const char* mime);
+    std::unique_ptr<MediaCodecInfo::CapabilitiesWriter> addMediaType(
+            const char* mediaType);
     /**
-     * Remove a mime.
+     * Remove a media type.
      *
-     * @param mime The name of the mime to remove.
-     * @return `true` if `mime` is removed; `false` if `mime` is not found.
+     * @param mediaType The name of the media type to remove.
+     * @return `true` if `mediaType` is removed; `false` if `mediaType` is not found.
      */
-    bool removeMime(const char* mime);
+    bool removeMediaType(const char* mediaType);
     /**
      * Set rank of the codec. MediaCodecList will stable-sort the list according
      * to rank in non-descending order.
diff --git a/media/libmediametrics/Android.bp b/media/libmediametrics/Android.bp
index e188e54..15ea578 100644
--- a/media/libmediametrics/Android.bp
+++ b/media/libmediametrics/Android.bp
@@ -1,6 +1,4 @@
-// TODO: change it back to cc_library_shared when there is a way to
-// expose media metrics as stable API.
-cc_library {
+cc_library_shared {
     name: "libmediametrics",
 
     srcs: [
@@ -32,12 +30,13 @@
         cfi: true,
     },
 
-    // enumerate the stable interface
-// this would mean nobody can use the C++ interface. have to rework some things.
-//  stubs: {
-//      symbol_file: "libmediametrics.map.txt",
-//      versions: [
-//          "1" ,
-//      ]
-//  },
+    // enumerate stable entry points, for apex use
+    stubs: {
+        symbol_file: "libmediametrics.map.txt",
+        versions: [
+            "1" ,
+        ]
+    },
 }
+
+
diff --git a/media/libmediametrics/IMediaAnalyticsService.cpp b/media/libmediametrics/IMediaAnalyticsService.cpp
index 28a7746..9114927 100644
--- a/media/libmediametrics/IMediaAnalyticsService.cpp
+++ b/media/libmediametrics/IMediaAnalyticsService.cpp
@@ -142,7 +142,7 @@
             CHECK_INTERFACE(IMediaAnalyticsService, data, reply);
 
             bool forcenew;
-            MediaAnalyticsItem *item = new MediaAnalyticsItem;
+            MediaAnalyticsItem *item = MediaAnalyticsItem::create();
 
             data.readBool(&forcenew);
             item->readFromParcel(data);
diff --git a/media/libmediametrics/MediaAnalyticsItem.cpp b/media/libmediametrics/MediaAnalyticsItem.cpp
index 448e2d9..02c23b1 100644
--- a/media/libmediametrics/MediaAnalyticsItem.cpp
+++ b/media/libmediametrics/MediaAnalyticsItem.cpp
@@ -52,6 +52,17 @@
 const char * const MediaAnalyticsItem::EnabledPropertyPersist  = "persist.media.metrics.enabled";
 const int MediaAnalyticsItem::EnabledProperty_default  = 1;
 
+// Factory methods, so callers do not need to know the size of the allocated object
+MediaAnalyticsItem *MediaAnalyticsItem::create()
+{
+    return MediaAnalyticsItem::create(kKeyNone);
+}
+
+MediaAnalyticsItem *MediaAnalyticsItem::create(MediaAnalyticsItem::Key key)
+{
+    MediaAnalyticsItem *item = new MediaAnalyticsItem(key);
+    return item;
+}
 
 // access functions for the class
 MediaAnalyticsItem::MediaAnalyticsItem()
@@ -642,6 +653,19 @@
 //
 
 int32_t MediaAnalyticsItem::readFromParcel(const Parcel& data) {
+    int32_t version = data.readInt32();
+
+    switch(version) {
+        case 0:
+          return readFromParcel0(data);
+          break;
+        default:
+          ALOGE("Unsupported MediaAnalyticsItem Parcel version: %d", version);
+          return -1;
+    }
+}
+
+int32_t MediaAnalyticsItem::readFromParcel0(const Parcel& data) {
     // into 'this' object
     // .. we make a copy of the string to put away.
     mKey = data.readCString();
@@ -691,8 +715,23 @@
 }
 
 int32_t MediaAnalyticsItem::writeToParcel(Parcel *data) {
+
     if (data == NULL) return -1;
 
+    int32_t version = 0;
+    data->writeInt32(version);
+
+    switch(version) {
+        case 0:
+          return writeToParcel0(data);
+          break;
+        default:
+          ALOGE("Unsupported MediaAnalyticsItem Parcel version: %d", version);
+          return -1;
+    }
+}
+
+int32_t MediaAnalyticsItem::writeToParcel0(Parcel *data) {
 
     data->writeCString(mKey.c_str());
     data->writeInt32(mPid);
@@ -737,7 +776,6 @@
     return 0;
 }
 
-
 const char *MediaAnalyticsItem::toCString() {
    return toCString(PROTO_LAST);
 }
@@ -876,8 +914,6 @@
         }
         return true;
     } else {
-        std::string p = this->toString();
-        ALOGW("Unable to record: %s [forcenew=%d]", p.c_str(), forcenew);
         return false;
     }
 }
@@ -1035,5 +1071,170 @@
     return true;
 }
 
+// a byte array; contents are
+// overall length (uint32) including the length field itself
+// encoding version (uint32)
+// count of properties (uint32)
+// N copies of:
+//     property name as length(int16), bytes
+//         the bytes WILL include the null terminator of the name
+//     type (uint8 -- 1 byte)
+//     size of value field (int16 -- 2 bytes)
+//     value (size based on type)
+//       int32, int64, double -- little endian 4/8/8 bytes respectively
+//       cstring -- N bytes of value [WITH terminator]
+
+enum { kInt32 = 0, kInt64, kDouble, kRate, kCString};
+
+bool MediaAnalyticsItem::dumpAttributes(char **pbuffer, size_t *plength) {
+
+    char *build = NULL;
+
+    if (pbuffer == NULL || plength == NULL)
+        return false;
+
+    // consistency for the caller, who owns whatever comes back in this pointer.
+    *pbuffer = NULL;
+
+    // first, let's calculate sizes
+    int32_t goal = 0;
+    int32_t version = 0;
+
+    goal += sizeof(uint32_t);   // overall length, including the length field
+    goal += sizeof(uint32_t);   // encoding version
+    goal += sizeof(uint32_t);   // # properties
+
+    int32_t count = mPropCount;
+    for (int i = 0 ; i < count; i++ ) {
+        Prop *prop = &mProps[i];
+        goal += sizeof(uint16_t);           // name length
+        goal += strlen(prop->mName) + 1;    // string + null
+        goal += sizeof(uint8_t);            // type
+        goal += sizeof(uint16_t);           // size of value
+        switch (prop->mType) {
+            case MediaAnalyticsItem::kTypeInt32:
+                    goal += sizeof(uint32_t);
+                    break;
+            case MediaAnalyticsItem::kTypeInt64:
+                    goal += sizeof(uint64_t);
+                    break;
+            case MediaAnalyticsItem::kTypeDouble:
+                    goal += sizeof(double);
+                    break;
+            case MediaAnalyticsItem::kTypeRate:
+                    goal += 2 * sizeof(uint64_t);
+                    break;
+            case MediaAnalyticsItem::kTypeCString:
+                    // string bytes + null terminator (the 2-byte size field is counted above)
+                    goal += strlen(prop->u.CStringValue) + 1;
+                    break;
+            default:
+                    ALOGE("found bad Prop type: %d, idx %d, name %s",
+                          prop->mType, i, prop->mName);
+                    return false;
+        }
+    }
+
+    // now that we have a size... let's allocate and fill
+    build = (char *)malloc(goal);
+    if (build == NULL)
+        return false;
+
+    memset(build, 0, goal);
+
+    char *filling = build;
+
+#define _INSERT(val, size) \
+    { memcpy(filling, &(val), (size)); filling += (size);}
+#define _INSERTSTRING(val, size) \
+    { memcpy(filling, (val), (size)); filling += (size);}
+
+    _INSERT(goal, sizeof(int32_t));
+    _INSERT(version, sizeof(int32_t));
+    _INSERT(count, sizeof(int32_t));
+
+    for (int i = 0 ; i < count; i++ ) {
+        Prop *prop = &mProps[i];
+        int16_t attrNameLen = strlen(prop->mName) + 1;
+        _INSERT(attrNameLen, sizeof(int16_t));
+        _INSERTSTRING(prop->mName, attrNameLen);    // termination included
+        int8_t elemtype;
+        int16_t elemsize;
+        switch (prop->mType) {
+            case MediaAnalyticsItem::kTypeInt32:
+                {
+                    elemtype = kInt32;
+                    _INSERT(elemtype, sizeof(int8_t));
+                    elemsize = sizeof(int32_t);
+                    _INSERT(elemsize, sizeof(int16_t));
+
+                    _INSERT(prop->u.int32Value, sizeof(int32_t));
+                    break;
+                }
+            case MediaAnalyticsItem::kTypeInt64:
+                {
+                    elemtype = kInt64;
+                    _INSERT(elemtype, sizeof(int8_t));
+                    elemsize = sizeof(int64_t);
+                    _INSERT(elemsize, sizeof(int16_t));
+
+                    _INSERT(prop->u.int64Value, sizeof(int64_t));
+                    break;
+                }
+            case MediaAnalyticsItem::kTypeDouble:
+                {
+                    elemtype = kDouble;
+                    _INSERT(elemtype, sizeof(int8_t));
+                    elemsize = sizeof(double);
+                    _INSERT(elemsize, sizeof(int16_t));
+
+                    _INSERT(prop->u.doubleValue, sizeof(double));
+                    break;
+                }
+            case MediaAnalyticsItem::kTypeRate:
+                {
+                    elemtype = kRate;
+                    _INSERT(elemtype, sizeof(int8_t));
+                    elemsize = 2 * sizeof(uint64_t);
+                    _INSERT(elemsize, sizeof(int16_t));
+
+                    _INSERT(prop->u.rate.count, sizeof(uint64_t));
+                    _INSERT(prop->u.rate.duration, sizeof(uint64_t));
+                    break;
+                }
+            case MediaAnalyticsItem::kTypeCString:
+                {
+                    elemtype = kCString;
+                    _INSERT(elemtype, sizeof(int8_t));
+                    elemsize = strlen(prop->u.CStringValue) + 1;
+                    _INSERT(elemsize, sizeof(int16_t));
+
+                    _INSERTSTRING(prop->u.CStringValue, elemsize);
+                    break;
+                }
+            default:
+                    // error if can't encode; warning if can't decode
+                    ALOGE("found bad Prop type: %d, idx %d, name %s",
+                          prop->mType, i, prop->mName);
+                    goto badness;
+        }
+    }
+
+    if (build + goal != filling) {
+        ALOGE("problems populating; wrote=%d planned=%d",
+              (int)(filling-build), goal);
+        goto badness;
+    }
+
+    *pbuffer = build;
+    *plength = goal;
+
+    return true;
+
+  badness:
+    free(build);
+    return false;
+}
+
 } // namespace android
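Note: the layout produced by the new dumpAttributes() is only documented in the comment block above it. The following standalone sketch, not part of the patch, walks a buffer with that framing (little-endian host assumed, matching the memcpy-based writer) to make the format concrete:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    // Minimal illustrative reader for the dumpAttributes() byte array.
    enum { kInt32 = 0, kInt64, kDouble, kRate, kCString };

    bool printAttributes(const char *buf, size_t len) {
        const char *p = buf;
        const char *end = buf + len;
        auto have = [&](size_t n) { return (size_t)(end - p) >= n; };

        uint32_t total, version, count;
        if (!have(3 * sizeof(uint32_t))) return false;
        memcpy(&total, p, 4);   p += 4;   // overall length, includes itself
        memcpy(&version, p, 4); p += 4;   // encoding version (0)
        memcpy(&count, p, 4);   p += 4;   // number of properties
        printf("length=%u version=%u properties=%u\n", total, version, count);

        for (uint32_t i = 0; i < count; i++) {
            uint16_t nameLen, valueSize;
            uint8_t type;
            if (!have(sizeof(nameLen))) return false;
            memcpy(&nameLen, p, 2); p += 2;
            if (!have(nameLen)) return false;
            const char *name = p; p += nameLen;          // includes the '\0'
            if (!have(sizeof(type) + sizeof(valueSize))) return false;
            memcpy(&type, p, 1);      p += 1;
            memcpy(&valueSize, p, 2); p += 2;
            if (!have(valueSize)) return false;
            if (type == kInt32) {
                int32_t v; memcpy(&v, p, sizeof(v));
                printf("  %s = %d\n", name, v);
            } else if (type == kCString) {
                printf("  %s = %s\n", name, p);          // value carries its terminator
            } else {
                printf("  %s (type %u, %u value bytes)\n", name, type, valueSize);
            }
            p += valueSize;
        }
        return true;
    }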
 
diff --git a/media/libmediametrics/MediaMetrics.cpp b/media/libmediametrics/MediaMetrics.cpp
index 9b08aa7..6109190 100644
--- a/media/libmediametrics/MediaMetrics.cpp
+++ b/media/libmediametrics/MediaMetrics.cpp
@@ -34,7 +34,7 @@
 
 // manage the overall record
 mediametrics_handle_t mediametrics_create(mediametricskey_t key) {
-    android::MediaAnalyticsItem *item = new android::MediaAnalyticsItem(key);
+    android::MediaAnalyticsItem *item = android::MediaAnalyticsItem::create(key);
     return (mediametrics_handle_t) item;
 }
 
@@ -187,18 +187,9 @@
     return android::MediaAnalyticsItem::isEnabled();
 }
 
-#if 0
-// do not expose this as is.
-// need to revisit (or redefine) how the android::Parcel parameter is handled
-// so that it meets the stable-API criteria for updateable components.
-//
-int32_t mediametrics_writeToParcel(mediametrics_handle_t handle, android::Parcel *parcel) {
+bool mediametrics_getAttributes(mediametrics_handle_t handle, char **buffer, size_t *length) {
     android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
-    if (item == NULL) {
-        return -1;
-    }
-    return item->writeToParcel(parcel);
+    if (item == NULL) return false;
+    return item->dumpAttributes(buffer, length);
 }
-#endif
-
-
diff --git a/media/libmediametrics/include/MediaAnalyticsItem.h b/media/libmediametrics/include/MediaAnalyticsItem.h
index b99cd91..2f9e7c2 100644
--- a/media/libmediametrics/include/MediaAnalyticsItem.h
+++ b/media/libmediametrics/include/MediaAnalyticsItem.h
@@ -17,9 +17,10 @@
 #ifndef ANDROID_MEDIA_MEDIAANALYTICSITEM_H
 #define ANDROID_MEDIA_MEDIAANALYTICSITEM_H
 
-#include <cutils/properties.h>
 #include <string>
 #include <sys/types.h>
+
+#include <cutils/properties.h>
 #include <utils/Errors.h>
 #include <utils/KeyedVector.h>
 #include <utils/RefBase.h>
@@ -84,6 +85,10 @@
 
     public:
 
+        // so clients do not need to know size details
+        static MediaAnalyticsItem* create(Key key);
+        static MediaAnalyticsItem* create();
+
         // access functions for the class
         MediaAnalyticsItem();
         MediaAnalyticsItem(Key);
@@ -175,6 +180,9 @@
         int32_t writeToParcel(Parcel *);
         int32_t readFromParcel(const Parcel&);
 
+        // supports the stable interface
+        bool dumpAttributes(char **pbuffer, size_t *plength);
+
         std::string toString();
         std::string toString(int version);
         const char *toCString();
@@ -183,6 +191,11 @@
         // are we collecting analytics data
         static bool isEnabled();
 
+    private:
+        // handle Parcel version 0
+        int32_t writeToParcel0(Parcel *);
+        int32_t readFromParcel0(const Parcel&);
+
     protected:
 
         // merge fields from arg into this
diff --git a/media/libmediametrics/include/MediaMetrics.h b/media/libmediametrics/include/MediaMetrics.h
index 4d2f352..a4e1ed2 100644
--- a/media/libmediametrics/include/MediaMetrics.h
+++ b/media/libmediametrics/include/MediaMetrics.h
@@ -85,13 +85,9 @@
 void mediametrics_setUid(mediametrics_handle_t handle, uid_t uid);
 bool mediametrics_isEnabled();
 
-#if 0
-// do not expose this as is.
-// need to revisit (or redefine) how the android::Parcel parameter is handled
-// so that it meets the stable-API criteria for updateable components.
-//
-int32_t mediametrics_writeToParcel(mediametrics_handle_t handle, android::Parcel *parcel);
-#endif
+// serialized copy of the attributes/values, mostly for upstream getMetrics() calls
+// caller owns the buffer allocated as part of this call.
+bool mediametrics_getAttributes(mediametrics_handle_t handle, char **buffer, size_t *length);
 
 __END_DECLS
 
diff --git a/media/libmediametrics/libmediametrics.map.txt b/media/libmediametrics/libmediametrics.map.txt
new file mode 100644
index 0000000..c46281a
--- /dev/null
+++ b/media/libmediametrics/libmediametrics.map.txt
@@ -0,0 +1,29 @@
+LIBMEDIAMETRICS_1 {
+  global:
+    mediametrics_addDouble; # apex
+    mediametrics_addInt32; # apex
+    mediametrics_addInt64; # apex
+    mediametrics_addRate; # apex
+    mediametrics_count; # apex
+    mediametrics_create; # apex
+    mediametrics_delete; # apex
+    mediametrics_freeCString; # apex
+    mediametrics_getAttributes; # apex
+    mediametrics_getCString; # apex
+    mediametrics_getDouble; # apex
+    mediametrics_getInt32; # apex
+    mediametrics_getInt64; # apex
+    mediametrics_getKey; # apex
+    mediametrics_getRate; # apex
+    mediametrics_isEnabled; # apex
+    mediametrics_readable; # apex
+    mediametrics_selfRecord; # apex
+    mediametrics_setCString; # apex
+    mediametrics_setDouble; # apex
+    mediametrics_setInt32; # apex
+    mediametrics_setInt64; # apex
+    mediametrics_setRate; # apex
+    mediametrics_setUid; # apex
+  local:
+    *;
+};
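Note: with these entry points exported for apex use, an updatable module can drive a metrics record entirely through the C interface declared in MediaMetrics.h; the buffer returned by mediametrics_getAttributes() is allocated with malloc() inside dumpAttributes(), so the caller releases it with free(). A hedged usage sketch, not part of the patch; the key and attribute names are illustrative:

    #include <stdlib.h>

    #include <media/MediaMetrics.h>

    // Illustrative round trip through the stable C interface.
    void recordExample() {
        mediametrics_handle_t handle = mediametrics_create("example.player");  // illustrative key
        mediametrics_setInt64(handle, "durationMs", 1234);                     // illustrative attribute
        mediametrics_selfRecord(handle);

        char *buffer = nullptr;
        size_t length = 0;
        if (mediametrics_getAttributes(handle, &buffer, &length)) {
            // buffer/length now hold the versioned byte array documented in
            // MediaAnalyticsItem.cpp; hand it upstream, then release it.
            free(buffer);
        }
        mediametrics_delete(handle);
    }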
diff --git a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h
index 0c8d016..7804a62 100644
--- a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h
+++ b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h
@@ -214,6 +214,8 @@
     virtual status_t setParameter(int key, const Parcel &request) = 0;
     virtual status_t getParameter(int key, Parcel *reply) = 0;
 
+    virtual status_t getMetrics(char **buffer, size_t *length) = 0;
+
     // Invoke a generic method on the player by using opaque parcels
     // for the request and reply.
     //
diff --git a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
index 78865c4..2993ab1 100644
--- a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
+++ b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
@@ -102,6 +102,7 @@
             status_t        setAudioAttributes(const jobject attributes);
             jobject         getAudioAttributes();
             status_t        getParameter(int key, Parcel* reply);
+            status_t        getMetrics(char **buffer, size_t *length);
 
             // Modular DRM
             status_t        prepareDrm(int64_t srcId,
diff --git a/media/libmediaplayer2/mediaplayer2.cpp b/media/libmediaplayer2/mediaplayer2.cpp
index f75380c..53f2fb1 100644
--- a/media/libmediaplayer2/mediaplayer2.cpp
+++ b/media/libmediaplayer2/mediaplayer2.cpp
@@ -21,7 +21,6 @@
 #include <android/binder_ibinder.h>
 #include <media/AudioSystem.h>
 #include <media/DataSourceDesc.h>
-#include <media/MediaAnalyticsItem.h>
 #include <media/MemoryLeakTrackUtil.h>
 #include <media/NdkWrapper.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -979,6 +978,22 @@
     return status;
 }
 
+// for mediametrics
+status_t MediaPlayer2::getMetrics(char **buffer, size_t *length) {
+    ALOGD("MediaPlayer2::getMetrics()");
+    Mutex::Autolock _l(mLock);
+    if (mPlayer == NULL) {
+        ALOGV("getMetrics: no active player");
+        return INVALID_OPERATION;
+    }
+
+    status_t status = mPlayer->getMetrics(buffer, length);
+    if (status != OK) {
+        ALOGD("getMetrics returns %d", status);
+    }
+    return status;
+}
+
 void MediaPlayer2::notify(int64_t srcId, int msg, int ext1, int ext2, const PlayerMessage *obj) {
     ALOGV("message received srcId=%lld, msg=%d, ext1=%d, ext2=%d",
           (long long)srcId, msg, ext1, ext2);
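Note: the new MediaPlayer2::getMetrics() forwards to the player implementation (NuPlayer2Driver::getMetrics() below), which serializes the current record via mediametrics_getAttributes(); buffer ownership therefore follows the same rule and the caller frees it. A short hedged sketch, only buildable in the framework tree; the helper name is illustrative and `player` is assumed to be an already set-up MediaPlayer2:

    #include <stdlib.h>

    #include <mediaplayer2/mediaplayer2.h>

    // Illustrative only: pull the serialized metrics out of an existing player.
    void logPlayerMetrics(const android::sp<android::MediaPlayer2> &player) {
        char *buffer = nullptr;
        size_t length = 0;
        if (player->getMetrics(&buffer, &length) == android::OK) {
            // forward buffer/length (e.g. across JNI), then release the malloc'd copy
            free(buffer);
        }
    }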
diff --git a/media/libmediaplayer2/nuplayer2/Android.bp b/media/libmediaplayer2/nuplayer2/Android.bp
index 71cd50f..0f69b2e 100644
--- a/media/libmediaplayer2/nuplayer2/Android.bp
+++ b/media/libmediaplayer2/nuplayer2/Android.bp
@@ -51,6 +51,7 @@
         "libui",
         "libgui",
         "libmedia",
+        "libmediametrics",
         "libmediandk",
         "libmediandk_utils",
         "libpowermanager",
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
index a5bd62d..9729d86 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
@@ -107,6 +107,8 @@
     mStats->setInt64("frames-total", mNumFramesTotal);
     mStats->setInt64("frames-dropped-input", mNumInputFramesDropped);
     mStats->setInt64("frames-dropped-output", mNumOutputFramesDropped);
+    mStats->setFloat("frame-rate-total", mFrameRateTotal);
+
     return mStats;
 }
 
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
index 56e9471..1b661f2 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
@@ -92,6 +92,7 @@
 static const char *kPlayerHeight = "android.media.mediaplayer.height";
 static const char *kPlayerFrames = "android.media.mediaplayer.frames";
 static const char *kPlayerFramesDropped = "android.media.mediaplayer.dropped";
+static const char *kPlayerFrameRate = "android.media.mediaplayer.fps";
 static const char *kPlayerAMime = "android.media.mediaplayer.audio.mime";
 static const char *kPlayerACodec = "android.media.mediaplayer.audio.codec";
 static const char *kPlayerDuration = "android.media.mediaplayer.durationMs";
@@ -125,7 +126,7 @@
       mMediaClock(new MediaClock),
       mPlayer(new NuPlayer2(pid, uid, mMediaClock, context)),
       mPlayerFlags(0),
-      mAnalyticsItem(NULL),
+      mMetricsHandle(0),
       mClientUid(uid),
       mAtEOS(false),
       mLooping(false),
@@ -136,9 +137,9 @@
 
     mMediaClock->init();
 
-    // set up an analytics record
-    mAnalyticsItem = new MediaAnalyticsItem(kKeyPlayer);
-    mAnalyticsItem->setUid(mClientUid);
+    // set up media metrics record
+    mMetricsHandle = mediametrics_create(kKeyPlayer);
+    mediametrics_setUid(mMetricsHandle, mClientUid);
 
     mNuPlayer2Looper->start(
             false, /* runOnCallingThread */
@@ -159,10 +160,7 @@
     updateMetrics("destructor");
     logMetrics("destructor");
 
-    if (mAnalyticsItem != NULL) {
-        delete mAnalyticsItem;
-        mAnalyticsItem = NULL;
-    }
+    mediametrics_delete(mMetricsHandle);
 }
 
 status_t NuPlayer2Driver::initCheck() {
@@ -453,15 +451,15 @@
 
             if (mime.startsWith("video/")) {
                 int32_t width, height;
-                mAnalyticsItem->setCString(kPlayerVMime, mime.c_str());
+                mediametrics_setCString(mMetricsHandle, kPlayerVMime, mime.c_str());
                 if (!name.empty()) {
-                    mAnalyticsItem->setCString(kPlayerVCodec, name.c_str());
+                    mediametrics_setCString(mMetricsHandle, kPlayerVCodec, name.c_str());
                 }
 
                 if (stats->findInt32("width", &width)
                         && stats->findInt32("height", &height)) {
-                    mAnalyticsItem->setInt32(kPlayerWidth, width);
-                    mAnalyticsItem->setInt32(kPlayerHeight, height);
+                    mediametrics_setInt32(mMetricsHandle, kPlayerWidth, width);
+                    mediametrics_setInt32(mMetricsHandle, kPlayerHeight, height);
                 }
 
                 int64_t numFramesTotal = 0;
@@ -469,14 +467,18 @@
                 stats->findInt64("frames-total", &numFramesTotal);
                 stats->findInt64("frames-dropped-output", &numFramesDropped);
 
-                mAnalyticsItem->setInt64(kPlayerFrames, numFramesTotal);
-                mAnalyticsItem->setInt64(kPlayerFramesDropped, numFramesDropped);
+                mediametrics_setInt64(mMetricsHandle, kPlayerFrames, numFramesTotal);
+                mediametrics_setInt64(mMetricsHandle, kPlayerFramesDropped, numFramesDropped);
 
+                float frameRate = 0;
+                if (stats->findFloat("frame-rate-total", &frameRate)) {
+                    mediametrics_setDouble(mMetricsHandle, kPlayerFrameRate, frameRate);
+                }
 
             } else if (mime.startsWith("audio/")) {
-                mAnalyticsItem->setCString(kPlayerAMime, mime.c_str());
+                mediametrics_setCString(mMetricsHandle, kPlayerAMime, mime.c_str());
                 if (!name.empty()) {
-                    mAnalyticsItem->setCString(kPlayerACodec, name.c_str());
+                    mediametrics_setCString(mMetricsHandle, kPlayerACodec, name.c_str());
                 }
             }
         }
@@ -487,17 +489,17 @@
     // getDuration() uses mLock for mutex -- careful where we use it.
     int64_t duration_ms = -1;
     getDuration(&duration_ms);
-    mAnalyticsItem->setInt64(kPlayerDuration, duration_ms);
+    mediametrics_setInt64(mMetricsHandle, kPlayerDuration, duration_ms);
 
-    mAnalyticsItem->setInt64(kPlayerPlaying, (mPlayingTimeUs+500)/1000 );
+    mediametrics_setInt64(mMetricsHandle, kPlayerPlaying, (mPlayingTimeUs+500)/1000 );
 
     if (mRebufferingEvents != 0) {
-        mAnalyticsItem->setInt64(kPlayerRebuffering, (mRebufferingTimeUs+500)/1000 );
-        mAnalyticsItem->setInt32(kPlayerRebufferingCount, mRebufferingEvents);
-        mAnalyticsItem->setInt32(kPlayerRebufferingAtExit, mRebufferingAtExit);
+        mediametrics_setInt64(mMetricsHandle, kPlayerRebuffering, (mRebufferingTimeUs+500)/1000 );
+        mediametrics_setInt32(mMetricsHandle, kPlayerRebufferingCount, mRebufferingEvents);
+        mediametrics_setInt32(mMetricsHandle, kPlayerRebufferingAtExit, mRebufferingAtExit);
     }
 
-    mAnalyticsItem->setCString(kPlayerDataSourceType, mPlayer->getDataSourceType());
+    mediametrics_setCString(mMetricsHandle, kPlayerDataSourceType, mPlayer->getDataSourceType());
 }
 
 
@@ -507,7 +509,7 @@
     }
     ALOGV("logMetrics(%p) from %s at state %d", this, where, mState);
 
-    if (mAnalyticsItem == NULL || mAnalyticsItem->isEnabled() == false) {
+    if (mMetricsHandle == 0 || mediametrics_isEnabled() == false) {
         return;
     }
 
@@ -516,16 +518,12 @@
     // and that always injects 3 fields (duration, playing time, and
     // datasource) into the record.
     // So the canonical "empty" record has 3 elements in it.
-    if (mAnalyticsItem->count() > 3) {
-
-        mAnalyticsItem->selfrecord();
-
+    if (mediametrics_count(mMetricsHandle) > 3) {
+        mediametrics_selfRecord(mMetricsHandle);
         // re-init in case we prepare() and start() again.
-        delete mAnalyticsItem ;
-        mAnalyticsItem = new MediaAnalyticsItem(kKeyPlayer);
-        if (mAnalyticsItem) {
-            mAnalyticsItem->setUid(mClientUid);
-        }
+        mediametrics_delete(mMetricsHandle);
+        mMetricsHandle = mediametrics_create(kKeyPlayer);
+        mediametrics_setUid(mMetricsHandle, mClientUid);
     } else {
         ALOGV("did not have anything to record");
     }
@@ -649,19 +647,18 @@
     return INVALID_OPERATION;
 }
 
-status_t NuPlayer2Driver::getParameter(int key, Parcel *reply) {
-
-    if (key == FOURCC('m','t','r','X')) {
-        // mtrX -- a play on 'metrics' (not matrix)
-        // gather current info all together, parcel it, and send it back
-        updateMetrics("api");
-        mAnalyticsItem->writeToParcel(reply);
-        return OK;
-    }
-
+status_t NuPlayer2Driver::getParameter(int key __unused, Parcel *reply __unused) {
     return INVALID_OPERATION;
 }
 
+status_t NuPlayer2Driver::getMetrics(char **buffer, size_t *length) {
+    updateMetrics("api");
+    if (mediametrics_getAttributes(mMetricsHandle, buffer, length))
+        return OK;
+    else
+        return FAILED_TRANSACTION;
+}
+
 void NuPlayer2Driver::notifyResetComplete(int64_t /* srcId */) {
     ALOGD("notifyResetComplete(%p)", this);
     Mutex::Autolock autoLock(mLock);
@@ -867,11 +864,11 @@
                 // ext1 is our primary 'error type' value. Only add ext2 when non-zero.
                 // [test against msg is due to fall through from previous switch value]
                 if (msg == MEDIA2_ERROR) {
-                    mAnalyticsItem->setInt32(kPlayerError, ext1);
+                    mediametrics_setInt32(mMetricsHandle, kPlayerError, ext1);
                     if (ext2 != 0) {
-                        mAnalyticsItem->setInt32(kPlayerErrorCode, ext2);
+                        mediametrics_setInt32(mMetricsHandle, kPlayerErrorCode, ext2);
                     }
-                    mAnalyticsItem->setCString(kPlayerErrorState, stateString(mState).c_str());
+                    mediametrics_setCString(mMetricsHandle, kPlayerErrorState, stateString(mState).c_str());
                 }
                 mAtEOS = true;
                 break;
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h
index 0ec3a4b..3d299f3 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h
@@ -16,7 +16,7 @@
 
 #include <mediaplayer2/MediaPlayer2Interface.h>
 
-#include <media/MediaAnalyticsItem.h>
+#include <media/MediaMetrics.h>
 #include <media/stagefright/foundation/ABase.h>
 #include <mediaplayer2/JObjectHolder.h>
 
@@ -61,6 +61,7 @@
     virtual void setAudioSink(const sp<AudioSink> &audioSink) override;
     virtual status_t setParameter(int key, const Parcel &request) override;
     virtual status_t getParameter(int key, Parcel *reply) override;
+    virtual status_t getMetrics(char **buf, size_t *length) override;
 
     virtual status_t dump(int fd, const Vector<String16> &args) const override;
 
@@ -132,7 +133,7 @@
     sp<AudioSink> mAudioSink;
     uint32_t mPlayerFlags;
 
-    MediaAnalyticsItem *mAnalyticsItem;
+    mediametrics_handle_t mMetricsHandle;
     uid_t mClientUid;
 
     bool mAtEOS;
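
The NuPlayer2Driver hunks above swap the MediaAnalyticsItem object for the opaque handle API declared in media/MediaMetrics.h. Below is a minimal sketch of that handle lifecycle, limited to the calls that appear in this change; the key string and attribute name are placeholders, not the driver's real kKeyPlayer/kPlayerError values.

    #include <stdint.h>
    #include <sys/types.h>
    #include <media/MediaMetrics.h>

    // Sketch only: create a record, attach attributes, submit it, release the handle.
    static void recordOneError(uid_t clientUid, int32_t errorCode) {
        mediametrics_handle_t handle = mediametrics_create("sample.player");  // placeholder key
        mediametrics_setUid(handle, clientUid);
        mediametrics_setInt32(handle, "sample.error", errorCode);             // placeholder attribute
        if (mediametrics_isEnabled() && mediametrics_count(handle) > 0) {
            mediametrics_selfRecord(handle);  // hand the record to the metrics service
        }
        mediametrics_delete(handle);          // handles must be released explicitly
    }
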
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
index 8d876da..67a0f1e 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
@@ -159,7 +159,8 @@
     if (drm != NULL) {
         for (size_t i = 0; i < psshDRMs.size(); i++) {
             DrmUUID uuid = psshDRMs[i];
-            if (drm->isCryptoSchemeSupported(uuid.ptr(), String8()))
+            if (drm->isCryptoSchemeSupported(uuid.ptr(), String8(),
+                            DrmPlugin::kSecurityLevelUnknown))
                 supportedDRMs.add(uuid);
         }
 
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index dadfe28..a1a2660 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -8682,14 +8682,17 @@
         if (omxNode->configureVideoTunnelMode(
                 kPortIndexOutput, OMX_TRUE, 0, &sidebandHandle) == OK) {
             // tunneled playback includes adaptive playback
-            caps->addFlags(MediaCodecInfo::Capabilities::kFlagSupportsAdaptivePlayback
-                    | MediaCodecInfo::Capabilities::kFlagSupportsTunneledPlayback);
-        } else if (omxNode->setPortMode(
-                kPortIndexOutput, IOMX::kPortModeDynamicANWBuffer) == OK ||
-                omxNode->prepareForAdaptivePlayback(
-                kPortIndexOutput, OMX_TRUE,
-                1280 /* width */, 720 /* height */) == OK) {
-            caps->addFlags(MediaCodecInfo::Capabilities::kFlagSupportsAdaptivePlayback);
+        } else {
+            // tunneled playback is not supported
+            caps->removeDetail(MediaCodecInfo::Capabilities::FEATURE_TUNNELED_PLAYBACK);
+            if (omxNode->setPortMode(
+                    kPortIndexOutput, IOMX::kPortModeDynamicANWBuffer) != OK &&
+                    omxNode->prepareForAdaptivePlayback(
+                        kPortIndexOutput, OMX_TRUE,
+                        1280 /* width */, 720 /* height */) != OK) {
+                // adaptive playback is not supported
+                caps->removeDetail(MediaCodecInfo::Capabilities::FEATURE_ADAPTIVE_PLAYBACK);
+            }
         }
     }
 
@@ -8697,11 +8700,20 @@
         OMX_VIDEO_CONFIG_ANDROID_INTRAREFRESHTYPE params;
         InitOMXParams(&params);
         params.nPortIndex = kPortIndexOutput;
-        // TODO: should we verify if fallback is supported?
+
+        OMX_VIDEO_PARAM_INTRAREFRESHTYPE fallbackParams;
+        InitOMXParams(&fallbackParams);
+        fallbackParams.nPortIndex = kPortIndexOutput;
+        fallbackParams.eRefreshMode = OMX_VIDEO_IntraRefreshCyclic;
+
         if (omxNode->getConfig(
                 (OMX_INDEXTYPE)OMX_IndexConfigAndroidIntraRefresh,
-                &params, sizeof(params)) == OK) {
-            caps->addFlags(MediaCodecInfo::Capabilities::kFlagSupportsIntraRefresh);
+                &params, sizeof(params)) != OK &&
+                omxNode->getParameter(
+                    OMX_IndexParamVideoIntraRefresh, &fallbackParams,
+                    sizeof(fallbackParams)) != OK) {
+            // intra refresh is not supported
+            caps->removeDetail(MediaCodecInfo::Capabilities::FEATURE_INTRA_REFRESH);
         }
     }
 
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index f45cc58..03eef48 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -160,7 +160,6 @@
         "libstagefright_codecbase",
         "libstagefright_foundation",
         "libstagefright_omx_utils",
-        "libstagefright_opus_common",
         "libRScpp",
         "libhidlallocatorutils",
         "libhidlbase",
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 7df1a2d..c4015fb 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -85,7 +85,7 @@
 static const int kTimestampDebugCount = 10;
 static const int kItemIdBase = 10000;
 static const char kExifHeader[] = {'E', 'x', 'i', 'f', '\0', '\0'};
-static const int32_t kTiffHeaderOffset = htonl(sizeof(kExifHeader));
+static const uint8_t kExifApp1Marker[] = {'E', 'x', 'i', 'f', 0xff, 0xe1};
 
 static const uint8_t kMandatoryHevcNalUnitTypes[3] = {
     kHevcNalUnitTypeVps,
@@ -125,7 +125,7 @@
     bool isAudio() const { return mIsAudio; }
     bool isMPEG4() const { return mIsMPEG4; }
     bool usePrefix() const { return mIsAvc || mIsHevc || mIsHeic; }
-    bool isExifData(const MediaBufferBase *buffer) const;
+    bool isExifData(MediaBufferBase *buffer, uint32_t *tiffHdrOffset) const;
     void addChunkOffset(off64_t offset);
     void addItemOffsetAndSize(off64_t offset, size_t size, bool isExif);
     void flushItemRefs();
@@ -364,7 +364,7 @@
 
     Vector<uint16_t> mProperties;
     ItemRefs mDimgRefs;
-    ItemRefs mCdscRefs;
+    Vector<uint16_t> mExifList;
     uint16_t mImageItemId;
     int32_t mIsPrimary;
     int32_t mWidth, mHeight;
@@ -1368,14 +1368,16 @@
 }
 
 off64_t MPEG4Writer::addSample_l(
-        MediaBuffer *buffer, bool usePrefix, bool isExif, size_t *bytesWritten) {
+        MediaBuffer *buffer, bool usePrefix,
+        uint32_t tiffHdrOffset, size_t *bytesWritten) {
     off64_t old_offset = mOffset;
 
     if (usePrefix) {
         addMultipleLengthPrefixedSamples_l(buffer);
     } else {
-        if (isExif) {
-            ::write(mFd, &kTiffHeaderOffset, 4); // exif_tiff_header_offset field
+        if (tiffHdrOffset > 0) {
+            tiffHdrOffset = htonl(tiffHdrOffset);
+            ::write(mFd, &tiffHdrOffset, 4); // exif_tiff_header_offset field
             mOffset += 4;
         }
 
@@ -1803,7 +1805,6 @@
       mStartTimestampUs(-1),
       mRotation(0),
       mDimgRefs("dimg"),
-      mCdscRefs("cdsc"),
       mImageItemId(0),
       mIsPrimary(0),
       mWidth(0),
@@ -1984,11 +1985,34 @@
     return OK;
 }
 
-bool MPEG4Writer::Track::isExifData(const MediaBufferBase *buffer) const {
-    return mIsHeic
-            && (buffer->range_length() > sizeof(kExifHeader))
-            && !memcmp((uint8_t *)buffer->data() + buffer->range_offset(),
-                    kExifHeader, sizeof(kExifHeader));
+bool MPEG4Writer::Track::isExifData(
+        MediaBufferBase *buffer, uint32_t *tiffHdrOffset) const {
+    if (!mIsHeic) {
+        return false;
+    }
+
+    // Exif block starting with 'Exif\0\0'
+    size_t length = buffer->range_length();
+    uint8_t *data = (uint8_t *)buffer->data() + buffer->range_offset();
+    if ((length > sizeof(kExifHeader))
+        && !memcmp(data, kExifHeader, sizeof(kExifHeader))) {
+        *tiffHdrOffset = sizeof(kExifHeader);
+        return true;
+    }
+
+    // Exif block starting with fourcc 'Exif' followed by APP1 marker
+    if ((length > sizeof(kExifApp1Marker) + 2 + sizeof(kExifHeader))
+            && !memcmp(data, kExifApp1Marker, sizeof(kExifApp1Marker))
+            && !memcmp(data + sizeof(kExifApp1Marker) + 2, kExifHeader, sizeof(kExifHeader))) {
+        // skip 'Exif' fourcc
+        buffer->set_range(4, buffer->range_length() - 4);
+
+        // 2-byte APP1 + 2-byte size followed by kExifHeader
+        *tiffHdrOffset = 2 + 2 + sizeof(kExifHeader);
+        return true;
+    }
+
+    return false;
 }
 
 void MPEG4Writer::Track::addChunkOffset(off64_t offset) {
@@ -2014,7 +2038,7 @@
     }
 
     if (isExif) {
-         mCdscRefs.value.push_back(mOwner->addItem_l({
+         mExifList.push_back(mOwner->addItem_l({
             .itemType = "Exif",
             .isPrimary = false,
             .isHidden = false,
@@ -2117,7 +2141,16 @@
 
     if (mImageItemId > 0) {
         mOwner->addRefs_l(mImageItemId, mDimgRefs);
-        mOwner->addRefs_l(mImageItemId, mCdscRefs);
+
+        if (!mExifList.empty()) {
+            // The "cdsc" ref is from the metadata/exif item to the image item.
+            // So the refs all contain the image item.
+            ItemRefs cdscRefs("cdsc");
+            cdscRefs.value.push_back(mImageItemId);
+            for (uint16_t exifItem : mExifList) {
+                mOwner->addRefs_l(exifItem, cdscRefs);
+            }
+        }
     }
 }
 
@@ -2269,14 +2302,16 @@
     while (!chunk->mSamples.empty()) {
         List<MediaBuffer *>::iterator it = chunk->mSamples.begin();
 
-        int32_t isExif;
-        if (!(*it)->meta_data().findInt32(kKeyIsExif, &isExif)) {
-            isExif = 0;
+        uint32_t tiffHdrOffset;
+        if (!(*it)->meta_data().findInt32(
+                kKeyExifTiffOffset, (int32_t*)&tiffHdrOffset)) {
+            tiffHdrOffset = 0;
         }
+        bool isExif = (tiffHdrOffset > 0);
         bool usePrefix = chunk->mTrack->usePrefix() && !isExif;
 
         size_t bytesWritten;
-        off64_t offset = addSample_l(*it, usePrefix, isExif, &bytesWritten);
+        off64_t offset = addSample_l(*it, usePrefix, tiffHdrOffset, &bytesWritten);
 
         if (chunk->mTrack->isHeic()) {
             chunk->mTrack->addItemOffsetAndSize(offset, bytesWritten, isExif);
@@ -3002,10 +3037,11 @@
         }
 
         bool isExif = false;
+        uint32_t tiffHdrOffset = 0;
         int32_t isMuxerData;
         if (buffer->meta_data().findInt32(kKeyIsMuxerData, &isMuxerData) && isMuxerData) {
             // We only support one type of muxer data, which is Exif data block.
-            isExif = isExifData(buffer);
+            isExif = isExifData(buffer, &tiffHdrOffset);
             if (!isExif) {
                 ALOGW("Ignoring bad Exif data block");
                 buffer->release();
@@ -3027,7 +3063,7 @@
         buffer = NULL;
 
         if (isExif) {
-            copy->meta_data().setInt32(kKeyIsExif, 1);
+            copy->meta_data().setInt32(kKeyExifTiffOffset, tiffHdrOffset);
         }
         bool usePrefix = this->usePrefix() && !isExif;
 
@@ -3300,7 +3336,8 @@
         }
         if (!hasMultipleTracks) {
             size_t bytesWritten;
-            off64_t offset = mOwner->addSample_l(copy, usePrefix, isExif, &bytesWritten);
+            off64_t offset = mOwner->addSample_l(
+                    copy, usePrefix, tiffHdrOffset, &bytesWritten);
 
             if (mIsHeic) {
                 addItemOffsetAndSize(offset, bytesWritten, isExif);
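
isExifData() now accepts two Exif framings and reports where the TIFF header starts through *tiffHdrOffset. The standalone sketch below mirrors those two checks without MediaBuffer; the function name is invented, and for the APP1 form it returns the offset measured after the 4-byte 'Exif' fourcc has been stripped, matching the set_range() convention in the hunk above.

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    static const uint8_t kHdr[]  = {'E', 'x', 'i', 'f', '\0', '\0'};  // bare Exif block header
    static const uint8_t kApp1[] = {'E', 'x', 'i', 'f', 0xff, 0xe1};  // 'Exif' fourcc + APP1 marker

    // Returns the TIFF header offset the writer would record, or 0 if the block is not Exif.
    static uint32_t exifTiffOffset(const uint8_t *data, size_t len) {
        // Layout 1: 'E' 'x' 'i' 'f' 0x00 0x00 | TIFF header ...
        if (len > sizeof(kHdr) && !memcmp(data, kHdr, sizeof(kHdr))) {
            return (uint32_t)sizeof(kHdr);                       // 6
        }
        // Layout 2: 'E' 'x' 'i' 'f' | 0xFF 0xE1 | 2-byte size | 'E' 'x' 'i' 'f' 0x00 0x00 | TIFF ...
        if (len > sizeof(kApp1) + 2 + sizeof(kHdr)
                && !memcmp(data, kApp1, sizeof(kApp1))
                && !memcmp(data + sizeof(kApp1) + 2, kHdr, sizeof(kHdr))) {
            // Once the fourcc is dropped, the TIFF header sits at 2 (marker) + 2 (size) + 6 = 10.
            return 2 + 2 + (uint32_t)sizeof(kHdr);
        }
        return 0;
    }
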
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 7816fae..c7da7c7 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -862,9 +862,9 @@
 //static
 sp<CodecBase> MediaCodec::GetCodecBase(const AString &name, const char *owner) {
     if (owner) {
-        if (strncmp(owner, "default", 8) == 0) {
+        if (strcmp(owner, "default") == 0) {
             return new ACodec;
-        } else if (strncmp(owner, "codec2", 7) == 0) {
+        } else if (strncmp(owner, "codec2", 6) == 0) {
             return CreateCCodec();
         }
     }
@@ -911,10 +911,10 @@
             continue;
         }
         mCodecInfo = mcl->getCodecInfo(codecIdx);
-        Vector<AString> mimes;
-        mCodecInfo->getSupportedMimes(&mimes);
-        for (size_t i = 0; i < mimes.size(); i++) {
-            if (mimes[i].startsWith("video/")) {
+        Vector<AString> mediaTypes;
+        mCodecInfo->getSupportedMediaTypes(&mediaTypes);
+        for (size_t i = 0; i < mediaTypes.size(); i++) {
+            if (mediaTypes[i].startsWith("video/")) {
                 mIsVideo = true;
                 break;
             }
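
GetCodecBase() above now matches the "default" owner exactly but keeps a 6-byte prefix match for "codec2", so any owner string beginning with "codec2" (for example a hypothetical "codec2::software") still routes to CCodec. A minimal sketch of that dispatch; the enum and function are invented for illustration.

    #include <cstring>

    enum class CodecKind { kOmx, kCodec2, kUnknown };

    static CodecKind kindForOwner(const char *owner) {
        if (owner == nullptr) {
            return CodecKind::kUnknown;
        }
        if (strcmp(owner, "default") == 0) {     // exact match -> ACodec in the hunk above
            return CodecKind::kOmx;
        }
        if (strncmp(owner, "codec2", 6) == 0) {  // prefix match -> CreateCCodec()
            return CodecKind::kCodec2;
        }
        return CodecKind::kUnknown;
    }
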
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index eaff283..93478e9 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -215,13 +215,9 @@
             mCodecInfos.begin(),
             mCodecInfos.end(),
             [](const sp<MediaCodecInfo> &info1, const sp<MediaCodecInfo> &info2) {
-                if (info2 == nullptr) {
-                    return false;
-                } else if (info1 == nullptr) {
-                    return true;
-                } else {
-                    return info1->rank() < info2->rank();
-                }
+                // null is lowest; written so the comparison stays a strict weak
+                // ordering even if two null entries are compared
+                return info2 != nullptr
+                        && (info1 == nullptr || info1->getRank() < info2->getRank());
             });
 }
 
diff --git a/media/libstagefright/MediaCodecListOverrides.cpp b/media/libstagefright/MediaCodecListOverrides.cpp
index cac53f4..dd7c3e6 100644
--- a/media/libstagefright/MediaCodecListOverrides.cpp
+++ b/media/libstagefright/MediaCodecListOverrides.cpp
@@ -228,18 +228,18 @@
             continue;
         }
 
-        Vector<AString> mimes;
-        info->getSupportedMimes(&mimes);
-        for (size_t i = 0; i < mimes.size(); ++i) {
+        Vector<AString> mediaTypes;
+        info->getSupportedMediaTypes(&mediaTypes);
+        for (size_t i = 0; i < mediaTypes.size(); ++i) {
             const sp<MediaCodecInfo::Capabilities> &caps =
-                    info->getCapabilitiesFor(mimes[i].c_str());
+                    info->getCapabilitiesFor(mediaTypes[i].c_str());
             if (!forceToMeasure &&
                 (caps->getDetails()->contains("max-supported-instances") ||
                  caps->getDetails()->contains("max-concurrent-instances"))) {
                 continue;
             }
 
-            size_t max = doProfileCodecs(info->isEncoder(), name, mimes[i], caps);
+            size_t max = doProfileCodecs(info->isEncoder(), name, mediaTypes[i], caps);
             if (max > 0) {
                 CodecSettings settings;
                 char maxStr[32];
@@ -248,7 +248,7 @@
 
                 AString key = name;
                 key.append(" ");
-                key.append(mimes[i]);
+                key.append(mediaTypes[i]);
 
                 if (info->isEncoder()) {
                     encoder_results->add(key, settings);
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index 9511931..4ed3382 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -57,7 +57,7 @@
 }
 
 MediaTrack *MediaExtractorCUnwrapper::getTrack(size_t index) {
-    return new MediaTrackCUnwrapper(plugin->getTrack(plugin->data, index));
+    return MediaTrackCUnwrapper::create(plugin->getTrack(plugin->data, index));
 }
 
 status_t MediaExtractorCUnwrapper::getTrackMetaData(
diff --git a/media/libstagefright/MediaTrack.cpp b/media/libstagefright/MediaTrack.cpp
index 036e79d..89c9b25 100644
--- a/media/libstagefright/MediaTrack.cpp
+++ b/media/libstagefright/MediaTrack.cpp
@@ -65,6 +65,13 @@
     bufferGroup = nullptr;
 }
 
+MediaTrackCUnwrapper *MediaTrackCUnwrapper::create(CMediaTrack *cmediatrack) {
+    if (cmediatrack == nullptr) {
+        return nullptr;
+    }
+    return new MediaTrackCUnwrapper(cmediatrack);
+}
+
 MediaTrackCUnwrapper::~MediaTrackCUnwrapper() {
     wrapper->free(wrapper->data);
     free(wrapper);
diff --git a/media/libstagefright/OggWriter.cpp b/media/libstagefright/OggWriter.cpp
index ad55c56..5c13983 100644
--- a/media/libstagefright/OggWriter.cpp
+++ b/media/libstagefright/OggWriter.cpp
@@ -30,7 +30,7 @@
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/OggWriter.h>
 #include <media/stagefright/foundation/ADebug.h>
-#include "OpusHeader.h"
+#include <media/stagefright/foundation/OpusHeader.h>
 
 extern "C" {
 #include <ogg/ogg.h>
@@ -114,30 +114,17 @@
     }
 
     mSampleRate = sampleRate;
-
-    OpusHeader header;
-    header.channels = nChannels;
-    header.num_streams = nChannels;
-    header.num_coupled = 0;
-    header.channel_mapping = ((nChannels > 8) ? 255 : (nChannels > 2));
-    header.gain_db = 0;
-    header.skip_samples = 0;
-
-    // headers are 21-bytes + something driven by channel count
-    // expect numbers in the low 30's here. WriteOpusHeader() will tell us
-    // if things are bad.
-    unsigned char header_data[100];
-    ogg_packet op;
-    ogg_page og;
-
-    const int packet_size = WriteOpusHeader(header, mSampleRate, (uint8_t*)header_data,
-                                            sizeof(header_data));
-
-    if (packet_size < 0) {
-        ALOGE("opus header writing failed");
+    uint32_t type;
+    const void *header_data;
+    size_t packet_size;
+    if (!source->getFormat()->findData(kKeyOpusHeader, &type, &header_data, &packet_size)) {
+        ALOGE("opus header not found");
         return UNKNOWN_ERROR;
     }
-    op.packet = header_data;
+
+    ogg_packet op;
+    ogg_page og;
+    op.packet = (unsigned char *)header_data;
     op.bytes = packet_size;
     op.b_o_s = 1;
     op.e_o_s = 0;
diff --git a/media/libstagefright/OmxInfoBuilder.cpp b/media/libstagefright/OmxInfoBuilder.cpp
index 96b896b..382c947 100644
--- a/media/libstagefright/OmxInfoBuilder.cpp
+++ b/media/libstagefright/OmxInfoBuilder.cpp
@@ -57,14 +57,9 @@
 }
 
 status_t queryCapabilities(
-        const IOmxStore::NodeInfo& node, const char* mime, bool isEncoder,
+        const IOmxStore::NodeInfo& node, const char* mediaType, bool isEncoder,
         MediaCodecInfo::CapabilitiesWriter* caps) {
     sp<ACodec> codec = new ACodec();
-    status_t err = codec->queryCapabilities(
-            node.owner.c_str(), node.name.c_str(), mime, isEncoder, caps);
-    if (err != OK) {
-        return err;
-    }
     for (const auto& attribute : node.attributes) {
         // All features have an int32 value except
         // "feature-bitrate-modes", which has a string value.
@@ -81,6 +76,12 @@
                     attribute.key.c_str(), attribute.value.c_str());
         }
     }
+    // The queryCapabilities() call below may remove capability details that the codec
+    // does not actually support, which is why the attributes above are applied first.
+    status_t err = codec->queryCapabilities(
+            node.owner.c_str(), node.name.c_str(), mediaType, isEncoder, caps);
+    if (err != OK) {
+        return err;
+    }
     return OK;
 }
 
@@ -163,7 +164,10 @@
                     info = c2i->second.get();
                     info->setName(nodeName.c_str());
                     info->setOwner(node.owner.c_str());
-                    info->setEncoder(isEncoder);
+                    info->setAttributes(
+                            // all OMX codecs are vendor codecs (in the vendor partition), but
+                            // treat OMX.google codecs as non-hardware-accelerated and non-vendor
+                            (isEncoder ? MediaCodecInfo::kFlagIsEncoder : 0));
                     info->setRank(defaultRank);
                 } else {
                     // The node has been seen before. Simply retrieve the
@@ -180,7 +184,19 @@
                         info = c2i->second.get();
                         info->setName(nodeName.c_str());
                         info->setOwner(node.owner.c_str());
-                        info->setEncoder(isEncoder);
+                        typename std::underlying_type<MediaCodecInfo::Attributes>::type attrs =
+                            MediaCodecInfo::kFlagIsVendor;
+                        if (isEncoder) {
+                            attrs |= MediaCodecInfo::kFlagIsEncoder;
+                        }
+                        if (std::count_if(
+                                node.attributes.begin(), node.attributes.end(),
+                                [](const IOmxStore::Attribute &i) -> bool {
+                                    return i.key == "attribute::software-codec";
+                                })) {
+                            attrs |= MediaCodecInfo::kFlagIsHardwareAccelerated;
+                        }
+                        info->setAttributes(attrs);
                         info->setRank(defaultRank);
                     } else {
                         // If preferPlatformNodes is true, this node must be
@@ -195,12 +211,12 @@
                 }
             }
             std::unique_ptr<MediaCodecInfo::CapabilitiesWriter> caps =
-                    info->addMime(typeName.c_str());
+                    info->addMediaType(typeName.c_str());
             if (queryCapabilities(
                     node, typeName.c_str(), isEncoder, caps.get()) != OK) {
-                ALOGW("Fail to add mime %s to codec %s",
+                ALOGW("Fail to add media type %s to codec %s",
                         typeName.c_str(), nodeName.c_str());
-                info->removeMime(typeName.c_str());
+                info->removeMediaType(typeName.c_str());
             }
         }
 
@@ -219,7 +235,18 @@
                     info = c2i->second.get();
                     info->setName(nodeName.c_str());
                     info->setOwner(node->owner.c_str());
-                    info->setEncoder(isEncoder);
+                    typename std::underlying_type<MediaCodecInfo::Attributes>::type attrs =
+                        MediaCodecInfo::kFlagIsVendor;
+                    if (isEncoder) {
+                        attrs |= MediaCodecInfo::kFlagIsEncoder;
+                    }
+                    if (std::count_if(
+                            node->attributes.begin(), node->attributes.end(),
+                            [](const IOmxStore::Attribute &i) -> bool {
+                                return i.key == "attribute::software-codec";
+                            })) {
+                        attrs |= MediaCodecInfo::kFlagIsHardwareAccelerated;
+                    }
+                    info->setAttributes(attrs);
                     info->setRank(defaultRank);
                 } else {
                     // The node has been seen before. Simply retrieve the
@@ -227,13 +254,13 @@
                     info = c2i->second.get();
                 }
                 std::unique_ptr<MediaCodecInfo::CapabilitiesWriter> caps =
-                        info->addMime(typeName.c_str());
+                        info->addMediaType(typeName.c_str());
                 if (queryCapabilities(
                         *node, typeName.c_str(), isEncoder, caps.get()) != OK) {
-                    ALOGW("Fail to add mime %s to codec %s "
+                    ALOGW("Fail to add media type %s to codec %s "
                           "after software codecs",
                           typeName.c_str(), nodeName.c_str());
-                    info->removeMime(typeName.c_str());
+                    info->removeMediaType(typeName.c_str());
                 }
             }
         }
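
queryCapabilities() above now writes the OMX-store attributes into the CapabilitiesWriter before probing the node, because the ACodec.cpp hunk earlier in this change no longer adds capability flags on success; it removes the feature details the component turns out not to support. The sketch below only illustrates that declare-then-prune ordering; FeatureSet and the feature strings are hypothetical stand-ins, not framework types.

    #include <set>
    #include <string>

    // Hypothetical stand-in for a capabilities writer: seeded from the config, pruned by probing.
    struct FeatureSet {
        std::set<std::string> features;
        void addDetail(const std::string &f)    { features.insert(f); }
        void removeDetail(const std::string &f) { features.erase(f); }
    };

    static void buildCaps(FeatureSet *caps, bool tunnelProbeOk, bool adaptiveProbeOk) {
        // 1) Declare everything the configuration claims (done first in the hunk above).
        caps->addDetail("tunneled-playback");
        caps->addDetail("adaptive-playback");
        // 2) Probe the component and drop whatever it cannot actually do.
        if (!tunnelProbeOk) {
            caps->removeDetail("tunneled-playback");
        }
        if (!adaptiveProbeOk) {
            caps->removeDetail("adaptive-playback");
        }
    }
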
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 49e485a..2e7da01 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -37,6 +37,7 @@
 #include <media/stagefright/foundation/ALookup.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/foundation/ByteUtils.h>
+#include <media/stagefright/foundation/OpusHeader.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/AudioSystem.h>
@@ -1745,12 +1746,34 @@
         } else if (mime == MEDIA_MIMETYPE_VIDEO_VP9) {
             meta->setData(kKeyVp9CodecPrivate, 0, csd0->data(), csd0->size());
         } else if (mime == MEDIA_MIMETYPE_AUDIO_OPUS) {
-            meta->setData(kKeyOpusHeader, 0, csd0->data(), csd0->size());
+            size_t opusHeadSize = csd0->size();
+            size_t codecDelayBufSize = 0;
+            size_t seekPreRollBufSize = 0;
+            void *opusHeadBuf = csd0->data();
+            void *codecDelayBuf = NULL;
+            void *seekPreRollBuf = NULL;
             if (msg->findBuffer("csd-1", &csd1)) {
-                meta->setData(kKeyOpusCodecDelay, 0, csd1->data(), csd1->size());
+                codecDelayBufSize = csd1->size();
+                codecDelayBuf = csd1->data();
             }
             if (msg->findBuffer("csd-2", &csd2)) {
-                meta->setData(kKeyOpusSeekPreRoll, 0, csd2->data(), csd2->size());
+                seekPreRollBufSize = csd2->size();
+                seekPreRollBuf = csd2->data();
+            }
+            /* Extract codec delay and seek pre roll from csd-0,
+             * if csd-1 and csd-2 are not present */
+            if (!codecDelayBuf && !seekPreRollBuf) {
+                GetOpusHeaderBuffers(csd0->data(), csd0->size(), &opusHeadBuf,
+                                    &opusHeadSize, &codecDelayBuf,
+                                    &codecDelayBufSize, &seekPreRollBuf,
+                                    &seekPreRollBufSize);
+            }
+            meta->setData(kKeyOpusHeader, 0, opusHeadBuf, opusHeadSize);
+            if (codecDelayBuf) {
+                meta->setData(kKeyOpusCodecDelay, 0, codecDelayBuf, codecDelayBufSize);
+            }
+            if (seekPreRollBuf) {
+                meta->setData(kKeyOpusSeekPreRoll, 0, seekPreRollBuf, seekPreRollBufSize);
             }
         } else if (mime == MEDIA_MIMETYPE_AUDIO_VORBIS) {
             meta->setData(kKeyVorbisInfo, 0, csd0->data(), csd0->size());
diff --git a/media/libstagefright/data/media_codecs_google_c2_audio.xml b/media/libstagefright/data/media_codecs_google_c2_audio.xml
index 0b554a2..88cd08d 100644
--- a/media/libstagefright/data/media_codecs_google_c2_audio.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_audio.xml
@@ -93,5 +93,12 @@
             <Limit name="complexity" range="0-8"  default="5" />
             <Feature name="bitrate-modes" value="CQ" />
         </MediaCodec>
+        <MediaCodec name="c2.android.opus.encoder" type="audio/opus">
+            <Limit name="channel-count" max="2" />
+            <Limit name="sample-rate" ranges="8000,12000,16000,24000,48000" />
+            <Limit name="bitrate" range="500-512000" />
+            <Limit name="complexity" range="0-10"  default="5" />
+            <Feature name="bitrate-modes" value="CQ" />
+        </MediaCodec>
     </Encoders>
 </Included>
diff --git a/media/libstagefright/data/media_codecs_google_c2_telephony.xml b/media/libstagefright/data/media_codecs_google_c2_telephony.xml
new file mode 100644
index 0000000..d1055b3
--- /dev/null
+++ b/media/libstagefright/data/media_codecs_google_c2_telephony.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<Included>
+    <Decoders>
+        <MediaCodec name="c2.android.gsm.decoder" type="audio/gsm">
+            <Limit name="channel-count" max="1" />
+            <Limit name="sample-rate" ranges="8000" />
+            <Limit name="bitrate" range="13000" />
+        </MediaCodec>
+    </Decoders>
+</Included>
diff --git a/media/libstagefright/data/media_codecs_google_c2_tv.xml b/media/libstagefright/data/media_codecs_google_c2_tv.xml
new file mode 100644
index 0000000..fa082c7
--- /dev/null
+++ b/media/libstagefright/data/media_codecs_google_c2_tv.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<Included>
+    <Decoders>
+        <MediaCodec name="c2.android.mpeg2.decoder" type="video/mpeg2">
+            <!-- profiles and levels:  ProfileMain : LevelHL -->
+            <Limit name="size" min="16x16" max="1920x1088" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="block-size" value="16x16" />
+            <Limit name="blocks-per-second" range="1-244800" />
+            <Limit name="bitrate" range="1-20000000" />
+            <Feature name="adaptive-playback" />
+        </MediaCodec>
+    </Decoders>
+</Included>
diff --git a/media/libstagefright/data/media_codecs_google_c2_video.xml b/media/libstagefright/data/media_codecs_google_c2_video.xml
index adb45b3..c49789e 100644
--- a/media/libstagefright/data/media_codecs_google_c2_video.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_video.xml
@@ -71,6 +71,15 @@
             <Limit name="bitrate" range="1-40000000" />
             <Feature name="adaptive-playback" />
         </MediaCodec>
+        <MediaCodec name="c2.android.av1.decoder" type="video/av01">
+            <Limit name="size" min="96x96" max="1920x1080" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="block-size" value="16x16" />
+            <Limit name="blocks-per-second" min="24" max="2073600" />
+            <Limit name="bitrate" range="1-120000000" />
+            <Limit name="frame-rate" range="1-60" />
+            <Feature name="adaptive-playback" />
+        </MediaCodec>
     </Decoders>
 
     <Encoders>
diff --git a/media/libstagefright/foundation/Android.bp b/media/libstagefright/foundation/Android.bp
index dd1d904..533cd72 100644
--- a/media/libstagefright/foundation/Android.bp
+++ b/media/libstagefright/foundation/Android.bp
@@ -72,6 +72,7 @@
         "MediaKeys.cpp",
         "MetaData.cpp",
         "MetaDataBase.cpp",
+        "OpusHeader.cpp",
         "avc_utils.cpp",
         "base64.cpp",
         "hexdump.cpp",
diff --git a/media/libstagefright/opus/OpusHeader.cpp b/media/libstagefright/foundation/OpusHeader.cpp
similarity index 69%
rename from media/libstagefright/opus/OpusHeader.cpp
rename to media/libstagefright/foundation/OpusHeader.cpp
index e4a460c..9faede1 100644
--- a/media/libstagefright/opus/OpusHeader.cpp
+++ b/media/libstagefright/foundation/OpusHeader.cpp
@@ -16,7 +16,7 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "SoftOpus"
-
+#include <algorithm>
 #include <cstring>
 #include <stdint.h>
 
@@ -43,9 +43,6 @@
         {0, 6, 1, 2, 3, 4, 5, 7},
 };
 
-// Opus always has a 48kHz output rate. This is true for all Opus, not just this
-// implementation.
-constexpr int kRate = 48000;
 // Size of the Opus header excluding optional mapping information.
 constexpr size_t kOpusHeaderSize = 19;
 // Offset to magic string that starts Opus header.
@@ -76,15 +73,12 @@
 constexpr size_t kOpusHeaderNumCoupledStreamsOffset = 20;
 // Offset to the stream to channel mapping in the Opus header.
 constexpr size_t kOpusHeaderStreamMapOffset = 21;
-// Maximum packet size used in Xiph's opusdec.
-constexpr int kMaxOpusOutputPacketSizeSamples = 960 * 6;
 
 // Default audio output channel layout. Used to initialize |stream_map| in
 // OpusHeader, and passed to opus_multistream_decoder_create() when the header
 // does not contain mapping information. The values are valid only for mono and
 // stereo output: Opus streams with more than 2 channels require a stream map.
 constexpr int kMaxChannelsWithDefaultLayout = 2;
-constexpr uint8_t kDefaultOpusChannelLayout[kMaxChannelsWithDefaultLayout] = {0, 1};
 
 static uint16_t ReadLE16(const uint8_t* data, size_t data_size, uint32_t read_offset) {
     // check whether the 2nd byte is within the buffer
@@ -182,4 +176,88 @@
     }
 }
 
+int WriteOpusHeaders(const OpusHeader &header, int inputSampleRate,
+                     uint8_t* output, size_t outputSize, uint64_t codecDelay,
+                     uint64_t seekPreRoll) {
+    if (outputSize < AOPUS_UNIFIED_CSD_MINSIZE) {
+        ALOGD("Buffer not large enough to hold unified OPUS CSD");
+        return -1;
+    }
+
+    int headerLen = WriteOpusHeader(header, inputSampleRate, output,
+        outputSize);
+    if (headerLen < 0) {
+        ALOGD("WriteOpusHeader failed");
+        return -1;
+    }
+    if (headerLen >= (outputSize - 2 * AOPUS_TOTAL_CSD_SIZE)) {
+        ALOGD("Buffer not large enough to hold codec delay and seek pre roll");
+        return -1;
+    }
+
+    uint64_t length = AOPUS_LENGTH;
+
+    /*
+      Following is the CSD syntax for signalling codec delay and
+      seek pre-roll, which is appended after the OpusHeader:
+
+      Marker (8 bytes) | Length (8 bytes) | Samples (8 bytes)
+
+      Markers supported:
+      AOPUSDLY - Signals Codec Delay
+      AOPUSPRL - Signals seek pre roll
+
+      Length should be 8.
+    */
+
+    // Add codec delay
+    memcpy(output + headerLen, AOPUS_CSD_CODEC_DELAY_MARKER, AOPUS_MARKER_SIZE);
+    headerLen += AOPUS_MARKER_SIZE;
+    memcpy(output + headerLen, &length, AOPUS_LENGTH_SIZE);
+    headerLen += AOPUS_LENGTH_SIZE;
+    memcpy(output + headerLen, &codecDelay, AOPUS_CSD_SIZE);
+    headerLen += AOPUS_CSD_SIZE;
+
+    // Add seek pre roll
+    memcpy(output + headerLen, AOPUS_CSD_SEEK_PREROLL_MARKER, AOPUS_MARKER_SIZE);
+    headerLen += AOPUS_MARKER_SIZE;
+    memcpy(output + headerLen, &length, AOPUS_LENGTH_SIZE);
+    headerLen += AOPUS_LENGTH_SIZE;
+    memcpy(output + headerLen, &seekPreRoll, AOPUS_CSD_SIZE);
+    headerLen += AOPUS_CSD_SIZE;
+
+    return headerLen;
+}
+
+void GetOpusHeaderBuffers(const uint8_t *data, size_t data_size,
+                          void **opusHeadBuf, size_t *opusHeadSize,
+                          void **codecDelayBuf, size_t *codecDelaySize,
+                          void **seekPreRollBuf, size_t *seekPreRollSize) {
+    *codecDelayBuf = NULL;
+    *codecDelaySize = 0;
+    *seekPreRollBuf = NULL;
+    *seekPreRollSize = 0;
+    *opusHeadBuf = (void *)data;
+    *opusHeadSize = data_size;
+    if (data_size >= AOPUS_UNIFIED_CSD_MINSIZE) {
+        size_t i = 0;
+        while (i < data_size - AOPUS_TOTAL_CSD_SIZE) {
+            uint8_t *csdBuf = (uint8_t *)data + i;
+            if (!memcmp(csdBuf, AOPUS_CSD_CODEC_DELAY_MARKER, AOPUS_MARKER_SIZE)) {
+                *opusHeadSize = std::min(*opusHeadSize, i);
+                *codecDelayBuf = csdBuf + AOPUS_MARKER_SIZE + AOPUS_LENGTH_SIZE;
+                *codecDelaySize = AOPUS_CSD_SIZE;
+                i += AOPUS_TOTAL_CSD_SIZE;
+            } else if (!memcmp(csdBuf, AOPUS_CSD_SEEK_PREROLL_MARKER, AOPUS_MARKER_SIZE)) {
+                *opusHeadSize = std::min(*opusHeadSize, i);
+                *seekPreRollBuf = csdBuf + AOPUS_MARKER_SIZE + AOPUS_LENGTH_SIZE;
+                *seekPreRollSize = AOPUS_CSD_SIZE;
+                i += AOPUS_TOTAL_CSD_SIZE;
+            } else {
+                i++;
+            }
+        }
+    }
+}
+
 }  // namespace android
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/OpusHeader.h b/media/libstagefright/foundation/include/media/stagefright/foundation/OpusHeader.h
new file mode 100644
index 0000000..9bffccb
--- /dev/null
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/OpusHeader.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * The Opus specification is part of IETF RFC 6716:
+ * http://tools.ietf.org/html/rfc6716
+ */
+
+#ifndef OPUS_HEADER_H_
+#define OPUS_HEADER_H_
+
+namespace android {
+
+/* Constants used for delimiting Opus CSD */
+#define AOPUS_CSD_CODEC_DELAY_MARKER "AOPUSDLY"
+#define AOPUS_CSD_SEEK_PREROLL_MARKER "AOPUSPRL"
+#define AOPUS_CSD_SIZE 8
+#define AOPUS_LENGTH 8
+#define AOPUS_MARKER_SIZE 8
+#define AOPUS_LENGTH_SIZE 8
+#define AOPUS_TOTAL_CSD_SIZE \
+    ((AOPUS_MARKER_SIZE) + (AOPUS_LENGTH_SIZE) + (AOPUS_CSD_SIZE))
+#define AOPUS_CSD0_MINSIZE 19
+#define AOPUS_UNIFIED_CSD_MINSIZE \
+    ((AOPUS_CSD0_MINSIZE) + 2 * (AOPUS_TOTAL_CSD_SIZE))
+
+/* CSD0 at max can be 22 bytes + max number of channels (255) */
+#define AOPUS_CSD0_MAXSIZE 277
+#define AOPUS_UNIFIED_CSD_MAXSIZE \
+    ((AOPUS_CSD0_MAXSIZE) + 2 * (AOPUS_TOTAL_CSD_SIZE))
+
+struct OpusHeader {
+    int channels;
+    int channel_mapping;
+    int num_streams;
+    int num_coupled;
+    int16_t gain_db;
+    int skip_samples;
+    uint8_t stream_map[8];
+};
+
+bool ParseOpusHeader(const uint8_t* data, size_t data_size, OpusHeader* header);
+int WriteOpusHeader(const OpusHeader &header, int input_sample_rate, uint8_t* output, size_t output_size);
+void GetOpusHeaderBuffers(const uint8_t *data, size_t data_size,
+                          void **opusHeadBuf, size_t *opusHeadSize,
+                          void **codecDelayBuf, size_t *codecDelaySize,
+                          void **seekPreRollBuf, size_t *seekPreRollSize);
+int WriteOpusHeaders(const OpusHeader &header, int inputSampleRate,
+                     uint8_t* output, size_t outputSize, uint64_t codecDelay,
+                     uint64_t seekPreRoll);
+}  // namespace android
+
+#endif  // OPUS_HEADER_H_
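
WriteOpusHeaders() and GetOpusHeaderBuffers() round-trip a single unified csd-0 blob: a plain OpusHead packet followed by one AOPUS_TOTAL_CSD_SIZE (24 byte) section per marker, carrying the codec delay and the seek pre-roll as raw 64-bit sample counts. The consumer below is an illustrative sketch only; the counts are copied back with memcpy in native byte order, matching how WriteOpusHeaders() stores them.

    // Unified csd-0 layout produced by WriteOpusHeaders():
    //   [ OpusHead packet (19..277 bytes) ]
    //   [ "AOPUSDLY" | uint64 length = 8 | uint64 codecDelay  (samples) ]
    //   [ "AOPUSPRL" | uint64 length = 8 | uint64 seekPreRoll (samples) ]

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <media/stagefright/foundation/OpusHeader.h>

    using namespace android;

    static bool splitUnifiedCsd(const uint8_t *csd, size_t size,
                                const void **head, size_t *headSize,
                                uint64_t *delaySamples, uint64_t *preRollSamples) {
        void *headBuf, *delayBuf, *preRollBuf;
        size_t delaySize, preRollSize;
        GetOpusHeaderBuffers(csd, size, &headBuf, headSize,
                             &delayBuf, &delaySize, &preRollBuf, &preRollSize);
        if (delayBuf == nullptr || preRollBuf == nullptr) {
            return false;  // plain OpusHead only, no unified markers present
        }
        *head = headBuf;
        memcpy(delaySamples, delayBuf, sizeof(*delaySamples));
        memcpy(preRollSamples, preRollBuf, sizeof(*preRollSamples));
        return true;
    }
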
diff --git a/media/libstagefright/include/media/stagefright/MPEG4Writer.h b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
index 1abef8c..803155d 100644
--- a/media/libstagefright/include/media/stagefright/MPEG4Writer.h
+++ b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
@@ -257,7 +257,9 @@
     void initInternal(int fd, bool isFirstSession);
 
     // Acquire lock before calling these methods
-    off64_t addSample_l(MediaBuffer *buffer, bool usePrefix, bool isExif, size_t *bytesWritten);
+    off64_t addSample_l(
+            MediaBuffer *buffer, bool usePrefix,
+            uint32_t tiffHdrOffset, size_t *bytesWritten);
     void addLengthPrefixedSample_l(MediaBuffer *buffer);
     void addMultipleLengthPrefixedSamples_l(MediaBuffer *buffer);
     uint16_t addProperty_l(const ItemProperty &);
diff --git a/media/libstagefright/include/media/stagefright/MetaDataBase.h b/media/libstagefright/include/media/stagefright/MetaDataBase.h
index b99c14c..2910bd3 100644
--- a/media/libstagefright/include/media/stagefright/MetaDataBase.h
+++ b/media/libstagefright/include/media/stagefright/MetaDataBase.h
@@ -221,7 +221,8 @@
     kKeyFrameCount       = 'nfrm', // int32_t, total number of frame in video track
     kKeyExifOffset       = 'exof', // int64_t, Exif data offset
     kKeyExifSize         = 'exsz', // int64_t, Exif data size
-    kKeyIsExif           = 'exif', // bool (int32_t) buffer contains exif data block
+    kKeyExifTiffOffset   = 'thdr', // int32_t, if > 0, buffer contains exif data block with
+                                   // tiff hdr at specified offset
     kKeyPcmBigEndian     = 'pcmb', // bool (int32_t)
 
     // Key for ALAC Magic Cookie
diff --git a/media/libstagefright/omx/1.0/Omx.cpp b/media/libstagefright/omx/1.0/Omx.cpp
index 4e2d398..121bb1a 100644
--- a/media/libstagefright/omx/1.0/Omx.cpp
+++ b/media/libstagefright/omx/1.0/Omx.cpp
@@ -124,11 +124,11 @@
         } else {
             uint32_t quirks = 0;
             for (const auto& quirk : codec->second.quirkSet) {
-                if (quirk == "requires-allocate-on-input-ports") {
+                if (quirk == "quirk::requires-allocate-on-input-ports") {
                     quirks |= OMXNodeInstance::
                             kRequiresAllocateBufferOnInputPorts;
                 }
-                if (quirk == "requires-allocate-on-output-ports") {
+                if (quirk == "quirk::requires-allocate-on-output-ports") {
                     quirks |= OMXNodeInstance::
                             kRequiresAllocateBufferOnOutputPorts;
                 }
diff --git a/media/libstagefright/opus/Android.bp b/media/libstagefright/opus/Android.bp
deleted file mode 100644
index c5086ec..0000000
--- a/media/libstagefright/opus/Android.bp
+++ /dev/null
@@ -1,21 +0,0 @@
-cc_library_shared {
-    name: "libstagefright_opus_common",
-    vendor_available: true,
-
-    export_include_dirs: ["include"],
-
-    srcs: ["OpusHeader.cpp"],
-
-    shared_libs: ["liblog"],
-
-    cflags: ["-Werror"],
-
-    sanitize: {
-        integer_overflow: true,
-        cfi: true,
-        diag: {
-            integer_overflow: true,
-            cfi: true,
-        },
-    },
-}
\ No newline at end of file
diff --git a/media/libstagefright/opus/include/OpusHeader.h b/media/libstagefright/opus/include/OpusHeader.h
deleted file mode 100644
index f9f79cd..0000000
--- a/media/libstagefright/opus/include/OpusHeader.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * The Opus specification is part of IETF RFC 6716:
- * http://tools.ietf.org/html/rfc6716
- */
-
-#ifndef OPUS_HEADER_H_
-#define OPUS_HEADER_H_
-
-namespace android {
-
-struct OpusHeader {
-    int channels;
-    int channel_mapping;
-    int num_streams;
-    int num_coupled;
-    int16_t gain_db;
-    int skip_samples;
-    uint8_t stream_map[8];
-};
-
-bool ParseOpusHeader(const uint8_t* data, size_t data_size, OpusHeader* header);
-int WriteOpusHeader(const OpusHeader &header, int input_sample_rate, uint8_t* output, size_t output_size);
-}  // namespace android
-
-#endif  // OPUS_HEADER_H_
diff --git a/media/libstagefright/webm/Android.bp b/media/libstagefright/webm/Android.bp
index 1f840b7..64ecc2d 100644
--- a/media/libstagefright/webm/Android.bp
+++ b/media/libstagefright/webm/Android.bp
@@ -28,7 +28,6 @@
 
     shared_libs: [
         "libstagefright_foundation",
-        "libstagefright_opus_common",
         "libutils",
         "liblog",
     ],
diff --git a/media/libstagefright/webm/WebmWriter.cpp b/media/libstagefright/webm/WebmWriter.cpp
index 7b4b23a..b0a303e 100644
--- a/media/libstagefright/webm/WebmWriter.cpp
+++ b/media/libstagefright/webm/WebmWriter.cpp
@@ -24,7 +24,7 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/hexdump.h>
-#include <OpusHeader.h>
+#include <media/stagefright/foundation/OpusHeader.h>
 
 #include <utils/Errors.h>
 
diff --git a/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp b/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
index 2dec9fa..6e541ba 100644
--- a/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
+++ b/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
@@ -26,8 +26,9 @@
 #include <sys/stat.h>
 #include <expat.h>
 
-#include <cctype>
 #include <algorithm>
+#include <cctype>
+#include <string>
 
 namespace android {
 
@@ -326,8 +327,8 @@
         case SECTION_DECODER:
         case SECTION_ENCODER:
         {
-            if (strEq(name, "Quirk")) {
-                (void)addQuirk(attrs);
+            if (strEq(name, "Quirk") || strEq(name, "Attribute")) {
+                (void)addQuirk(attrs, name);
             } else if (strEq(name, "Type")) {
                 (void)addTypeFromAttributes(attrs,
                         (mCurrentSection == SECTION_ENCODER));
@@ -348,6 +349,8 @@
             if (outside &&
                     (strEq(name, "Limit") || strEq(name, "Feature"))) {
                 ALOGW("ignoring %s specified outside of a Type", name);
+            } else if (strEq(name, "Alias")) {
+                (void)addAlias(attrs);
             } else if (strEq(name, "Limit")) {
                 (void)addLimit(attrs);
             } else if (strEq(name, "Feature")) {
@@ -579,7 +582,7 @@
     return OK;
 }
 
-status_t MediaCodecsXmlParser::addQuirk(const char **attrs) {
+status_t MediaCodecsXmlParser::addQuirk(const char **attrs, const char *tag) {
     if (mCurrentCodec == mCodecMap.end()) {
         return BAD_VALUE;
     }
@@ -606,7 +609,12 @@
         return BAD_VALUE;
     }
 
-    mCurrentCodec->second.quirkSet.emplace(name);
+    std::string tagString = tag;
+    std::transform(tagString.begin(), tagString.end(), tagString.begin(), ::tolower);
+    tagString.append("::");
+    tagString.append(name);
+    mCurrentCodec->second.quirkSet.emplace(tagString.c_str());
+    ALOGI("adding %s to %s", tagString.c_str(), mCurrentCodec->first.c_str());
     return OK;
 }
 
@@ -760,6 +768,7 @@
             strEq(a_name, "quality") ||
             strEq(a_name, "size") ||
             strEq(a_name, "measured-blocks-per-second") ||
+            strHasPrefix(a_name, "performance-point-") ||
             strHasPrefix(a_name, "measured-frame-rate-")) {
         // "range" is specified in exactly one of the following forms:
         // 1) min-max
@@ -964,6 +973,34 @@
     return OK;
 }
 
+status_t MediaCodecsXmlParser::addAlias(const char **attrs) {
+    size_t i = 0;
+    const char *name = nullptr;
+
+    while (attrs[i] != nullptr) {
+        if (strEq(attrs[i], "name")) {
+            if (attrs[++i] == nullptr) {
+                ALOGE("addAlias: name is null");
+                return BAD_VALUE;
+            }
+            name = attrs[i];
+        } else {
+            ALOGE("addAlias: unrecognized attribute: %s", attrs[i]);
+            return BAD_VALUE;
+        }
+        ++i;
+    }
+
+    // Every alias must have a name.
+    if (name == nullptr) {
+        ALOGE("alias with no 'name' attribute");
+        return BAD_VALUE;
+    }
+
+    mCurrentCodec->second.aliases.emplace_back(name);
+    return OK;
+}
+
 const MediaCodecsXmlParser::AttributeMap&
         MediaCodecsXmlParser::getServiceAttributeMap() const {
     return mServiceAttributeMap;
@@ -1041,11 +1078,18 @@
 
             NodeInfo nodeInfo;
             nodeInfo.name = codecName;
+            // NOTE: no aliases are exposed in role info
+            // attribute quirks are exposed as node attributes
             nodeInfo.attributeList.reserve(typeAttributeMap.size());
             for (const auto& attribute : typeAttributeMap) {
                 nodeInfo.attributeList.push_back(
                         Attribute{attribute.first, attribute.second});
             }
+            for (const std::string &quirk : codec.second.quirkSet) {
+                if (strHasPrefix(quirk.c_str(), "attribute::")) {
+                    nodeInfo.attributeList.push_back(Attribute{quirk, "present"});
+                }
+            }
             nodeList->insert(std::make_pair(
                     std::move(order), std::move(nodeInfo)));
         }
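
addQuirk() now namespaces each entry with its lowercased XML tag, so a <Quirk name="requires-allocate-on-input-ports"/> entry is stored as "quirk::requires-allocate-on-input-ports" (the form Omx.cpp matches earlier in this change) and an <Attribute name="software-codec"/> entry becomes "attribute::software-codec" (the form OmxInfoBuilder filters on). A minimal sketch of that key construction; the helper name is invented for illustration.

    #include <algorithm>
    #include <cctype>
    #include <string>

    // Mirrors the tag + "::" + name key built in addQuirk() above.
    static std::string makeQuirkKey(const char *tag, const char *name) {
        std::string key = tag;                                           // "Quirk" or "Attribute"
        std::transform(key.begin(), key.end(), key.begin(), ::tolower);  // -> "quirk" / "attribute"
        key.append("::");
        key.append(name);
        return key;  // e.g. "quirk::requires-allocate-on-input-ports"
    }
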
diff --git a/media/libstagefright/xmlparser/include/media/stagefright/xmlparser/MediaCodecsXmlParser.h b/media/libstagefright/xmlparser/include/media/stagefright/xmlparser/MediaCodecsXmlParser.h
index cc69e52..fd949da 100644
--- a/media/libstagefright/xmlparser/include/media/stagefright/xmlparser/MediaCodecsXmlParser.h
+++ b/media/libstagefright/xmlparser/include/media/stagefright/xmlparser/MediaCodecsXmlParser.h
@@ -65,6 +65,7 @@
         size_t order;      ///< Order of appearance in the file (starting from 0)
         QuirkSet quirkSet; ///< Set of quirks requested by this codec
         TypeMap typeMap;   ///< Map of types supported by this codec
+        std::vector<std::string> aliases; ///< Name aliases for this codec
     };
 
     typedef std::pair<std::string, CodecProperties> Codec;
@@ -76,6 +77,7 @@
     struct NodeInfo {
         std::string name;
         std::vector<Attribute> attributeList;
+        // note: aliases are not exposed here as they are not part of the role map
     };
 
     /**
@@ -171,8 +173,9 @@
     void addMediaCodec(bool encoder, const char *name,
             const char *type = nullptr);
 
-    status_t addQuirk(const char **attrs);
+    status_t addQuirk(const char **attrs, const char *tag);
     status_t addTypeFromAttributes(const char **attrs, bool encoder);
+    status_t addAlias(const char **attrs);
     status_t addLimit(const char **attrs);
     status_t addFeature(const char **attrs);
     void addType(const char *name);
diff --git a/media/mediaserver/mediaserver.rc b/media/mediaserver/mediaserver.rc
index f6c325c..8cfcd79 100644
--- a/media/mediaserver/mediaserver.rc
+++ b/media/mediaserver/mediaserver.rc
@@ -2,5 +2,7 @@
     class main
     user media
     group audio camera inet net_bt net_bt_admin net_bw_acct drmrpc mediadrm
+    # TODO(b/123275379): Remove updatable when http://aosp/878198 has landed
+    updatable
     ioprio rt 4
     writepid /dev/cpuset/foreground/tasks /dev/stune/foreground/tasks
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index 55afb33..9082f62 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -274,7 +274,7 @@
     }
 
     String8 mimeStr = mimeType ? String8(mimeType) : String8("");
-    return drm->isCryptoSchemeSupported(uuid, mimeStr);
+    return drm->isCryptoSchemeSupported(uuid, mimeStr, DrmPlugin::kSecurityLevelUnknown);
 }
 
 EXPORT
diff --git a/media/ndk/NdkMediaExtractor.cpp b/media/ndk/NdkMediaExtractor.cpp
index 8296598..28e4f12 100644
--- a/media/ndk/NdkMediaExtractor.cpp
+++ b/media/ndk/NdkMediaExtractor.cpp
@@ -46,6 +46,18 @@
     sp<ABuffer> mPsshBuf;
 };
 
+sp<ABuffer> U32ArrayToSizeBuf(size_t numSubSamples, uint32_t *data) {
+    if (numSubSamples > SIZE_MAX / sizeof(size_t)) {
+        return NULL;
+    }
+    sp<ABuffer> sizebuf = new ABuffer(numSubSamples * sizeof(size_t));
+    size_t *sizes = (size_t *)sizebuf->data();
+    for (size_t i = 0; sizes != NULL && i < numSubSamples; i++) {
+        sizes[i] = data[i];
+    }
+    return sizebuf;
+}
+
 extern "C" {
 
 EXPORT
@@ -339,7 +351,7 @@
     if (!meta->findData(kKeyEncryptedSizes, &type, &crypteddata, &cryptedsize)) {
         return NULL;
     }
-    size_t numSubSamples = cryptedsize / sizeof(size_t);
+    size_t numSubSamples = cryptedsize / sizeof(uint32_t);
 
     const void *cleardata;
     size_t clearsize;
@@ -373,6 +385,16 @@
         mode = CryptoPlugin::kMode_AES_CTR;
     }
 
+    // Keep the converted buffers alive for the AMediaCodecCryptoInfo_new() call below.
+    sp<ABuffer> clearbuf;
+    sp<ABuffer> cryptedbuf;
+    if (sizeof(uint32_t) != sizeof(size_t)) {
+        clearbuf   = U32ArrayToSizeBuf(numSubSamples, (uint32_t *)cleardata);
+        cryptedbuf = U32ArrayToSizeBuf(numSubSamples, (uint32_t *)crypteddata);
+        cleardata   = clearbuf   == NULL ? NULL : clearbuf->data();
+        crypteddata = cryptedbuf == NULL ? NULL : cryptedbuf->data();
+        if (crypteddata == NULL || cleardata == NULL) {
+            return NULL;
+        }
+    }
+
     return AMediaCodecCryptoInfo_new(
             numSubSamples,
             (uint8_t*) key,
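
The extractor stores kKeyEncryptedSizes and kKeyPlainSizes as uint32_t entries, while AMediaCodecCryptoInfo_new() takes size_t arrays, so on LP64 builds the arrays have to be widened element by element, which is what U32ArrayToSizeBuf() does above. A standalone sketch of the same widening under the same overflow guard, using std::vector in place of ABuffer; the helper name is illustrative.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    static bool widenSizes(const uint32_t *in, size_t count, std::vector<size_t> *out) {
        if (count > SIZE_MAX / sizeof(size_t)) {  // same guard as U32ArrayToSizeBuf()
            return false;
        }
        out->resize(count);
        for (size_t i = 0; i < count; ++i) {
            (*out)[i] = in[i];  // zero-extends 32 -> 64 bits on LP64
        }
        return true;
    }
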
diff --git a/packages/MediaComponents/apex/java/android/media/session/MediaSession.java b/packages/MediaComponents/apex/java/android/media/session/MediaSession.java
index 73e16a6..3cbeff9 100644
--- a/packages/MediaComponents/apex/java/android/media/session/MediaSession.java
+++ b/packages/MediaComponents/apex/java/android/media/session/MediaSession.java
@@ -1077,8 +1077,7 @@
 
         private static RemoteUserInfo createRemoteUserInfo(String packageName, int pid, int uid,
                 ISessionControllerCallback caller) {
-            return new RemoteUserInfo(packageName, pid, uid,
-                    caller != null ? caller.asBinder() : null);
+            return new RemoteUserInfo(packageName, pid, uid);
         }
 
         @Override
diff --git a/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java b/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java
index a66ec35..76c99b9 100644
--- a/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java
+++ b/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java
@@ -544,8 +544,7 @@
             throw new IllegalStateException("This should be called inside of onGetRoot or"
                     + " onLoadChildren or onLoadItem methods");
         }
-        return new RemoteUserInfo(mCurConnection.pkg, mCurConnection.pid, mCurConnection.uid,
-                mCurConnection.callbacks.asBinder());
+        return new RemoteUserInfo(mCurConnection.pkg, mCurConnection.pid, mCurConnection.uid);
     }
 
     /**
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 26f76c0..0d6ef46 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -2379,7 +2379,8 @@
         return BAD_VALUE;
     }
 
-    sp<ThreadBase> thread = openInput_l(module, input, config, *devices, address, source, flags);
+    sp<ThreadBase> thread = openInput_l(
+            module, input, config, *devices, address, source, flags, AUDIO_DEVICE_NONE, String8{});
 
     if (thread != 0) {
         // notify client processes of the new input creation
@@ -2395,7 +2396,9 @@
                                                          audio_devices_t devices,
                                                          const String8& address,
                                                          audio_source_t source,
-                                                         audio_input_flags_t flags)
+                                                         audio_input_flags_t flags,
+                                                         audio_devices_t outputDevice,
+                                                         const String8& outputDeviceAddress)
 {
     AudioHwDevice *inHwDev = findSuitableHwDev_l(module, devices);
     if (inHwDev == NULL) {
@@ -2424,7 +2427,8 @@
     sp<DeviceHalInterface> inHwHal = inHwDev->hwDevice();
     sp<StreamInHalInterface> inStream;
     status_t status = inHwHal->openInputStream(
-            *input, devices, &halconfig, flags, address.string(), source, &inStream);
+            *input, devices, &halconfig, flags, address.string(), source,
+            outputDevice, outputDeviceAddress, &inStream);
     ALOGV("openInput_l() openInputStream returned input %p, devices %#x, SamplingRate %d"
            ", Format %#x, Channels %#x, flags %#x, status %d addr %s",
             inStream.get(),
@@ -2447,7 +2451,8 @@
         ALOGV("openInput_l() reopening with proposed sampling rate and channel mask");
         inStream.clear();
         status = inHwHal->openInputStream(
-                *input, devices, &halconfig, flags, address.string(), source, &inStream);
+                *input, devices, &halconfig, flags, address.string(), source,
+                outputDevice, outputDeviceAddress, &inStream);
         // FIXME log this new status; HAL should not propose any further changes
     }
 
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 6c698f6..c1169d2 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -579,6 +579,10 @@
         virtual binder::Status   stop();
         virtual binder::Status   getActiveMicrophones(
                 std::vector<media::MicrophoneInfo>* activeMicrophones);
+        virtual binder::Status   setMicrophoneDirection(
+                int /*audio_microphone_direction_t*/ direction);
+        virtual binder::Status   setMicrophoneFieldDimension(float zoom);
+
     private:
         const sp<RecordThread::RecordTrack> mRecordTrack;
 
@@ -620,7 +624,9 @@
                                            audio_devices_t device,
                                            const String8& address,
                                            audio_source_t source,
-                                           audio_input_flags_t flags);
+                                           audio_input_flags_t flags,
+                                           audio_devices_t outputDevice,
+                                           const String8& outputDeviceAddress);
               sp<ThreadBase> openOutput_l(audio_module_handle_t module,
                                               audio_io_handle_t *output,
                                               audio_config_t *config,
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index 63a9ec4..3381e4d 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -211,6 +211,8 @@
                 ((patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) &&
                  ((patch->sinks[0].ext.device.hw_module != srcModule) ||
                   !audioHwDevice->supportsAudioPatches()))) {
+                audio_devices_t outputDevice = AUDIO_DEVICE_NONE;
+                String8 outputDeviceAddress;
                 if (patch->num_sources == 2) {
                     if (patch->sources[1].type != AUDIO_PORT_TYPE_MIX ||
                             (patch->num_sinks != 0 && patch->sinks[0].ext.device.hw_module !=
@@ -261,6 +263,8 @@
                         goto exit;
                     }
                     newPatch.mPlayback.setThread(reinterpret_cast<PlaybackThread*>(thread.get()));
+                    outputDevice = device;
+                    outputDeviceAddress = address;
                 }
                 audio_devices_t device = patch->sources[0].ext.device.type;
                 String8 address = String8(patch->sources[0].ext.device.address);
@@ -293,7 +297,9 @@
                                                                     device,
                                                                     address,
                                                                     AUDIO_SOURCE_MIC,
-                                                                    flags);
+                                                                    flags,
+                                                                    outputDevice,
+                                                                    outputDeviceAddress);
                 ALOGV("mAudioFlinger.openInput_l() returned %p inChannelMask %08x",
                       thread.get(), config.channel_mask);
                 if (thread == 0) {
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 85f5456..32af7d5 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -71,6 +71,9 @@
 
             status_t    getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
 
+            status_t    setMicrophoneDirection(audio_microphone_direction_t direction);
+            status_t    setMicrophoneFieldDimension(float zoom);
+
     static  bool        checkServerLatencySupported(
                                 audio_format_t format, audio_input_flags_t flags) {
                             return audio_is_linear_pcm(format)
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 607d2d1..31a8c7d 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -7582,6 +7582,20 @@
     return status;
 }
 
+status_t AudioFlinger::RecordThread::setMicrophoneDirection(audio_microphone_direction_t direction)
+{
+    ALOGV("RecordThread::setMicrophoneDirection");
+    AutoMutex _l(mLock);
+    return mInput->stream->setMicrophoneDirection(direction);
+}
+
+status_t AudioFlinger::RecordThread::setMicrophoneFieldDimension(float zoom)
+{
+    ALOGV("RecordThread::setMicrophoneFieldDimension");
+    AutoMutex _l(mLock);
+    return mInput->stream->setMicrophoneFieldDimension(zoom);
+}
+
 void AudioFlinger::RecordThread::updateMetadata_l()
 {
     if (mInput == nullptr || mInput->stream == nullptr ||
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 5d06773..aab7601 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1545,6 +1545,9 @@
 
             status_t    getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
 
+            status_t    setMicrophoneDirection(audio_microphone_direction_t direction);
+            status_t    setMicrophoneFieldDimension(float zoom);
+
             void        updateMetadata_l() override;
 
             bool        fastTrackAvailable() const { return mFastTrackAvail; }
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 9a7f1f1..d23d19d 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -1710,6 +1710,18 @@
             mRecordTrack->getActiveMicrophones(activeMicrophones));
 }
 
+binder::Status AudioFlinger::RecordHandle::setMicrophoneDirection(
+        int /*audio_microphone_direction_t*/ direction) {
+    ALOGV("%s()", __func__);
+    return binder::Status::fromStatusT(mRecordTrack->setMicrophoneDirection(
+            static_cast<audio_microphone_direction_t>(direction)));
+}
+
+binder::Status AudioFlinger::RecordHandle::setMicrophoneFieldDimension(float zoom) {
+    ALOGV("%s()", __func__);
+    return binder::Status::fromStatusT(mRecordTrack->setMicrophoneFieldDimension(zoom));
+}
+
 // ----------------------------------------------------------------------------
 #undef LOG_TAG
 #define LOG_TAG "AF::RecordTrack"
@@ -2004,6 +2016,27 @@
     }
 }
 
+status_t AudioFlinger::RecordThread::RecordTrack::setMicrophoneDirection(
+        audio_microphone_direction_t direction) {
+    sp<ThreadBase> thread = mThread.promote();
+    if (thread != 0) {
+        RecordThread *recordThread = (RecordThread *)thread.get();
+        return recordThread->setMicrophoneDirection(direction);
+    } else {
+        return BAD_VALUE;
+    }
+}
+
+status_t AudioFlinger::RecordThread::RecordTrack::setMicrophoneFieldDimension(float zoom) {
+    sp<ThreadBase> thread = mThread.promote();
+    if (thread != 0) {
+        RecordThread *recordThread = (RecordThread *)thread.get();
+        return recordThread->setMicrophoneFieldDimension(zoom);
+    } else {
+        return BAD_VALUE;
+    }
+}
+
 // ----------------------------------------------------------------------------
 #undef LOG_TAG
 #define LOG_TAG "AF::PatchRecord"
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 1c2b9d7..cf2ce99 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -75,14 +75,16 @@
     virtual status_t setDeviceConnectionState(audio_devices_t device,
                                               audio_policy_dev_state_t state,
                                               const char *device_address,
-                                              const char *device_name) = 0;
+                                              const char *device_name,
+                                              audio_format_t encodedFormat) = 0;
     // retrieve a device connection status
     virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
                                                                           const char *device_address) = 0;
     // indicate a change in device configuration
     virtual status_t handleDeviceConfigChange(audio_devices_t device,
                                               const char *device_address,
-                                              const char *device_name) = 0;
+                                              const char *device_name,
+                                              audio_format_t encodedFormat) = 0;
     // indicate a change in phone state. Valid phones states are defined by audio_mode_t
     virtual void setPhoneState(audio_mode_t state) = 0;
     // force using a specific device category for the specified usage
@@ -234,6 +236,9 @@
 
     virtual bool     isHapticPlaybackSupported() = 0;
 
+    virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
+                std::vector<audio_format_t> *formats) = 0;
+
     virtual void     setAppState(uid_t uid, app_state_t state);
 };
 
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index 46a2a40..837ca47 100644
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -76,6 +76,21 @@
 }
 
 /**
+ * Check whether an audio device has encoding capability.
+ *
+ * @param[in] device to consider
+ *
+ * @return true if the device has encoding capability, false otherwise.
+ */
+static inline bool device_has_encoding_capability(audio_devices_t device)
+{
+    if (device & AUDIO_DEVICE_OUT_ALL_A2DP) {
+        return true;
+    }
+    return false;
+}
+
+/**
  * Returns the priority of a given audio source for capture. The priority is used when more than one
  * capture session is active on a given input stream to determine which session drives routing and
  * effect configuration.
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index fa9ba0b..d4cfd1e 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -23,11 +23,12 @@
 #include "AudioIODescriptorInterface.h"
 #include "AudioPort.h"
 #include "ClientDescriptor.h"
+#include "DeviceDescriptor.h"
 #include "EffectDescriptor.h"
+#include "IOProfile.h"
 
 namespace android {
 
-class IOProfile;
 class AudioMix;
 class AudioPolicyClientInterface;
 
@@ -42,10 +43,16 @@
     audio_port_handle_t getId() const;
     audio_module_handle_t getModuleHandle() const;
 
+    audio_devices_t getDeviceType() const { return (mDevice != nullptr) ?
+                    mDevice->type() : AUDIO_DEVICE_NONE; }
+    sp<DeviceDescriptor> getDevice() const { return mDevice; }
+    void setDevice(const sp<DeviceDescriptor> &device) { mDevice = device; }
+    DeviceVector supportedDevices() const {
+        return mProfile != nullptr ? mProfile->getSupportedDevices() : DeviceVector(); }
+
     void dump(String8 *dst) const override;
 
     audio_io_handle_t   mIoHandle = AUDIO_IO_HANDLE_NONE; // input handle
-    audio_devices_t     mDevice = AUDIO_DEVICE_NONE;  // current device this input is routed to
     AudioMix            *mPolicyMix = nullptr;        // non NULL when used by a dynamic policy
     const sp<IOProfile> mProfile;                     // I/O profile this output derives from
 
@@ -61,6 +68,7 @@
     bool isSourceActive(audio_source_t source) const;
     audio_source_t source() const;
     bool isSoundTrigger() const;
+    audio_attributes_t getHighestPriorityAttributes() const;
     void setClientActive(const sp<RecordClientDescriptor>& client, bool active);
     int32_t activeCount() { return mGlobalActiveCount; }
     void trackEffectEnabled(const sp<EffectDescriptor> &effect, bool enabled);
@@ -71,8 +79,7 @@
     void setPatchHandle(audio_patch_handle_t handle) override;
 
     status_t open(const audio_config_t *config,
-                  audio_devices_t device,
-                  const String8& address,
+                  const sp<DeviceDescriptor> &device,
                   audio_source_t source,
                   audio_input_flags_t flags,
                   audio_io_handle_t *input);
@@ -99,6 +106,8 @@
 
     audio_patch_handle_t mPatchHandle = AUDIO_PATCH_HANDLE_NONE;
     audio_port_handle_t  mId = AUDIO_PORT_HANDLE_NONE;
+    sp<DeviceDescriptor> mDevice = nullptr; /**< current device this input is routed to */
+
     // Because a preemptible capture session can preempt another one, we end up in an endless loop
     // situation were each session is allowed to restart after being preempted,
     // thus preempting the other one which restarts and so on.
@@ -120,8 +129,8 @@
     sp<AudioInputDescriptor> getInputFromId(audio_port_handle_t id) const;
 
     // count active capture sessions using one of the specified devices.
-    // ignore devices if AUDIO_DEVICE_IN_DEFAULT is passed
-    uint32_t activeInputsCountOnDevices(audio_devices_t devices = AUDIO_DEVICE_IN_DEFAULT) const;
+    // ignore the devices filter if an empty vector is passed
+    uint32_t activeInputsCountOnDevices(const DeviceVector &devices) const;
 
     /**
      * return io handle of active input or 0 if no input is active
@@ -130,8 +139,6 @@
      */
     Vector<sp <AudioInputDescriptor> > getActiveInputs();
 
-    audio_devices_t getSupportedDevices(audio_io_handle_t handle) const;
-
     sp<AudioInputDescriptor> getInputForClient(audio_port_handle_t portId);
 
     void trackEffectEnabled(const sp<EffectDescriptor> &effect, bool enabled);
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index ed995e0..e1ecc61 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -26,13 +26,14 @@
 #include "AudioIODescriptorInterface.h"
 #include "AudioPort.h"
 #include "ClientDescriptor.h"
+#include "DeviceDescriptor.h"
+#include <map>
 
 namespace android {
 
 class IOProfile;
 class AudioMix;
 class AudioPolicyClientInterface;
-class DeviceDescriptor;
 
 // descriptor for audio outputs. Used to maintain current configuration of each opened audio output
 // and keep track of the usage of this output by each audio stream type.
@@ -48,14 +49,12 @@
     void        log(const char* indent);
 
     audio_port_handle_t getId() const;
-    virtual audio_devices_t device() const;
-    virtual bool sharesHwModuleWith(const sp<AudioOutputDescriptor>& outputDesc);
-    virtual audio_devices_t supportedDevices();
+    virtual DeviceVector devices() const { return mDevices; }
+    bool sharesHwModuleWith(const sp<AudioOutputDescriptor>& outputDesc);
+    virtual DeviceVector supportedDevices() const  { return mDevices; }
     virtual bool isDuplicated() const { return false; }
     virtual uint32_t latency() { return 0; }
     virtual bool isFixedVolume(audio_devices_t device);
-    virtual sp<AudioOutputDescriptor> subOutput1() { return 0; }
-    virtual sp<AudioOutputDescriptor> subOutput2() { return 0; }
     virtual bool setVolume(float volume,
                            audio_stream_type_t stream,
                            audio_devices_t device,
@@ -119,7 +118,7 @@
         return mActiveClients;
     }
 
-    audio_devices_t mDevice = AUDIO_DEVICE_NONE; // current device this output is routed to
+    DeviceVector mDevices; /**< current devices this output is routed to */
     nsecs_t mStopTime[AUDIO_STREAM_CNT];
     int mMuteCount[AUDIO_STREAM_CNT];            // mute request counter
     bool mStrategyMutedByDevice[NUM_STRATEGIES]; // strategies muted because of incompatible
@@ -151,14 +150,16 @@
     virtual ~SwAudioOutputDescriptor() {}
 
             void dump(String8 *dst) const override;
-    virtual audio_devices_t device() const;
-    virtual bool sharesHwModuleWith(const sp<AudioOutputDescriptor>& outputDesc);
-    virtual audio_devices_t supportedDevices();
+    virtual DeviceVector devices() const;
+    void setDevices(const DeviceVector &devices) { mDevices = devices; }
+    bool sharesHwModuleWith(const sp<SwAudioOutputDescriptor>& outputDesc);
+    virtual DeviceVector supportedDevices() const;
+    virtual bool deviceSupportsEncodedFormats(audio_devices_t device);
     virtual uint32_t latency();
     virtual bool isDuplicated() const { return (mOutput1 != NULL && mOutput2 != NULL); }
     virtual bool isFixedVolume(audio_devices_t device);
-    virtual sp<AudioOutputDescriptor> subOutput1() { return mOutput1; }
-    virtual sp<AudioOutputDescriptor> subOutput2() { return mOutput2; }
+    sp<SwAudioOutputDescriptor> subOutput1() { return mOutput1; }
+    sp<SwAudioOutputDescriptor> subOutput2() { return mOutput2; }
             void changeStreamActiveCount(
                     const sp<TrackClientDescriptor>& client, int delta) override;
     virtual bool setVolume(float volume,
@@ -171,22 +172,49 @@
                            const struct audio_port_config *srcConfig = NULL) const;
     virtual void toAudioPort(struct audio_port *port) const;
 
-            status_t open(const audio_config_t *config,
-                          audio_devices_t device,
-                          const String8& address,
-                          audio_stream_type_t stream,
-                          audio_output_flags_t flags,
-                          audio_io_handle_t *output);
-            // Called when a stream is about to be started
-            // Note: called before setClientActive(true);
-            status_t start();
-            // Called after a stream is stopped.
-            // Note: called after setClientActive(false);
-            void stop();
-            void close();
-            status_t openDuplicating(const sp<SwAudioOutputDescriptor>& output1,
-                                     const sp<SwAudioOutputDescriptor>& output2,
-                                     audio_io_handle_t *ioHandle);
+        status_t open(const audio_config_t *config,
+                      const DeviceVector &devices,
+                      audio_stream_type_t stream,
+                      audio_output_flags_t flags,
+                      audio_io_handle_t *output);
+
+        // Called when a stream is about to be started
+        // Note: called before setClientActive(true);
+        status_t start();
+        // Called after a stream is stopped.
+        // Note: called after setClientActive(false);
+        void stop();
+        void close();
+        status_t openDuplicating(const sp<SwAudioOutputDescriptor>& output1,
+                                 const sp<SwAudioOutputDescriptor>& output2,
+                                 audio_io_handle_t *ioHandle);
+
+    /**
+     * @brief supportsDevice
+     * @param device to be checked against
+     * @return true if the device is supported by type (for non bus / remote submix devices),
+     *         true if the device is supported on type and address (for bus / remote submix),
+     *         false otherwise
+     */
+    bool supportsDevice(const sp<DeviceDescriptor> &device) const;
+
+    /**
+     * @brief supportsAllDevices
+     * @param devices to be checked against
+     * @return true if every device is weakly supported by type (non bus / remote submix devices),
+     *         true if every device is supported on type and address (for bus / remote submix),
+     *         false otherwise
+     */
+    bool supportsAllDevices(const DeviceVector &devices) const;
+
+    /**
+     * @brief filterSupportedDevices takes a vector of devices and filters them according to the
+     * devices supported by this output (i.e. by the profile from which this output derives)
+     * @param devices reference device vector to be filtered
+     * @return vector of devices filtered from the supported devices of this output (weakly or not
+     * depending on the device type)
+     */
+    DeviceVector filterSupportedDevices(const DeviceVector &devices) const;
 
     const sp<IOProfile> mProfile;          // I/O profile this output derives from
     audio_io_handle_t mIoHandle;           // output handle
@@ -208,7 +236,6 @@
 
             void dump(String8 *dst) const override;
 
-    virtual audio_devices_t supportedDevices();
     virtual bool setVolume(float volume,
                            audio_stream_type_t stream,
                            audio_devices_t device,
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
index 955e87b..2932296 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
@@ -16,15 +16,17 @@
 
 #pragma once
 
+#include "DeviceDescriptor.h"
 #include <utils/RefBase.h>
 #include <media/AudioPolicy.h>
 #include <utils/KeyedVector.h>
 #include <system/audio.h>
 #include <utils/String8.h>
 
-namespace android {
+#include <DeviceDescriptor.h>
+#include <AudioOutputDescriptor.h>
 
-class SwAudioOutputDescriptor;
+namespace android {
 
 /**
  * custom mix entry in mPolicyMixes
@@ -74,9 +76,21 @@
     status_t getOutputForAttr(audio_attributes_t attributes, uid_t uid,
             sp<SwAudioOutputDescriptor> &desc);
 
-    audio_devices_t getDeviceAndMixForInputSource(audio_source_t inputSource,
-                                                  audio_devices_t availableDeviceTypes,
-                                                  AudioMix **policyMix);
+    sp<DeviceDescriptor> getDeviceAndMixForInputSource(audio_source_t inputSource,
+                                                       const DeviceVector &availableDeviceTypes,
+                                                       AudioMix **policyMix);
+
+    /**
+     * @brief tries to find a matching mix for a given output descriptor and returns the
+     * associated output device.
+     * @param output to be considered
+     * @param availableOutputDevices list of output devices currently reachable
+     * @param policyMix set to the matching mix, if any, when a non-null pointer is passed
+     * @return device selected from the mix attached to the output, null pointer otherwise
+     */
+    sp<DeviceDescriptor> getDeviceAndMixForOutput(const sp<SwAudioOutputDescriptor> &output,
+                                                  const DeviceVector &availableOutputDevices,
+                                                  AudioMix **policyMix = nullptr);
 
     status_t getInputMixForAttr(audio_attributes_t attr, AudioMix **policyMix);
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
index bb9cad8..1b5a2d6 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -65,6 +65,7 @@
     uint32_t getFlags() const { return mFlags; }
 
     virtual void attach(const sp<HwModule>& module);
+    virtual void detach();
     bool isAttached() { return mModule != 0; }
 
     // Audio port IDs are in a different namespace than AudioFlinger unique IDs
@@ -161,7 +162,7 @@
                                    const struct audio_port_config *srcConfig = NULL) const = 0;
     virtual sp<AudioPort> getAudioPort() const = 0;
     virtual bool hasSameHwModuleAs(const sp<AudioPortConfig>& other) const {
-        return (other != 0) &&
+        return (other != 0) && (other->getAudioPort() != 0) && (getAudioPort() != 0) &&
                 (other->getAudioPort()->getModuleHandle() == getAudioPort()->getModuleHandle());
     }
     unsigned int mSamplingRate = 0u;
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index d02123c..cc43fe6 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -44,8 +44,18 @@
 
     const FormatVector& encodedFormats() const { return mEncodedFormats; }
 
+    audio_format_t getEncodedFormat() { return mCurrentEncodedFormat; }
+
+    void setEncodedFormat(audio_format_t format) {
+        mCurrentEncodedFormat = format;
+    }
+
     bool equals(const sp<DeviceDescriptor>& other) const;
 
+    bool hasCurrentEncodedFormat() const;
+
+    bool supportsFormat(audio_format_t format);
+
     // AudioPortConfig
     virtual sp<AudioPort> getAudioPort() const { return (AudioPort*) this; }
     virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
@@ -53,6 +63,8 @@
 
     // AudioPort
     virtual void attach(const sp<HwModule>& module);
+    virtual void detach();
+
     virtual void toAudioPort(struct audio_port *port) const;
     virtual void importAudioPort(const sp<AudioPort>& port, bool force = false);
 
@@ -67,6 +79,7 @@
     audio_devices_t     mDeviceType;
     FormatVector        mEncodedFormats;
     audio_port_handle_t mId = AUDIO_PORT_HANDLE_NONE;
+    audio_format_t      mCurrentEncodedFormat;
 };
 
 class DeviceVector : public SortedVector<sp<DeviceDescriptor> >
@@ -86,9 +99,10 @@
 
     audio_devices_t types() const { return mDeviceTypes; }
 
-    // If 'address' is empty, a device with a non-empty address may be returned
-    // if there is no device with the specified 'type' and empty address.
-    sp<DeviceDescriptor> getDevice(audio_devices_t type, const String8 &address = {}) const;
+    // If 'address' is empty and 'codec' is AUDIO_FORMAT_DEFAULT, a device with a non-empty
+    // address may be returned if there is no device with the specified 'type' and empty address.
+    sp<DeviceDescriptor> getDevice(audio_devices_t type, const String8 &address,
+                                   audio_format_t codec) const;
     DeviceVector getDevicesFromTypeMask(audio_devices_t types) const;
 
     /**
@@ -164,6 +178,23 @@
         return !operator==(right);
     }
 
+    /**
+     * @brief getFirstValidAddress
+     * @return the first valid address in the list of devices, "" if no device with a valid
+     * address is found.
+     * This helper maintains compatibility with legacy code where a device type mask and a
+     * single address were used.
+     */
+    String8 getFirstValidAddress() const
+    {
+        for (const auto &device : *this) {
+            if (device->address() != "") {
+                return device->address();
+            }
+        }
+        return String8("");
+    }
+
     std::string toString() const;
 
     void dump(String8 *dst, const String8 &tag, int spaces = 0, bool verbose = true) const;
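
DeviceDescriptor now carries the encoded format currently selected for a device alongside its list of supported formats, and DeviceVector::getDevice() takes a codec argument so callers can pick, say, the A2DP device configured for a given codec. A small self-contained sketch of a codec-aware lookup in that spirit, with simplified stand-ins for the descriptor and vector types (not the AOSP classes); an empty address and a default codec act as wildcards.

    #include <cstdint>
    #include <cstdio>
    #include <memory>
    #include <string>
    #include <vector>

    using audio_format_t = uint32_t;
    constexpr audio_format_t FORMAT_DEFAULT = 0;   // stand-in for AUDIO_FORMAT_DEFAULT
    constexpr audio_format_t FORMAT_LDAC    = 2;   // arbitrary illustrative codec id

    struct Device {
        uint32_t type;
        std::string address;
        audio_format_t currentEncodedFormat;   // FORMAT_DEFAULT when no codec is selected
    };
    using DevicePtr = std::shared_ptr<Device>;

    // Codec-aware lookup: an empty address and a default codec act as wildcards,
    // in the spirit of the new getDevice(type, address, codec) contract.
    DevicePtr getDevice(const std::vector<DevicePtr>& devices, uint32_t type,
                        const std::string& address, audio_format_t codec) {
        for (const auto& d : devices) {
            if (d->type != type) continue;
            if (!address.empty() && d->address != address) continue;
            if (codec != FORMAT_DEFAULT && d->currentEncodedFormat != codec) continue;
            return d;
        }
        return nullptr;
    }

    int main() {
        auto a2dp = std::make_shared<Device>(Device{0x80, "00:11:22:33:44:55", FORMAT_LDAC});
        std::vector<DevicePtr> available = {a2dp};
        DevicePtr found = getDevice(available, 0x80, "", FORMAT_LDAC);
        std::printf("found: %s\n", found ? found->address.c_str() : "none");
        return 0;
    }
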
diff --git a/services/audiopolicy/common/managerdefinitions/include/HwModule.h b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
index 2b57fa9..eb34da4 100644
--- a/services/audiopolicy/common/managerdefinitions/include/HwModule.h
+++ b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
@@ -46,6 +46,22 @@
 
     const DeviceVector &getDeclaredDevices() const { return mDeclaredDevices; }
     void setDeclaredDevices(const DeviceVector &devices);
+    DeviceVector getAllDevices() const
+    {
+        DeviceVector devices = mDeclaredDevices;
+        devices.merge(mDynamicDevices);
+        return devices;
+    }
+    void addDynamicDevice(const sp<DeviceDescriptor> &device)
+    {
+        mDynamicDevices.add(device);
+    }
+
+    bool removeDynamicDevice(const sp<DeviceDescriptor> &device)
+    {
+        return mDynamicDevices.remove(device) >= 0;
+    }
+    DeviceVector getDynamicDevices() const { return mDynamicDevices; }
 
     const InputProfileCollection &getInputProfiles() const { return mInputProfiles; }
     const OutputProfileCollection &getOutputProfiles() const { return mOutputProfiles; }
@@ -104,6 +120,7 @@
     InputProfileCollection mInputProfiles;  // input profiles exposed by this module
     uint32_t mHalVersion; // audio HAL API version
     DeviceVector mDeclaredDevices; // devices declared in audio_policy configuration file.
+    DeviceVector mDynamicDevices; /**< devices added/removed at runtime (e.g. remote submix) */
     AudioRouteVector mRoutes;
     AudioPortVector mPorts;
 };
@@ -113,13 +130,63 @@
 public:
     sp<HwModule> getModuleFromName(const char *name) const;
 
-    sp<HwModule> getModuleForDevice(audio_devices_t device) const;
+    sp<HwModule> getModuleForDeviceTypes(audio_devices_t device,
+                                         audio_format_t encodedFormat) const;
 
-    sp<DeviceDescriptor> getDeviceDescriptor(const audio_devices_t device,
-                                             const char *device_address,
-                                             const char *device_name,
+    sp<HwModule> getModuleForDevice(const sp<DeviceDescriptor> &device,
+                                    audio_format_t encodedFormat) const;
+
+    DeviceVector getAvailableDevicesFromModuleName(const char *name,
+                                                   const DeviceVector &availableDevices) const;
+
+    /**
+     * @brief getDeviceDescriptor returns a device descriptor associated with the device type
+     * and device address (if matchAddress is true).
+     * It may loop twice over all modules:
+     *      -the first loop checks whether the device is declared in a module's list of device
+     * ports in the configuration file
+     *      -the second loop (only when allowToCreate is true) checks whether the device is
+     * weakly supported by one or more profiles of a module and, if so, adds it as a supported
+     * device of this module. The device is also added to the module's list of dynamic devices
+     * @param type of the device requested
+     * @param address of the device requested
+     * @param name of the device requested
+     * @param encodedFormat if not AUDIO_FORMAT_DEFAULT, must match one supported format
+     * @param matchAddress true if a strong match is required
+     * @param allowToCreate true if allowed to create a dynamic device (e.g. hdmi, usb...)
+     * @return device descriptor associated with the type (and address if matchAddress is true)
+     */
+    sp<DeviceDescriptor> getDeviceDescriptor(const audio_devices_t type,
+                                             const char *address,
+                                             const char *name,
+                                             audio_format_t encodedFormat,
+                                             bool allowToCreate = false,
                                              bool matchAddress = true) const;
 
+    /**
+     * @brief createDevice creates a new device from the given type and address. It checks that,
+     * according to the device type, a module supports this device (weak check).
+     * This concerns only dynamic devices, i.e. devices with a specific address that are not
+     * already supported by a module/underlying profiles.
+     * @param type of the device to be created
+     * @param address of the device to be created
+     * @param name of the device to be created
+     * @return device descriptor if a module supports this type, nullptr otherwise.
+     */
+    sp<DeviceDescriptor> createDevice(const audio_devices_t type,
+                                      const char *address,
+                                      const char *name,
+                                      const audio_format_t encodedFormat) const;
+
+    /**
+     * @brief cleanUpForDevice: loops over all profiles of all modules to remove the device from
+     * the list of supported devices. If the device is a dynamic device (i.e. a device not in the
+     * xml file, with a runtime address), it is also removed from the module's collection of
+     * dynamic devices.
+     * @param device that has been disconnected
+     */
+    void cleanUpForDevice(const sp<DeviceDescriptor> &device);
+
     void dump(String8 *dst) const;
 };
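
HwModule now keeps dynamic devices (devices created at runtime from a type/address pair, such as a remote submix instance) in a collection separate from the devices declared in the configuration file, and getAllDevices() merges both. The standalone sketch below pictures that connect/disconnect lifecycle; the Module/Device types and the plain containers are simplified placeholders, not the AOSP signatures.

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <memory>
    #include <string>
    #include <vector>

    struct Device { uint32_t type; std::string address; };
    using DevicePtr = std::shared_ptr<Device>;

    struct Module {
        std::vector<DevicePtr> declaredDevices;   // from the audio_policy configuration file
        std::vector<DevicePtr> dynamicDevices;    // created at runtime (e.g. remote submix)

        std::vector<DevicePtr> allDevices() const {
            std::vector<DevicePtr> all = declaredDevices;
            all.insert(all.end(), dynamicDevices.begin(), dynamicDevices.end());
            return all;
        }
        void addDynamicDevice(const DevicePtr& d) { dynamicDevices.push_back(d); }
        void removeDynamicDevice(const DevicePtr& d) {
            dynamicDevices.erase(std::remove(dynamicDevices.begin(), dynamicDevices.end(), d),
                                 dynamicDevices.end());
        }
    };

    int main() {
        Module module;
        module.declaredDevices.push_back(std::make_shared<Device>(Device{1, "speaker"}));

        // Connect: a device not declared in the config is created and tracked as dynamic.
        DevicePtr submix = std::make_shared<Device>(Device{2, "0"});
        module.addDynamicDevice(submix);
        std::printf("devices after connect: %zu\n", module.allDevices().size());

        // Disconnect: the dynamic device is removed again during cleanup.
        module.removeDynamicDevice(submix);
        std::printf("devices after disconnect: %zu\n", module.allDevices().size());
        return 0;
    }
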
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index ca6ca56..e0b56d4 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -57,12 +57,25 @@
         }
     }
 
-    // This method is used for input and direct output, and is not used for other output.
-    // If parameter updatedSamplingRate is non-NULL, it is assigned the actual sample rate.
-    // For input, flags is interpreted as audio_input_flags_t.
-    // TODO: merge audio_output_flags_t and audio_input_flags_t.
-    bool isCompatibleProfile(audio_devices_t device,
-                             const String8& address,
+    /**
+     * @brief isCompatibleProfile: This method is used for input and direct output,
+     * and is not used for other output.
+     * Checks if the IO profile is compatible with specified parameters.
+     * For input, flags is interpreted as audio_input_flags_t.
+     * TODO: merge audio_output_flags_t and audio_input_flags_t.
+     *
+     * @param devices vector of devices to be checked for compatibility
+     * @param samplingRate to be checked for compatibility. Must be specified
+     * @param updatedSamplingRate if non-NULL, it is assigned the actual sample rate.
+     * @param format to be checked for compatibility. Must be specified
+     * @param updatedFormat if non-NULL, it is assigned the actual format
+     * @param channelMask to be checked for compatibility. Must be specified
+     * @param updatedChannelMask if non-NULL, it is assigned the actual channel mask
+     * @param flags to be checked for compatibility
+     * @param exactMatchRequiredForInputFlags true if exact match is required on flags
+     * @return true if the profile is compatible, false otherwise.
+     */
+    bool isCompatibleProfile(const DeviceVector &devices,
                              uint32_t samplingRate,
                              uint32_t *updatedSamplingRate,
                              audio_format_t format,
@@ -78,49 +91,61 @@
 
     bool hasSupportedDevices() const { return !mSupportedDevices.isEmpty(); }
 
-    bool supportDevice(audio_devices_t device) const
+    bool supportsDeviceTypes(audio_devices_t device) const
     {
         if (audio_is_output_devices(device)) {
-            return mSupportedDevices.types() & device;
+            if (deviceSupportsEncodedFormats(device)) {
+                return mSupportedDevices.types() & device;
+            }
+            return false;
         }
         return mSupportedDevices.types() & (device & ~AUDIO_DEVICE_BIT_IN);
     }
 
-    bool supportDeviceAddress(const String8 &address) const
+    /**
+     * @brief supportsDevice
+     * @param device to be checked against
+     * @param forceCheckOnAddress if true, check both type and address whatever the device type;
+     *        otherwise the address check is limited to devices that distinguish on address
+     * @return true if the device is supported by type (for non bus / remote submix devices),
+     *         true if the device is supported on type and address (for bus / remote submix),
+     *         false otherwise
+     */
+    bool supportsDevice(const sp<DeviceDescriptor> &device, bool forceCheckOnAddress = false) const
     {
-        return mSupportedDevices[0]->address() == address;
-    }
-
-    // chose first device present in mSupportedDevices also part of deviceType
-    audio_devices_t getSupportedDeviceForType(audio_devices_t deviceType) const
-    {
-        for (size_t k = 0; k  < mSupportedDevices.size(); k++) {
-            audio_devices_t profileType = mSupportedDevices[k]->type();
-            if (profileType & deviceType) {
-                return profileType;
-            }
+        if (!device_distinguishes_on_address(device->type()) && !forceCheckOnAddress) {
+            return supportsDeviceTypes(device->type());
         }
-        return AUDIO_DEVICE_NONE;
+        return mSupportedDevices.contains(device);
     }
 
-    audio_devices_t getSupportedDevicesType() const { return mSupportedDevices.types(); }
+    bool deviceSupportsEncodedFormats(audio_devices_t device) const
+    {
+        if (device == AUDIO_DEVICE_NONE) {
+            return true; // required for isOffloadSupported() check
+        }
+        DeviceVector deviceList =
+            mSupportedDevices.getDevicesFromTypeMask(device);
+        if (!deviceList.empty()) {
+            return deviceList.itemAt(0)->hasCurrentEncodedFormat();
+        }
+        return false;
+    }
 
     void clearSupportedDevices() { mSupportedDevices.clear(); }
     void addSupportedDevice(const sp<DeviceDescriptor> &device)
     {
         mSupportedDevices.add(device);
     }
-
+    void removeSupportedDevice(const sp<DeviceDescriptor> &device)
+    {
+        mSupportedDevices.remove(device);
+    }
     void setSupportedDevices(const DeviceVector &devices)
     {
         mSupportedDevices = devices;
     }
 
-    sp<DeviceDescriptor> getSupportedDeviceByAddress(audio_devices_t type, String8 address) const
-    {
-        return mSupportedDevices.getDevice(type, address);
-    }
-
     const DeviceVector &getSupportedDevices() const { return mSupportedDevices; }
 
     bool canOpenNewIo() {
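
The reworked IOProfile::supportsDevice() performs a weak, type-only check for most devices and a strict type-plus-address check only for device kinds that are distinguished by their address (bus, remote submix), unless forceCheckOnAddress is set. A reduced standalone illustration of that branching follows, with invented Device/Profile stand-ins and distinguishesOnAddress() playing the role of device_distinguishes_on_address().

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    struct Device {
        uint32_t type;
        std::string address;
        // Stand-in for device_distinguishes_on_address(): true for bus / remote submix devices.
        bool distinguishesOnAddress() const { return type == 2; /* e.g. remote submix */ }
        bool operator==(const Device& other) const {
            return type == other.type && address == other.address;
        }
    };

    struct Profile {
        std::vector<Device> supportedDevices;

        bool supportsDeviceTypes(uint32_t type) const {
            for (const auto& d : supportedDevices) {
                if (d.type == type) return true;
            }
            return false;
        }

        bool supportsDevice(const Device& device, bool forceCheckOnAddress = false) const {
            // Weak check by type unless the device kind is identified by its address.
            if (!device.distinguishesOnAddress() && !forceCheckOnAddress) {
                return supportsDeviceTypes(device.type);
            }
            // Strict check: both type and address must match.
            for (const auto& d : supportedDevices) {
                if (d == device) return true;
            }
            return false;
        }
    };

    int main() {
        Profile profile{{{1, "speaker"}, {2, "0"}}};
        std::printf("%d %d %d\n",
                    (int)profile.supportsDevice({1, "whatever"}),  // type-only match -> 1
                    (int)profile.supportsDevice({2, "0"}),         // type and address match -> 1
                    (int)profile.supportsDevice({2, "other"}));    // address mismatch -> 0
        return 0;
    }
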
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 0bc88a5..c880e67 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -21,7 +21,6 @@
 #include <policy.h>
 #include <AudioPolicyInterface.h>
 #include "AudioInputDescriptor.h"
-#include "IOProfile.h"
 #include "AudioGain.h"
 #include "HwModule.h"
 
@@ -55,30 +54,7 @@
 
 audio_source_t AudioInputDescriptor::source() const
 {
-    audio_source_t source = AUDIO_SOURCE_DEFAULT;
-
-    for (bool activeOnly : { true, false }) {
-        int32_t topPriority = -1;
-        app_state_t topState = APP_STATE_IDLE;
-        for (const auto &client : getClientIterable()) {
-            if (activeOnly && !client->active()) {
-                continue;
-            }
-            app_state_t curState = client->appState();
-            if (curState >= topState) {
-                int32_t curPriority = source_priority(client->source());
-                if (curPriority > topPriority) {
-                    source = client->source();
-                    topPriority = curPriority;
-                }
-                topState = curState;
-            }
-        }
-        if (source != AUDIO_SOURCE_DEFAULT) {
-            break;
-        }
-    }
-    return source;
+    return getHighestPriorityAttributes().source;
 }
 
 void AudioInputDescriptor::toAudioPortConfig(struct audio_port_config *dstConfig,
@@ -148,6 +124,34 @@
     return false;
 }
 
+audio_attributes_t AudioInputDescriptor::getHighestPriorityAttributes() const
+{
+    audio_attributes_t attributes = { .source = AUDIO_SOURCE_DEFAULT };
+
+    for (bool activeOnly : { true, false }) {
+        int32_t topPriority = -1;
+        app_state_t topState = APP_STATE_IDLE;
+        for (const auto &client : getClientIterable()) {
+            if (activeOnly && !client->active()) {
+                continue;
+            }
+            app_state_t curState = client->appState();
+            if (curState >= topState) {
+                int32_t curPriority = source_priority(client->source());
+                if (curPriority > topPriority) {
+                    attributes = client->attributes();
+                    topPriority = curPriority;
+                }
+                topState = curState;
+            }
+        }
+        if (attributes.source != AUDIO_SOURCE_DEFAULT) {
+            break;
+        }
+    }
+    return attributes;
+}
+
 bool AudioInputDescriptor::isSoundTrigger() const {
     // sound trigger and non sound trigger clients are not mixed on a given input
     // so check only first client
@@ -180,8 +184,7 @@
 }
 
 status_t AudioInputDescriptor::open(const audio_config_t *config,
-                                       audio_devices_t device,
-                                       const String8& address,
+                                       const sp<DeviceDescriptor> &device,
                                        audio_source_t source,
                                        audio_input_flags_t flags,
                                        audio_io_handle_t *input)
@@ -198,24 +201,26 @@
 
     mDevice = device;
 
-    ALOGV("opening input for device %08x address %s profile %p name %s",
-          mDevice, address.string(), mProfile.get(), mProfile->getName().string());
+    ALOGV("opening input for device %s profile %p name %s",
+          mDevice->toString().c_str(), mProfile.get(), mProfile->getName().string());
+
+    audio_devices_t deviceType = mDevice->type();
 
     status_t status = mClientInterface->openInput(mProfile->getModuleHandle(),
                                                   input,
                                                   &lConfig,
-                                                  &mDevice,
-                                                  address,
+                                                  &deviceType,
+                                                  mDevice->address(),
                                                   source,
                                                   flags);
-    LOG_ALWAYS_FATAL_IF(mDevice != device,
+    LOG_ALWAYS_FATAL_IF(mDevice->type() != deviceType,
                         "%s openInput returned device %08x when given device %08x",
-                        __FUNCTION__, mDevice, device);
+                        __FUNCTION__, mDevice->type(), deviceType);
 
     if (status == NO_ERROR) {
         LOG_ALWAYS_FATAL_IF(*input == AUDIO_IO_HANDLE_NONE,
-                            "%s openInput returned input handle %d for device %08x",
-                            __FUNCTION__, *input, device);
+                            "%s openInput returned input handle %d for device %s",
+                            __FUNCTION__, *input, mDevice->toString().c_str());
         mSamplingRate = lConfig.sample_rate;
         mChannelMask = lConfig.channel_mask;
         mFormat = lConfig.format;
@@ -252,15 +257,21 @@
 void AudioInputDescriptor::close()
 {
     if (mIoHandle != AUDIO_IO_HANDLE_NONE) {
+        // clean up active clients if any (can happen if close() is called to force
+        // clients to reconnect)
+        for (const auto &client : getClientIterable()) {
+            if (client->active()) {
+                ALOGW("%s client with port ID %d still active on input %d",
+                    __func__, client->portId(), mId);
+                setClientActive(client, false);
+                stop();
+            }
+        }
+
         mClientInterface->closeInput(mIoHandle);
         LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < 1, "%s profile open count %u",
                             __FUNCTION__, mProfile->curOpenCount);
-        // do not call stop() here as stop() is supposed to be called after
-        //  setClientActive(client, false) and we don't know how many clients
-        // are still active at this time
-        if (isActive()) {
-            mProfile->curActiveCount--;
-        }
+
         mProfile->curOpenCount--;
         LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount <  mProfile->curActiveCount,
                 "%s(%d): mProfile->curOpenCount %d < mProfile->curActiveCount %d.",
@@ -423,7 +434,7 @@
     dst->appendFormat(" Sampling rate: %d\n", mSamplingRate);
     dst->appendFormat(" Format: %d\n", mFormat);
     dst->appendFormat(" Channels: %08x\n", mChannelMask);
-    dst->appendFormat(" Devices %08x\n", mDevice);
+    dst->appendFormat(" Devices %s\n", mDevice->toString().c_str());
     getEnabledEffects().dump(dst, 1 /*spaces*/, false /*verbose*/);
     dst->append(" AudioRecord Clients:\n");
     ClientMapHandler<RecordClientDescriptor>::dump(dst);
@@ -452,14 +463,13 @@
     return NULL;
 }
 
-uint32_t AudioInputCollection::activeInputsCountOnDevices(audio_devices_t devices) const
+uint32_t AudioInputCollection::activeInputsCountOnDevices(const DeviceVector &devices) const
 {
     uint32_t count = 0;
     for (size_t i = 0; i < size(); i++) {
         const sp<AudioInputDescriptor>  inputDescriptor = valueAt(i);
         if (inputDescriptor->isActive() &&
-                ((devices == AUDIO_DEVICE_IN_DEFAULT) ||
-                 ((inputDescriptor->mDevice & devices & ~AUDIO_DEVICE_BIT_IN) != 0))) {
+                (devices.isEmpty() || devices.contains(inputDescriptor->getDevice()))) {
             count++;
         }
     }
@@ -479,13 +489,6 @@
     return activeInputs;
 }
 
-audio_devices_t AudioInputCollection::getSupportedDevices(audio_io_handle_t handle) const
-{
-    sp<AudioInputDescriptor> inputDesc = valueFor(handle);
-    audio_devices_t devices = inputDesc->mProfile->getSupportedDevicesType();
-    return devices;
-}
-
 sp<AudioInputDescriptor> AudioInputCollection::getInputForClient(audio_port_handle_t portId)
 {
     for (size_t i = 0; i < size(); i++) {
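
getHighestPriorityAttributes() generalizes the former source() logic: a first pass scans only active clients, a fallback pass scans all clients, and within a pass the client with the highest source priority among those in the top app state wins. The same two-pass selection is sketched below over a simplified client list; the Client struct and the priority function are placeholders, not the AOSP types.

    #include <cstdio>
    #include <vector>

    struct Client {
        int source;     // 0 means "default", i.e. nothing selected yet
        int appState;   // higher value == more foreground
        bool active;
    };

    // Placeholder for source_priority(): a higher value wins routing decisions.
    static int sourcePriority(int source) { return source; }

    int highestPrioritySource(const std::vector<Client>& clients) {
        int selected = 0;
        // First pass considers only active clients; the second pass considers everyone.
        for (bool activeOnly : {true, false}) {
            int topPriority = -1;
            int topState = 0;
            for (const auto& client : clients) {
                if (activeOnly && !client.active) continue;
                if (client.appState >= topState) {
                    int priority = sourcePriority(client.source);
                    if (priority > topPriority) {
                        selected = client.source;
                        topPriority = priority;
                    }
                    topState = client.appState;
                }
            }
            if (selected != 0) break;   // an active client won; skip the fallback pass
        }
        return selected;
    }

    int main() {
        std::vector<Client> clients = {
            {5, 2, false},   // higher priority but inactive
            {3, 2, true},    // active, same app state -> wins in the first pass
        };
        std::printf("selected source: %d\n", highestPrioritySource(clients));
        return 0;
    }
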
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 97504ab..78b3f45 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -82,25 +82,10 @@
     return mId;
 }
 
-audio_devices_t AudioOutputDescriptor::device() const
-{
-    return mDevice;
-}
-
-audio_devices_t AudioOutputDescriptor::supportedDevices()
-{
-    return mDevice;
-}
-
 bool AudioOutputDescriptor::sharesHwModuleWith(
         const sp<AudioOutputDescriptor>& outputDesc)
 {
-    if (outputDesc->isDuplicated()) {
-        return sharesHwModuleWith(outputDesc->subOutput1()) ||
-                    sharesHwModuleWith(outputDesc->subOutput2());
-    } else {
-        return hasSameHwModuleAs(outputDesc);
-    }
+    return hasSameHwModuleAs(outputDesc);
 }
 
 void AudioOutputDescriptor::changeStreamActiveCount(const sp<TrackClientDescriptor>& client,
@@ -282,7 +267,7 @@
     dst->appendFormat(" Sampling rate: %d\n", mSamplingRate);
     dst->appendFormat(" Format: %08x\n", mFormat);
     dst->appendFormat(" Channels: %08x\n", mChannelMask);
-    dst->appendFormat(" Devices: %08x\n", device());
+    dst->appendFormat(" Devices: %s\n", devices().toString().c_str());
     dst->appendFormat(" Global active count: %u\n", mGlobalActiveCount);
     dst->append(" Stream volume activeCount muteCount\n");
     for (int i = 0; i < (int)AUDIO_STREAM_CNT; i++) {
@@ -330,17 +315,18 @@
     AudioOutputDescriptor::dump(dst);
 }
 
-audio_devices_t SwAudioOutputDescriptor::device() const
+DeviceVector SwAudioOutputDescriptor::devices() const
 {
     if (isDuplicated()) {
-        return (audio_devices_t)(mOutput1->mDevice | mOutput2->mDevice);
-    } else {
-        return mDevice;
+        DeviceVector devices = mOutput1->devices();
+        devices.merge(mOutput2->devices());
+        return devices;
     }
+    return mDevices;
 }
 
 bool SwAudioOutputDescriptor::sharesHwModuleWith(
-        const sp<AudioOutputDescriptor>& outputDesc)
+        const sp<SwAudioOutputDescriptor>& outputDesc)
 {
     if (isDuplicated()) {
         return mOutput1->sharesHwModuleWith(outputDesc) || mOutput2->sharesHwModuleWith(outputDesc);
@@ -352,12 +338,39 @@
     }
 }
 
-audio_devices_t SwAudioOutputDescriptor::supportedDevices()
+DeviceVector SwAudioOutputDescriptor::supportedDevices() const
 {
     if (isDuplicated()) {
-        return (audio_devices_t)(mOutput1->supportedDevices() | mOutput2->supportedDevices());
+        DeviceVector supportedDevices = mOutput1->supportedDevices();
+        supportedDevices.merge(mOutput2->supportedDevices());
+        return supportedDevices;
+    }
+    return mProfile->getSupportedDevices();
+}
+
+bool SwAudioOutputDescriptor::supportsDevice(const sp<DeviceDescriptor> &device) const
+{
+    return supportedDevices().contains(device);
+}
+
+bool SwAudioOutputDescriptor::supportsAllDevices(const DeviceVector &devices) const
+{
+    return supportedDevices().containsAllDevices(devices);
+}
+
+DeviceVector SwAudioOutputDescriptor::filterSupportedDevices(const DeviceVector &devices) const
+{
+    DeviceVector filteredDevices = supportedDevices();
+    return filteredDevices.filter(devices);
+}
+
+bool SwAudioOutputDescriptor::deviceSupportsEncodedFormats(audio_devices_t device)
+{
+    if (isDuplicated()) {
+        return (mOutput1->deviceSupportsEncodedFormats(device)
+                    || mOutput2->deviceSupportsEncodedFormats(device));
     } else {
-        return mProfile->getSupportedDevicesType();
+        return mProfile->deviceSupportsEncodedFormats(device);
     }
 }
 
@@ -443,12 +456,15 @@
 }
 
 status_t SwAudioOutputDescriptor::open(const audio_config_t *config,
-                                       audio_devices_t device,
-                                       const String8& address,
+                                       const DeviceVector &devices,
                                        audio_stream_type_t stream,
                                        audio_output_flags_t flags,
                                        audio_io_handle_t *output)
 {
+    mDevices = devices;
+    const String8& address = devices.getFirstValidAddress();
+    audio_devices_t device = devices.types();
+
     audio_config_t lConfig;
     if (config == nullptr) {
         lConfig = AUDIO_CONFIG_INITIALIZER;
@@ -459,7 +475,6 @@
         lConfig = *config;
     }
 
-    mDevice = device;
     // if the selected profile is offloaded and no offload info was specified,
     // create a default one
     if ((mProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) &&
@@ -477,19 +492,19 @@
 
     mFlags = (audio_output_flags_t)(mFlags | flags);
 
-    ALOGV("opening output for device %08x address %s profile %p name %s",
-          mDevice, address.string(), mProfile.get(), mProfile->getName().string());
+    ALOGV("opening output for device %s profile %p name %s",
+          mDevices.toString().c_str(), mProfile.get(), mProfile->getName().string());
 
     status_t status = mClientInterface->openOutput(mProfile->getModuleHandle(),
                                                    output,
                                                    &lConfig,
-                                                   &mDevice,
+                                                   &device,
                                                    address,
                                                    &mLatency,
                                                    mFlags);
-    LOG_ALWAYS_FATAL_IF(mDevice != device,
+    LOG_ALWAYS_FATAL_IF(mDevices.types() != device,
                         "%s openOutput returned device %08x when given device %08x",
-                        __FUNCTION__, mDevice, device);
+                        __FUNCTION__, mDevices.types(), device);
 
     if (status == NO_ERROR) {
         LOG_ALWAYS_FATAL_IF(*output == AUDIO_IO_HANDLE_NONE,
@@ -548,6 +563,17 @@
 void SwAudioOutputDescriptor::close()
 {
     if (mIoHandle != AUDIO_IO_HANDLE_NONE) {
+        // clean up active clients if any (can happen if close() is called to force
+        // clients to reconnect)
+        for (const auto &client : getClientIterable()) {
+            if (client->active()) {
+                ALOGW("%s client with port ID %d still active on output %d",
+                      __func__, client->portId(), mId);
+                setClientActive(client, false);
+                stop();
+            }
+        }
+
         AudioParameter param;
         param.add(String8("closing"), String8("true"));
         mClientInterface->setParameters(mIoHandle, param.toString());
@@ -556,11 +582,6 @@
 
         LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < 1, "%s profile open count %u",
                             __FUNCTION__, mProfile->curOpenCount);
-        // do not call stop() here as stop() is supposed to be called after setClientActive(false)
-        // and we don't know how many streams are still active at this time
-        if (isActive()) {
-            mProfile->curActiveCount--;
-        }
         mProfile->curOpenCount--;
         mIoHandle = AUDIO_IO_HANDLE_NONE;
     }
@@ -605,11 +626,6 @@
     mSource->dump(dst, 0, 0);
 }
 
-audio_devices_t HwAudioOutputDescriptor::supportedDevices()
-{
-    return mDevice;
-}
-
 void HwAudioOutputDescriptor::toAudioPortConfig(
                                                  struct audio_port_config *dstConfig,
                                                  const struct audio_port_config *srcConfig) const
@@ -657,7 +673,7 @@
     for (size_t i = 0; i < this->size(); i++) {
         const sp<SwAudioOutputDescriptor> outputDesc = this->valueAt(i);
         if (outputDesc->isStreamActive(stream, inPastMs, sysTime)
-                && ((outputDesc->device() & APM_AUDIO_OUT_DEVICE_REMOTE_ALL) == 0)) {
+                && ((outputDesc->devices().types() & APM_AUDIO_OUT_DEVICE_REMOTE_ALL) == 0)) {
             return true;
         }
     }
@@ -670,7 +686,7 @@
     nsecs_t sysTime = systemTime();
     for (size_t i = 0; i < size(); i++) {
         const sp<SwAudioOutputDescriptor> outputDesc = valueAt(i);
-        if (((outputDesc->device() & APM_AUDIO_OUT_DEVICE_REMOTE_ALL) != 0) &&
+        if (((outputDesc->devices().types() & APM_AUDIO_OUT_DEVICE_REMOTE_ALL) != 0) &&
                 outputDesc->isStreamActive(stream, inPastMs, sysTime)) {
             // do not consider re routing (when the output is going to a dynamic policy)
             // as "remote playback"
@@ -686,7 +702,10 @@
 {
     for (size_t i = 0; i < size(); i++) {
         sp<SwAudioOutputDescriptor> outputDesc = valueAt(i);
-        if (!outputDesc->isDuplicated() && outputDesc->device() & AUDIO_DEVICE_OUT_ALL_A2DP) {
+        if (!outputDesc->isDuplicated() &&
+             outputDesc->devices().types() & AUDIO_DEVICE_OUT_ALL_A2DP &&
+             outputDesc->deviceSupportsEncodedFormats(
+                     AUDIO_DEVICE_OUT_BLUETOOTH_A2DP)) {
             return this->keyAt(i);
         }
     }
@@ -700,10 +719,9 @@
     if ((primaryOutput != NULL) && (primaryOutput->mProfile != NULL)
         && (primaryOutput->mProfile->getModule() != NULL)) {
         sp<HwModule> primaryHwModule = primaryOutput->mProfile->getModule();
-        Vector <sp<IOProfile>> primaryHwModuleOutputProfiles =
-                                   primaryHwModule->getOutputProfiles();
-        for (size_t i = 0; i < primaryHwModuleOutputProfiles.size(); i++) {
-            if (primaryHwModuleOutputProfiles[i]->supportDevice(AUDIO_DEVICE_OUT_ALL_A2DP)) {
+
+        for (const auto &outputProfile : primaryHwModule->getOutputProfiles()) {
+            if (outputProfile->supportsDeviceTypes(AUDIO_DEVICE_OUT_ALL_A2DP)) {
                 return true;
             }
         }
@@ -754,13 +772,6 @@
     return false;
 }
 
-audio_devices_t SwAudioOutputCollection::getSupportedDevices(audio_io_handle_t handle) const
-{
-    sp<SwAudioOutputDescriptor> outputDesc = valueFor(handle);
-    audio_devices_t devices = outputDesc->mProfile->getSupportedDevicesType();
-    return devices;
-}
-
 sp<SwAudioOutputDescriptor> SwAudioOutputCollection::getOutputForClient(audio_port_handle_t portId)
 {
     for (size_t i = 0; i < size(); i++) {
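
SwAudioOutputDescriptor::open() now receives a DeviceVector and internally derives the legacy pair still passed to the HAL open call: an OR-ed device type mask plus the first non-empty address in the vector (getFirstValidAddress()). A compact sketch of that conversion with stand-in types, not the AOSP DeviceVector.

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    struct Device { uint32_t type; std::string address; };

    // Legacy pair still consumed by the HAL open call: an OR-ed type mask and one address.
    struct LegacyTarget {
        uint32_t typeMask = 0;
        std::string address;
    };

    LegacyTarget toLegacy(const std::vector<Device>& devices) {
        LegacyTarget legacy;
        for (const auto& d : devices) {
            legacy.typeMask |= d.type;                          // devices.types()
            if (legacy.address.empty() && !d.address.empty()) {
                legacy.address = d.address;                     // getFirstValidAddress()
            }
        }
        return legacy;
    }

    int main() {
        std::vector<Device> routed = {{0x2, ""}, {0x80, "00:11:22:33:44:55"}};  // speaker + a2dp
        LegacyTarget legacy = toLegacy(routed);
        std::printf("type mask 0x%x, address %s\n",
                    (unsigned)legacy.typeMask, legacy.address.c_str());
        return 0;
    }
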
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index 4d0916e..3b9411a 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -280,13 +280,32 @@
     return BAD_VALUE;
 }
 
-audio_devices_t AudioPolicyMixCollection::getDeviceAndMixForInputSource(audio_source_t inputSource,
-                                                                        audio_devices_t availDevices,
-                                                                        AudioMix **policyMix)
+sp<DeviceDescriptor> AudioPolicyMixCollection::getDeviceAndMixForOutput(
+        const sp<SwAudioOutputDescriptor> &output,
+        const DeviceVector &availableOutputDevices,
+        AudioMix **policyMix)
+{
+    for (size_t i = 0; i < size(); i++) {
+        if (valueAt(i)->getOutput() == output) {
+            AudioMix *mix = valueAt(i)->getMix();
+            if (policyMix != nullptr)
+                *policyMix = mix;
+            // This Desc is involved in a Mix, which has the highest prio
+            audio_devices_t deviceType = mix->mDeviceType;
+            String8 address = mix->mDeviceAddress;
+            ALOGV("%s: device (0x%x, addr=%s) forced by mix",
+                  __FUNCTION__, deviceType, address.c_str());
+            return availableOutputDevices.getDevice(deviceType, address, AUDIO_FORMAT_DEFAULT);
+        }
+    }
+    return nullptr;
+}
+
+sp<DeviceDescriptor> AudioPolicyMixCollection::getDeviceAndMixForInputSource(
+        audio_source_t inputSource, const DeviceVector &availDevices, AudioMix **policyMix)
 {
     for (size_t i = 0; i < size(); i++) {
         AudioMix *mix = valueAt(i)->getMix();
-
         if (mix->mMixType != MIX_TYPE_RECORDERS) {
             continue;
         }
@@ -295,17 +314,22 @@
                     mix->mCriteria[j].mValue.mSource == inputSource) ||
                (RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET == mix->mCriteria[j].mRule &&
                     mix->mCriteria[j].mValue.mSource != inputSource)) {
-                if (availDevices & AUDIO_DEVICE_IN_REMOTE_SUBMIX) {
+                // A policy mix is assumed to exist only for remote submix capture,
+                // so mix->mDeviceType can only be AUDIO_DEVICE_OUT_REMOTE_SUBMIX
+                audio_devices_t device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
+                auto mixDevice =
+                        availDevices.getDevice(device, mix->mDeviceAddress, AUDIO_FORMAT_DEFAULT);
+                if (mixDevice != nullptr) {
                     if (policyMix != NULL) {
                         *policyMix = mix;
                     }
-                    return AUDIO_DEVICE_IN_REMOTE_SUBMIX;
+                    return mixDevice;
                 }
                 break;
             }
         }
     }
-    return AUDIO_DEVICE_NONE;
+    return nullptr;
 }
 
 status_t AudioPolicyMixCollection::getInputMixForAttr(audio_attributes_t attr, AudioMix **policyMix)
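
The rewritten getDeviceAndMixForInputSource() above returns the matching DeviceDescriptor itself instead of a device type bitmask. A self-contained sketch of that flow with plain std types and hypothetical names (Mix, Device, deviceForInputSource):

    #include <cstdint>
    #include <memory>
    #include <string>
    #include <vector>

    struct Device {
        uint32_t type;
        std::string address;
    };

    struct Mix {
        bool matchesSource;        // outcome of the RULE_MATCH / RULE_EXCLUDE criteria
        uint32_t deviceType;       // for capture mixes, always the remote submix type
        std::string deviceAddress;
    };

    // Resolve the first matching mix against the available devices and hand back
    // the device descriptor itself; nullptr plays the role of AUDIO_DEVICE_NONE.
    std::shared_ptr<Device> deviceForInputSource(
            const std::vector<Mix>& mixes,
            const std::vector<std::shared_ptr<Device>>& available) {
        for (const auto& mix : mixes) {
            if (!mix.matchesSource) continue;
            for (const auto& dev : available) {
                if (dev->type == mix.deviceType && dev->address == mix.deviceAddress) {
                    return dev;
                }
            }
        }
        return nullptr;
    }
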
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index 19dde6a..9fcf5e7 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -31,9 +31,15 @@
 // --- AudioPort class implementation
 void AudioPort::attach(const sp<HwModule>& module)
 {
+    ALOGV("%s: attaching module %s to port %s", __FUNCTION__, getModuleName(), mName.string());
     mModule = module;
 }
 
+void AudioPort::detach()
+{
+    mModule = nullptr;
+}
+
 // Note that is a different namespace than AudioFlinger unique IDs
 audio_port_handle_t AudioPort::getNextUniqueId()
 {
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 04cbcd1..dc5b238 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -18,6 +18,7 @@
 //#define LOG_NDEBUG 0
 
 #include <audio_utils/string.h>
+#include <set>
 #include "DeviceDescriptor.h"
 #include "TypeConverter.h"
 #include "AudioGain.h"
@@ -37,11 +38,16 @@
                                              AUDIO_PORT_ROLE_SOURCE),
     mTagName(tagName), mDeviceType(type), mEncodedFormats(encodedFormats)
 {
+    mCurrentEncodedFormat = AUDIO_FORMAT_DEFAULT;
     if (type == AUDIO_DEVICE_IN_REMOTE_SUBMIX || type == AUDIO_DEVICE_OUT_REMOTE_SUBMIX ) {
         mAddress = String8("0");
     }
-    /* FIXME: read from APM config file */
-    if (type == AUDIO_DEVICE_OUT_HDMI) {
+    /* If the framework runs against a pre-5.0 Audio HAL, encoded formats are absent from the
+     * config. FIXME: APM should know the HAL version and not add the formats for 5.0 and later.
+     * For now, the workaround to remove AC3 and IEC61937 support on HDMI is to declare
+     * something like 'encodedFormats="AUDIO_FORMAT_PCM_16_BIT"' on the HDMI devicePort.
+     */
+    if (type == AUDIO_DEVICE_OUT_HDMI && mEncodedFormats.isEmpty()) {
         mEncodedFormats.add(AUDIO_FORMAT_AC3);
         mEncodedFormats.add(AUDIO_FORMAT_IEC61937);
     }
@@ -58,15 +64,57 @@
     mId = getNextUniqueId();
 }
 
+void DeviceDescriptor::detach() {
+    mId = AUDIO_PORT_HANDLE_NONE;
+    AudioPort::detach();
+}
+
+template<typename T>
+bool checkEqual(const T& f1, const T& f2)
+{
+    std::set<typename T::value_type> s1(f1.begin(), f1.end());
+    std::set<typename T::value_type> s2(f2.begin(), f2.end());
+    return s1 == s2;
+}
+
 bool DeviceDescriptor::equals(const sp<DeviceDescriptor>& other) const
 {
     // Devices are considered equal if they:
     // - are of the same type (a device type cannot be AUDIO_DEVICE_NONE)
     // - have the same address
+    // - have the same encodingFormats (if device supports encoding)
     if (other == 0) {
         return false;
     }
-    return (mDeviceType == other->mDeviceType) && (mAddress == other->mAddress);
+
+    return (mDeviceType == other->mDeviceType) && (mAddress == other->mAddress) &&
+           checkEqual(mEncodedFormats, other->mEncodedFormats);
+}
+
+bool DeviceDescriptor::hasCurrentEncodedFormat() const
+{
+    if (!device_has_encoding_capability(type())) {
+        return true;
+    }
+    if (mEncodedFormats.isEmpty()) {
+        return true;
+    }
+
+    return (mCurrentEncodedFormat != AUDIO_FORMAT_DEFAULT);
+}
+
+bool DeviceDescriptor::supportsFormat(audio_format_t format)
+{
+    if (mEncodedFormats.isEmpty()) {
+        return true;
+    }
+
+    for (const auto& devFormat : mEncodedFormats) {
+        if (devFormat == format) {
+            return true;
+        }
+    }
+    return false;
 }
 
 void DeviceVector::refreshTypes()
@@ -161,12 +209,17 @@
     return deviceTypes;
 }
 
-sp<DeviceDescriptor> DeviceVector::getDevice(audio_devices_t type, const String8& address) const
+sp<DeviceDescriptor> DeviceVector::getDevice(audio_devices_t type, const String8& address,
+                                             audio_format_t format) const
 {
     sp<DeviceDescriptor> device;
     for (size_t i = 0; i < size(); i++) {
         if (itemAt(i)->type() == type) {
-            if (address == "" || itemAt(i)->address() == address) {
+            // Assign the device if the address is empty or matches and the format is
+            // the default one, or if the device supports the requested format
+            if (((address == "" || itemAt(i)->address() == address) &&
+                 format == AUDIO_FORMAT_DEFAULT) ||
+                itemAt(i)->supportsFormat(format)) {
                 device = itemAt(i);
                 if (itemAt(i)->address() == address) {
                     break;
@@ -174,8 +227,8 @@
             }
         }
     }
-    ALOGV("DeviceVector::%s() for type %08x address \"%s\" found %p",
-            __func__, type, address.string(), device.get());
+    ALOGV("DeviceVector::%s() for type %08x address \"%s\" found %p format %08x",
+            __func__, type, address.string(), device.get(), format);
     return device;
 }
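
The DeviceDescriptor changes above add an order-insensitive comparison of encoded formats to equals() and make DeviceVector::getDevice() format-aware. A standalone sketch of both ideas, with std types only and illustrative names (sameElements, Device):

    #include <algorithm>
    #include <cstdint>
    #include <set>
    #include <string>
    #include <vector>

    using Format = uint32_t;

    // Order-insensitive comparison of two format lists.
    template <typename T>
    bool sameElements(const T& a, const T& b) {
        return std::set<typename T::value_type>(a.begin(), a.end()) ==
               std::set<typename T::value_type>(b.begin(), b.end());
    }

    struct Device {
        uint32_t type;
        std::string address;
        std::vector<Format> encodedFormats;   // empty means "anything goes"

        bool supportsFormat(Format f) const {
            return encodedFormats.empty() ||
                   std::find(encodedFormats.begin(), encodedFormats.end(), f) !=
                           encodedFormats.end();
        }
    };

    // Equality now also requires the same set of encoded formats.
    bool equals(const Device& a, const Device& b) {
        return a.type == b.type && a.address == b.address &&
               sameElements(a.encodedFormats, b.encodedFormats);
    }
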
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index 80af88d..85d9bce 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -52,6 +52,9 @@
 
     sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device);
     devDesc->setAddress(address);
+    addDynamicDevice(devDesc);
+    // Reciprocally attach the device to the module
+    devDesc->attach(this);
     profile->addSupportedDevice(devDesc);
 
     return addOutputProfile(profile);
@@ -97,6 +100,9 @@
 {
     for (size_t i = 0; i < mOutputProfiles.size(); i++) {
         if (mOutputProfiles[i]->getName() == name) {
+            for (const auto &device : mOutputProfiles[i]->getSupportedDevices()) {
+                removeDynamicDevice(device);
+            }
             mOutputProfiles.removeAt(i);
             break;
         }
@@ -114,6 +120,9 @@
 
     sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device);
     devDesc->setAddress(address);
+    addDynamicDevice(devDesc);
+    // Reciprocally attach the device to the module
+    devDesc->attach(this);
     profile->addSupportedDevice(devDesc);
 
     ALOGV("addInputProfile() name %s rate %d mask 0x%08x",
@@ -126,6 +135,9 @@
 {
     for (size_t i = 0; i < mInputProfiles.size(); i++) {
         if (mInputProfiles[i]->getName() == name) {
+            for (const auto &device : mInputProfiles[i]->getSupportedDevices()) {
+                removeDynamicDevice(device);
+            }
             mInputProfiles.removeAt(i);
             break;
         }
@@ -247,6 +259,7 @@
         }
     }
     mDeclaredDevices.dump(dst, String8("Declared"), 2, true);
+    mDynamicDevices.dump(dst, String8("Dynamic"),  2, true);
     mRoutes.dump(dst, 2);
 }
 
@@ -260,44 +273,153 @@
     return nullptr;
 }
 
-sp <HwModule> HwModuleCollection::getModuleForDevice(audio_devices_t device) const
+sp <HwModule> HwModuleCollection::getModuleForDeviceTypes(audio_devices_t type,
+                                                          audio_format_t encodedFormat) const
 {
     for (const auto& module : *this) {
-        const auto& profiles = audio_is_output_device(device) ?
+        const auto& profiles = audio_is_output_device(type) ?
                 module->getOutputProfiles() : module->getInputProfiles();
         for (const auto& profile : profiles) {
-            if (profile->supportDevice(device)) {
-                return module;
+            if (profile->supportsDeviceTypes(type)) {
+                if (encodedFormat != AUDIO_FORMAT_DEFAULT) {
+                    DeviceVector declaredDevices = module->getDeclaredDevices();
+                    sp <DeviceDescriptor> deviceDesc =
+                            declaredDevices.getDevice(type, String8(), encodedFormat);
+                    if (deviceDesc) {
+                        return module;
+                    }
+                } else {
+                    return module;
+                }
             }
         }
     }
     return nullptr;
 }
 
-sp<DeviceDescriptor> HwModuleCollection::getDeviceDescriptor(const audio_devices_t device,
-                                                             const char *device_address,
-                                                             const char *device_name,
+sp<HwModule> HwModuleCollection::getModuleForDevice(const sp<DeviceDescriptor> &device,
+                                                     audio_format_t encodedFormat) const
+{
+    return getModuleForDeviceTypes(device->type(), encodedFormat);
+}
+
+DeviceVector HwModuleCollection::getAvailableDevicesFromModuleName(
+        const char *name, const DeviceVector &availableDevices) const
+{
+    sp<HwModule> module = getModuleFromName(name);
+    if (module == nullptr) {
+        return DeviceVector();
+    }
+    return availableDevices.getDevicesFromHwModule(module->getHandle());
+}
+
+sp<DeviceDescriptor> HwModuleCollection::getDeviceDescriptor(const audio_devices_t deviceType,
+                                                             const char *address,
+                                                             const char *name,
+                                                             const audio_format_t encodedFormat,
+                                                             bool allowToCreate,
                                                              bool matchAddress) const
 {
-    String8 address = (device_address == nullptr || !matchAddress) ?
-            String8("") : String8(device_address);
+    String8 devAddress = (address == nullptr || !matchAddress) ? String8("") : String8(address);
     // handle legacy remote submix case where the address was not always specified
-    if (device_distinguishes_on_address(device) && (address.length() == 0)) {
-        address = String8("0");
+    if (device_distinguishes_on_address(deviceType) && (devAddress.length() == 0)) {
+        devAddress = String8("0");
     }
 
     for (const auto& hwModule : *this) {
-        DeviceVector declaredDevices = hwModule->getDeclaredDevices();
-        sp<DeviceDescriptor> deviceDesc = declaredDevices.getDevice(device, address);
-        if (deviceDesc) {
-            return deviceDesc;
+        DeviceVector moduleDevices = hwModule->getAllDevices();
+        auto moduleDevice = moduleDevices.getDevice(deviceType, devAddress, encodedFormat);
+        if (moduleDevice) {
+            if (encodedFormat != AUDIO_FORMAT_DEFAULT) {
+                moduleDevice->setEncodedFormat(encodedFormat);
+                if (moduleDevice->address() != devAddress) {
+                    moduleDevice->setAddress(devAddress);
+                }
+            }
+            if (allowToCreate) {
+                moduleDevice->attach(hwModule);
+            }
+            return moduleDevice;
         }
     }
+    if (!allowToCreate) {
+        ALOGE("%s: could not find HW module for device %s %04x address %s", __FUNCTION__,
+              name, deviceType, address);
+        return nullptr;
+    }
+    return createDevice(deviceType, address, name, encodedFormat);
+}
 
-    sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device);
-    devDesc->setName(String8(device_name));
-    devDesc->setAddress(address);
-    return devDesc;
+sp<DeviceDescriptor> HwModuleCollection::createDevice(const audio_devices_t type,
+                                                      const char *address,
+                                                      const char *name,
+                                                      const audio_format_t encodedFormat) const
+{
+    sp<HwModule> hwModule = getModuleForDeviceTypes(type, encodedFormat);
+    if (hwModule == 0) {
+        ALOGE("%s: could not find HW module for device %04x address %s", __FUNCTION__, type,
+              address);
+        return nullptr;
+    }
+    sp<DeviceDescriptor> device = new DeviceDescriptor(type, String8(name));
+    device->setName(String8(name));
+    device->setAddress(String8(address));
+    device->setEncodedFormat(encodedFormat);
+
+    // Add the device to the list of dynamic devices
+    hwModule->addDynamicDevice(device);
+    // Reciprocally attach the device to the module
+    device->attach(hwModule);
+    ALOGD("%s: adding dynamic device %s to module %s", __FUNCTION__,
+          device->toString().c_str(), hwModule->getName());
+
+    const auto &profiles = (audio_is_output_device(type) ? hwModule->getOutputProfiles() :
+                                                             hwModule->getInputProfiles());
+    for (const auto &profile : profiles) {
+        // Add the device as supported to every profile that supports it at least
+        // "weakly", i.e. by device type only
+        if (profile->supportsDevice(device, false /*matchAddress*/)) {
+
+            // @todo what about the audio profile? Import it from a device of the same type?
+            const auto &isoTypeDeviceForProfile =
+                profile->getSupportedDevices().getDevice(type, String8(), AUDIO_FORMAT_DEFAULT);
+            device->importAudioPort(isoTypeDeviceForProfile, true /* force */);
+
+            ALOGV("%s: adding device %s to profile %s", __FUNCTION__,
+                  device->toString().c_str(), profile->getTagName().c_str());
+            profile->addSupportedDevice(device);
+        }
+    }
+    return device;
+}
+
+void HwModuleCollection::cleanUpForDevice(const sp<DeviceDescriptor> &device)
+{
+    for (const auto& hwModule : *this) {
+        DeviceVector moduleDevices = hwModule->getAllDevices();
+        if (!moduleDevices.contains(device)) {
+            continue;
+        }
+        device->detach();
+        // Only remove from dynamic list, not from declared list!!!
+        if (!hwModule->getDynamicDevices().contains(device)) {
+            return;
+        }
+        hwModule->removeDynamicDevice(device);
+        ALOGV("%s: removed dynamic device %s from module %s", __FUNCTION__,
+              device->toString().c_str(), hwModule->getName());
+
+        const IOProfileCollection &profiles = audio_is_output_device(device->type()) ?
+                    hwModule->getOutputProfiles() : hwModule->getInputProfiles();
+        for (const auto &profile : profiles) {
+            // For cleanup, strong match is required
+            if (profile->supportsDevice(device, true /*matchAddress*/)) {
+                ALOGV("%s: removing device %s from profile %s", __FUNCTION__,
+                      device->toString().c_str(), profile->getTagName().c_str());
+                profile->removeSupportedDevice(device);
+            }
+        }
+    }
 }
 
 void HwModuleCollection::dump(String8 *dst) const
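
The HwModule hunks above introduce symmetric bookkeeping for dynamically connected devices: createDevice() registers the device with its module and with every profile that supports it by type, while cleanUpForDevice() undoes exactly that and leaves declared devices alone. A compact sketch of this lifecycle with illustrative containers, not the AOSP classes:

    #include <algorithm>
    #include <memory>
    #include <vector>

    struct Device;

    struct Profile {
        bool supportsByType(const std::shared_ptr<Device>&) const { return true; }  // placeholder
        std::vector<std::shared_ptr<Device>> supported;
    };

    struct Module {
        std::vector<std::shared_ptr<Device>> declared;  // from the XML config, never touched here
        std::vector<std::shared_ptr<Device>> dynamic;   // connected at runtime
        std::vector<Profile> profiles;
    };

    struct Device {
        Module* module = nullptr;                       // set by attach(), cleared by detach()
    };

    void addDynamicDevice(Module& m, const std::shared_ptr<Device>& d) {
        m.dynamic.push_back(d);
        d->module = &m;                                 // reciprocal attach
        for (auto& p : m.profiles) {
            if (p.supportsByType(d)) p.supported.push_back(d);
        }
    }

    void cleanUpForDevice(Module& m, const std::shared_ptr<Device>& d) {
        d->module = nullptr;                            // detach
        auto& dyn = m.dynamic;
        auto it = std::find(dyn.begin(), dyn.end(), d);
        if (it == dyn.end()) return;                    // declared devices are kept
        dyn.erase(it);
        for (auto& p : m.profiles) {
            auto& sup = p.supported;
            sup.erase(std::remove(sup.begin(), sup.end(), d), sup.end());
        }
    }
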
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index 3788244..fe2eaee 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -25,11 +25,7 @@
 
 namespace android {
 
-// checks if the IO profile is compatible with specified parameters.
-// Sampling rate, format and channel mask must be specified in order to
-// get a valid a match
-bool IOProfile::isCompatibleProfile(audio_devices_t device,
-                                    const String8& address,
+bool IOProfile::isCompatibleProfile(const DeviceVector &devices,
                                     uint32_t samplingRate,
                                     uint32_t *updatedSamplingRate,
                                     audio_format_t format,
@@ -46,14 +42,8 @@
             getType() == AUDIO_PORT_TYPE_MIX && getRole() == AUDIO_PORT_ROLE_SINK;
     ALOG_ASSERT(isPlaybackThread != isRecordThread);
 
-
-    if (device != AUDIO_DEVICE_NONE) {
-        // just check types if multiple devices are selected
-        if (popcount(device & ~AUDIO_DEVICE_BIT_IN) > 1) {
-            if ((mSupportedDevices.types() & device) != device) {
-                return false;
-            }
-        } else if (mSupportedDevices.getDevice(device, address) == 0) {
+    if (!devices.isEmpty()) {
+        if (!mSupportedDevices.containsAllDevices(devices)) {
             return false;
         }
     }
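
The isCompatibleProfile() simplification above replaces the bitmask and address special cases with a single "supported devices contain all requested devices" test. A minimal sketch under that assumption, using std types and hypothetical names:

    #include <algorithm>
    #include <memory>
    #include <vector>

    struct Device {};
    using DeviceList = std::vector<std::shared_ptr<Device>>;

    bool containsAllDevices(const DeviceList& supported, const DeviceList& requested) {
        return std::all_of(requested.begin(), requested.end(),
                           [&supported](const std::shared_ptr<Device>& d) {
                               return std::find(supported.begin(), supported.end(), d) !=
                                      supported.end();
                           });
    }

    bool isCompatible(const DeviceList& supported, const DeviceList& requested) {
        // An empty request (no device constraint) is always compatible.
        return requested.empty() || containsAllDevices(supported, requested);
    }
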
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 1154654..98d375c 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -140,6 +140,8 @@
         static constexpr const char *roleSource = "source"; /**< <attribute role source value>. */
         /** optional: device address, char string less than 64. */
         static constexpr const char *address = "address";
+        /** optional: the list of encoded audio formats that are known to be supported. */
+        static constexpr const char *encodedFormats = "encodedFormats";
     };
 
     static Return<Element> deserialize(const xmlNode *cur, PtrSerializingCtx serializingContext);
@@ -511,7 +513,13 @@
         ALOGW("%s: bad type %08x", __func__, type);
         return Status::fromStatusT(BAD_VALUE);
     }
-    Element deviceDesc = new DeviceDescriptor(type, String8(name.c_str()));
+    std::string encodedFormatsLiteral = getXmlAttribute(cur, Attributes::encodedFormats);
+    ALOGV("%s: %s %s=%s", __func__, tag, Attributes::encodedFormats, encodedFormatsLiteral.c_str());
+    FormatVector encodedFormats;
+    if (!encodedFormatsLiteral.empty()) {
+        encodedFormats = formatsFromString(encodedFormatsLiteral, " ");
+    }
+    Element deviceDesc = new DeviceDescriptor(type, encodedFormats, String8(name.c_str()));
 
     std::string address = getXmlAttribute(cur, Attributes::address);
     if (!address.empty()) {
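
The new encodedFormats device-port attribute above is a space-separated list of format names handed to formatsFromString(). A small sketch of the split step only, with std types; the real helper also converts each token to its audio_format_t value:

    #include <sstream>
    #include <string>
    #include <vector>

    std::vector<std::string> splitFormats(const std::string& literal) {
        std::vector<std::string> out;
        std::istringstream in(literal);
        for (std::string token; in >> token; ) {
            out.push_back(token);               // e.g. "AUDIO_FORMAT_AC3"
        }
        return out;
    }

    // splitFormats("AUDIO_FORMAT_AC3 AUDIO_FORMAT_IEC61937")
    //   -> {"AUDIO_FORMAT_AC3", "AUDIO_FORMAT_IEC61937"}
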
diff --git a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp b/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
index 620f361..2625733 100644
--- a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
@@ -26,16 +26,22 @@
 {
     ALOG_ASSERT(!mCurvePoints.isEmpty(), "Invalid volume curve");
 
-    size_t nbCurvePoints = mCurvePoints.size();
-    // the volume index in the UI is relative to the min and max volume indices for this stream
-    int nbSteps = 1 + mCurvePoints[nbCurvePoints - 1].mIndex - mCurvePoints[0].mIndex;
     if (indexInUi < volIndexMin) {
+        // an index of 0 means mute request when volIndexMin > 0
+        if (indexInUi == 0) {
+            ALOGV("VOLUME forcing mute for index 0 with min index %d", volIndexMin);
+            return VOLUME_MIN_DB;
+        }
         ALOGV("VOLUME remapping index from %d to min index %d", indexInUi, volIndexMin);
         indexInUi = volIndexMin;
     } else if (indexInUi > volIndexMax) {
         ALOGV("VOLUME remapping index from %d to max index %d", indexInUi, volIndexMax);
         indexInUi = volIndexMax;
     }
+
+    size_t nbCurvePoints = mCurvePoints.size();
+    // the volume index in the UI is relative to the min and max volume indices for this stream
+    int nbSteps = 1 + mCurvePoints[nbCurvePoints - 1].mIndex - mCurvePoints[0].mIndex;
     int volIdx = (nbSteps * (indexInUi - volIndexMin)) / (volIndexMax - volIndexMin);
 
     // Where would this volume index been inserted in the curve point
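
The VolumeCurve change above moves the step computation after the range handling so that index 0 can act as an explicit mute when the minimum index is above 0. A short worked sketch of the reordered index handling in plain C++ (illustrative function name):

    // Returns the curve step index, or -1 to signal a mute request.
    // Assumes volIndexMax > volIndexMin, as the curve remapping does.
    int uiIndexToStep(int indexInUi, int volIndexMin, int volIndexMax, int nbSteps) {
        if (indexInUi < volIndexMin) {
            if (indexInUi == 0) return -1;    // mute request -> VOLUME_MIN_DB in the caller
            indexInUi = volIndexMin;          // remap below-range indices to the minimum
        } else if (indexInUi > volIndexMax) {
            indexInUi = volIndexMax;          // remap above-range indices to the maximum
        }
        return (nbSteps * (indexInUi - volIndexMin)) / (volIndexMax - volIndexMin);
    }

    // Example: with volIndexMin=1, volIndexMax=15 and nbSteps=100, index 0 mutes,
    // index 1 maps to step 0 and index 15 maps to step 100.
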
diff --git a/services/audiopolicy/config/audio_policy_configuration_generic.xml b/services/audiopolicy/config/audio_policy_configuration_generic.xml
index 58768c3..40dcc22 100644
--- a/services/audiopolicy/config/audio_policy_configuration_generic.xml
+++ b/services/audiopolicy/config/audio_policy_configuration_generic.xml
@@ -37,4 +37,10 @@
 
     <!-- End of Volume section -->
 
+    <!-- Surround Sound configuration -->
+
+    <xi:include href="surround_sound_configuration_5_0.xml"/>
+
+    <!-- End of Surround Sound configuration -->
+
 </audioPolicyConfiguration>
diff --git a/services/audiopolicy/config/audio_policy_configuration_generic_tv.xml b/services/audiopolicy/config/audio_policy_configuration_generic_tv.xml
new file mode 100644
index 0000000..5f1ca31
--- /dev/null
+++ b/services/audiopolicy/config/audio_policy_configuration_generic_tv.xml
@@ -0,0 +1,49 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+    <!-- version section contains a "version" tag in the form "major.minor", e.g. version="1.0" -->
+
+    <!-- Global configuration Declaration -->
+    <globalConfiguration speaker_drc_enabled="false"/>
+
+    <modules>
+        <!-- Primary Audio HAL -->
+        <xi:include href="primary_audio_policy_configuration_tv.xml"/>
+
+        <!-- Usb Audio HAL -->
+        <xi:include href="usb_audio_policy_configuration.xml"/>
+
+        <!-- Remote Submix Audio HAL -->
+        <xi:include href="r_submix_audio_policy_configuration.xml"/>
+
+    </modules>
+    <!-- End of Modules section -->
+
+    <!-- Volume section -->
+
+    <xi:include href="audio_policy_volumes.xml"/>
+    <xi:include href="default_volume_tables.xml"/>
+
+    <!-- End of Volume section -->
+
+    <!-- Surround Sound configuration -->
+
+    <xi:include href="surround_sound_configuration_5_0.xml"/>
+
+    <!-- End of Surround Sound configuration -->
+
+</audioPolicyConfiguration>
diff --git a/services/audiopolicy/config/audio_policy_configuration_stub.xml b/services/audiopolicy/config/audio_policy_configuration_stub.xml
index 26c381f..8350eb8 100644
--- a/services/audiopolicy/config/audio_policy_configuration_stub.xml
+++ b/services/audiopolicy/config/audio_policy_configuration_stub.xml
@@ -15,6 +15,9 @@
 -->
 
 <audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+    <!-- Global configuration Declaration -->
+    <globalConfiguration speaker_drc_enabled="false"/>
+
     <modules>
         <!-- Stub Audio HAL -->
         <xi:include href="stub_audio_policy_configuration.xml"/>
@@ -26,5 +29,6 @@
 
     <xi:include href="audio_policy_volumes.xml"/>
     <xi:include href="default_volume_tables.xml"/>
+    <xi:include href="surround_sound_configuration_5_0.xml"/>
 
 </audioPolicyConfiguration>
diff --git a/services/audiopolicy/config/primary_audio_policy_configuration.xml b/services/audiopolicy/config/primary_audio_policy_configuration.xml
index 5b7ae7f..eedc96b 100644
--- a/services/audiopolicy/config/primary_audio_policy_configuration.xml
+++ b/services/audiopolicy/config/primary_audio_policy_configuration.xml
@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<!-- Default Primary Audio HAL Module Audio Policy Configuration include flie -->
+<!-- Default Primary Audio HAL Module Audio Policy Configuration include file -->
 <module name="primary" halVersion="2.0">
     <attachedDevices>
         <item>Speaker</item>
diff --git a/services/audiopolicy/config/primary_audio_policy_configuration_tv.xml b/services/audiopolicy/config/primary_audio_policy_configuration_tv.xml
new file mode 100644
index 0000000..826015a
--- /dev/null
+++ b/services/audiopolicy/config/primary_audio_policy_configuration_tv.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Default Primary Audio HAL Module Audio Policy Configuration include file for TV -->
+<module name="primary" halVersion="2.0">
+    <attachedDevices>
+        <item>Speaker</item>
+    </attachedDevices>
+    <defaultOutputDevice>Speaker</defaultOutputDevice>
+    <mixPorts>
+        <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </mixPort>
+        <mixPort name="direct" role="source" flags="AUDIO_OUTPUT_FLAG_DIRECT" />
+        <mixPort name="tunnel" role="source"
+                 flags="AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_HW_AV_SYNC" />
+    </mixPorts>
+    <devicePorts>
+        <devicePort tagName="Speaker" type="AUDIO_DEVICE_OUT_SPEAKER" role="sink" />
+        <devicePort tagName="Out Aux Digital" type="AUDIO_DEVICE_OUT_AUX_DIGITAL" role="sink"
+                    encodedFormats="AUDIO_FORMAT_AC3 AUDIO_FORMAT_IEC61937" />
+    </devicePorts>
+    <routes>
+        <route type="mix" sink="Speaker" sources="primary output"/>
+        <route type="mix" sink="Out Aux Digital" sources="primary output,direct,tunnel"/>
+    </routes>
+</module>
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
index eb11980..7c87c80 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
@@ -28,6 +28,7 @@
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes RemoteSubmix
+				AvailableOutputDevicesAddresses Includes 0
 
 				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
 					remote_submix = 1
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_dtmf.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_dtmf.pfw
index 883c741..c830c42 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_dtmf.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_dtmf.pfw
@@ -20,6 +20,7 @@
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes RemoteSubmix
+				AvailableOutputDevicesAddresses Includes 0
 
 				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
 					remote_submix = 1
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_enforced_audible.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_enforced_audible.pfw
index f504631..c641138 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_enforced_audible.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_enforced_audible.pfw
@@ -61,6 +61,7 @@
 		domain: Device2
 			conf: RemoteSubmix
 				AvailableOutputDevices Includes RemoteSubmix
+				AvailableOutputDevicesAddresses Includes 0
 
 				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
 					remote_submix = 1
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_media.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_media.pfw
index bdb6ae0..f8bab3d 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_media.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_media.pfw
@@ -19,6 +19,7 @@
 		domain: Device2
 			conf: RemoteSubmix
 				AvailableOutputDevices Includes RemoteSubmix
+				AvailableOutputDevicesAddresses Includes 0
 
 				component: /Policy/policy/strategies/media/selected_output_devices/mask
 					speaker = 0
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_rerouting.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_rerouting.pfw
index 04e62f7..28a3629 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_rerouting.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_rerouting.pfw
@@ -24,6 +24,7 @@
 		domain: Device2
 			conf: RemoteSubmix
 				AvailableOutputDevices Includes RemoteSubmix
+				AvailableOutputDevicesAddresses Includes 0
 
 				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
 					remote_submix = 1
diff --git a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
index fc6c1e4..1934fa4 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
+++ b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
@@ -295,8 +295,8 @@
 
     auto criterionType = criterion->getCriterionType();
     int deviceAddressId;
-    if (not criterionType->getNumericalValue(devDesc->mAddress.string(), deviceAddressId)) {
-        ALOGE("%s: unknown device address reported (%s)", __FUNCTION__, devDesc->mAddress.c_str());
+    if (not criterionType->getNumericalValue(devDesc->address().string(), deviceAddressId)) {
+        ALOGW("%s: unknown device address reported (%s)", __FUNCTION__, devDesc->address().c_str());
         return BAD_TYPE;
     }
     int currentValueMask = criterion->getCriterionState();
diff --git a/services/audiopolicy/engineconfigurable/wrapper/config/policy_criterion_types.xml.in b/services/audiopolicy/engineconfigurable/wrapper/config/policy_criterion_types.xml.in
index 6cb799f..fe17369 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/config/policy_criterion_types.xml.in
+++ b/services/audiopolicy/engineconfigurable/wrapper/config/policy_criterion_types.xml.in
@@ -16,7 +16,12 @@
 <criterion_types>
     <criterion_type name="OutputDevicesMaskType" type="inclusive"/>
     <criterion_type name="InputDevicesMaskType" type="inclusive"/>
-    <criterion_type name="OutputDevicesAddressesType" type="inclusive"/>
+    <criterion_type name="OutputDevicesAddressesType" type="inclusive">
+        <values>
+            <!-- legacy remote submix -->
+            <value literal="0" numerical="1"/>
+        </values>
+    </criterion_type>
     <criterion_type name="InputDevicesAddressesType" type="inclusive"/>
     <criterion_type name="AndroidModeType" type="exclusive"/>
     <criterion_type name="BooleanType" type="exclusive">
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 0ef6f52..cc5a025 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -322,7 +322,7 @@
             // a primary device
             // FIXME: this is not the right way of solving this problem
             audio_devices_t availPrimaryOutputDevices =
-                (primaryOutput->supportedDevices() | AUDIO_DEVICE_OUT_HEARING_AID) &
+                (primaryOutput->supportedDevices().types() | AUDIO_DEVICE_OUT_HEARING_AID) &
                 availableOutputDevices.types();
 
             if (((availableInputDevices.types() &
@@ -475,7 +475,7 @@
             // compressed format as they would likely not be mixed and dropped.
             for (size_t i = 0; i < outputs.size(); i++) {
                 sp<AudioOutputDescriptor> desc = outputs.valueAt(i);
-                audio_devices_t devices = desc->device() &
+                audio_devices_t devices = desc->devices().types() &
                     (AUDIO_DEVICE_OUT_HDMI | AUDIO_DEVICE_OUT_SPDIF | AUDIO_DEVICE_OUT_HDMI_ARC);
                 if (desc->isActive() && !audio_is_linear_pcm(desc->mFormat) &&
                         devices != AUDIO_DEVICE_NONE) {
@@ -506,7 +506,7 @@
         if (strategy != STRATEGY_SONIFICATION) {
             // no sonification on remote submix (e.g. WFD)
             if (availableOutputDevices.getDevice(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
-                                                 String8("0")) != 0) {
+                                                 String8("0"), AUDIO_FORMAT_DEFAULT) != 0) {
                 device2 = availableOutputDevices.types() & AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
             }
         }
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 5544821..cf9c298 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -81,43 +81,49 @@
 status_t AudioPolicyManager::setDeviceConnectionState(audio_devices_t device,
                                                       audio_policy_dev_state_t state,
                                                       const char *device_address,
-                                                      const char *device_name)
+                                                      const char *device_name,
+                                                      audio_format_t encodedFormat)
 {
-    status_t status = setDeviceConnectionStateInt(device, state, device_address, device_name);
+    status_t status = setDeviceConnectionStateInt(device, state, device_address,
+                                                  device_name, encodedFormat);
     nextAudioPortGeneration();
     return status;
 }
 
-void AudioPolicyManager::broadcastDeviceConnectionState(audio_devices_t device,
-                                                        audio_policy_dev_state_t state,
-                                                        const String8 &device_address)
+void AudioPolicyManager::broadcastDeviceConnectionState(const sp<DeviceDescriptor> &device,
+                                                        audio_policy_dev_state_t state)
 {
-    AudioParameter param(device_address);
+    AudioParameter param(device->address());
     const String8 key(state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE ?
                 AudioParameter::keyStreamConnect : AudioParameter::keyStreamDisconnect);
-    param.addInt(key, device);
+    param.addInt(key, device->type());
     mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
 }
 
-status_t AudioPolicyManager::setDeviceConnectionStateInt(audio_devices_t device,
+status_t AudioPolicyManager::setDeviceConnectionStateInt(audio_devices_t deviceType,
                                                          audio_policy_dev_state_t state,
                                                          const char *device_address,
-                                                         const char *device_name)
+                                                         const char *device_name,
+                                                         audio_format_t encodedFormat)
 {
-    ALOGV("setDeviceConnectionStateInt() device: 0x%X, state %d, address %s name %s",
-            device, state, device_address, device_name);
+    ALOGV("setDeviceConnectionStateInt() device: 0x%X, state %d, address %s name %s format 0x%X",
+            deviceType, state, device_address, device_name, encodedFormat);
 
     // connect/disconnect only 1 device at a time
-    if (!audio_is_output_device(device) && !audio_is_input_device(device)) return BAD_VALUE;
+    if (!audio_is_output_device(deviceType) && !audio_is_input_device(deviceType)) return BAD_VALUE;
 
-    sp<DeviceDescriptor> devDesc =
-            mHwModules.getDeviceDescriptor(device, device_address, device_name);
+    sp<DeviceDescriptor> device =
+            mHwModules.getDeviceDescriptor(deviceType, device_address, device_name, encodedFormat,
+                                           state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE);
+    if (device == 0) {
+        return INVALID_OPERATION;
+    }
 
     // handle output devices
-    if (audio_is_output_device(device)) {
+    if (audio_is_output_device(deviceType)) {
         SortedVector <audio_io_handle_t> outputs;
 
-        ssize_t index = mAvailableOutputDevices.indexOf(devDesc);
+        ssize_t index = mAvailableOutputDevices.indexOf(device);
 
         // save a copy of the opened output descriptors before any output is opened or closed
         // by checkOutputsForDevice(). This will be needed by checkOutputForAllStrategies()
@@ -127,21 +133,23 @@
         // handle output device connection
         case AUDIO_POLICY_DEVICE_STATE_AVAILABLE: {
             if (index >= 0) {
-                ALOGW("setDeviceConnectionState() device already connected: %x", device);
+                ALOGW("%s() device already connected: %s", __func__, device->toString().c_str());
                 return INVALID_OPERATION;
             }
-            ALOGV("setDeviceConnectionState() connecting device %x", device);
+            ALOGV("%s() connecting device %s format %x",
+                    __func__, device->toString().c_str(), encodedFormat);
 
             // register new device as available
-            index = mAvailableOutputDevices.add(devDesc);
+            index = mAvailableOutputDevices.add(device);
             if (index >= 0) {
-                sp<HwModule> module = mHwModules.getModuleForDevice(device);
+                sp<HwModule> module = mHwModules.getModuleForDevice(device, encodedFormat);
                 if (module == 0) {
-                    ALOGD("setDeviceConnectionState() could not find HW module for device %08x",
-                          device);
-                    mAvailableOutputDevices.remove(devDesc);
+                    ALOGD("setDeviceConnectionState() could not find HW module for device %s",
+                          device->toString().c_str());
+                    mAvailableOutputDevices.remove(device);
                     return INVALID_OPERATION;
                 }
+                ALOGV("setDeviceConnectionState() module name=%s", module->getName());
                 mAvailableOutputDevices[index]->attach(module);
             } else {
                 return NO_MEMORY;
@@ -149,48 +157,51 @@
 
             // Before checking outputs, broadcast connect event to allow HAL to retrieve dynamic
             // parameters on newly connected devices (instead of opening the outputs...)
-            broadcastDeviceConnectionState(device, state, devDesc->address());
+            broadcastDeviceConnectionState(device, state);
 
-            if (checkOutputsForDevice(devDesc, state, outputs, devDesc->address()) != NO_ERROR) {
-                mAvailableOutputDevices.remove(devDesc);
+            if (checkOutputsForDevice(device, state, outputs) != NO_ERROR) {
+                mAvailableOutputDevices.remove(device);
 
-                broadcastDeviceConnectionState(device, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
-                                               devDesc->address());
+                mHwModules.cleanUpForDevice(device);
+
+                broadcastDeviceConnectionState(device, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE);
                 return INVALID_OPERATION;
             }
             // Propagate device availability to Engine
-            mEngine->setDeviceConnectionState(devDesc, state);
+            mEngine->setDeviceConnectionState(device, state);
 
             // outputs should never be empty here
             ALOG_ASSERT(outputs.size() != 0, "setDeviceConnectionState():"
                     "checkOutputsForDevice() returned no outputs but status OK");
-            ALOGV("setDeviceConnectionState() checkOutputsForDevice() returned %zu outputs",
-                  outputs.size());
+            ALOGV("%s() checkOutputsForDevice() returned %zu outputs", __func__, outputs.size());
 
             } break;
         // handle output device disconnection
         case AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE: {
             if (index < 0) {
-                ALOGW("setDeviceConnectionState() device not connected: %x", device);
+                ALOGW("%s() device not connected: %s", __func__, device->toString().c_str());
                 return INVALID_OPERATION;
             }
 
-            ALOGV("setDeviceConnectionState() disconnecting output device %x", device);
+            ALOGV("%s() disconnecting output device %s", __func__, device->toString().c_str());
 
             // Send Disconnect to HALs
-            broadcastDeviceConnectionState(device, state, devDesc->address());
+            broadcastDeviceConnectionState(device, state);
 
             // remove device from available output devices
-            mAvailableOutputDevices.remove(devDesc);
+            mAvailableOutputDevices.remove(device);
 
-            checkOutputsForDevice(devDesc, state, outputs, devDesc->address());
+            checkOutputsForDevice(device, state, outputs);
+
+            // Reset active device codec
+            device->setEncodedFormat(AUDIO_FORMAT_DEFAULT);
 
             // Propagate device availability to Engine
-            mEngine->setDeviceConnectionState(devDesc, state);
+            mEngine->setDeviceConnectionState(device, state);
             } break;
 
         default:
-            ALOGE("setDeviceConnectionState() invalid state: %x", state);
+            ALOGE("%s() invalid state: %x", __func__, state);
             return BAD_VALUE;
         }
 
@@ -199,8 +210,8 @@
             if (!outputs.isEmpty()) {
                 for (audio_io_handle_t output : outputs) {
                     sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
-                    // close unused outputs after device disconnection or direct outputs that have been
-                    // opened by checkOutputsForDevice() to query dynamic parameters
+                    // close unused outputs after device disconnection or direct outputs that have
+                    // been opened by checkOutputsForDevice() to query dynamic parameters
                     if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
                             (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
                              (desc->mDirectOpenCount == 0))) {
@@ -214,29 +225,28 @@
         });
 
         if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
-            audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
-            updateCallRouting(newDevice);
+            DeviceVector newDevices = getNewOutputDevices(mPrimaryOutput, false /*fromCache*/);
+            updateCallRouting(newDevices);
         }
-        const audio_devices_t msdOutDevice = getModuleDeviceTypes(
-                mAvailableOutputDevices, AUDIO_HARDWARE_MODULE_ID_MSD);
+        const DeviceVector msdOutDevices = getMsdAudioOutDevices();
         for (size_t i = 0; i < mOutputs.size(); i++) {
             sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
             if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (desc != mPrimaryOutput)) {
-                audio_devices_t newDevice = getNewOutputDevice(desc, true /*fromCache*/);
+                DeviceVector newDevices = getNewOutputDevices(desc, true /*fromCache*/);
                 // do not force device change on duplicated output because if device is 0, it will
                 // also force a device 0 for the two outputs it is duplicated to which may override
                 // a valid device selection on those outputs.
-                bool force = (msdOutDevice == AUDIO_DEVICE_NONE || msdOutDevice != desc->device())
+                bool force = (msdOutDevices.isEmpty() || msdOutDevices != desc->devices())
                         && !desc->isDuplicated()
-                        && (!device_distinguishes_on_address(device)
+                        && (!device_distinguishes_on_address(deviceType)
                                 // always force when disconnecting (a non-duplicated device)
                                 || (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE));
-                setOutputDevice(desc, newDevice, force, 0);
+                setOutputDevices(desc, newDevices, force, 0);
             }
         }
 
         if (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) {
-            cleanUpForDevice(devDesc);
+            cleanUpForDevice(device);
         }
 
         mpClientInterface->onAudioPortListUpdate();
@@ -244,67 +254,66 @@
     }  // end if is output device
 
     // handle input devices
-    if (audio_is_input_device(device)) {
+    if (audio_is_input_device(deviceType)) {
         SortedVector <audio_io_handle_t> inputs;
 
-        ssize_t index = mAvailableInputDevices.indexOf(devDesc);
+        ssize_t index = mAvailableInputDevices.indexOf(device);
         switch (state)
         {
         // handle input device connection
         case AUDIO_POLICY_DEVICE_STATE_AVAILABLE: {
             if (index >= 0) {
-                ALOGW("setDeviceConnectionState() device already connected: %d", device);
+                ALOGW("%s() device already connected: %s", __func__, device->toString().c_str());
                 return INVALID_OPERATION;
             }
-            sp<HwModule> module = mHwModules.getModuleForDevice(device);
+            sp<HwModule> module = mHwModules.getModuleForDevice(device, AUDIO_FORMAT_DEFAULT);
             if (module == NULL) {
-                ALOGW("setDeviceConnectionState(): could not find HW module for device %08x",
-                      device);
+                ALOGW("setDeviceConnectionState(): could not find HW module for device %s",
+                      device->toString().c_str());
                 return INVALID_OPERATION;
             }
 
             // Before checking inputs, broadcast connect event to allow HAL to retrieve dynamic
             // parameters on newly connected devices (instead of opening the inputs...)
-            broadcastDeviceConnectionState(device, state, devDesc->address());
+            broadcastDeviceConnectionState(device, state);
 
-            if (checkInputsForDevice(devDesc, state, inputs, devDesc->address()) != NO_ERROR) {
-                broadcastDeviceConnectionState(device, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
-                                               devDesc->address());
+            if (checkInputsForDevice(device, state, inputs) != NO_ERROR) {
+                broadcastDeviceConnectionState(device, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE);
+
+                mHwModules.cleanUpForDevice(device);
+
                 return INVALID_OPERATION;
             }
 
-            index = mAvailableInputDevices.add(devDesc);
-            if (index >= 0) {
-                mAvailableInputDevices[index]->attach(module);
-            } else {
+            if (mAvailableInputDevices.add(device) < 0) {
                 return NO_MEMORY;
             }
 
             // Propagate device availability to Engine
-            mEngine->setDeviceConnectionState(devDesc, state);
+            mEngine->setDeviceConnectionState(device, state);
         } break;
 
         // handle input device disconnection
         case AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE: {
             if (index < 0) {
-                ALOGW("setDeviceConnectionState() device not connected: %d", device);
+                ALOGW("%s() device not connected: %s", __func__, device->toString().c_str());
                 return INVALID_OPERATION;
             }
 
-            ALOGV("setDeviceConnectionState() disconnecting input device %x", device);
+            ALOGV("%s() disconnecting input device %s", __func__, device->toString().c_str());
 
             // Set Disconnect to HALs
-            broadcastDeviceConnectionState(device, state, devDesc->address());
+            broadcastDeviceConnectionState(device, state);
 
-            checkInputsForDevice(devDesc, state, inputs, devDesc->address());
-            mAvailableInputDevices.remove(devDesc);
+            checkInputsForDevice(device, state, inputs);
+            mAvailableInputDevices.remove(device);
 
             // Propagate device availability to Engine
-            mEngine->setDeviceConnectionState(devDesc, state);
+            mEngine->setDeviceConnectionState(device, state);
         } break;
 
         default:
-            ALOGE("setDeviceConnectionState() invalid state: %x", state);
+            ALOGE("%s() invalid state: %x", __func__, state);
             return BAD_VALUE;
         }
 
@@ -314,19 +323,19 @@
         updateDevicesAndOutputs();
 
         if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
-            audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
-            updateCallRouting(newDevice);
+            DeviceVector newDevices = getNewOutputDevices(mPrimaryOutput, false /*fromCache*/);
+            updateCallRouting(newDevices);
         }
 
         if (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) {
-            cleanUpForDevice(devDesc);
+            cleanUpForDevice(device);
         }
 
         mpClientInterface->onAudioPortListUpdate();
         return NO_ERROR;
     } // end if is input device
 
-    ALOGW("setDeviceConnectionState() invalid device: %x", device);
+    ALOGW("%s() invalid device: %s", __func__, device->toString().c_str());
     return BAD_VALUE;
 }
 
@@ -334,7 +343,8 @@
                                                                       const char *device_address)
 {
     sp<DeviceDescriptor> devDesc =
-            mHwModules.getDeviceDescriptor(device, device_address, "",
+            mHwModules.getDeviceDescriptor(device, device_address, "", AUDIO_FORMAT_DEFAULT,
+                                           false /* allowToCreate */,
                                            (strlen(device_address) != 0)/*matchAddress*/);
 
     if (devDesc == 0) {
@@ -350,55 +360,65 @@
     } else if (audio_is_input_device(device)) {
         deviceVector = &mAvailableInputDevices;
     } else {
-        ALOGW("getDeviceConnectionState() invalid device type %08x", device);
+        ALOGW("%s() invalid device type %08x", __func__, device);
         return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
     }
 
-    return (deviceVector->getDevice(device, String8(device_address)) != 0) ?
+    return (deviceVector->getDevice(
+                device, String8(device_address), AUDIO_FORMAT_DEFAULT) != 0) ?
             AUDIO_POLICY_DEVICE_STATE_AVAILABLE : AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
 }
 
 status_t AudioPolicyManager::handleDeviceConfigChange(audio_devices_t device,
                                                       const char *device_address,
-                                                      const char *device_name)
+                                                      const char *device_name,
+                                                      audio_format_t encodedFormat)
 {
     status_t status;
     String8 reply;
     AudioParameter param;
     int isReconfigA2dpSupported = 0;
 
-    ALOGV("handleDeviceConfigChange(() device: 0x%X, address %s name %s",
-          device, device_address, device_name);
+    ALOGV("handleDeviceConfigChange(() device: 0x%X, address %s name %s encodedFormat: 0x%X",
+          device, device_address, device_name, encodedFormat);
 
     // connect/disconnect only 1 device at a time
     if (!audio_is_output_device(device) && !audio_is_input_device(device)) return BAD_VALUE;
 
     // Check if the device is currently connected
-    sp<DeviceDescriptor> devDesc =
-            mHwModules.getDeviceDescriptor(device, device_address, device_name);
-    ssize_t index = mAvailableOutputDevices.indexOf(devDesc);
-    if (index < 0) {
+    DeviceVector availableDevices = getAvailableOutputDevices();
+    DeviceVector deviceList = availableDevices.getDevicesFromTypeMask(device);
+    if (deviceList.empty()) {
         // Nothing to do: device is not connected
         return NO_ERROR;
     }
+    sp<DeviceDescriptor> devDesc = deviceList.itemAt(0);
 
     // For offloaded A2DP, Hw modules may have the capability to
-    // configure codecs. Check if any of the loaded hw modules
-    // supports this.
-    // If supported, send a set parameter to configure A2DP codecs
-    // and return. No need to toggle device state.
+    // configure codecs.
+    // Handle two specific cases by sending a set parameter to
+    // configure A2DP codecs. No need to toggle device state.
+    // Case 1: A2DP active device switches from primary to primary
+    // module
+    // Case 2: A2DP device config changes on primary module.
     if (device & AUDIO_DEVICE_OUT_ALL_A2DP) {
-        reply = mpClientInterface->getParameters(
-                    AUDIO_IO_HANDLE_NONE,
-                    String8(AudioParameter::keyReconfigA2dpSupported));
-        AudioParameter repliedParameters(reply);
-        repliedParameters.getInt(
-                String8(AudioParameter::keyReconfigA2dpSupported), isReconfigA2dpSupported);
-        if (isReconfigA2dpSupported) {
-            const String8 key(AudioParameter::keyReconfigA2dp);
-            param.add(key, String8("true"));
-            mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
-            return NO_ERROR;
+        sp<HwModule> module = mHwModules.getModuleForDeviceTypes(device, encodedFormat);
+        audio_module_handle_t primaryHandle = mPrimaryOutput->getModuleHandle();
+        if (availablePrimaryOutputDevices().contains(devDesc) &&
+           (module != 0 && module->getHandle() == primaryHandle)) {
+            reply = mpClientInterface->getParameters(
+                        AUDIO_IO_HANDLE_NONE,
+                        String8(AudioParameter::keyReconfigA2dpSupported));
+            AudioParameter repliedParameters(reply);
+            repliedParameters.getInt(
+                    String8(AudioParameter::keyReconfigA2dpSupported), isReconfigA2dpSupported);
+            if (isReconfigA2dpSupported) {
+                const String8 key(AudioParameter::keyReconfigA2dp);
+                param.add(key, String8("true"));
+                mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
+                devDesc->setEncodedFormat(encodedFormat);
+                return NO_ERROR;
+            }
         }
     }
 
@@ -406,7 +426,8 @@
     // This will force reading again the device configuration
     status = setDeviceConnectionState(device,
                                       AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
-                                      device_address, device_name);
+                                      device_address, device_name,
+                                      devDesc->getEncodedFormat());
     if (status != NO_ERROR) {
         ALOGW("handleDeviceConfigChange() error disabling connection state: %d",
               status);
@@ -415,7 +436,7 @@
 
     status = setDeviceConnectionState(device,
                                       AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
-                                      device_address, device_name);
+                                      device_address, device_name, encodedFormat);
     if (status != NO_ERROR) {
         ALOGW("handleDeviceConfigChange() error enabling connection state: %d",
               status);
@@ -425,16 +446,59 @@
     return NO_ERROR;
 }
 
-uint32_t AudioPolicyManager::updateCallRouting(audio_devices_t rxDevice, uint32_t delayMs)
+status_t AudioPolicyManager::getHwOffloadEncodingFormatsSupportedForA2DP(
+                                    std::vector<audio_format_t> *formats)
+{
+    ALOGV("getHwOffloadEncodingFormatsSupportedForA2DP()");
+    char *tok = NULL, *saveptr;
+    status_t status = NO_ERROR;
+    char encoding_formats_list[PROPERTY_VALUE_MAX];
+    audio_format_t format = AUDIO_FORMAT_DEFAULT;
+    // FIXME This list should not come from a property but from the supported encoded
+    // formats of the A2DP devices declared in the primary module
+    property_get("persist.bluetooth.a2dp_offload.cap", encoding_formats_list, "");
+    tok = strtok_r(encoding_formats_list, "-", &saveptr);
+    for (;tok != NULL; tok = strtok_r(NULL, "-", &saveptr)) {
+        if (strcmp(tok, "sbc") == 0) {
+            ALOGV("%s: SBC offload supported\n",__func__);
+            format = AUDIO_FORMAT_SBC;
+        } else if (strcmp(tok, "aptx") == 0) {
+            ALOGV("%s: APTX offload supported\n",__func__);
+            format = AUDIO_FORMAT_APTX;
+        } else if (strcmp(tok, "aptxhd") == 0) {
+            ALOGV("%s: APTX HD offload supported\n",__func__);
+            format = AUDIO_FORMAT_APTX_HD;
+        } else if (strcmp(tok, "ldac") == 0) {
+            ALOGV("%s: LDAC offload supported\n",__func__);
+            format = AUDIO_FORMAT_LDAC;
+        } else if (strcmp(tok, "aac") == 0) {
+            ALOGV("%s: AAC offload supported\n",__func__);
+            format = AUDIO_FORMAT_AAC;
+        } else {
+            ALOGE("%s: undefined token - %s\n",__func__, tok);
+            continue;
+        }
+        formats->push_back(format);
+    }
+    return status;
+}
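The helper above tokenizes a '-'-separated property value such as "sbc-aptx-ldac". A self-contained sketch of the same strtok_r pattern (standard C++/POSIX only; the sample value here is illustrative):

#include <cstdio>
#include <cstring>
#include <string>
#include <vector>

// Splits a capability list like "sbc-aptx-ldac" into tokens, mirroring the loop above.
std::vector<std::string> splitOffloadCaps(const char *capList) {
    char buf[256];
    std::snprintf(buf, sizeof(buf), "%s", capList);   // strtok_r modifies its input
    std::vector<std::string> tokens;
    char *saveptr = nullptr;
    for (char *tok = strtok_r(buf, "-", &saveptr); tok != nullptr;
         tok = strtok_r(nullptr, "-", &saveptr)) {
        tokens.emplace_back(tok);                      // e.g. "sbc", "aptx", "ldac"
    }
    return tokens;
}

int main() {
    for (const auto &t : splitOffloadCaps("sbc-aptx-ldac")) {
        std::printf("%s\n", t.c_str());
    }
    return 0;
}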
+
+uint32_t AudioPolicyManager::updateCallRouting(const DeviceVector &rxDevices, uint32_t delayMs)
 {
     bool createTxPatch = false;
+    bool createRxPatch = false;
     uint32_t muteWaitMs = 0;
 
-    if(!hasPrimaryOutput() || mPrimaryOutput->device() == AUDIO_DEVICE_OUT_STUB) {
+    if (!hasPrimaryOutput() || mPrimaryOutput->devices().types() == AUDIO_DEVICE_OUT_STUB) {
         return muteWaitMs;
     }
-    audio_devices_t txDevice = getDeviceAndMixForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
-    ALOGV("updateCallRouting device rxDevice %08x txDevice %08x", rxDevice, txDevice);
+    ALOG_ASSERT(!rxDevices.isEmpty(), "updateCallRouting() no selected output device");
+
+    audio_attributes_t attr = { .source = AUDIO_SOURCE_VOICE_COMMUNICATION };
+    auto txSourceDevice = getDeviceAndMixForAttributes(attr);
+    ALOG_ASSERT(txSourceDevice != 0, "updateCallRouting() input selected device not available");
+    ALOGV("updateCallRouting device rxDevice %s txDevice %s", 
+          rxDevices.itemAt(0)->toString().c_str(), txSourceDevice->toString().c_str());
 
     // release existing RX patch if any
     if (mCallRxPatch != 0) {
@@ -447,49 +511,88 @@
         mCallTxPatch.clear();
     }
 
-    // If the RX device is on the primary HW module, then use legacy routing method for voice calls
-    // via setOutputDevice() on primary output.
-    // Otherwise, create two audio patches for TX and RX path.
-    if (availablePrimaryOutputDevices() & rxDevice) {
-        muteWaitMs = setOutputDevice(mPrimaryOutput, rxDevice, true, delayMs);
+    auto telephonyRxModule =
+        mHwModules.getModuleForDeviceTypes(AUDIO_DEVICE_IN_TELEPHONY_RX, AUDIO_FORMAT_DEFAULT);
+    auto telephonyTxModule =
+        mHwModules.getModuleForDeviceTypes(AUDIO_DEVICE_OUT_TELEPHONY_TX, AUDIO_FORMAT_DEFAULT);
+    // retrieve Rx Source and Tx Sink device descriptors
+    sp<DeviceDescriptor> rxSourceDevice =
+        mAvailableInputDevices.getDevice(AUDIO_DEVICE_IN_TELEPHONY_RX,
+                                         String8(),
+                                         AUDIO_FORMAT_DEFAULT);
+    sp<DeviceDescriptor> txSinkDevice =
+        mAvailableOutputDevices.getDevice(AUDIO_DEVICE_OUT_TELEPHONY_TX,
+                                          String8(),
+                                          AUDIO_FORMAT_DEFAULT);
+
+    // RX and TX Telephony device are declared by Primary Audio HAL
+    if (isPrimaryModule(telephonyRxModule) && isPrimaryModule(telephonyTxModule) &&
+            (telephonyRxModule->getHalVersionMajor() >= 3)) {
+        if (rxSourceDevice == 0 || txSinkDevice == 0) {
+            // RX / TX Telephony device(s) is(are) not currently available
+            ALOGE("updateCallRouting() no telephony Tx and/or RX device");
+            return muteWaitMs;
+        }
+        // do not create a patch (aka SW bridging) if the primary HW module has declared support
+        // for a route from telephony RX to the sink device and from the source device to
+        // telephony TX
+        const auto &primaryModule = telephonyRxModule;
+        createRxPatch = !primaryModule->supportsPatch(rxSourceDevice, rxDevices.itemAt(0));
+        createTxPatch = !primaryModule->supportsPatch(txSourceDevice, txSinkDevice);
+    } else {
+        // If the RX device is on the primary HW module, then use legacy routing method for
+        // voice calls via setOutputDevice() on primary output.
+        // Otherwise, create two audio patches for TX and RX path.
+        createRxPatch = !(availablePrimaryOutputDevices().contains(rxDevices.itemAt(0))) &&
+                (rxSourceDevice != 0);
         // If the TX device is also on the primary HW module, setOutputDevice() will take care
         // of it due to legacy implementation. If not, create a patch.
-        if ((availablePrimaryInputDevices() & txDevice & ~AUDIO_DEVICE_BIT_IN)
-                == AUDIO_DEVICE_NONE) {
-            createTxPatch = true;
-        }
+        createTxPatch = !(availablePrimaryModuleInputDevices().contains(txSourceDevice)) &&
+                (txSinkDevice != 0);
+    }
+    // Use legacy routing method for voice calls via setOutputDevice() on primary output.
+    // Otherwise, create two audio patches for TX and RX path.
+    if (!createRxPatch) {
+        muteWaitMs = setOutputDevices(mPrimaryOutput, rxDevices, true, delayMs);
     } else { // create RX path audio patch
-        mCallRxPatch = createTelephonyPatch(true /*isRx*/, rxDevice, delayMs);
-        createTxPatch = true;
+        mCallRxPatch = createTelephonyPatch(true /*isRx*/, rxDevices.itemAt(0), delayMs);
+        ALOG_ASSERT(createTxPatch, "No Tx Patch will be created, nor legacy routing done");
     }
     if (createTxPatch) { // create TX path audio patch
-        mCallTxPatch = createTelephonyPatch(false /*isRx*/, txDevice, delayMs);
+        mCallTxPatch = createTelephonyPatch(false /*isRx*/, txSourceDevice, delayMs);
     }
 
     return muteWaitMs;
 }
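To summarize the branching above: when the primary HAL (major version >= 3) declares both telephony endpoints, a software bridge is only created for the paths the HAL cannot route itself; otherwise the legacy rule applies (route on the primary output when the RX device is on the primary module, and patch whatever is not covered). A reduced sketch of that decision with plain booleans, ignoring the additional null checks on the telephony devices (not AOSP code):

#include <cassert>

struct CallPatchPlan {
    bool createRxPatch;  // software bridge for the downlink (RX) path
    bool createTxPatch;  // software bridge for the uplink (TX) path
};

// telephonyOnPrimaryHal3: both TELEPHONY_RX/TX are declared by the primary module, HAL >= 3.0
// halRoutesRx/halRoutesTx: stand for primaryModule->supportsPatch(...) on the RX and TX routes
// rxOnPrimary/txOnPrimary: stand for the availablePrimary*Devices().contains(...) legacy checks
CallPatchPlan planCallRouting(bool telephonyOnPrimaryHal3,
                              bool halRoutesRx, bool halRoutesTx,
                              bool rxOnPrimary, bool txOnPrimary) {
    if (telephonyOnPrimaryHal3) {
        return { !halRoutesRx, !halRoutesTx };
    }
    return { !rxOnPrimary, !txOnPrimary };
}

int main() {
    // HAL routes both directions itself: no software bridge, legacy routing on primary output.
    CallPatchPlan plan = planCallRouting(true, true, true, false, false);
    assert(!plan.createRxPatch && !plan.createTxPatch);
    return 0;
}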
 
 sp<AudioPatch> AudioPolicyManager::createTelephonyPatch(
-        bool isRx, audio_devices_t device, uint32_t delayMs) {
+        bool isRx, const sp<DeviceDescriptor> &device, uint32_t delayMs) {
     PatchBuilder patchBuilder;
 
-    sp<DeviceDescriptor> txSourceDeviceDesc;
+    if (device == nullptr) {
+        return nullptr;
+    }
     if (isRx) {
-        patchBuilder.addSink(findDevice(mAvailableOutputDevices, device)).
-                addSource(findDevice(mAvailableInputDevices, AUDIO_DEVICE_IN_TELEPHONY_RX));
+        patchBuilder.addSink(device).
+                addSource(mAvailableInputDevices.getDevice(
+                    AUDIO_DEVICE_IN_TELEPHONY_RX, String8(), AUDIO_FORMAT_DEFAULT));
     } else {
-        patchBuilder.addSource(txSourceDeviceDesc = findDevice(mAvailableInputDevices, device)).
-                addSink(findDevice(mAvailableOutputDevices, AUDIO_DEVICE_OUT_TELEPHONY_TX));
+        patchBuilder.addSource(device).
+                addSink(mAvailableOutputDevices.getDevice(
+                    AUDIO_DEVICE_OUT_TELEPHONY_TX, String8(), AUDIO_FORMAT_DEFAULT));
     }
 
-    audio_devices_t outputDevice = isRx ? device : AUDIO_DEVICE_OUT_TELEPHONY_TX;
-    SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(outputDevice, mOutputs);
-    audio_io_handle_t output = selectOutput(outputs);
+    // TODO: still ignoring the address; platforms with multiple telephony devices not handled
+    const sp<DeviceDescriptor> outputDevice = isRx ?
+                device : mAvailableOutputDevices.getDevice(
+                    AUDIO_DEVICE_OUT_TELEPHONY_TX, String8(), AUDIO_FORMAT_DEFAULT);
+    SortedVector<audio_io_handle_t> outputs =
+            getOutputsForDevices(DeviceVector(outputDevice), mOutputs);
+    audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE, AUDIO_FORMAT_INVALID);
     // request to reuse existing output stream if one is already opened to reach the target device
     if (output != AUDIO_IO_HANDLE_NONE) {
         sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
-        ALOG_ASSERT(!outputDesc->isDuplicated(),
-                "%s() %#x device output %d is duplicated", __func__, outputDevice, output);
+        ALOG_ASSERT(!outputDesc->isDuplicated(), "%s() %s device output %d is duplicated", __func__,
+                    outputDevice->toString().c_str(), output);
         patchBuilder.addSource(outputDesc, { .stream = AUDIO_STREAM_PATCH });
     }
 
@@ -499,7 +602,7 @@
         // call TX device but this information is not in the audio patch and logic here must be
         // symmetric to the one in startInput()
         for (const auto& activeDesc : mInputs.getActiveInputs()) {
-            if (activeDesc->hasSameHwModuleAs(txSourceDeviceDesc)) {
+            if (activeDesc->hasSameHwModuleAs(device)) {
                 closeActiveClients(activeDesc);
             }
         }
@@ -599,17 +702,17 @@
     }
 
     if (hasPrimaryOutput()) {
-        // Note that despite the fact that getNewOutputDevice() is called on the primary output,
+        // Note that despite the fact that getNewOutputDevices() is called on the primary output,
         // the device returned is not necessarily reachable via this output
-        audio_devices_t rxDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
+        DeviceVector rxDevices = getNewOutputDevices(mPrimaryOutput, false /*fromCache*/);
         // force routing command to audio hardware when ending call
         // even if no device change is needed
-        if (isStateInCall(oldState) && rxDevice == AUDIO_DEVICE_NONE) {
-            rxDevice = mPrimaryOutput->device();
+        if (isStateInCall(oldState) && rxDevices.isEmpty()) {
+            rxDevices = mPrimaryOutput->devices();
         }
 
         if (state == AUDIO_MODE_IN_CALL) {
-            updateCallRouting(rxDevice, delayMs);
+            updateCallRouting(rxDevices, delayMs);
         } else if (oldState == AUDIO_MODE_IN_CALL) {
             if (mCallRxPatch != 0) {
                 mpClientInterface->releaseAudioPatch(mCallRxPatch->mAfPatchHandle, 0);
@@ -619,18 +722,18 @@
                 mpClientInterface->releaseAudioPatch(mCallTxPatch->mAfPatchHandle, 0);
                 mCallTxPatch.clear();
             }
-            setOutputDevice(mPrimaryOutput, rxDevice, force, 0);
+            setOutputDevices(mPrimaryOutput, rxDevices, force, 0);
         } else {
-            setOutputDevice(mPrimaryOutput, rxDevice, force, 0);
+            setOutputDevices(mPrimaryOutput, rxDevices, force, 0);
         }
     }
 
     // reevaluate routing on all outputs in case tracks have been started during the call
     for (size_t i = 0; i < mOutputs.size(); i++) {
         sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
-        audio_devices_t newDevice = getNewOutputDevice(desc, true /*fromCache*/);
+        DeviceVector newDevices = getNewOutputDevices(desc, true /*fromCache*/);
         if (state != AUDIO_MODE_IN_CALL || desc != mPrimaryOutput) {
-            setOutputDevice(desc, newDevice, (newDevice != AUDIO_DEVICE_NONE), 0 /*delayMs*/);
+            setOutputDevices(desc, newDevices, !newDevices.isEmpty(), 0 /*delayMs*/);
         }
     }
 
@@ -654,7 +757,7 @@
 }
 
 void AudioPolicyManager::setForceUse(audio_policy_force_use_t usage,
-                                         audio_policy_forced_cfg_t config)
+                                     audio_policy_forced_cfg_t config)
 {
     ALOGV("setForceUse() usage %d, config %d, mPhoneState %d", usage, config, mEngine->getPhoneState());
     if (config == mEngine->getForceUse(usage)) {
@@ -680,26 +783,27 @@
         delayMs = TOUCH_SOUND_FIXED_DELAY_MS;
     }
     if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
-        audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, true /*fromCache*/);
-        waitMs = updateCallRouting(newDevice, delayMs);
+        DeviceVector newDevices = getNewOutputDevices(mPrimaryOutput, true /*fromCache*/);
+        waitMs = updateCallRouting(newDevices, delayMs);
     }
     for (size_t i = 0; i < mOutputs.size(); i++) {
         sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
-        audio_devices_t newDevice = getNewOutputDevice(outputDesc, true /*fromCache*/);
+        DeviceVector newDevices = getNewOutputDevices(outputDesc, true /*fromCache*/);
         if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (outputDesc != mPrimaryOutput)) {
-            waitMs = setOutputDevice(outputDesc, newDevice, (newDevice != AUDIO_DEVICE_NONE),
-                                     delayMs);
+            // As done in setDeviceConnectionState, we could also fix the default device issue by
+            // preventing the forced re-routing when a default device distinguishes on address.
+            // However, let's leave the full device choice decision to the engine.
+            waitMs = setOutputDevices(outputDesc, newDevices, !newDevices.isEmpty(), delayMs);
         }
-        if (forceVolumeReeval && (newDevice != AUDIO_DEVICE_NONE)) {
-            applyStreamVolumes(outputDesc, newDevice, waitMs, true);
+        if (forceVolumeReeval && !newDevices.isEmpty()) {
+            applyStreamVolumes(outputDesc, newDevices.types(), waitMs, true);
         }
     }
 
     for (const auto& activeDesc : mInputs.getActiveInputs()) {
-        audio_devices_t newDevice = getNewInputDevice(activeDesc);
+        auto newDevice = getNewInputDevice(activeDesc);
         // Force new input selection if the new device can not be reached via current input
-        if (activeDesc->mProfile->getSupportedDevices().types() &
-                (newDevice & ~AUDIO_DEVICE_BIT_IN)) {
+        if (activeDesc->mProfile->getSupportedDevices().contains(newDevice)) {
             setInputDevice(activeDesc->mIoHandle, newDevice);
         } else {
             closeInput(activeDesc->mIoHandle);
@@ -715,7 +819,7 @@
 // Find an output profile compatible with the parameters passed. When "directOnly" is set, restrict
 // search to profiles for direct outputs.
 sp<IOProfile> AudioPolicyManager::getProfileForOutput(
-                                                   audio_devices_t device,
+                                                   const DeviceVector& devices,
                                                    uint32_t samplingRate,
                                                    audio_format_t format,
                                                    audio_channel_mask_t channelMask,
@@ -736,7 +840,7 @@
 
     for (const auto& hwModule : mHwModules) {
         for (const auto& curProfile : hwModule->getOutputProfiles()) {
-            if (!curProfile->isCompatibleProfile(device, String8(""),
+            if (!curProfile->isCompatibleProfile(devices,
                     samplingRate, NULL /*updatedSamplingRate*/,
                     format, NULL /*updatedFormat*/,
                     channelMask, NULL /*updatedChannelMask*/,
@@ -744,7 +848,11 @@
                 continue;
             }
             // reject profiles not corresponding to a device currently available
-            if ((mAvailableOutputDevices.types() & curProfile->getSupportedDevicesType()) == 0) {
+            if (!mAvailableOutputDevices.containsAtLeastOne(curProfile->getSupportedDevices())) {
+                continue;
+            }
+            // reject profiles if connected device does not support codec
+            if (!curProfile->deviceSupportsEncodedFormats(devices.types())) {
                 continue;
             }
             if (!directOnly) return curProfile;
@@ -765,7 +873,7 @@
 audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream)
 {
     routing_strategy strategy = getStrategy(stream);
-    audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
+    DeviceVector devices = getDevicesForStrategy(strategy, false /*fromCache*/);
 
     // Note that related method getOutputForAttr() uses getOutputForDevice() not selectOutput().
     // We use selectOutput() here since we don't have the desired AudioTrack sample rate,
@@ -773,10 +881,11 @@
     // getOutput() solely on audio_stream_type such as AudioSystem::getOutputFrameCount()
     // and AudioSystem::getOutputSamplingRate().
 
-    SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
-    audio_io_handle_t output = selectOutput(outputs);
+    SortedVector<audio_io_handle_t> outputs = getOutputsForDevices(devices, mOutputs);
+    audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE, AUDIO_FORMAT_INVALID);
 
-    ALOGV("getOutput() stream %d selected device %08x, output %d", stream, device, output);
+    ALOGV("getOutput() stream %d selected devices %s, output %d", stream,
+          devices.toString().c_str(), output);
     return output;
 }
 
@@ -813,12 +922,11 @@
                                                  audio_output_flags_t *flags,
                                                  audio_port_handle_t *selectedDeviceId)
 {
-    DeviceVector outputDevices;
+    DeviceVector devices;
     routing_strategy strategy;
-    audio_devices_t device;
-    const audio_port_handle_t requestedDeviceId = *selectedDeviceId;
-    audio_devices_t msdDevice =
-            getModuleDeviceTypes(mAvailableOutputDevices, AUDIO_HARDWARE_MODULE_ID_MSD);
+    audio_devices_t deviceType = AUDIO_DEVICE_NONE;
+    const audio_port_handle_t requestedPortId = *selectedDeviceId;
+    DeviceVector msdDevices = getMsdAudioOutDevices();
 
     status_t status = getAudioAttributes(resultAttr, attr, *stream);
     if (status != NO_ERROR) {
@@ -829,17 +937,16 @@
           " session %d selectedDeviceId %d",
           __func__,
           resultAttr->usage, resultAttr->content_type, resultAttr->tags, resultAttr->flags,
-          session, requestedDeviceId);
+          session, requestedPortId);
 
     *stream = streamTypefromAttributesInt(resultAttr);
 
     strategy = getStrategyForAttr(resultAttr);
 
     // First check for explicit routing (eg. setPreferredDevice)
-    if (requestedDeviceId != AUDIO_PORT_HANDLE_NONE) {
-        sp<DeviceDescriptor> deviceDesc =
-            mAvailableOutputDevices.getDeviceFromId(requestedDeviceId);
-        device = deviceDesc->type();
+    sp<DeviceDescriptor> requestedDevice = mAvailableOutputDevices.getDeviceFromId(requestedPortId);
+    if (requestedDevice != nullptr) {
+        deviceType = requestedDevice->type();
     } else {
         // If no explicit route, is there a matching dynamic policy that applies?
         sp<SwAudioOutputDescriptor> desc;
@@ -852,7 +959,8 @@
             *output = desc->mIoHandle;
             AudioMix *mix = desc->mPolicyMix;
             sp<DeviceDescriptor> deviceDesc =
-                mAvailableOutputDevices.getDevice(mix->mDeviceType, mix->mDeviceAddress);
+                mAvailableOutputDevices.getDevice(
+                        mix->mDeviceType, mix->mDeviceAddress, AUDIO_FORMAT_DEFAULT);
             *selectedDeviceId = deviceDesc != 0 ? deviceDesc->getId() : AUDIO_PORT_HANDLE_NONE;
             ALOGV("%s returns output %d", __func__, *output);
             return NO_ERROR;
@@ -863,7 +971,7 @@
             ALOGW("%s no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE", __func__);
             return BAD_VALUE;
         }
-        device = getDeviceForStrategy(strategy, false /*fromCache*/);
+        deviceType = getDeviceForStrategy(strategy, false /*fromCache*/);
     }
 
     if ((resultAttr->flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
@@ -875,42 +983,45 @@
     // FIXME: provide a more generic approach which is not device specific and move this back
     // to getOutputForDevice.
     // TODO: Remove check of AUDIO_STREAM_MUSIC once migration is completed on the app side.
-    if (device == AUDIO_DEVICE_OUT_TELEPHONY_TX &&
+    if (deviceType == AUDIO_DEVICE_OUT_TELEPHONY_TX &&
         (*stream == AUDIO_STREAM_MUSIC || resultAttr->usage == AUDIO_USAGE_VOICE_COMMUNICATION) &&
         audio_is_linear_pcm(config->format) &&
         isInCall()) {
-        if (requestedDeviceId != AUDIO_PORT_HANDLE_NONE) {
+        if (requestedPortId != AUDIO_PORT_HANDLE_NONE) {
             *flags = (audio_output_flags_t)AUDIO_OUTPUT_FLAG_INCALL_MUSIC;
         } else {
             // Get the device type directly from the engine to bypass preferred route logic
-            device = mEngine->getDeviceForStrategy(strategy);
+            deviceType = mEngine->getDeviceForStrategy(strategy);
         }
     }
 
     ALOGV("%s device 0x%x, sampling rate %d, format %#x, channel mask %#x, "
           "flags %#x",
-          __func__, device, config->sample_rate, config->format, config->channel_mask, *flags);
+          __func__,
+          deviceType, config->sample_rate, config->format, config->channel_mask, *flags);
 
     *output = AUDIO_IO_HANDLE_NONE;
-    if (msdDevice != AUDIO_DEVICE_NONE) {
-        *output = getOutputForDevice(msdDevice, session, *stream, config, flags);
-        if (*output != AUDIO_IO_HANDLE_NONE && setMsdPatch(device) == NO_ERROR) {
-            ALOGV("%s() Using MSD device 0x%x instead of device 0x%x",
-                    __func__, msdDevice, device);
-            device = msdDevice;
+    if (!msdDevices.isEmpty()) {
+        *output = getOutputForDevices(msdDevices, session, *stream, config, flags);
+        sp<DeviceDescriptor> deviceDesc =
+                mAvailableOutputDevices.getDevice(deviceType, String8(), AUDIO_FORMAT_DEFAULT);
+        if (*output != AUDIO_IO_HANDLE_NONE && setMsdPatch(deviceDesc) == NO_ERROR) {
+            ALOGV("%s() Using MSD devices %s instead of device %s",
+                    __func__, msdDevices.toString().c_str(), deviceDesc->toString().c_str());
+            deviceType = msdDevices.types();
         } else {
             *output = AUDIO_IO_HANDLE_NONE;
         }
     }
+    devices = mAvailableOutputDevices.getDevicesFromTypeMask(deviceType);
     if (*output == AUDIO_IO_HANDLE_NONE) {
-        *output = getOutputForDevice(device, session, *stream, config, flags);
+        *output = getOutputForDevices(devices, session, *stream, config, flags);
     }
     if (*output == AUDIO_IO_HANDLE_NONE) {
         return INVALID_OPERATION;
     }
 
-    outputDevices = mAvailableOutputDevices.getDevicesFromTypeMask(device);
-    *selectedDeviceId = getFirstDeviceId(outputDevices);
+    *selectedDeviceId = getFirstDeviceId(devices);
 
     ALOGV("%s returns output %d selectedDeviceId %d", __func__, *output, *selectedDeviceId);
 
@@ -931,7 +1042,7 @@
     if (*portId != AUDIO_PORT_HANDLE_NONE) {
         return INVALID_OPERATION;
     }
-    const audio_port_handle_t requestedDeviceId = *selectedDeviceId;
+    const audio_port_handle_t requestedPortId = *selectedDeviceId;
     audio_attributes_t resultAttr;
     status_t status = getOutputForAttrInt(&resultAttr, output, session, attr, stream, uid,
             config, flags, selectedDeviceId);
@@ -946,20 +1057,20 @@
 
     sp<TrackClientDescriptor> clientDesc =
         new TrackClientDescriptor(*portId, uid, session, resultAttr, clientConfig,
-                                  requestedDeviceId, *stream,
+                                  requestedPortId, *stream,
                                   getStrategyForAttr(&resultAttr),
                                   *flags);
     sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(*output);
     outputDesc->addClient(clientDesc);
 
     ALOGV("%s returns output %d selectedDeviceId %d for port ID %d",
-          __func__, *output, requestedDeviceId, *portId);
+          __func__, *output, requestedPortId, *portId);
 
     return NO_ERROR;
 }
 
-audio_io_handle_t AudioPolicyManager::getOutputForDevice(
-        audio_devices_t device,
+audio_io_handle_t AudioPolicyManager::getOutputForDevices(
+        const DeviceVector &devices,
         audio_session_t session,
         audio_stream_type_t stream,
         const audio_config_t *config,
@@ -1017,7 +1128,7 @@
 
     if (((*flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) ||
             !(mEffects.isNonOffloadableEffectEnabled() || mMasterMono)) {
-        profile = getProfileForOutput(device,
+        profile = getProfileForOutput(devices,
                                    config->sample_rate,
                                    config->format,
                                    config->channel_mask,
@@ -1037,7 +1148,7 @@
                     (config->channel_mask == desc->mChannelMask) &&
                     (session == desc->mDirectClientSession)) {
                     desc->mDirectOpenCount++;
-                    ALOGI("getOutputForDevice() reusing direct output %d for session %d",
+                    ALOGI("%s reusing direct output %d for session %d", __func__, 
                         mOutputs.keyAt(i), session);
                     return mOutputs.keyAt(i);
                 }
@@ -1051,8 +1162,7 @@
         sp<SwAudioOutputDescriptor> outputDesc =
                 new SwAudioOutputDescriptor(profile, mpClientInterface);
 
-        DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromTypeMask(device);
-        String8 address = getFirstDeviceAddress(outputDevices);
+        String8 address = getFirstDeviceAddress(devices);
 
         // MSD patch may be using the only output stream that can service this request. Release
         // MSD patch to prioritize this request over any active output on MSD.
@@ -1062,7 +1172,7 @@
             for (size_t j = 0; j < patch->mPatch.num_sinks; ++j) {
                 const struct audio_port_config *sink = &patch->mPatch.sinks[j];
                 if (sink->type == AUDIO_PORT_TYPE_DEVICE &&
-                        (sink->ext.device.type & device) != AUDIO_DEVICE_NONE &&
+                        (sink->ext.device.type & devices.types()) != AUDIO_DEVICE_NONE &&
                         (address.isEmpty() || strncmp(sink->ext.device.address, address.string(),
                                 AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0)) {
                     releaseAudioPatch(patch->mHandle, mUidCached);
@@ -1071,15 +1181,15 @@
             }
         }
 
-        status = outputDesc->open(config, device, address, stream, *flags, &output);
+        status = outputDesc->open(config, devices, stream, *flags, &output);
 
         // only accept an output with the requested parameters
         if (status != NO_ERROR ||
             (config->sample_rate != 0 && config->sample_rate != outputDesc->mSamplingRate) ||
             (config->format != AUDIO_FORMAT_DEFAULT && config->format != outputDesc->mFormat) ||
             (config->channel_mask != 0 && config->channel_mask != outputDesc->mChannelMask)) {
-            ALOGV("getOutputForDevice() failed opening direct output: output %d sample rate %d %d,"
-                    "format %d %d, channel mask %04x %04x", output, config->sample_rate,
+            ALOGV("%s failed opening direct output: output %d sample rate %d %d," 
+                    "format %d %d, channel mask %04x %04x", __func__, output, config->sample_rate,
                     outputDesc->mSamplingRate, config->format, outputDesc->mFormat,
                     config->channel_mask, outputDesc->mChannelMask);
             if (output != AUDIO_IO_HANDLE_NONE) {
@@ -1097,7 +1207,7 @@
 
         addOutput(output, outputDesc);
         mPreviousOutputs = mOutputs;
-        ALOGV("getOutputForDevice() returns new direct output %d", output);
+        ALOGV("%s returns new direct output %d", __func__, output);
         mpClientInterface->onAudioPortListUpdate();
         return output;
     }
@@ -1118,14 +1228,14 @@
     if (audio_is_linear_pcm(config->format)) {
         // get which output is suitable for the specified stream. The actual
         // routing change will happen when startOutput() will be called
-        SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
+        SortedVector<audio_io_handle_t> outputs = getOutputsForDevices(devices, mOutputs);
 
         // at this stage we should ignore the DIRECT flag as no direct output could be found earlier
         *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_DIRECT);
         output = selectOutput(outputs, *flags, config->format,
                 config->channel_mask, config->sample_rate);
     }
-    ALOGW_IF((output == 0), "getOutputForDevice() could not find output for stream %d, "
+    ALOGW_IF((output == 0), "getOutputForDevices() could not find output for stream %d, "
             "sampling rate %d, format %#x, channels %#x, flags %#x",
             stream, config->sample_rate, config->format, config->channel_mask, *flags);
 
@@ -1133,13 +1243,14 @@
 }
 
 sp<DeviceDescriptor> AudioPolicyManager::getMsdAudioInDevice() const {
-    sp<HwModule> msdModule = mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD);
-    if (msdModule != 0) {
-        DeviceVector msdInputDevices = mAvailableInputDevices.getDevicesFromHwModule(
-                msdModule->getHandle());
-        if (!msdInputDevices.isEmpty()) return msdInputDevices.itemAt(0);
-    }
-    return 0;
+    auto msdInDevices = mHwModules.getAvailableDevicesFromModuleName(AUDIO_HARDWARE_MODULE_ID_MSD,
+                                                                     mAvailableInputDevices);
+    return msdInDevices.isEmpty() ? nullptr : msdInDevices.itemAt(0);
+}
+
+DeviceVector AudioPolicyManager::getMsdAudioOutDevices() const {
+    return mHwModules.getAvailableDevicesFromModuleName(AUDIO_HARDWARE_MODULE_ID_MSD,
+                                                        mAvailableOutputDevices);
 }
 
 const AudioPatchCollection AudioPolicyManager::getMsdPatches() const {
@@ -1160,7 +1271,7 @@
     return msdPatches;
 }
 
-status_t AudioPolicyManager::getBestMsdAudioProfileFor(audio_devices_t outputDevice,
+status_t AudioPolicyManager::getBestMsdAudioProfileFor(const sp<DeviceDescriptor> &outputDevice,
         bool hwAvSync, audio_port_config *sourceConfig, audio_port_config *sinkConfig) const
 {
     sp<HwModule> msdModule = mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD);
@@ -1168,9 +1279,9 @@
         ALOGE("%s() unable to get MSD module", __func__);
         return NO_INIT;
     }
-    sp<HwModule> deviceModule = mHwModules.getModuleForDevice(outputDevice);
+    sp<HwModule> deviceModule = mHwModules.getModuleForDevice(outputDevice, AUDIO_FORMAT_DEFAULT);
     if (deviceModule == nullptr) {
-        ALOGE("%s() unable to get module for %#x", __func__, outputDevice);
+        ALOGE("%s() unable to get module for %s", __func__, outputDevice->toString().c_str());
         return NO_INIT;
     }
     const InputProfileCollection &inputProfiles = msdModule->getInputProfiles();
@@ -1180,7 +1291,7 @@
     }
     const OutputProfileCollection &outputProfiles = deviceModule->getOutputProfiles();
     if (outputProfiles.isEmpty()) {
-        ALOGE("%s() no output profiles for device %#x", __func__, outputDevice);
+        ALOGE("%s() no output profiles for device %s", __func__, outputDevice->toString().c_str());
         return NO_INIT;
     }
     AudioProfileVector msdProfiles;
@@ -1201,8 +1312,8 @@
             compressedFormatsOrder, surroundChannelMasksOrder, true /*preferHigherSamplingRates*/,
             &bestSinkConfig);
     if (result != NO_ERROR) {
-        ALOGD("%s() no matching profiles found for device: %#x, hwAvSync: %d",
-                __func__, outputDevice, hwAvSync);
+        ALOGD("%s() no matching profiles found for device: %s, hwAvSync: %d",
+                __func__, outputDevice->toString().c_str(), hwAvSync);
         return result;
     }
     sinkConfig->sample_rate = bestSinkConfig.sample_rate;
@@ -1231,11 +1342,10 @@
     return NO_ERROR;
 }
 
-PatchBuilder AudioPolicyManager::buildMsdPatch(audio_devices_t outputDevice) const
+PatchBuilder AudioPolicyManager::buildMsdPatch(const sp<DeviceDescriptor> &outputDevice) const
 {
     PatchBuilder patchBuilder;
-    patchBuilder.addSource(getMsdAudioInDevice()).
-            addSink(findDevice(mAvailableOutputDevices, outputDevice));
+    patchBuilder.addSource(getMsdAudioInDevice()).addSink(outputDevice);
     audio_port_config sourceConfig = patchBuilder.patch()->sources[0];
     audio_port_config sinkConfig = patchBuilder.patch()->sinks[0];
     // TODO: Figure out whether MSD module has HW_AV_SYNC flag set in the AP config file.
@@ -1253,15 +1363,18 @@
     return patchBuilder;
 }
 
-status_t AudioPolicyManager::setMsdPatch(audio_devices_t outputDevice) {
-    ALOGV("%s() for outputDevice %#x", __func__, outputDevice);
-    if (outputDevice == AUDIO_DEVICE_NONE) {
+status_t AudioPolicyManager::setMsdPatch(const sp<DeviceDescriptor> &outputDevice) {
+    sp<DeviceDescriptor> device = outputDevice;
+    if (device == nullptr) {
         // Use media strategy for unspecified output device. This should only
         // occur on checkForDeviceAndOutputChanges(). Device connection events may
         // therefore invalidate explicit routing requests.
-        outputDevice = getDeviceForStrategy(STRATEGY_MEDIA, false /*fromCache*/);
+        DeviceVector devices = getDevicesForStrategy(STRATEGY_MEDIA, false /*fromCache*/);
+        LOG_ALWAYS_FATAL_IF(devices.isEmpty(), "no output device to set MSD patch");
+        device = devices.itemAt(0);
     }
-    PatchBuilder patchBuilder = buildMsdPatch(outputDevice);
+    ALOGV("%s() for device %s", __func__, device->toString().c_str());
+    PatchBuilder patchBuilder = buildMsdPatch(device);
     const struct audio_patch* patch = patchBuilder.patch();
     const AudioPatchCollection msdPatches = getMsdPatches();
     if (!msdPatches.isEmpty()) {
@@ -1277,8 +1390,9 @@
             patch, 0 /*delayMs*/, mUidCached, nullptr /*patchDescPtr*/);
     ALOGE_IF(status != NO_ERROR, "%s() error %d creating MSD audio patch", __func__, status);
     ALOGI_IF(status == NO_ERROR, "%s() Patch created from MSD_IN to "
-           "device:%#x (format:%#x channels:%#x samplerate:%d)", __func__, outputDevice,
-           patch->sources[0].format, patch->sources[0].channel_mask, patch->sources[0].sample_rate);
+           "device:%s (format:%#x channels:%#x samplerate:%d)", __func__,
+             device->toString().c_str(), patch->sources[0].format,
+             patch->sources[0].channel_mask, patch->sources[0].sample_rate);
     return status;
 }
 
@@ -1289,7 +1403,7 @@
                                                        uint32_t samplingRate)
 {
     // select one output among several that provide a path to a particular device or set of
-    // devices (the list was previously build by getOutputsForDevice()).
+    // devices (the list was previously built by getOutputsForDevices()).
     // The priority is as follows:
     // 1: the output supporting haptic playback when requesting haptic playback
     // 2: the output with the highest number of requested policy flags
@@ -1451,17 +1565,20 @@
     bool force = !outputDesc->isActive() &&
             (outputDesc->getPatchHandle() == AUDIO_PATCH_HANDLE_NONE);
 
-    audio_devices_t device = AUDIO_DEVICE_NONE;
+    DeviceVector devices;
     AudioMix *policyMix = NULL;
     const char *address = NULL;
     if (outputDesc->mPolicyMix != NULL) {
         policyMix = outputDesc->mPolicyMix;
+        audio_devices_t newDeviceType;
         address = policyMix->mDeviceAddress.string();
         if ((policyMix->mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
-            device = policyMix->mDeviceType;
+            newDeviceType = policyMix->mDeviceType;
         } else {
-            device = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
+            newDeviceType = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
         }
+        devices.add(mAvailableOutputDevices.getDevice(newDeviceType,
+                                                      String8(address), AUDIO_FORMAT_DEFAULT));
     }
 
     // requiresMuteCheck is false when we can bypass mute strategy.
@@ -1476,8 +1593,8 @@
     outputDesc->setClientActive(client, true);
 
     if (client->hasPreferredDevice(true)) {
-        device = getNewOutputDevice(outputDesc, false /*fromCache*/);
-        if (device != outputDesc->device()) {
+        devices = getNewOutputDevices(outputDesc, false /*fromCache*/);
+        if (devices != outputDesc->devices()) {
             checkStrategyRoute(getStrategy(stream), outputDesc->mIoHandle);
         }
     }
@@ -1486,10 +1603,10 @@
         selectOutputForMusicEffects();
     }
 
-    if (outputDesc->streamActiveCount(stream) == 1 || device != AUDIO_DEVICE_NONE) {
+    if (outputDesc->streamActiveCount(stream) == 1 || !devices.isEmpty()) {
         // starting an output being rerouted?
-        if (device == AUDIO_DEVICE_NONE) {
-            device = getNewOutputDevice(outputDesc, false /*fromCache*/);
+        if (devices.isEmpty()) {
+            devices = getNewOutputDevices(outputDesc, false /*fromCache*/);
         }
 
         routing_strategy strategy = getStrategy(stream);
@@ -1498,13 +1615,13 @@
                             (beaconMuteLatency > 0);
         uint32_t waitMs = beaconMuteLatency;
         for (size_t i = 0; i < mOutputs.size(); i++) {
-            sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
+            sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
             if (desc != outputDesc) {
                 // An output has a shared device if
                 // - managed by the same hw module
                 // - supports the currently selected device
                 const bool sharedDevice = outputDesc->sharesHwModuleWith(desc)
-                        && (desc->supportedDevices() & device) != AUDIO_DEVICE_NONE;
+                        && (!desc->filterSupportedDevices(devices).isEmpty());
 
                 // force a device change if any other output is:
                 // - managed by the same hw module
@@ -1514,7 +1631,7 @@
                 // In this case, the audio HAL must receive the new device selection so that it can
                 // change the device currently selected by the other output.
                 if (sharedDevice &&
-                        desc->device() != device &&
+                        desc->devices() != devices &&
                         desc->getPatchHandle() != AUDIO_PATCH_HANDLE_NONE) {
                     force = true;
                 }
@@ -1537,13 +1654,13 @@
         }
 
         const uint32_t muteWaitMs =
-                setOutputDevice(outputDesc, device, force, 0, NULL, address, requiresMuteCheck);
+                setOutputDevices(outputDesc, devices, force, 0, NULL, requiresMuteCheck);
 
         // apply volume rules for current stream and device if necessary
         checkAndSetVolume(stream,
-                          mVolumeCurves->getVolumeIndex(stream, outputDesc->device()),
+                          mVolumeCurves->getVolumeIndex(stream, outputDesc->devices().types()),
                           outputDesc,
-                          outputDesc->device());
+                          outputDesc->devices().types());
 
         // update the outputs if starting an output with a stream that can affect notification
         // routing
@@ -1574,12 +1691,13 @@
 
     // Automatically enable the remote submix input when output is started on a re routing mix
     // of type MIX_TYPE_RECORDERS
-    if (audio_is_remote_submix_device(device) && policyMix != NULL &&
+    if (audio_is_remote_submix_device(devices.types()) && policyMix != NULL &&
         policyMix->mMixType == MIX_TYPE_RECORDERS) {
         setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
                                     AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
                                     address,
-                                    "remote-submix");
+                                    "remote-submix",
+                                    AUDIO_FORMAT_DEFAULT);
     }
 
     return NO_ERROR;
@@ -1619,13 +1737,13 @@
         if (outputDesc->streamActiveCount(stream) == 1) {
             // Automatically disable the remote submix input when output is stopped on a
             // re routing mix of type MIX_TYPE_RECORDERS
-            if (audio_is_remote_submix_device(outputDesc->mDevice) &&
+            if (audio_is_remote_submix_device(outputDesc->devices().types()) &&
                 outputDesc->mPolicyMix != NULL &&
                 outputDesc->mPolicyMix->mMixType == MIX_TYPE_RECORDERS) {
                 setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
                                             AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
                                             outputDesc->mPolicyMix->mDeviceAddress,
-                                            "remote-submix");
+                                            "remote-submix", AUDIO_FORMAT_DEFAULT);
             }
         }
         bool forceDeviceUpdate = false;
@@ -1640,33 +1758,31 @@
         // store time at which the stream was stopped - see isStreamActive()
         if (outputDesc->streamActiveCount(stream) == 0 || forceDeviceUpdate) {
             outputDesc->mStopTime[stream] = systemTime();
-            audio_devices_t newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
+            DeviceVector newDevices = getNewOutputDevices(outputDesc, false /*fromCache*/);
             // delay the device switch by twice the latency because stopOutput() is executed when
             // the track stop() command is received and at that time the audio track buffer can
             // still contain data that needs to be drained. The latency only covers the audio HAL
             // and kernel buffers. Also the latency does not always include additional delay in the
             // audio path (audio DSP, CODEC ...)
-            setOutputDevice(outputDesc, newDevice, false, outputDesc->latency()*2);
+            setOutputDevices(outputDesc, newDevices, false, outputDesc->latency()*2);
 
             // force restoring the device selection on other active outputs if it differs from the
             // one being selected for this output
             uint32_t delayMs = outputDesc->latency()*2;
             for (size_t i = 0; i < mOutputs.size(); i++) {
-                sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
+                sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
                 if (desc != outputDesc &&
                         desc->isActive() &&
                         outputDesc->sharesHwModuleWith(desc) &&
-                        (newDevice != desc->device())) {
-                    audio_devices_t newDevice2 = getNewOutputDevice(desc, false /*fromCache*/);
-                    bool force = desc->device() != newDevice2;
+                        (newDevices != desc->devices())) {
+                    DeviceVector newDevices2 = getNewOutputDevices(desc, false /*fromCache*/);
+                    bool force = desc->devices() != newDevices2;
 
-                    setOutputDevice(desc,
-                                    newDevice2,
-                                    force,
-                                    delayMs);
+                    setOutputDevices(desc, newDevices2, force, delayMs);
+
                     // re-apply device specific volume if not done by setOutputDevice()
                     if (!force) {
-                        applyStreamVolumes(desc, newDevice2, delayMs);
+                        applyStreamVolumes(desc, newDevices2.types(), delayMs);
                     }
                 }
             }
@@ -1739,29 +1855,27 @@
           attr->source, config->sample_rate, config->format, config->channel_mask, session, flags);
 
     status_t status = NO_ERROR;
-    // handle legacy remote submix case where the address was not always specified
-    String8 address = String8("");
     audio_source_t halInputSource;
-    audio_source_t inputSource = attr->source;
+    audio_attributes_t attributes = *attr;
     AudioMix *policyMix = NULL;
-    DeviceVector inputDevices;
+    sp<DeviceDescriptor> device;
     sp<AudioInputDescriptor> inputDesc;
     sp<RecordClientDescriptor> clientDesc;
     audio_port_handle_t requestedDeviceId = *selectedDeviceId;
     bool isSoundTrigger;
-    audio_devices_t device;
 
     // The supplied portId must be AUDIO_PORT_HANDLE_NONE
     if (*portId != AUDIO_PORT_HANDLE_NONE) {
         return INVALID_OPERATION;
     }
 
-    if (inputSource == AUDIO_SOURCE_DEFAULT) {
-        inputSource = AUDIO_SOURCE_MIC;
+    if (attr->source == AUDIO_SOURCE_DEFAULT) {
+        attributes.source = AUDIO_SOURCE_MIC;
     }
 
     // Explicit routing?
-    sp<DeviceDescriptor> deviceDesc = mAvailableInputDevices.getDeviceFromId(*selectedDeviceId);
+    sp<DeviceDescriptor> explicitRoutingDevice =
+            mAvailableInputDevices.getDeviceFromId(*selectedDeviceId);
 
     // special case for mmap capture: if an input IO handle is specified, we reuse this input if
     // possible
@@ -1802,7 +1916,7 @@
             }
         }
         *inputType = API_INPUT_LEGACY;
-        device = inputDesc->mDevice;
+        device = inputDesc->getDevice();
 
         ALOGI("%s reusing MMAP input %d for session %d", __FUNCTION__, *input, session);
         goto exit;
@@ -1811,44 +1925,40 @@
     *input = AUDIO_IO_HANDLE_NONE;
     *inputType = API_INPUT_INVALID;
 
-    halInputSource = inputSource;
+    halInputSource = attributes.source;
 
-    if (inputSource == AUDIO_SOURCE_REMOTE_SUBMIX &&
-            strncmp(attr->tags, "addr=", strlen("addr=")) == 0) {
-        status = mPolicyMixes.getInputMixForAttr(*attr, &policyMix);
+    if (attributes.source == AUDIO_SOURCE_REMOTE_SUBMIX &&
+            strncmp(attributes.tags, "addr=", strlen("addr=")) == 0) {
+        status = mPolicyMixes.getInputMixForAttr(attributes, &policyMix);
         if (status != NO_ERROR) {
             goto error;
         }
         *inputType = API_INPUT_MIX_EXT_POLICY_REROUTE;
-        device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
-        address = String8(attr->tags + strlen("addr="));
+        device = mAvailableInputDevices.getDevice(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
+                                                  String8(attr->tags + strlen("addr=")),
+                                                  AUDIO_FORMAT_DEFAULT);
     } else {
-        if (deviceDesc != 0) {
-            device = deviceDesc->type();
+        if (explicitRoutingDevice != nullptr) {
+            device = explicitRoutingDevice;
         } else {
-            device = getDeviceAndMixForInputSource(inputSource, &policyMix);
+            device = getDeviceAndMixForAttributes(attributes, &policyMix);
         }
-        if (device == AUDIO_DEVICE_NONE) {
-            ALOGW("getInputForAttr() could not find device for source %d", inputSource);
+        if (device == nullptr) {
+            ALOGW("getInputForAttr() could not find device for source %d", attributes.source);
             status = BAD_VALUE;
             goto error;
         }
-        if (policyMix != NULL) {
-            address = policyMix->mDeviceAddress;
-            if (policyMix->mMixType == MIX_TYPE_RECORDERS) {
-                // there is an external policy, but this input is attached to a mix of recorders,
-                // meaning it receives audio injected into the framework, so the recorder doesn't
-                // know about it and is therefore considered "legacy"
-                *inputType = API_INPUT_LEGACY;
-            } else {
-                // recording a mix of players defined by an external policy, we're rerouting for
-                // an external policy
-                *inputType = API_INPUT_MIX_EXT_POLICY_REROUTE;
-            }
-        } else if (audio_is_remote_submix_device(device)) {
-            address = String8("0");
+        if (policyMix != nullptr) {
+            ALOG_ASSERT(policyMix->mMixType == MIX_TYPE_RECORDERS, "Invalid Mix Type");
+            // there is an external policy, but this input is attached to a mix of recorders,
+            // meaning it receives audio injected into the framework, so the recorder doesn't
+            // know about it and is therefore considered "legacy"
+            *inputType = API_INPUT_LEGACY;
+        } else if (audio_is_remote_submix_device(device->type())) {
+            device = mAvailableInputDevices.getDevice(AUDIO_DEVICE_IN_REMOTE_SUBMIX, String8("0"),
+                                                      AUDIO_FORMAT_DEFAULT);
             *inputType = API_INPUT_MIX_CAPTURE;
-        } else if (device == AUDIO_DEVICE_IN_TELEPHONY_RX) {
+        } else if (device->type() == AUDIO_DEVICE_IN_TELEPHONY_RX) {
             *inputType = API_INPUT_TELEPHONY_RX;
         } else {
             *inputType = API_INPUT_LEGACY;
@@ -1856,7 +1966,7 @@
 
     }
 
-    *input = getInputForDevice(device, address, session, inputSource,
+    *input = getInputForDevice(device, session, attributes.source,
                                config, flags,
                                policyMix);
     if (*input == AUDIO_IO_HANDLE_NONE) {
@@ -1866,16 +1976,16 @@
 
 exit:
 
-    inputDevices = mAvailableInputDevices.getDevicesFromTypeMask(device);
-    *selectedDeviceId = getFirstDeviceId(inputDevices);
+    *selectedDeviceId = mAvailableInputDevices.contains(device) ?
+            device->getId() : AUDIO_PORT_HANDLE_NONE;
 
-    isSoundTrigger = inputSource == AUDIO_SOURCE_HOTWORD &&
+    isSoundTrigger = attributes.source == AUDIO_SOURCE_HOTWORD &&
         mSoundTriggerSessions.indexOfKey(session) > 0;
     *portId = AudioPort::getNextUniqueId();
 
-    clientDesc = new RecordClientDescriptor(*portId, uid, session,
-                                  *attr, *config, requestedDeviceId,
-                                  inputSource,flags, isSoundTrigger);
+    clientDesc = new RecordClientDescriptor(*portId, uid, session, *attr, *config,
+                                            requestedDeviceId, attributes.source, flags,
+                                            isSoundTrigger);
     inputDesc = mInputs.valueFor(*input);
     inputDesc->addClient(clientDesc);
 
@@ -1889,8 +1999,7 @@
 }
 
 
-audio_io_handle_t AudioPolicyManager::getInputForDevice(audio_devices_t device,
-                                                        String8 address,
+audio_io_handle_t AudioPolicyManager::getInputForDevice(const sp<DeviceDescriptor> &device,
                                                         audio_session_t session,
                                                         audio_source_t inputSource,
                                                         const audio_config_base_t *config,
@@ -1926,8 +2035,7 @@
     audio_input_flags_t profileFlags = flags;
     for (;;) {
         profileFormat = config->format; // reset each time through loop, in case it is updated
-        profile = getInputProfile(device, address,
-                                  profileSamplingRate, profileFormat, profileChannelMask,
+        profile = getInputProfile(device, profileSamplingRate, profileFormat, profileChannelMask,
                                   profileFlags);
         if (profile != 0) {
             break; // success
@@ -1936,9 +2044,9 @@
         } else if (profileFlags != AUDIO_INPUT_FLAG_NONE) {
             profileFlags = AUDIO_INPUT_FLAG_NONE; // retry
         } else { // fail
-            ALOGW("getInputForDevice() could not find profile for device 0x%X, "
-                  "sampling rate %u, format %#x, channel mask 0x%X, flags %#x",
-                    device, config->sample_rate, config->format, config->channel_mask, flags);
+            ALOGW("%s could not find profile for device %s, sampling rate %u, format %#x, "
+                  "channel mask 0x%X, flags %#x", __func__, device->toString().c_str(), 
+                  config->sample_rate, config->format, config->channel_mask, flags);
             return input;
         }
     }
@@ -1995,14 +2103,7 @@
     lConfig.channel_mask = profileChannelMask;
     lConfig.format = profileFormat;
 
-    if (address == "") {
-        DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromTypeMask(device);
-        // the inputs vector must be of size >= 1, but we don't want to crash here
-        address = getFirstDeviceAddress(inputDevices);
-    }
-
-    status_t status = inputDesc->open(&lConfig, device, address,
-            halInputSource, profileFlags, &input);
+    status_t status = inputDesc->open(&lConfig, device, halInputSource, profileFlags, &input);
 
     // only accept input with the exact requested set of parameters
     if (status != NO_ERROR || input == AUDIO_IO_HANDLE_NONE ||
@@ -2059,7 +2160,7 @@
 
     // indicate active capture to sound trigger service if starting capture from a mic on
     // primary HW module
-    audio_devices_t device = getNewInputDevice(inputDesc);
+    sp<DeviceDescriptor> device = getNewInputDevice(inputDesc);
     setInputDevice(input, device, true /* force */);
 
     if (inputDesc->activeCount()  == 1) {
@@ -2070,8 +2171,8 @@
                     MIX_STATE_MIXING);
         }
 
-        audio_devices_t primaryInputDevices = availablePrimaryInputDevices();
-        if (((device & primaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
+        DeviceVector primaryInputDevices = availablePrimaryModuleInputDevices();
+        if (primaryInputDevices.contains(device) &&
                 mInputs.activeInputsCountOnDevices(primaryInputDevices) == 1) {
             SoundTrigger::setCaptureState(true);
         }
@@ -2079,7 +2180,7 @@
         // automatically enable the remote submix output when input is started if not
         // used by a policy mix of type MIX_TYPE_RECORDERS
         // For remote submix (a virtual device), we open only one input per capture request.
-        if (audio_is_remote_submix_device(inputDesc->mDevice)) {
+        if (audio_is_remote_submix_device(inputDesc->getDeviceType())) {
             String8 address = String8("");
             if (inputDesc->mPolicyMix == NULL) {
                 address = String8("0");
@@ -2089,7 +2190,7 @@
             if (address != "") {
                 setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
                         AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
-                        address, "remote-submix");
+                        address, "remote-submix", AUDIO_FORMAT_DEFAULT);
             }
         }
     }
@@ -2130,7 +2231,7 @@
 
         // automatically disable the remote submix output when input is stopped if not
         // used by a policy mix of type MIX_TYPE_RECORDERS
-        if (audio_is_remote_submix_device(inputDesc->mDevice)) {
+        if (audio_is_remote_submix_device(inputDesc->getDeviceType())) {
             String8 address = String8("");
             if (inputDesc->mPolicyMix == NULL) {
                 address = String8("0");
@@ -2140,17 +2241,15 @@
             if (address != "") {
                 setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
                                          AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
-                                         address, "remote-submix");
+                                         address, "remote-submix", AUDIO_FORMAT_DEFAULT);
             }
         }
-
-        audio_devices_t device = inputDesc->mDevice;
         resetInputDevice(input);
 
         // indicate inactive capture to sound trigger service if stopping capture from a mic on
         // primary HW module
-        audio_devices_t primaryInputDevices = availablePrimaryInputDevices();
-        if (((device & primaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
+        DeviceVector primaryInputDevices = availablePrimaryModuleInputDevices();
+        if (primaryInputDevices.contains(inputDesc->getDevice()) &&
                 mInputs.activeInputsCountOnDevices(primaryInputDevices) == 0) {
             SoundTrigger::setCaptureState(false);
         }
@@ -2228,6 +2327,10 @@
                                             int indexMax)
 {
     ALOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax);
+    if (indexMin < 0 || indexMax < 0) {
+        ALOGE("%s for stream %d: invalid min %d or max %d", __func__, stream , indexMin, indexMax);
+        return;
+    }
     mVolumeCurves->initStreamVolume(stream, indexMin, indexMax);
 
     // initialize other private stream volumes which follow this one
@@ -2244,10 +2347,11 @@
                                                   audio_devices_t device)
 {
 
-    // VOICE_CALL stream has minVolumeIndex > 0  but can be muted directly by an
-    // app that has MODIFY_PHONE_STATE permission.
+    // VOICE_CALL and BLUETOOTH_SCO streams have minVolumeIndex > 0 but
+    // can be muted directly by an app that has MODIFY_PHONE_STATE permission.
     if (((index < mVolumeCurves->getVolumeIndexMin(stream)) &&
-            !(stream == AUDIO_STREAM_VOICE_CALL && index == 0)) ||
+            !((stream == AUDIO_STREAM_VOICE_CALL || stream == AUDIO_STREAM_BLUETOOTH_SCO) &&
+            index == 0)) ||
             (index > mVolumeCurves->getVolumeIndexMax(stream))) {
         return BAD_VALUE;
     }
@@ -2280,7 +2384,7 @@
     status_t status = NO_ERROR;
     for (size_t i = 0; i < mOutputs.size(); i++) {
         sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
-        audio_devices_t curDevice = desc->device();
+        audio_devices_t curDevice = desc->devices().types();
         for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
             if (!(streamsMatchForvolume(stream, (audio_stream_type_t)curStream))) {
                 continue;
@@ -2356,8 +2460,8 @@
     // 4: the first output in the list
 
     routing_strategy strategy = getStrategy(AUDIO_STREAM_MUSIC);
-    audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
-    SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
+    DeviceVector devices = getDevicesForStrategy(strategy, false /*fromCache*/);
+    SortedVector<audio_io_handle_t> outputs = getOutputsForDevices(devices, mOutputs);
 
     if (outputs.size() == 0) {
         return AUDIO_IO_HANDLE_NONE;
@@ -2562,27 +2666,31 @@
             if (mix.mMixType == MIX_TYPE_PLAYERS) {
                 setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
                         AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
-                        address.string(), "remote-submix");
+                        address.string(), "remote-submix", AUDIO_FORMAT_DEFAULT);
             } else {
                 setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
                         AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
-                        address.string(), "remote-submix");
+                        address.string(), "remote-submix", AUDIO_FORMAT_DEFAULT);
             }
         } else if ((mix.mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
             String8 address = mix.mDeviceAddress;
-            audio_devices_t device = mix.mDeviceType;
+            audio_devices_t type = mix.mDeviceType;
             ALOGV(" registerPolicyMixes() mix %zu of %zu is RENDER, dev=0x%X addr=%s",
-                    i, mixes.size(), device, address.string());
+                    i, mixes.size(), type, address.string());
+
+            sp<DeviceDescriptor> device = mHwModules.getDeviceDescriptor(
+                    mix.mDeviceType, mix.mDeviceAddress,
+                    String8(), AUDIO_FORMAT_DEFAULT);
+            if (device == nullptr) {
+                res = INVALID_OPERATION;
+                break;
+            }
 
             bool foundOutput = false;
             for (size_t j = 0 ; j < mOutputs.size() ; j++) {
                 sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(j);
-                sp<AudioPatch> patch = mAudioPatches.valueFor(desc->getPatchHandle());
-                if ((patch != 0) && (patch->mPatch.num_sinks != 0)
-                        && (patch->mPatch.sinks[0].type == AUDIO_PORT_TYPE_DEVICE)
-                        && (patch->mPatch.sinks[0].ext.device.type == device)
-                        && (strncmp(patch->mPatch.sinks[0].ext.device.address, address.string(),
-                                AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0)) {
+
+                if (desc->supportedDevices().contains(device)) {
                     if (mPolicyMixes.registerMix(address, mix, desc) != NO_ERROR) {
                         res = INVALID_OPERATION;
                     } else {
@@ -2594,12 +2702,12 @@
 
             if (res != NO_ERROR) {
                 ALOGE(" Error registering mix %zu for device 0x%X addr %s",
-                        i, device, address.string());
+                        i, type, address.string());
                 res = INVALID_OPERATION;
                 break;
             } else if (!foundOutput) {
                 ALOGE(" Output not found for mix %zu for device 0x%X addr %s",
-                        i, device, address.string());
+                        i, type, address.string());
                 res = INVALID_OPERATION;
                 break;
             }
@@ -2640,13 +2748,13 @@
                     AUDIO_POLICY_DEVICE_STATE_AVAILABLE)  {
                 setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
                         AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
-                        address.string(), "remote-submix");
+                        address.string(), "remote-submix", AUDIO_FORMAT_DEFAULT);
             }
             if (getDeviceConnectionState(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, address.string()) ==
                     AUDIO_POLICY_DEVICE_STATE_AVAILABLE)  {
                 setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
                         AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
-                        address.string(), "remote-submix");
+                        address.string(), "remote-submix", AUDIO_FORMAT_DEFAULT);
             }
             rSubmixModule->removeOutputProfile(address);
             rSubmixModule->removeInputProfile(address);
@@ -2690,11 +2798,11 @@
         // reevaluate outputs for all given devices
         for (size_t i = 0; i < devices.size(); i++) {
             sp<DeviceDescriptor> devDesc = mHwModules.getDeviceDescriptor(
-                            devices[i].mType, devices[i].mAddress, String8());
+                            devices[i].mType, devices[i].mAddress, String8(),
+                            AUDIO_FORMAT_DEFAULT);
             SortedVector<audio_io_handle_t> outputs;
             if (checkOutputsForDevice(devDesc, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
-                    outputs,
-                    devDesc->address()) != NO_ERROR) {
+                    outputs) != NO_ERROR) {
                 ALOGE("setUidDeviceAffinities() error in checkOutputsForDevice for device=%08x"
                         " addr=%s", devices[i].mType, devices[i].mAddress.string());
                 return INVALID_OPERATION;
@@ -2712,11 +2820,11 @@
         // reevaluate outputs for all found devices
         for (size_t i = 0; i < devices.size(); i++) {
             sp<DeviceDescriptor> devDesc = mHwModules.getDeviceDescriptor(
-                    devices[i].mType, devices[i].mAddress, String8());
+                    devices[i].mType, devices[i].mAddress, String8(),
+                    AUDIO_FORMAT_DEFAULT);
             SortedVector<audio_io_handle_t> outputs;
             if (checkOutputsForDevice(devDesc, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
-                    outputs,
-                    devDesc->address()) != NO_ERROR) {
+                    outputs) != NO_ERROR) {
                 ALOGE("%s() error in checkOutputsForDevice for device=%08x addr=%s",
                         __FUNCTION__, devices[i].mType, devices[i].mAddress.string());
                 return INVALID_OPERATION;
@@ -2836,7 +2944,7 @@
 
     // See if there is a profile to support this.
     // AUDIO_DEVICE_NONE
-    sp<IOProfile> profile = getProfileForOutput(AUDIO_DEVICE_NONE /*ignore device */,
+    sp<IOProfile> profile = getProfileForOutput(DeviceVector() /*ignore device */,
                                             offloadInfo.sample_rate,
                                             offloadInfo.format,
                                             offloadInfo.channel_mask,
@@ -2850,7 +2958,7 @@
                                                  const audio_attributes_t& attributes) {
     audio_output_flags_t output_flags = AUDIO_OUTPUT_FLAG_NONE;
     audio_attributes_flags_to_audio_output_flags(attributes.flags, output_flags);
-    sp<IOProfile> profile = getProfileForOutput(AUDIO_DEVICE_NONE /*ignore device */,
+    sp<IOProfile> profile = getProfileForOutput(DeviceVector() /*ignore device */,
                                             config.sample_rate,
                                             config.format,
                                             config.channel_mask,
@@ -3044,8 +3152,7 @@
                 return BAD_VALUE;
             }
 
-            if (!outputDesc->mProfile->isCompatibleProfile(devDesc->type(),
-                                                           devDesc->address(),
+            if (!outputDesc->mProfile->isCompatibleProfile(DeviceVector(devDesc),
                                                            patch->sources[0].sample_rate,
                                                            NULL,  // updatedSamplingRate
                                                            patch->sources[0].format,
@@ -3066,7 +3173,7 @@
         // TODO: reconfigure output format and channels here
         ALOGV("createAudioPatch() setting device %08x on output %d",
               devices.types(), outputDesc->mIoHandle);
-        setOutputDevice(outputDesc, devices.types(), true, 0, handle);
+        setOutputDevices(outputDesc, devices, true, 0, handle);
         index = mAudioPatches.indexOfKey(*handle);
         if (index >= 0) {
             if (patchDesc != 0 && patchDesc != mAudioPatches.valueAt(index)) {
@@ -3095,14 +3202,13 @@
                     return BAD_VALUE;
                 }
             }
-            sp<DeviceDescriptor> devDesc =
+            sp<DeviceDescriptor> device =
                     mAvailableInputDevices.getDeviceFromId(patch->sources[0].id);
-            if (devDesc == 0) {
+            if (device == 0) {
                 return BAD_VALUE;
             }
 
-            if (!inputDesc->mProfile->isCompatibleProfile(devDesc->type(),
-                                                          devDesc->address(),
+            if (!inputDesc->mProfile->isCompatibleProfile(DeviceVector(device),
                                                           patch->sinks[0].sample_rate,
                                                           NULL, /*updatedSampleRate*/
                                                           patch->sinks[0].format,
@@ -3116,9 +3222,9 @@
                 return INVALID_OPERATION;
             }
             // TODO: reconfigure output format and channels here
-            ALOGV("createAudioPatch() setting device %08x on output %d",
-                                                  devDesc->type(), inputDesc->mIoHandle);
-            setInputDevice(inputDesc->mIoHandle, devDesc->type(), true, handle);
+            ALOGV("%s() setting device %s on output %d", __func__,
+                  device->toString().c_str(), inputDesc->mIoHandle);
+            setInputDevice(inputDesc->mIoHandle, device, true, handle);
             index = mAudioPatches.indexOfKey(*handle);
             if (index >= 0) {
                 if (patchDesc != 0 && patchDesc != mAudioPatches.valueAt(index)) {
@@ -3138,16 +3244,16 @@
                     return BAD_VALUE;
                 }
             }
-            sp<DeviceDescriptor> srcDeviceDesc =
+            sp<DeviceDescriptor> srcDevice =
                     mAvailableInputDevices.getDeviceFromId(patch->sources[0].id);
-            if (srcDeviceDesc == 0) {
+            if (srcDevice == 0) {
                 return BAD_VALUE;
             }
 
             //update source and sink with our own data as the data passed in the patch may
             // be incomplete.
             struct audio_patch newPatch = *patch;
-            srcDeviceDesc->toAudioPortConfig(&newPatch.sources[0], &patch->sources[0]);
+            srcDevice->toAudioPortConfig(&newPatch.sources[0], &patch->sources[0]);
 
             for (size_t i = 0; i < patch->num_sinks; i++) {
                 if (patch->sinks[i].type != AUDIO_PORT_TYPE_DEVICE) {
@@ -3155,26 +3261,26 @@
                     return INVALID_OPERATION;
                 }
 
-                sp<DeviceDescriptor> sinkDeviceDesc =
+                sp<DeviceDescriptor> sinkDevice =
                         mAvailableOutputDevices.getDeviceFromId(patch->sinks[i].id);
-                if (sinkDeviceDesc == 0) {
+                if (sinkDevice == 0) {
                     return BAD_VALUE;
                 }
-                sinkDeviceDesc->toAudioPortConfig(&newPatch.sinks[i], &patch->sinks[i]);
+                sinkDevice->toAudioPortConfig(&newPatch.sinks[i], &patch->sinks[i]);
 
                 // create a software bridge in PatchPanel if:
                 // - source and sink devices are on different HW modules OR
                 // - audio HAL version is < 3.0
                 // - audio HAL version is >= 3.0 but no route has been declared between devices
-                if (!srcDeviceDesc->hasSameHwModuleAs(sinkDeviceDesc) ||
-                        (srcDeviceDesc->getModuleVersionMajor() < 3) ||
-                        !srcDeviceDesc->getModule()->supportsPatch(srcDeviceDesc, sinkDeviceDesc)) {
+                if (!srcDevice->hasSameHwModuleAs(sinkDevice) ||
+                        (srcDevice->getModuleVersionMajor() < 3) ||
+                        !srcDevice->getModule()->supportsPatch(srcDevice, sinkDevice)) {
                     // support only one sink device for now to simplify output selection logic
                     if (patch->num_sinks > 1) {
                         return INVALID_OPERATION;
                     }
                     SortedVector<audio_io_handle_t> outputs =
-                                            getOutputsForDevice(sinkDeviceDesc->type(), mOutputs);
+                            getOutputsForDevices(DeviceVector(sinkDevice), mOutputs);
                     // if the sink device is reachable via an opened output stream, request to go via
                     // this output stream by adding a second source to the patch description
                     audio_io_handle_t output = selectOutput(outputs);
@@ -3232,11 +3338,11 @@
             return BAD_VALUE;
         }
 
-        setOutputDevice(outputDesc,
-                        getNewOutputDevice(outputDesc, true /*fromCache*/),
-                       true,
-                       0,
-                       NULL);
+        setOutputDevices(outputDesc,
+                         getNewOutputDevices(outputDesc, true /*fromCache*/),
+                         true,
+                         0,
+                         NULL);
     } else if (patch->sources[0].type == AUDIO_PORT_TYPE_DEVICE) {
         if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
             sp<AudioInputDescriptor> inputDesc = mInputs.getInputFromId(patch->sinks[0].id);
@@ -3359,8 +3465,8 @@
 void AudioPolicyManager::checkStrategyRoute(routing_strategy strategy,
                                             audio_io_handle_t ouptutToSkip)
 {
-    audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
-    SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
+    DeviceVector devices = getDevicesForStrategy(strategy, false /*fromCache*/);
+    SortedVector<audio_io_handle_t> outputs = getOutputsForDevices(devices, mOutputs);
     for (size_t j = 0; j < mOutputs.size(); j++) {
         if (mOutputs.keyAt(j) == ouptutToSkip) {
             continue;
@@ -3379,8 +3485,8 @@
                 }
             }
         } else {
-            audio_devices_t newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
-            setOutputDevice(outputDesc, newDevice, false);
+            setOutputDevices(
+                        outputDesc, getNewOutputDevices(outputDesc, false /*fromCache*/), false);
         }
     }
 }
@@ -3443,7 +3549,8 @@
 {
     *session = (audio_session_t)mpClientInterface->newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
     *ioHandle = (audio_io_handle_t)mpClientInterface->newAudioUniqueId(AUDIO_UNIQUE_ID_USE_INPUT);
-    *device = getDeviceAndMixForInputSource(AUDIO_SOURCE_HOTWORD);
+    audio_attributes_t attr = { .source = AUDIO_SOURCE_HOTWORD };
+    *device = getDeviceAndMixForAttributes(attr)->type();
 
     return mSoundTriggerSessions.acquireSession(*session, *ioHandle);
 }
@@ -3469,10 +3576,11 @@
         return INVALID_OPERATION;
     }
 
-    sp<DeviceDescriptor> srcDeviceDesc =
+    sp<DeviceDescriptor> srcDevice =
             mAvailableInputDevices.getDevice(source->ext.device.type,
-                                              String8(source->ext.device.address));
-    if (srcDeviceDesc == 0) {
+                                             String8(source->ext.device.address),
+                                             AUDIO_FORMAT_DEFAULT);
+    if (srcDevice == 0) {
         ALOGW("%s source->ext.device.type %08x not found", __FUNCTION__, source->ext.device.type);
         return BAD_VALUE;
     }
@@ -3483,7 +3591,7 @@
     sp<AudioPatch> patchDesc = new AudioPatch(&dummyPatch, uid);
 
     sp<SourceClientDescriptor> sourceDesc =
-        new SourceClientDescriptor(*portId, uid, *attributes, patchDesc, srcDeviceDesc,
+        new SourceClientDescriptor(*portId, uid, *attributes, patchDesc, srcDevice,
                                    streamTypefromAttributesInt(attributes),
                                    getStrategyForAttr(attributes));
 
@@ -3504,18 +3612,20 @@
     audio_attributes_t attributes = sourceDesc->attributes();
     routing_strategy strategy = getStrategyForAttr(&attributes);
     audio_stream_type_t stream = sourceDesc->stream();
-    sp<DeviceDescriptor> srcDeviceDesc = sourceDesc->srcDevice();
+    sp<DeviceDescriptor> srcDevice = sourceDesc->srcDevice();
 
-    audio_devices_t sinkDevice = getDeviceForStrategy(strategy, true);
-    sp<DeviceDescriptor> sinkDeviceDesc =
-            mAvailableOutputDevices.getDevice(sinkDevice, String8(""));
+    DeviceVector sinkDevices = getDevicesForStrategy(strategy, true);
+    ALOG_ASSERT(!sinkDevices.isEmpty(), "connectAudioSource(): no device found for strategy");
+    sp<DeviceDescriptor> sinkDevice = sinkDevices.itemAt(0);
+    ALOG_ASSERT(mAvailableOutputDevices.contains(sinkDevice), "%s: Device %s not available",
+                __FUNCTION__, sinkDevice->toString().c_str());
 
     audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
 
-    if (srcDeviceDesc->hasSameHwModuleAs(sinkDeviceDesc) &&
-            srcDeviceDesc->getModuleVersionMajor() >= 3 &&
-            sinkDeviceDesc->getModule()->supportsPatch(srcDeviceDesc, sinkDeviceDesc) &&
-            srcDeviceDesc->getAudioPort()->mGains.size() > 0) {
+    if (srcDevice->hasSameHwModuleAs(sinkDevice) &&
+            srcDevice->getModuleVersionMajor() >= 3 &&
+            sinkDevice->getModule()->supportsPatch(srcDevice, sinkDevice) &&
+            srcDevice->getAudioPort()->mGains.size() > 0) {
         ALOGV("%s Device to Device route supported by >=3.0 HAL", __FUNCTION__);
         // TODO: may explicitly specify whether we should use HW or SW patch
         //   create patch between src device and output device
@@ -3532,12 +3642,12 @@
         getOutputForAttrInt(&resultAttr, &output, AUDIO_SESSION_NONE,
                 &attributes, &stream, sourceDesc->uid(), &config, &flags, &selectedDeviceId);
         if (output == AUDIO_IO_HANDLE_NONE) {
-            ALOGV("%s no output for device %08x", __FUNCTION__, sinkDevice);
+            ALOGV("%s no output for device %08x", __FUNCTION__, sinkDevices.types());
             return INVALID_OPERATION;
         }
         sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
         if (outputDesc->isDuplicated()) {
-            ALOGV("%s output for device %08x is duplicated", __FUNCTION__, sinkDevice);
+            ALOGV("%s output for device %08x is duplicated", __FUNCTION__, sinkDevices.types());
             return INVALID_OPERATION;
         }
         status_t status = outputDesc->start();
@@ -3551,7 +3661,7 @@
         // - the sink is defined by whatever output device is currently selected for the output
         // though which this patch is routed.
         PatchBuilder patchBuilder;
-        patchBuilder.addSource(srcDeviceDesc).addSource(outputDesc, { .stream = stream });
+        patchBuilder.addSource(srcDevice).addSource(outputDesc, { .stream = stream });
         status = mpClientInterface->createAudioPatch(patchBuilder.patch(),
                                                               &afPatchHandle,
                                                               0);
@@ -3753,14 +3863,16 @@
         status_t status = setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_HDMI,
                                                       AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
                                                       address.c_str(),
-                                                      name.c_str());
+                                                      name.c_str(),
+                                                      AUDIO_FORMAT_DEFAULT);
         if (status != NO_ERROR) {
             continue;
         }
         status = setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_HDMI,
                                              AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
                                              address.c_str(),
-                                             name.c_str());
+                                             name.c_str(),
+                                             AUDIO_FORMAT_DEFAULT);
         profileUpdated |= (status == NO_ERROR);
     }
     // FIXME: Why doing this for input HDMI devices if we don't augment their reported formats?
@@ -3773,14 +3885,16 @@
         status_t status = setDeviceConnectionStateInt(AUDIO_DEVICE_IN_HDMI,
                                                       AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
                                                       address.c_str(),
-                                                      name.c_str());
+                                                      name.c_str(),
+                                                      AUDIO_FORMAT_DEFAULT);
         if (status != NO_ERROR) {
             continue;
         }
         status = setDeviceConnectionStateInt(AUDIO_DEVICE_IN_HDMI,
                                              AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
                                              address.c_str(),
-                                             name.c_str());
+                                             name.c_str(),
+                                             AUDIO_FORMAT_DEFAULT);
         profileUpdated |= (status == NO_ERROR);
     }
 
@@ -3978,8 +4092,6 @@
 
     // mAvailableOutputDevices and mAvailableInputDevices now contain all attached devices
     // open all output streams needed to access attached devices
-    audio_devices_t outputDeviceTypes = mAvailableOutputDevices.types();
-    audio_devices_t inputDeviceTypes = mAvailableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
     for (const auto& hwModule : mHwModulesAll) {
         hwModule->setHandle(mpClientInterface->loadHwModule(hwModule->getName()));
         if (hwModule->getHandle() == AUDIO_MODULE_HANDLE_NONE) {
@@ -4008,51 +4120,49 @@
             if ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
                 continue;
             }
-            audio_devices_t profileType = outProfile->getSupportedDevicesType();
-            if ((profileType & mDefaultOutputDevice->type()) != AUDIO_DEVICE_NONE) {
-                profileType = mDefaultOutputDevice->type();
+            const DeviceVector &supportedDevices = outProfile->getSupportedDevices();
+            DeviceVector availProfileDevices = supportedDevices.filter(mAvailableOutputDevices);
+            sp<DeviceDescriptor> supportedDevice = 0;
+            if (supportedDevices.contains(mDefaultOutputDevice)) {
+                supportedDevice = mDefaultOutputDevice;
             } else {
-                // chose first device present in profile's SupportedDevices also part of
-                // outputDeviceTypes
-                profileType = outProfile->getSupportedDeviceForType(outputDeviceTypes);
+                // choose first device present in profile's SupportedDevices also part of
+                // mAvailableOutputDevices.
+                if (availProfileDevices.isEmpty()) {
+                    continue;
+                }
+                supportedDevice = availProfileDevices.itemAt(0);
             }
-            if ((profileType & outputDeviceTypes) == 0) {
+            if (!mAvailableOutputDevices.contains(supportedDevice)) {
                 continue;
             }
             sp<SwAudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(outProfile,
                                                                                  mpClientInterface);
-            const DeviceVector &supportedDevices = outProfile->getSupportedDevices();
-            const DeviceVector &devicesForType = supportedDevices.getDevicesFromTypeMask(
-                    profileType);
-            String8 address = getFirstDeviceAddress(devicesForType);
             audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
-            status_t status = outputDesc->open(nullptr, profileType, address,
-                                           AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE, &output);
-
+            status_t status = outputDesc->open(nullptr, DeviceVector(supportedDevice),
+                                               AUDIO_STREAM_DEFAULT,
+                                               AUDIO_OUTPUT_FLAG_NONE, &output);
             if (status != NO_ERROR) {
-                ALOGW("Cannot open output stream for device %08x on hw module %s",
-                      outputDesc->mDevice,
-                      hwModule->getName());
-            } else {
-                for (const auto& dev : supportedDevices) {
-                    ssize_t index = mAvailableOutputDevices.indexOf(dev);
-                    // give a valid ID to an attached device once confirmed it is reachable
-                    if (index >= 0 && !mAvailableOutputDevices[index]->isAttached()) {
-                        mAvailableOutputDevices[index]->attach(hwModule);
-                    }
-                }
-                if (mPrimaryOutput == 0 &&
-                        outProfile->getFlags() & AUDIO_OUTPUT_FLAG_PRIMARY) {
-                    mPrimaryOutput = outputDesc;
-                }
-                addOutput(output, outputDesc);
-                setOutputDevice(outputDesc,
-                                profileType,
-                                true,
-                                0,
-                                NULL,
-                                address);
+                ALOGW("Cannot open output stream for devices %s on hw module %s",
+                      supportedDevice->toString().c_str(), hwModule->getName());
+                continue;
             }
+            for (const auto &device : availProfileDevices) {
+                // give a valid ID to an attached device once confirmed it is reachable
+                if (!device->isAttached()) {
+                    device->attach(hwModule);
+                }
+            }
+            if (mPrimaryOutput == 0 &&
+                    outProfile->getFlags() & AUDIO_OUTPUT_FLAG_PRIMARY) {
+                mPrimaryOutput = outputDesc;
+            }
+            addOutput(output, outputDesc);
+            setOutputDevices(outputDesc,
+                             DeviceVector(supportedDevice),
+                             true,
+                             0,
+                             NULL);
         }
         // open input streams needed to access attached devices to validate
         // mAvailableInputDevices list
@@ -4067,75 +4177,59 @@
                 continue;
             }
             // chose first device present in profile's SupportedDevices also part of
-            // inputDeviceTypes
-            audio_devices_t profileType = inProfile->getSupportedDeviceForType(inputDeviceTypes);
-
-            if ((profileType & inputDeviceTypes) == 0) {
+            // available input devices
+            const DeviceVector &supportedDevices = inProfile->getSupportedDevices();
+            DeviceVector availProfileDevices = supportedDevices.filter(mAvailableInputDevices);
+            if (availProfileDevices.isEmpty()) {
+                ALOGE("%s: Input device list is empty!", __FUNCTION__);
                 continue;
             }
             sp<AudioInputDescriptor> inputDesc =
                     new AudioInputDescriptor(inProfile, mpClientInterface);
 
-            DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromTypeMask(profileType);
-            //   the inputs vector must be of size >= 1, but we don't want to crash here
-            String8 address = getFirstDeviceAddress(inputDevices);
-            ALOGV("  for input device 0x%x using address %s", profileType, address.string());
-            ALOGE_IF(inputDevices.size() == 0, "Input device list is empty!");
-
             audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
             status_t status = inputDesc->open(nullptr,
-                                              profileType,
-                                              address,
+                                              availProfileDevices.itemAt(0),
                                               AUDIO_SOURCE_MIC,
                                               AUDIO_INPUT_FLAG_NONE,
                                               &input);
-
-            if (status == NO_ERROR) {
-                for (const auto& dev : inProfile->getSupportedDevices()) {
-                    ssize_t index = mAvailableInputDevices.indexOf(dev);
-                    // give a valid ID to an attached device once confirmed it is reachable
-                    if (index >= 0) {
-                        sp<DeviceDescriptor> devDesc = mAvailableInputDevices[index];
-                        if (!devDesc->isAttached()) {
-                            devDesc->attach(hwModule);
-                            devDesc->importAudioPort(inProfile, true);
-                        }
-                    }
-                }
-                inputDesc->close();
-            } else {
-                ALOGW("Cannot open input stream for device %08x on hw module %s",
-                      profileType,
+            if (status != NO_ERROR) {
+                ALOGW("Cannot open input stream for device %s on hw module %s",
+                      availProfileDevices.toString().c_str(),
                       hwModule->getName());
+                continue;
             }
+            for (const auto &device : availProfileDevices) {
+                // give a valid ID to an attached device once confirmed it is reachable
+                if (!device->isAttached()) {
+                    device->attach(hwModule);
+                    device->importAudioPort(inProfile, true);
+                }
+            }
+            inputDesc->close();
         }
     }
     // make sure all attached devices have been allocated a unique ID
-    for (size_t i = 0; i  < mAvailableOutputDevices.size();) {
-        if (!mAvailableOutputDevices[i]->isAttached()) {
-            ALOGW("Output device %08x unreachable", mAvailableOutputDevices[i]->type());
-            mAvailableOutputDevices.remove(mAvailableOutputDevices[i]);
-            continue;
+    auto checkAndSetAvailable = [this](auto& devices) {
+        for (size_t i = 0; i < devices.size();) {
+            const auto &device = devices[i];
+            if (!device->isAttached()) {
+                ALOGW("device %s is unreachable", device->toString().c_str());
+                devices.remove(device);
+                continue;
+            }
+            // Device is now validated and can be appended to the available devices of the engine
+            mEngine->setDeviceConnectionState(device, AUDIO_POLICY_DEVICE_STATE_AVAILABLE);
+            i++;
         }
-        // The device is now validated and can be appended to the available devices of the engine
-        mEngine->setDeviceConnectionState(mAvailableOutputDevices[i],
-                                          AUDIO_POLICY_DEVICE_STATE_AVAILABLE);
-        i++;
-    }
-    for (size_t i = 0; i  < mAvailableInputDevices.size();) {
-        if (!mAvailableInputDevices[i]->isAttached()) {
-            ALOGW("Input device %08x unreachable", mAvailableInputDevices[i]->type());
-            mAvailableInputDevices.remove(mAvailableInputDevices[i]);
-            continue;
-        }
-        // The device is now validated and can be appended to the available devices of the engine
-        mEngine->setDeviceConnectionState(mAvailableInputDevices[i],
-                                          AUDIO_POLICY_DEVICE_STATE_AVAILABLE);
-        i++;
-    }
+    };
+    checkAndSetAvailable(mAvailableOutputDevices);
+    checkAndSetAvailable(mAvailableInputDevices);
+
     // make sure default device is reachable
-    if (mDefaultOutputDevice == 0 || mAvailableOutputDevices.indexOf(mDefaultOutputDevice) < 0) {
-        ALOGE("Default device %08x is unreachable", mDefaultOutputDevice->type());
+    if (mDefaultOutputDevice == 0 || !mAvailableOutputDevices.contains(mDefaultOutputDevice)) {
+        ALOGE_IF(mDefaultOutputDevice != 0, "Default device %s is unreachable",
+                 mDefaultOutputDevice->toString().c_str());
         status = NO_INIT;
     }
     // If microphones address is empty, set it according to device type
@@ -4208,44 +4302,28 @@
     nextAudioPortGeneration();
 }
 
-void AudioPolicyManager::findIoHandlesByAddress(const sp<SwAudioOutputDescriptor>& desc /*in*/,
-        const audio_devices_t device /*in*/,
-        const String8& address /*in*/,
-        SortedVector<audio_io_handle_t>& outputs /*out*/) {
-    sp<DeviceDescriptor> devDesc =
-        desc->mProfile->getSupportedDeviceByAddress(device, address);
-    if (devDesc != 0) {
-        ALOGV("findIoHandlesByAddress(): adding opened output %d on same address %s",
-              desc->mIoHandle, address.string());
-        outputs.add(desc->mIoHandle);
-    }
-}
-
-status_t AudioPolicyManager::checkOutputsForDevice(const sp<DeviceDescriptor>& devDesc,
+status_t AudioPolicyManager::checkOutputsForDevice(const sp<DeviceDescriptor>& device,
                                                    audio_policy_dev_state_t state,
-                                                   SortedVector<audio_io_handle_t>& outputs,
-                                                   const String8& address)
+                                                   SortedVector<audio_io_handle_t>& outputs)
 {
-    audio_devices_t device = devDesc->type();
+    audio_devices_t deviceType = device->type();
+    const String8 &address = device->address();
     sp<SwAudioOutputDescriptor> desc;
 
-    if (audio_device_is_digital(device)) {
+    if (audio_device_is_digital(deviceType)) {
         // erase all current sample rates, formats and channel masks
-        devDesc->clearAudioProfiles();
+        device->clearAudioProfiles();
     }
 
     if (state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE) {
         // first list already open outputs that can be routed to this device
         for (size_t i = 0; i < mOutputs.size(); i++) {
             desc = mOutputs.valueAt(i);
-            if (!desc->isDuplicated() && (desc->supportedDevices() & device)) {
-                if (!device_distinguishes_on_address(device)) {
-                    ALOGV("checkOutputsForDevice(): adding opened output %d", mOutputs.keyAt(i));
-                    outputs.add(mOutputs.keyAt(i));
-                } else {
-                    ALOGV("  checking address match due to device 0x%x", device);
-                    findIoHandlesByAddress(desc, device, address, outputs);
-                }
+            if (!desc->isDuplicated() && desc->supportsDevice(device)
+                    && desc->deviceSupportsEncodedFormats(deviceType)) {
+                ALOGV("checkOutputsForDevice(): adding opened output %d on device %s",
+                      mOutputs.keyAt(i), device->toString().c_str());
+                outputs.add(mOutputs.keyAt(i));
             }
         }
         // then look for output profiles that can be routed to this device
@@ -4253,13 +4331,10 @@
         for (const auto& hwModule : mHwModules) {
             for (size_t j = 0; j < hwModule->getOutputProfiles().size(); j++) {
                 sp<IOProfile> profile = hwModule->getOutputProfiles()[j];
-                if (profile->supportDevice(device)) {
-                    if (!device_distinguishes_on_address(device) ||
-                            profile->supportDeviceAddress(address)) {
-                        profiles.add(profile);
-                        ALOGV("checkOutputsForDevice(): adding profile %zu from module %s",
-                                j, hwModule->getName());
-                    }
+                if (profile->supportsDevice(device)) {
+                    profiles.add(profile);
+                    ALOGV("checkOutputsForDevice(): adding profile %zu from module %s",
+                          j, hwModule->getName());
                 }
             }
         }
@@ -4267,7 +4342,7 @@
         ALOGV("  found %zu profiles, %zu outputs", profiles.size(), outputs.size());
 
         if (profiles.isEmpty() && outputs.isEmpty()) {
-            ALOGW("checkOutputsForDevice(): No output available for device %04x", device);
+            ALOGW("checkOutputsForDevice(): No output available for device %04x", deviceType);
             return BAD_VALUE;
         }
 
@@ -4283,8 +4358,8 @@
                 if (!desc->isDuplicated() && desc->mProfile == profile) {
                     // matching profile: save the sample rates, format and channel masks supported
                     // by the profile in our device descriptor
-                    if (audio_device_is_digital(device)) {
-                        devDesc->importAudioPort(profile);
+                    if (audio_device_is_digital(deviceType)) {
+                        device->importAudioPort(profile);
                     }
                     break;
                 }
@@ -4300,20 +4375,20 @@
             }
 
             ALOGV("opening output for device %08x with params %s profile %p name %s",
-                  device, address.string(), profile.get(), profile->getName().string());
+                  deviceType, address.string(), profile.get(), profile->getName().string());
             desc = new SwAudioOutputDescriptor(profile, mpClientInterface);
             audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
-            status_t status = desc->open(nullptr, device, address,
+            status_t status = desc->open(nullptr, DeviceVector(device),
                                          AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE, &output);
 
             if (status == NO_ERROR) {
                 // Here is where the out_set_parameters() for card & device gets called
                 if (!address.isEmpty()) {
-                    char *param = audio_device_address_to_parameter(device, address);
+                    char *param = audio_device_address_to_parameter(deviceType, address);
                     mpClientInterface->setParameters(output, String8(param));
                     free(param);
                 }
-                updateAudioProfiles(devDesc, output, profile->getAudioProfiles());
+                updateAudioProfiles(device, output, profile->getAudioProfiles());
                 if (!profile->hasValidAudioProfile()) {
                     ALOGW("checkOutputsForDevice() missing param");
                     desc->close();
@@ -4328,7 +4403,8 @@
                     config.offload_info.channel_mask = config.channel_mask;
                     config.offload_info.format = config.format;
 
-                    status_t status = desc->open(&config, device, address, AUDIO_STREAM_DEFAULT,
+                    status_t status = desc->open(&config, DeviceVector(device),
+                                                 AUDIO_STREAM_DEFAULT,
                                                  AUDIO_OUTPUT_FLAG_NONE, &output);
                     if (status != NO_ERROR) {
                         output = AUDIO_IO_HANDLE_NONE;
@@ -4337,14 +4413,15 @@
 
                 if (output != AUDIO_IO_HANDLE_NONE) {
                     addOutput(output, desc);
-                    if (device_distinguishes_on_address(device) && address != "0") {
+                    if (device_distinguishes_on_address(deviceType) && address != "0") {
                         sp<AudioPolicyMix> policyMix;
-                        if (mPolicyMixes.getAudioPolicyMix(address, policyMix) != NO_ERROR) {
-                            ALOGE("checkOutputsForDevice() cannot find policy for address %s",
+                        if (mPolicyMixes.getAudioPolicyMix(address, policyMix) == NO_ERROR) {
+                            policyMix->setOutput(desc);
+                            desc->mPolicyMix = policyMix->getMix();
+                        } else {
+                            ALOGW("checkOutputsForDevice() cannot find policy for address %s",
                                   address.string());
                         }
-                        policyMix->setOutput(desc);
-                        desc->mPolicyMix = policyMix->getMix();
 
                     } else if (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) &&
                                     hasPrimaryOutput()) {
@@ -4376,28 +4453,28 @@
                 output = AUDIO_IO_HANDLE_NONE;
             }
             if (output == AUDIO_IO_HANDLE_NONE) {
-                ALOGW("checkOutputsForDevice() could not open output for device %x", device);
+                ALOGW("checkOutputsForDevice() could not open output for device %x", deviceType);
                 profiles.removeAt(profile_index);
                 profile_index--;
             } else {
                 outputs.add(output);
                 // Load digital format info only for digital devices
-                if (audio_device_is_digital(device)) {
-                    devDesc->importAudioPort(profile);
+                if (audio_device_is_digital(deviceType)) {
+                    device->importAudioPort(profile);
                 }
 
-                if (device_distinguishes_on_address(device)) {
-                    ALOGV("checkOutputsForDevice(): setOutputDevice(dev=0x%x, addr=%s)",
-                            device, address.string());
-                    setOutputDevice(desc, device, true/*force*/, 0/*delay*/,
-                            NULL/*patch handle*/, address.string());
+                if (device_distinguishes_on_address(deviceType)) {
+                    ALOGV("checkOutputsForDevice(): setOutputDevices %s",
+                            device->toString().c_str());
+                    setOutputDevices(desc, DeviceVector(device), true/*force*/, 0/*delay*/,
+                                     NULL/*patch handle*/);
                 }
                 ALOGV("checkOutputsForDevice(): adding output %d", output);
             }
         }
 
         if (profiles.isEmpty()) {
-            ALOGW("checkOutputsForDevice(): No output available for device %04x", device);
+            ALOGW("checkOutputsForDevice(): No output available for device %04x", deviceType);
             return BAD_VALUE;
         }
     } else { // Disconnect
@@ -4406,10 +4483,10 @@
             desc = mOutputs.valueAt(i);
             if (!desc->isDuplicated()) {
                 // exact match on device
-                if (device_distinguishes_on_address(device) &&
-                        (desc->supportedDevices() == device)) {
-                    findIoHandlesByAddress(desc, device, address, outputs);
-                } else if (!(desc->supportedDevices() & mAvailableOutputDevices.types())) {
+                if (device_distinguishes_on_address(deviceType) && desc->supportsDevice(device)
+                        && desc->deviceSupportsEncodedFormats(deviceType)) {
+                    outputs.add(mOutputs.keyAt(i));
+                } else if (!mAvailableOutputDevices.containsAtLeastOne(desc->supportedDevices())) {
                     ALOGV("checkOutputsForDevice(): disconnecting adding output %d",
                             mOutputs.keyAt(i));
                     outputs.add(mOutputs.keyAt(i));
@@ -4420,7 +4497,7 @@
         for (const auto& hwModule : mHwModules) {
             for (size_t j = 0; j < hwModule->getOutputProfiles().size(); j++) {
                 sp<IOProfile> profile = hwModule->getOutputProfiles()[j];
-                if (profile->supportDevice(device)) {
+                if (profile->supportsDevice(device)) {
                     ALOGV("checkOutputsForDevice(): "
                             "clearing direct output profile %zu on module %s",
                             j, hwModule->getName());
@@ -4432,24 +4509,22 @@
     return NO_ERROR;
 }
 
-status_t AudioPolicyManager::checkInputsForDevice(const sp<DeviceDescriptor>& devDesc,
+status_t AudioPolicyManager::checkInputsForDevice(const sp<DeviceDescriptor>& device,
                                                   audio_policy_dev_state_t state,
-                                                  SortedVector<audio_io_handle_t>& inputs,
-                                                  const String8& address)
+                                                  SortedVector<audio_io_handle_t>& inputs)
 {
-    audio_devices_t device = devDesc->type();
     sp<AudioInputDescriptor> desc;
 
-    if (audio_device_is_digital(device)) {
+    if (audio_device_is_digital(device->type())) {
         // erase all current sample rates, formats and channel masks
-        devDesc->clearAudioProfiles();
+        device->clearAudioProfiles();
     }
 
     if (state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE) {
         // first list already open inputs that can be routed to this device
         for (size_t input_index = 0; input_index < mInputs.size(); input_index++) {
             desc = mInputs.valueAt(input_index);
-            if (desc->mProfile->supportDevice(device)) {
+            if (desc->mProfile->supportsDeviceTypes(device->type())) {
                 ALOGV("checkInputsForDevice(): adding opened input %d", mInputs.keyAt(input_index));
                inputs.add(mInputs.keyAt(input_index));
             }
@@ -4463,19 +4538,16 @@
                  profile_index++) {
                 sp<IOProfile> profile = hwModule->getInputProfiles()[profile_index];
 
-                if (profile->supportDevice(device)) {
-                    if (!device_distinguishes_on_address(device) ||
-                            profile->supportDeviceAddress(address)) {
-                        profiles.add(profile);
-                        ALOGV("checkInputsForDevice(): adding profile %zu from module %s",
-                                profile_index, hwModule->getName());
-                    }
+                if (profile->supportsDevice(device)) {
+                    profiles.add(profile);
+                    ALOGV("checkInputsForDevice(): adding profile %zu from module %s",
+                          profile_index, hwModule->getName());
                 }
             }
         }
 
         if (profiles.isEmpty() && inputs.isEmpty()) {
-            ALOGW("checkInputsForDevice(): No input available for device 0x%X", device);
+            ALOGW("%s: No input available for device %s", __func__, device->toString().c_str());
             return BAD_VALUE;
         }
 
@@ -4490,8 +4562,8 @@
             for (input_index = 0; input_index < mInputs.size(); input_index++) {
                 desc = mInputs.valueAt(input_index);
                 if (desc->mProfile == profile) {
-                    if (audio_device_is_digital(device)) {
-                        devDesc->importAudioPort(profile);
+                    if (audio_device_is_digital(device->type())) {
+                        device->importAudioPort(profile);
                     }
                     break;
                 }
@@ -4510,18 +4582,18 @@
             audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
             status_t status = desc->open(nullptr,
                                          device,
-                                         address,
                                          AUDIO_SOURCE_MIC,
                                          AUDIO_INPUT_FLAG_NONE,
                                          &input);
 
             if (status == NO_ERROR) {
+                const String8& address = device->address();
                 if (!address.isEmpty()) {
-                    char *param = audio_device_address_to_parameter(device, address);
+                    char *param = audio_device_address_to_parameter(device->type(), address);
                     mpClientInterface->setParameters(input, String8(param));
                     free(param);
                 }
-                updateAudioProfiles(devDesc, input, profile->getAudioProfiles());
+                updateAudioProfiles(device, input, profile->getAudioProfiles());
                 if (!profile->hasValidAudioProfile()) {
                     ALOGW("checkInputsForDevice() direct input missing param");
                     desc->close();
@@ -4534,20 +4606,21 @@
             } // endif input != 0
 
             if (input == AUDIO_IO_HANDLE_NONE) {
-                ALOGW("checkInputsForDevice() could not open input for device 0x%X", device);
+                ALOGW("%s could not open input for device %s", __func__,  
+                       device->toString().c_str());
                 profiles.removeAt(profile_index);
                 profile_index--;
             } else {
                 inputs.add(input);
-                if (audio_device_is_digital(device)) {
-                    devDesc->importAudioPort(profile);
+                if (audio_device_is_digital(device->type())) {
+                    device->importAudioPort(profile);
                 }
                 ALOGV("checkInputsForDevice(): adding input %d", input);
             }
         } // end scan profiles
 
         if (profiles.isEmpty()) {
-            ALOGW("checkInputsForDevice(): No input available for device 0x%X", device);
+            ALOGW("%s: No input available for device %s", __func__,  device->toString().c_str());
             return BAD_VALUE;
         }
     } else {
@@ -4555,7 +4628,7 @@
         // check if one opened input is not needed any more after disconnecting one device
         for (size_t input_index = 0; input_index < mInputs.size(); input_index++) {
             desc = mInputs.valueAt(input_index);
-            if (!(desc->mProfile->supportDevice(mAvailableInputDevices.types()))) {
+            if (!mAvailableInputDevices.containsAtLeastOne(desc->supportedDevices())) {
                 ALOGV("checkInputsForDevice(): disconnecting adding input %d",
                       mInputs.keyAt(input_index));
                 inputs.add(mInputs.keyAt(input_index));
@@ -4567,7 +4640,7 @@
                  profile_index < hwModule->getInputProfiles().size();
                  profile_index++) {
                 sp<IOProfile> profile = hwModule->getInputProfiles()[profile_index];
-                if (profile->supportDevice(device)) {
+                if (profile->supportsDevice(device)) {
                     ALOGV("checkInputsForDevice(): clearing direct input profile %zu on module %s",
                             profile_index, hwModule->getName());
                     profile->clearAudioProfiles();
@@ -4641,7 +4714,7 @@
 
     // MSD patches may have been released to support a non-MSD direct output. Reset MSD patch if
     // no direct outputs are open.
-    if (mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD) != 0) {
+    if (!getMsdAudioOutDevices().isEmpty()) {
         bool directOutputOpen = false;
         for (size_t i = 0; i < mOutputs.size(); i++) {
             if (mOutputs[i]->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
@@ -4668,7 +4741,7 @@
 
     nextAudioPortGeneration();
 
-    audio_devices_t device = inputDesc->mDevice;
+    sp<DeviceDescriptor> device = inputDesc->getDevice();
     ssize_t index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
     if (index >= 0) {
         sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
@@ -4680,26 +4753,27 @@
     inputDesc->close();
     mInputs.removeItem(input);
 
-    audio_devices_t primaryInputDevices = availablePrimaryInputDevices();
-    if (((device & primaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
+    DeviceVector primaryInputDevices = availablePrimaryModuleInputDevices();
+    if (primaryInputDevices.contains(device) &&
             mInputs.activeInputsCountOnDevices(primaryInputDevices) == 0) {
         SoundTrigger::setCaptureState(false);
     }
 }
 
-SortedVector<audio_io_handle_t> AudioPolicyManager::getOutputsForDevice(
-                                                                audio_devices_t device,
-                                                                const SwAudioOutputCollection& openOutputs)
+SortedVector<audio_io_handle_t> AudioPolicyManager::getOutputsForDevices(
+            const DeviceVector &devices,
+            const SwAudioOutputCollection& openOutputs)
 {
     SortedVector<audio_io_handle_t> outputs;
 
-    ALOGVV("getOutputsForDevice() device %04x", device);
+    ALOGVV("%s() devices %s", __func__, devices.toString().c_str());
     for (size_t i = 0; i < openOutputs.size(); i++) {
-        ALOGVV("output %zu isDuplicated=%d device=%04x",
+        ALOGVV("output %zu isDuplicated=%d device=%s",
                 i, openOutputs.valueAt(i)->isDuplicated(),
-                openOutputs.valueAt(i)->supportedDevices());
-        if ((device & openOutputs.valueAt(i)->supportedDevices()) == device) {
-            ALOGVV("getOutputsForDevice() found output %d", openOutputs.keyAt(i));
+                openOutputs.valueAt(i)->supportedDevices().toString().c_str());
+        if (openOutputs.valueAt(i)->supportsAllDevices(devices)
+                && openOutputs.valueAt(i)->deviceSupportsEncodedFormats(devices.types())) {
+            ALOGVV("%s() found output %d", __func__, openOutputs.keyAt(i));
             outputs.add(openOutputs.keyAt(i));
         }
     }
@@ -4721,10 +4795,10 @@
 
 void AudioPolicyManager::checkOutputForStrategy(routing_strategy strategy)
 {
-    audio_devices_t oldDevice = getDeviceForStrategy(strategy, true /*fromCache*/);
-    audio_devices_t newDevice = getDeviceForStrategy(strategy, false /*fromCache*/);
-    SortedVector<audio_io_handle_t> srcOutputs = getOutputsForDevice(oldDevice, mPreviousOutputs);
-    SortedVector<audio_io_handle_t> dstOutputs = getOutputsForDevice(newDevice, mOutputs);
+    DeviceVector oldDevices = getDevicesForStrategy(strategy, true /*fromCache*/);
+    DeviceVector newDevices = getDevicesForStrategy(strategy, false /*fromCache*/);
+    SortedVector<audio_io_handle_t> srcOutputs = getOutputsForDevices(oldDevices, mPreviousOutputs);
+    SortedVector<audio_io_handle_t> dstOutputs = getOutputsForDevices(newDevices, mOutputs);
 
     // also take into account external policy-related changes: add all outputs which are
     // associated with policies in the "before" and "after" output vectors
@@ -4744,7 +4818,7 @@
         }
     }
 
-    if (srcOutputs != dstOutputs) {
+    if (!dstOutputs.isEmpty() && srcOutputs != dstOutputs) {
         // get maximum latency of all source outputs to determine the minimum mute time guaranteeing
         // audio from invalidated tracks will be rendered when unmuting
         uint32_t maxLatency = 0;
@@ -4754,14 +4828,17 @@
                 maxLatency = desc->latency();
             }
         }
-        ALOGV("checkOutputForStrategy() strategy %d, moving from output %d to output %d",
-              strategy, srcOutputs[0], dstOutputs[0]);
+        ALOGV_IF(!(srcOutputs.isEmpty() || dstOutputs.isEmpty()),
+              "%s: strategy %d, moving from output %s to output %s", __func__, strategy,
+              std::to_string(srcOutputs[0]).c_str(),
+              std::to_string(dstOutputs[0]).c_str());
         // mute strategy while moving tracks from one output to another
         for (audio_io_handle_t srcOut : srcOutputs) {
             sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueFor(srcOut);
             if (desc != 0 && isStrategyActive(desc, strategy)) {
                 setStrategyMute(strategy, true, desc);
-                setStrategyMute(strategy, false, desc, maxLatency * LATENCY_MUTE_FACTOR, newDevice);
+                setStrategyMute(strategy, false, desc, maxLatency * LATENCY_MUTE_FACTOR,
+                                newDevices.types());
             }
             sp<SourceClientDescriptor> source =
                     getSourceForStrategyOnOutput(srcOut, strategy);
@@ -4880,26 +4957,35 @@
     return device;
 }
 
-audio_devices_t AudioPolicyManager::getNewOutputDevice(const sp<AudioOutputDescriptor>& outputDesc,
-                                                       bool fromCache)
+DeviceVector AudioPolicyManager::getNewOutputDevices(const sp<SwAudioOutputDescriptor>& outputDesc,
+                                                     bool fromCache)
 {
+    DeviceVector devices;
+
     ssize_t index = mAudioPatches.indexOfKey(outputDesc->getPatchHandle());
     if (index >= 0) {
         sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
         if (patchDesc->mUid != mUidCached) {
-            ALOGV("getNewOutputDevice() device %08x forced by patch %d",
-                  outputDesc->device(), outputDesc->getPatchHandle());
-            return outputDesc->device();
+            ALOGV("%s device %s forced by patch %d", __func__,
+                  outputDesc->devices().toString().c_str(), outputDesc->getPatchHandle());
+            return outputDesc->devices();
         }
     }
 
     // Honor explicit routing requests only if no client using default routing is active on this
     // input: a specific app can not force routing for other apps by setting a preferred device.
     bool active; // unused
-    sp<DeviceDescriptor> deviceDesc =
+    sp<DeviceDescriptor> device =
         findPreferredDevice(outputDesc, STRATEGY_NONE, active, mAvailableOutputDevices);
-    if (deviceDesc != nullptr) {
-        return deviceDesc->type();
+    if (device != nullptr) {
+        return DeviceVector(device);
+    }
+
+    // The legacy Engine cannot handle bus devices and mixes, so resolve the conflict between
+    // setForceUse and the default bus device here
+    device = mPolicyMixes.getDeviceAndMixForOutput(outputDesc, mAvailableOutputDevices);
+    if (device != nullptr) {
+        return DeviceVector(device);
     }
 
     // check the following by order of priority to request a routing change if necessary:
@@ -4925,66 +5011,65 @@
     // FIXME: extend use of isStrategyActiveOnSameModule() to all strategies
     // with a refined rule considering mutually exclusive devices (using same backend)
     // as opposed to all streams on the same audio HAL module.
-    audio_devices_t device = AUDIO_DEVICE_NONE;
     if (isStrategyActive(outputDesc, STRATEGY_ENFORCED_AUDIBLE) &&
         mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
-        device = getDeviceForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache);
+        devices = getDevicesForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache);
     } else if (isInCall() ||
                isStrategyActiveOnSameModule(outputDesc, STRATEGY_PHONE)) {
-        device = getDeviceForStrategy(STRATEGY_PHONE, fromCache);
+        devices = getDevicesForStrategy(STRATEGY_PHONE, fromCache);
     } else if (isStrategyActiveOnSameModule(outputDesc, STRATEGY_SONIFICATION)) {
-        device = getDeviceForStrategy(STRATEGY_SONIFICATION, fromCache);
+        devices = getDevicesForStrategy(STRATEGY_SONIFICATION, fromCache);
     } else if (isStrategyActive(outputDesc, STRATEGY_ENFORCED_AUDIBLE)) {
-        device = getDeviceForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache);
+        devices = getDevicesForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache);
     } else if (isStrategyActive(outputDesc, STRATEGY_ACCESSIBILITY)) {
-        device = getDeviceForStrategy(STRATEGY_ACCESSIBILITY, fromCache);
+        devices = getDevicesForStrategy(STRATEGY_ACCESSIBILITY, fromCache);
     } else if (isStrategyActive(outputDesc, STRATEGY_SONIFICATION_RESPECTFUL)) {
-        device = getDeviceForStrategy(STRATEGY_SONIFICATION_RESPECTFUL, fromCache);
+        devices = getDevicesForStrategy(STRATEGY_SONIFICATION_RESPECTFUL, fromCache);
     } else if (isStrategyActive(outputDesc, STRATEGY_MEDIA)) {
-        device = getDeviceForStrategy(STRATEGY_MEDIA, fromCache);
+        devices = getDevicesForStrategy(STRATEGY_MEDIA, fromCache);
     } else if (isStrategyActive(outputDesc, STRATEGY_DTMF)) {
-        device = getDeviceForStrategy(STRATEGY_DTMF, fromCache);
+        devices = getDevicesForStrategy(STRATEGY_DTMF, fromCache);
     } else if (isStrategyActive(outputDesc, STRATEGY_TRANSMITTED_THROUGH_SPEAKER)) {
-        device = getDeviceForStrategy(STRATEGY_TRANSMITTED_THROUGH_SPEAKER, fromCache);
+        devices = getDevicesForStrategy(STRATEGY_TRANSMITTED_THROUGH_SPEAKER, fromCache);
     } else if (isStrategyActive(outputDesc, STRATEGY_REROUTING)) {
-        device = getDeviceForStrategy(STRATEGY_REROUTING, fromCache);
+        devices = getDevicesForStrategy(STRATEGY_REROUTING, fromCache);
     }
 
-    ALOGV("getNewOutputDevice() selected device %x", device);
-    return device;
+    ALOGV("getNewOutputDevice() selected devices %s", devices.toString().c_str());
+    return devices;
 }
 
-audio_devices_t AudioPolicyManager::getNewInputDevice(const sp<AudioInputDescriptor>& inputDesc)
+sp<DeviceDescriptor> AudioPolicyManager::getNewInputDevice(
+        const sp<AudioInputDescriptor>& inputDesc)
 {
-    audio_devices_t device = AUDIO_DEVICE_NONE;
+    sp<DeviceDescriptor> device;
 
     ssize_t index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
     if (index >= 0) {
         sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
         if (patchDesc->mUid != mUidCached) {
-            ALOGV("getNewInputDevice() device %08x forced by patch %d",
-                  inputDesc->mDevice, inputDesc->getPatchHandle());
-            return inputDesc->mDevice;
+            ALOGV("getNewInputDevice() device %s forced by patch %d",
+                  inputDesc->getDevice()->toString().c_str(), inputDesc->getPatchHandle());
+            return inputDesc->getDevice();
         }
     }
 
     // Honor explicit routing requests only if no client using default routing is active on this
     // input: a specific app can not force routing for other apps by setting a preferred device.
     bool active;
-    sp<DeviceDescriptor> deviceDesc =
-        findPreferredDevice(inputDesc, AUDIO_SOURCE_DEFAULT, active, mAvailableInputDevices);
-    if (deviceDesc != nullptr) {
-        return deviceDesc->type();
+    device = findPreferredDevice(inputDesc, AUDIO_SOURCE_DEFAULT, active, mAvailableInputDevices);
+    if (device != nullptr) {
+        return device;
     }
 
     // If we are not in call and no client is active on this input, this method returns
     // AUDIO_DEVICE_NONE, causing the patch on the input stream to be released.
-    audio_source_t source = inputDesc->source();
-    if (source == AUDIO_SOURCE_DEFAULT && isInCall()) {
-        source = AUDIO_SOURCE_VOICE_COMMUNICATION;
+    audio_attributes_t attributes = inputDesc->getHighestPriorityAttributes();
+    if (attributes.source == AUDIO_SOURCE_DEFAULT && isInCall()) {
+        attributes.source = AUDIO_SOURCE_VOICE_COMMUNICATION;
     }
-    if (source != AUDIO_SOURCE_DEFAULT) {
-        device = getDeviceAndMixForInputSource(source);
+    if (attributes.source != AUDIO_SOURCE_DEFAULT) {
+        device = getDeviceAndMixForAttributes(attributes);
     }
 
     return device;
@@ -5006,36 +5091,37 @@
     if (stream < (audio_stream_type_t) 0 || stream >= AUDIO_STREAM_PUBLIC_CNT) {
         return AUDIO_DEVICE_NONE;
     }
-    audio_devices_t activeDevices = AUDIO_DEVICE_NONE;
-    audio_devices_t devices = AUDIO_DEVICE_NONE;
+    DeviceVector activeDevices;
+    DeviceVector devices;
     for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
         if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
             continue;
         }
         routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
-        audio_devices_t curDevices =
-                getDeviceForStrategy((routing_strategy)curStrategy, false /*fromCache*/);
-        devices |= curDevices;
-        for (audio_io_handle_t output : getOutputsForDevice(curDevices, mOutputs)) {
+        DeviceVector curDevices =
+                getDevicesForStrategy((routing_strategy)curStrategy, false /*fromCache*/);
+        devices.merge(curDevices);
+        for (audio_io_handle_t output : getOutputsForDevices(curDevices, mOutputs)) {
             sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
             if (outputDesc->isStreamActive((audio_stream_type_t)curStream)) {
-                activeDevices |= outputDesc->device();
+                activeDevices.merge(outputDesc->devices());
             }
         }
     }
 
     // Favor devices selected on active streams if any to report correct device in case of
     // explicit device selection
-    if (activeDevices != AUDIO_DEVICE_NONE) {
+    if (!activeDevices.isEmpty()) {
         devices = activeDevices;
     }
     /*Filter SPEAKER_SAFE out of results, as AudioService doesn't know about it
       and doesn't really need to.*/
-    if (devices & AUDIO_DEVICE_OUT_SPEAKER_SAFE) {
-        devices |= AUDIO_DEVICE_OUT_SPEAKER;
-        devices &= ~AUDIO_DEVICE_OUT_SPEAKER_SAFE;
+    DeviceVector speakerSafeDevices = devices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_SPEAKER_SAFE);
+    if (!speakerSafeDevices.isEmpty()) {
+        devices.merge(mAvailableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_SPEAKER));
+        devices.remove(speakerSafeDevices);
     }
-    return devices;
+    return devices.types();
 }
 
 routing_strategy AudioPolicyManager::getStrategy(audio_stream_type_t stream) const
@@ -5126,34 +5212,33 @@
     return 0;
 }
 
-audio_devices_t AudioPolicyManager::getDeviceForStrategy(routing_strategy strategy,
-                                                         bool fromCache)
+DeviceVector AudioPolicyManager::getDevicesForStrategy(routing_strategy strategy, bool fromCache)
 {
     // Honor explicit routing requests only if all active clients have a preferred route in which
     // case the last active client route is used
-    sp<DeviceDescriptor> deviceDesc = findPreferredDevice(mOutputs, strategy, mAvailableOutputDevices);
-    if (deviceDesc != nullptr) {
-        return deviceDesc->type();
+    sp<DeviceDescriptor> device = findPreferredDevice(mOutputs, strategy, mAvailableOutputDevices);
+    if (device != nullptr) {
+        return DeviceVector(device);
     }
 
     if (fromCache) {
-        ALOGVV("getDeviceForStrategy() from cache strategy %d, device %x",
-              strategy, mDeviceForStrategy[strategy]);
-        return mDeviceForStrategy[strategy];
+        ALOGVV("%s from cache strategy %d, device %s", __func__, strategy,
+               mDevicesForStrategy[strategy].toString().c_str());
+        return mDevicesForStrategy[strategy];
     }
-    return mEngine->getDeviceForStrategy(strategy);
+    return mAvailableOutputDevices.getDevicesFromTypeMask(mEngine->getDeviceForStrategy(strategy));
 }
 
 void AudioPolicyManager::updateDevicesAndOutputs()
 {
     for (int i = 0; i < NUM_STRATEGIES; i++) {
-        mDeviceForStrategy[i] = getDeviceForStrategy((routing_strategy)i, false /*fromCache*/);
+        mDevicesForStrategy[i] = getDevicesForStrategy((routing_strategy)i, false /*fromCache*/);
     }
     mPreviousOutputs = mOutputs;
 }
 
 uint32_t AudioPolicyManager::checkDeviceMuteStrategies(const sp<AudioOutputDescriptor>& outputDesc,
-                                                       audio_devices_t prevDevice,
+                                                       audio_devices_t prevDeviceType,
                                                        uint32_t delayMs)
 {
     // mute/unmute strategies using an incompatible device combination
@@ -5164,13 +5249,14 @@
     }
 
     uint32_t muteWaitMs = 0;
-    audio_devices_t device = outputDesc->device();
-    bool shouldMute = outputDesc->isActive() && (popcount(device) >= 2);
+    audio_devices_t deviceType = outputDesc->devices().types();
+    bool shouldMute = outputDesc->isActive() && (popcount(deviceType) >= 2);
 
     for (size_t i = 0; i < NUM_STRATEGIES; i++) {
-        audio_devices_t curDevice = getDeviceForStrategy((routing_strategy)i, false /*fromCache*/);
-        curDevice = curDevice & outputDesc->supportedDevices();
-        bool mute = shouldMute && (curDevice & device) && (curDevice != device);
+        audio_devices_t curDeviceType =
+                getDeviceForStrategy((routing_strategy)i, false /*fromCache*/);
+        curDeviceType = curDeviceType & outputDesc->supportedDevices().types();
+        bool mute = shouldMute && (curDeviceType & deviceType) && (curDeviceType != deviceType);
         bool doMute = false;
 
         if (mute && !outputDesc->mStrategyMutedByDevice[i]) {
@@ -5184,12 +5270,11 @@
             for (size_t j = 0; j < mOutputs.size(); j++) {
                 sp<AudioOutputDescriptor> desc = mOutputs.valueAt(j);
                 // skip output if it does not share any device with current output
-                if ((desc->supportedDevices() & outputDesc->supportedDevices())
-                        == AUDIO_DEVICE_NONE) {
+                if (!desc->supportedDevices().containsAtLeastOne(outputDesc->supportedDevices())) {
                     continue;
                 }
                 ALOGVV("checkDeviceMuteStrategies() %s strategy %zu (curDevice %04x)",
-                      mute ? "muting" : "unmuting", i, curDevice);
+                      mute ? "muting" : "unmuting", i, curDeviceType);
                 setStrategyMute((routing_strategy)i, mute, desc, mute ? 0 : delayMs);
                 if (isStrategyActive(desc, (routing_strategy)i)) {
                     if (mute) {
@@ -5209,7 +5294,7 @@
 
     // temporary mute output if device selection changes to avoid volume bursts due to
     // different per device volumes
-    if (outputDesc->isActive() && (device != prevDevice)) {
+    if (outputDesc->isActive() && (deviceType != prevDeviceType)) {
         uint32_t tempMuteWaitMs = outputDesc->latency() * 2;
         // temporary mute duration is conservatively set to 4 times the reported latency
         uint32_t tempMuteDurationMs = outputDesc->latency() * 4;
@@ -5223,7 +5308,7 @@
                 // delayed device change
                 setStrategyMute((routing_strategy)i, true, outputDesc, delayMs);
                 setStrategyMute((routing_strategy)i, false, outputDesc,
-                                delayMs + tempMuteDurationMs, device);
+                                delayMs + tempMuteDurationMs, deviceType);
             }
         }
     }
@@ -5237,46 +5322,45 @@
     return 0;
 }
 
-uint32_t AudioPolicyManager::setOutputDevice(const sp<AudioOutputDescriptor>& outputDesc,
-                                             audio_devices_t device,
-                                             bool force,
-                                             int delayMs,
-                                             audio_patch_handle_t *patchHandle,
-                                             const char *address,
-                                             bool requiresMuteCheck)
+uint32_t AudioPolicyManager::setOutputDevices(const sp<SwAudioOutputDescriptor>& outputDesc,
+                                              const DeviceVector &devices,
+                                              bool force,
+                                              int delayMs,
+                                              audio_patch_handle_t *patchHandle,
+                                              bool requiresMuteCheck)
 {
-    ALOGV("setOutputDevice() device %04x delayMs %d", device, delayMs);
-    AudioParameter param;
+    ALOGV("%s device %s delayMs %d", __func__, devices.toString().c_str(), delayMs);
     uint32_t muteWaitMs;
 
     if (outputDesc->isDuplicated()) {
-        muteWaitMs = setOutputDevice(outputDesc->subOutput1(), device, force, delayMs,
-                nullptr /* patchHandle */, nullptr /* address */, requiresMuteCheck);
-        muteWaitMs += setOutputDevice(outputDesc->subOutput2(), device, force, delayMs,
-                nullptr /* patchHandle */, nullptr /* address */, requiresMuteCheck);
+        muteWaitMs = setOutputDevices(outputDesc->subOutput1(), devices, force, delayMs,
+                nullptr /* patchHandle */, requiresMuteCheck);
+        muteWaitMs += setOutputDevices(outputDesc->subOutput2(), devices, force, delayMs,
+                nullptr /* patchHandle */, requiresMuteCheck);
         return muteWaitMs;
     }
-    // no need to proceed if new device is not AUDIO_DEVICE_NONE and not supported by current
-    // output profile
-    if ((device != AUDIO_DEVICE_NONE) &&
-            ((device & outputDesc->supportedDevices()) == AUDIO_DEVICE_NONE)) {
-        return 0;
-    }
 
     // filter devices according to output selected
-    device = (audio_devices_t)(device & outputDesc->supportedDevices());
+    DeviceVector filteredDevices = outputDesc->filterSupportedDevices(devices);
 
-    audio_devices_t prevDevice = outputDesc->mDevice;
+    // no need to proceed if the requested devices are not empty and none of them is supported
+    // by the current output profile
+    if (!devices.isEmpty() && filteredDevices.isEmpty()) {
+        ALOGV("%s: unsupported device %s for output", __func__, devices.toString().c_str());
+        return 0;
+    }
 
-    ALOGV("setOutputDevice() prevDevice 0x%04x", prevDevice);
+    DeviceVector prevDevices = outputDesc->devices();
 
-    if (device != AUDIO_DEVICE_NONE) {
-        outputDesc->mDevice = device;
+    ALOGV("setOutputDevices() prevDevice %s", prevDevices.toString().c_str());
+
+    if (!filteredDevices.isEmpty()) {
+        outputDesc->setDevices(filteredDevices);
     }
 
     // if the outputs are not materially active, there is no need to mute.
     if (requiresMuteCheck) {
-        muteWaitMs = checkDeviceMuteStrategies(outputDesc, prevDevice, delayMs);
+        muteWaitMs = checkDeviceMuteStrategies(outputDesc, prevDevices.types(), delayMs);
     } else {
         ALOGV("%s: suppressing checkDeviceMuteStrategies", __func__);
         muteWaitMs = 0;
@@ -5287,42 +5371,32 @@
     //      OR the requested device is the same as current device
     //  AND force is not specified
     //  AND the output is connected by a valid audio patch.
-    // Doing this check here allows the caller to call setOutputDevice() without conditions
-    if ((device == AUDIO_DEVICE_NONE || device == prevDevice) &&
-        !force &&
-        outputDesc->getPatchHandle() != 0) {
-        ALOGV("setOutputDevice() setting same device 0x%04x or null device", device);
+    // Doing this check here allows the caller to call setOutputDevices() without conditions
+    if ((filteredDevices.isEmpty() || filteredDevices == prevDevices) &&
+            !force && outputDesc->getPatchHandle() != 0) {
+        ALOGV("%s setting same device %s or null device, force=%d, patch handle=%d", __func__,
+              filteredDevices.toString().c_str(), force, outputDesc->getPatchHandle());
         return muteWaitMs;
     }
 
-    ALOGV("setOutputDevice() changing device");
+    ALOGV("%s changing device to %s", __func__, filteredDevices.toString().c_str());
 
     // do the routing
-    if (device == AUDIO_DEVICE_NONE) {
+    if (filteredDevices.isEmpty()) {
         resetOutputDevice(outputDesc, delayMs, NULL);
     } else {
-        DeviceVector deviceList;
-        if ((address == NULL) || (strlen(address) == 0)) {
-            deviceList = mAvailableOutputDevices.getDevicesFromTypeMask(device);
-        } else {
-            sp<DeviceDescriptor> deviceDesc = mAvailableOutputDevices.getDevice(
-                    device, String8(address));
-            if (deviceDesc) deviceList.add(deviceDesc);
+        PatchBuilder patchBuilder;
+        patchBuilder.addSource(outputDesc);
+        ALOG_ASSERT(filteredDevices.size() <= AUDIO_PATCH_PORTS_MAX, "Too many sink ports");
+        for (const auto &filteredDevice : filteredDevices) {
+            patchBuilder.addSink(filteredDevice);
         }
 
-        if (!deviceList.isEmpty()) {
-            PatchBuilder patchBuilder;
-            patchBuilder.addSource(outputDesc);
-            ALOG_ASSERT(deviceList.size() <= AUDIO_PATCH_PORTS_MAX, "Too many sink ports");
-            for (const auto &device : deviceList) {
-                patchBuilder.addSink(device);
-            }
-            installPatch(__func__, patchHandle, outputDesc.get(), patchBuilder.patch(), delayMs);
-        }
+        installPatch(__func__, patchHandle, outputDesc.get(), patchBuilder.patch(), delayMs);
     }
 
     // update stream volumes according to new device
-    applyStreamVolumes(outputDesc, device, delayMs);
+    applyStreamVolumes(outputDesc, filteredDevices.types(), delayMs);
 
     return muteWaitMs;
 }
@@ -5351,18 +5425,17 @@
 }
 
 status_t AudioPolicyManager::setInputDevice(audio_io_handle_t input,
-                                            audio_devices_t device,
+                                            const sp<DeviceDescriptor> &device,
                                             bool force,
                                             audio_patch_handle_t *patchHandle)
 {
     status_t status = NO_ERROR;
 
     sp<AudioInputDescriptor> inputDesc = mInputs.valueFor(input);
-    if ((device != AUDIO_DEVICE_NONE) && ((device != inputDesc->mDevice) || force)) {
-        inputDesc->mDevice = device;
+    if ((device != nullptr) && ((device != inputDesc->getDevice()) || force)) {
+        inputDesc->setDevice(device);
 
-        DeviceVector deviceList = mAvailableInputDevices.getDevicesFromTypeMask(device);
-        if (!deviceList.isEmpty()) {
+        if (mAvailableInputDevices.contains(device)) {
             PatchBuilder patchBuilder;
             patchBuilder.addSink(inputDesc,
             // AUDIO_SOURCE_HOTWORD is for internal use only:
@@ -5374,7 +5447,7 @@
                         }
                         return result; }).
             //only one input device for now
-                    addSource(deviceList.itemAt(0));
+                    addSource(device);
             status = installPatch(__func__, patchHandle, inputDesc.get(), patchBuilder.patch(), 0);
         }
     }
@@ -5404,8 +5477,7 @@
     return status;
 }
 
-sp<IOProfile> AudioPolicyManager::getInputProfile(audio_devices_t device,
-                                                  const String8& address,
+sp<IOProfile> AudioPolicyManager::getInputProfile(const sp<DeviceDescriptor> &device,
                                                   uint32_t& samplingRate,
                                                   audio_format_t& format,
                                                   audio_channel_mask_t& channelMask,
@@ -5425,7 +5497,7 @@
         for (const auto& profile : hwModule->getInputProfiles()) {
             // profile->log();
             //updatedFormat = format;
-            if (profile->isCompatibleProfile(device, address, samplingRate,
+            if (profile->isCompatibleProfile(DeviceVector(device), samplingRate,
                                              &samplingRate  /*updatedSamplingRate*/,
                                              format,
                                              &format,       /*updatedFormat*/
@@ -5436,7 +5508,7 @@
                                              true /*exactMatchRequiredForInputFlags*/)) {
                 return profile;
             }
-            if (firstInexact == nullptr && profile->isCompatibleProfile(device, address,
+            if (firstInexact == nullptr && profile->isCompatibleProfile(DeviceVector(device),
                                              samplingRate,
                                              &updatedSamplingRate,
                                              format,
@@ -5460,32 +5532,34 @@
     return NULL;
 }
 
-
-audio_devices_t AudioPolicyManager::getDeviceAndMixForInputSource(audio_source_t inputSource,
-                                                                  AudioMix **policyMix)
+sp<DeviceDescriptor> AudioPolicyManager::getDeviceAndMixForAttributes(
+        const audio_attributes_t &attributes, AudioMix **policyMix)
 {
     // Honor explicit routing requests only if all active clients have a preferred route in which
     // case the last active client route is used
-    sp<DeviceDescriptor> deviceDesc =
-        findPreferredDevice(mInputs, inputSource, mAvailableInputDevices);
-    if (deviceDesc != nullptr) {
-        return deviceDesc->type();
+    sp<DeviceDescriptor> device =
+        findPreferredDevice(mInputs, attributes.source, mAvailableInputDevices);
+    if (device != nullptr) {
+        return device;
     }
 
-
-    audio_devices_t availableDeviceTypes = mAvailableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
-    audio_devices_t selectedDeviceFromMix =
-           mPolicyMixes.getDeviceAndMixForInputSource(inputSource, availableDeviceTypes, policyMix);
-
-    if (selectedDeviceFromMix != AUDIO_DEVICE_NONE) {
-        return selectedDeviceFromMix;
-    }
-    return getDeviceForInputSource(inputSource);
+    sp<DeviceDescriptor> selectedDeviceFromMix =
+           mPolicyMixes.getDeviceAndMixForInputSource(attributes.source, mAvailableInputDevices,
+                                                      policyMix);
+    return (selectedDeviceFromMix != nullptr) ?
+           selectedDeviceFromMix : getDeviceForAttributes(attributes);
 }
 
-audio_devices_t AudioPolicyManager::getDeviceForInputSource(audio_source_t inputSource)
+sp<DeviceDescriptor> AudioPolicyManager::getDeviceForAttributes(const audio_attributes_t &attributes)
 {
-    return mEngine->getDeviceForInputSource(inputSource);
+    audio_devices_t device = mEngine->getDeviceForInputSource(attributes.source);
+    if (attributes.source == AUDIO_SOURCE_REMOTE_SUBMIX &&
+                strncmp(attributes.tags, "addr=", strlen("addr=")) == 0) {
+        return mAvailableInputDevices.getDevice(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
+                                                String8(attributes.tags + strlen("addr=")),
+                                                AUDIO_FORMAT_DEFAULT);
+    }
+    return mAvailableInputDevices.getDevice(device, String8(), AUDIO_FORMAT_DEFAULT);
 }
 
 float AudioPolicyManager::computeVolume(audio_stream_type_t stream,
@@ -5603,6 +5677,15 @@
     float minDst = (float)mVolumeCurves->getVolumeIndexMin(dstStream);
     float maxDst = (float)mVolumeCurves->getVolumeIndexMax(dstStream);
 
+    // preserve a mute request (index 0) or clamp the index to the source stream's valid range
+    if (srcIndex < minSrc) {
+        if (srcIndex == 0) {
+            return 0;
+        }
+        srcIndex = minSrc;
+    } else if (srcIndex > maxSrc) {
+        srcIndex = maxSrc;
+    }
     return (int)(minDst + ((srcIndex - minSrc) * (maxDst - minDst)) / (maxSrc - minSrc));
 }
 
@@ -5630,7 +5713,7 @@
     }
 
     if (device == AUDIO_DEVICE_NONE) {
-        device = outputDesc->device();
+        device = outputDesc->devices().types();
     }
 
     float volumeDb = computeVolume(stream, index, device);
@@ -5701,7 +5784,7 @@
                                            audio_devices_t device)
 {
     if (device == AUDIO_DEVICE_NONE) {
-        device = outputDesc->device();
+        device = outputDesc->devices().types();
     }
 
     ALOGVV("setStreamMute() stream %d, mute %d, mMuteCount %d device %04x",
@@ -5798,9 +5881,9 @@
     return false;
 }
 
-bool AudioPolicyManager::isStrategyActiveOnSameModule(const sp<AudioOutputDescriptor>& outputDesc,
-                                          routing_strategy strategy, uint32_t inPastMs,
-                                          nsecs_t sysTime) const
+bool AudioPolicyManager::isStrategyActiveOnSameModule(const sp<SwAudioOutputDescriptor>& outputDesc,
+                                                      routing_strategy strategy, uint32_t inPastMs,
+                                                      nsecs_t sysTime) const
 {
     for (size_t i = 0; i < mOutputs.size(); i++) {
         sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
@@ -5859,6 +5942,8 @@
             releaseAudioPatch(patchDesc->mHandle, patchDesc->mUid);
         }
     }
+
+    mHwModules.cleanUpForDevice(deviceDesc);
 }
 
 void AudioPolicyManager::modifySurroundFormats(
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 9eb1dcf..de6d489 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -97,12 +97,14 @@
         virtual status_t setDeviceConnectionState(audio_devices_t device,
                                                           audio_policy_dev_state_t state,
                                                           const char *device_address,
-                                                          const char *device_name);
+                                                          const char *device_name,
+                                                          audio_format_t encodedFormat);
         virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
                                                                               const char *device_address);
         virtual status_t handleDeviceConfigChange(audio_devices_t device,
                                                   const char *device_address,
-                                                  const char *device_name);
+                                                  const char *device_name,
+                                                  audio_format_t encodedFormat);
         virtual void setPhoneState(audio_mode_t state);
         virtual void setForceUse(audio_policy_force_use_t usage,
                                  audio_policy_forced_cfg_t config);
@@ -239,6 +241,9 @@
                                             bool reported);
         virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled);
 
+        virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
+                    std::vector<audio_format_t> *formats);
+
         // return the strategy corresponding to a given stream type
         routing_strategy getStrategy(audio_stream_type_t stream) const;
 
@@ -313,36 +318,40 @@
         //  where conditions are changing (setDeviceConnectionState(), setPhoneState()...) AND
         //  before updateDevicesAndOutputs() is called.
         virtual audio_devices_t getDeviceForStrategy(routing_strategy strategy,
-                                                     bool fromCache);
+                                                     bool fromCache)
+        {
+            return getDevicesForStrategy(strategy, fromCache).types();
+        }
+
+        DeviceVector getDevicesForStrategy(routing_strategy strategy, bool fromCache);
 
         bool isStrategyActive(const sp<AudioOutputDescriptor>& outputDesc, routing_strategy strategy,
                               uint32_t inPastMs = 0, nsecs_t sysTime = 0) const;
 
-        bool isStrategyActiveOnSameModule(const sp<AudioOutputDescriptor>& outputDesc,
-                                                  routing_strategy strategy, uint32_t inPastMs = 0,
-                                                  nsecs_t sysTime = 0) const;
+        bool isStrategyActiveOnSameModule(const sp<SwAudioOutputDescriptor>& outputDesc,
+                                          routing_strategy strategy, uint32_t inPastMs = 0,
+                                          nsecs_t sysTime = 0) const;
 
         // change the route of the specified output. Returns the number of ms we have slept to
         // allow new routing to take effect in certain cases.
-        virtual uint32_t setOutputDevice(const sp<AudioOutputDescriptor>& outputDesc,
-                             audio_devices_t device,
-                             bool force = false,
-                             int delayMs = 0,
-                             audio_patch_handle_t *patchHandle = NULL,
-                             const char *address = nullptr,
-                             bool requiresMuteCheck = true);
+        uint32_t setOutputDevices(const sp<SwAudioOutputDescriptor>& outputDesc,
+                                  const DeviceVector &device,
+                                  bool force = false,
+                                  int delayMs = 0,
+                                  audio_patch_handle_t *patchHandle = NULL,
+                                  bool requiresMuteCheck = true);
         status_t resetOutputDevice(const sp<AudioOutputDescriptor>& outputDesc,
                                    int delayMs = 0,
                                    audio_patch_handle_t *patchHandle = NULL);
         status_t setInputDevice(audio_io_handle_t input,
-                                audio_devices_t device,
+                                const sp<DeviceDescriptor> &device,
                                 bool force = false,
                                 audio_patch_handle_t *patchHandle = NULL);
         status_t resetInputDevice(audio_io_handle_t input,
                                   audio_patch_handle_t *patchHandle = NULL);
 
         // select input device corresponding to requested audio source
-        virtual audio_devices_t getDeviceForInputSource(audio_source_t inputSource);
+        sp<DeviceDescriptor> getDeviceForAttributes(const audio_attributes_t &attributes);
 
         // compute the actual volume for a given stream according to the requested index and a particular
         // device
@@ -391,15 +400,13 @@
         // when a device is disconnected, checks if an output is not used any more and
         // returns its handle if any.
         // transfers the audio tracks and effects from one output thread to another accordingly.
-        status_t checkOutputsForDevice(const sp<DeviceDescriptor>& devDesc,
+        status_t checkOutputsForDevice(const sp<DeviceDescriptor>& device,
                                        audio_policy_dev_state_t state,
-                                       SortedVector<audio_io_handle_t>& outputs,
-                                       const String8& address);
+                                       SortedVector<audio_io_handle_t>& outputs);
 
-        status_t checkInputsForDevice(const sp<DeviceDescriptor>& devDesc,
+        status_t checkInputsForDevice(const sp<DeviceDescriptor>& device,
                                       audio_policy_dev_state_t state,
-                                      SortedVector<audio_io_handle_t>& inputs,
-                                      const String8& address);
+                                      SortedVector<audio_io_handle_t>& inputs);
 
         // close an output and its companion duplicating output.
         void closeOutput(audio_io_handle_t output);
@@ -437,8 +444,8 @@
         // must be called every time a condition that affects the device choice for a given output is
         // changed: connected device, phone state, force use, output start, output stop..
         // see getDeviceForStrategy() for the use of fromCache parameter
-        audio_devices_t getNewOutputDevice(const sp<AudioOutputDescriptor>& outputDesc,
-                                           bool fromCache);
+        DeviceVector getNewOutputDevices(const sp<SwAudioOutputDescriptor>& outputDesc,
+                                         bool fromCache);
 
         // updates cache of device used by all strategies (mDeviceForStrategy[])
         // must be called every time a condition that affects the device choice for a given strategy is
@@ -448,7 +455,7 @@
         void updateDevicesAndOutputs();
 
         // selects the most appropriate device on input for current state
-        audio_devices_t getNewInputDevice(const sp<AudioInputDescriptor>& inputDesc);
+        sp<DeviceDescriptor> getNewInputDevice(const sp<AudioInputDescriptor>& inputDesc);
 
         virtual uint32_t getMaxEffectsCpuLoad()
         {
@@ -460,16 +467,16 @@
             return mEffects.getMaxEffectsMemory();
         }
 
-        SortedVector<audio_io_handle_t> getOutputsForDevice(audio_devices_t device,
-                                                            const SwAudioOutputCollection& openOutputs);
+        SortedVector<audio_io_handle_t> getOutputsForDevices(
+                const DeviceVector &devices, const SwAudioOutputCollection& openOutputs);
 
         // mute/unmute strategies using an incompatible device combination
         // if muting, wait for the audio in pcm buffer to be drained before proceeding
         // if unmuting, unmute only after the specified delay
         // Returns the number of ms waited
         virtual uint32_t  checkDeviceMuteStrategies(const sp<AudioOutputDescriptor>& outputDesc,
-                                            audio_devices_t prevDevice,
-                                            uint32_t delayMs);
+                                                    audio_devices_t prevDeviceType,
+                                                    uint32_t delayMs);
 
         audio_io_handle_t selectOutput(const SortedVector<audio_io_handle_t>& outputs,
                                        audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
@@ -477,13 +484,22 @@
                                        audio_channel_mask_t channelMask = AUDIO_CHANNEL_NONE,
                                        uint32_t samplingRate = 0);
         // samplingRate, format, channelMask are in/out and so may be modified
-        sp<IOProfile> getInputProfile(audio_devices_t device,
-                                      const String8& address,
+        sp<IOProfile> getInputProfile(const sp<DeviceDescriptor> & device,
                                       uint32_t& samplingRate,
                                       audio_format_t& format,
                                       audio_channel_mask_t& channelMask,
                                       audio_input_flags_t flags);
-        sp<IOProfile> getProfileForOutput(audio_devices_t device,
+        /**
+         * @brief getProfileForOutput
+         * @param devices vector of device descriptors; may be empty if the device is to be ignored
+         * @param samplingRate
+         * @param format
+         * @param channelMask
+         * @param flags
+         * @param directOnly
+         * @return IOProfile to be used if found, nullptr otherwise
+         */
+        sp<IOProfile> getProfileForOutput(const DeviceVector &devices,
                                           uint32_t samplingRate,
                                           audio_format_t format,
                                           audio_channel_mask_t channelMask,
@@ -501,19 +517,26 @@
             return mAudioPatches.removeAudioPatch(handle);
         }
 
-        audio_devices_t availablePrimaryOutputDevices() const
+        bool isPrimaryModule(const sp<HwModule> &module) const
         {
-            if (!hasPrimaryOutput()) {
-                return AUDIO_DEVICE_NONE;
+            if (module == 0 || !hasPrimaryOutput()) {
+                return false;
             }
-            return mPrimaryOutput->supportedDevices() & mAvailableOutputDevices.types();
+            return module->getHandle() == mPrimaryOutput->getModuleHandle();
         }
-        audio_devices_t availablePrimaryInputDevices() const
+        DeviceVector availablePrimaryOutputDevices() const
         {
             if (!hasPrimaryOutput()) {
-                return AUDIO_DEVICE_NONE;
+                return DeviceVector();
             }
-            return mAvailableInputDevices.getDeviceTypesFromHwModule(
+            return mAvailableOutputDevices.filter(mPrimaryOutput->supportedDevices());
+        }
+        DeviceVector availablePrimaryModuleInputDevices() const
+        {
+            if (!hasPrimaryOutput()) {
+                return DeviceVector();
+            }
+            return mAvailableInputDevices.getDevicesFromHwModule(
                     mPrimaryOutput->getModuleHandle());
         }
         /**
@@ -530,8 +553,9 @@
             return (devices.size() > 0) ? devices.itemAt(0)->address() : String8("");
         }
 
-        uint32_t updateCallRouting(audio_devices_t rxDevice, uint32_t delayMs = 0);
-        sp<AudioPatch> createTelephonyPatch(bool isRx, audio_devices_t device, uint32_t delayMs);
+        uint32_t updateCallRouting(const DeviceVector &rxDevices, uint32_t delayMs = 0);
+        sp<AudioPatch> createTelephonyPatch(bool isRx, const sp<DeviceDescriptor> &device,
+                                            uint32_t delayMs);
         sp<DeviceDescriptor> findDevice(
                 const DeviceVector& devices, audio_devices_t device) const;
         audio_devices_t getModuleDeviceTypes(
@@ -581,7 +605,16 @@
         DeviceVector  mAvailableInputDevices;  // all available input devices
 
         bool    mLimitRingtoneVolume;        // limit ringtone volume to music volume if headset connected
-        audio_devices_t mDeviceForStrategy[NUM_STRATEGIES];
+
+        /**
+         * @brief mDevicesForStrategy vector of devices assigned to each strategy.
+         * Note: when a device is removed (@see setDeviceConnectionState), its descriptor is
+         * removed from @see mAvailableOutputDevices or @see mAvailableInputDevices,
+         * and the devices for strategies are reevaluated within the
+         * @see setDeviceConnectionState function.
+         */
+        DeviceVector mDevicesForStrategy[NUM_STRATEGIES];
+
         float   mLastVoiceVolume;            // last voice volume value sent to audio HAL
         bool    mA2dpSuspended;  // true if A2DP output is suspended
 
@@ -637,13 +670,14 @@
 
         // Support for Multi-Stream Decoder (MSD) module
         sp<DeviceDescriptor> getMsdAudioInDevice() const;
+        DeviceVector getMsdAudioOutDevices() const;
         const AudioPatchCollection getMsdPatches() const;
-        status_t getBestMsdAudioProfileFor(audio_devices_t outputDevice,
+        status_t getBestMsdAudioProfileFor(const sp<DeviceDescriptor> &outputDevice,
                                            bool hwAvSync,
                                            audio_port_config *sourceConfig,
                                            audio_port_config *sinkConfig) const;
-        PatchBuilder buildMsdPatch(audio_devices_t outputDevice) const;
-        status_t setMsdPatch(audio_devices_t outputDevice = AUDIO_DEVICE_NONE);
+        PatchBuilder buildMsdPatch(const sp<DeviceDescriptor> &outputDevice) const;
+        status_t setMsdPatch(const sp<DeviceDescriptor> &outputDevice = nullptr);
 
         // If any, resolve any "dynamic" fields of an Audio Profiles collection
         void updateAudioProfiles(const sp<DeviceDescriptor>& devDesc, audio_io_handle_t ioHandle,
@@ -654,22 +688,12 @@
         // It can give a chance to HAL implementer to retrieve dynamic capabilities associated
         // to this device for example.
         // TODO avoid opening stream to retrieve capabilities of a profile.
-        void broadcastDeviceConnectionState(audio_devices_t device,
-                                            audio_policy_dev_state_t state,
-                                            const String8 &device_address);
+        void broadcastDeviceConnectionState(const sp<DeviceDescriptor> &device,
+                                            audio_policy_dev_state_t state);
 
         // updates device caching and output for streams that can influence the
         //    routing of notifications
         void handleNotificationRoutingForStream(audio_stream_type_t stream);
-        // find the outputs on a given output descriptor that have the given address.
-        // to be called on an AudioOutputDescriptor whose supported devices (as defined
-        //   in mProfile->mSupportedDevices) matches the device whose address is to be matched.
-        // see deviceDistinguishesOnAddress(audio_devices_t) for whether the device type is one
-        //   where addresses are used to distinguish between one connected device and another.
-        void findIoHandlesByAddress(const sp<SwAudioOutputDescriptor>& desc /*in*/,
-                const audio_devices_t device /*in*/,
-                const String8& address /*in*/,
-                SortedVector<audio_io_handle_t>& outputs /*out*/);
         uint32_t curAudioPortGeneration() const { return mAudioPortGeneration; }
         // internal method, get audio_attributes_t from either a source audio_attributes_t
         // or audio_stream_type_t, respectively.
@@ -687,15 +711,14 @@
                 audio_output_flags_t *flags,
                 audio_port_handle_t *selectedDeviceId);
         // internal method to return the output handle for the given device and format
-        audio_io_handle_t getOutputForDevice(
-                audio_devices_t device,
+        audio_io_handle_t getOutputForDevices(
+                const DeviceVector &devices,
                 audio_session_t session,
                 audio_stream_type_t stream,
                 const audio_config_t *config,
                 audio_output_flags_t *flags);
         // internal method to return the input handle for the given device and format
-        audio_io_handle_t getInputForDevice(audio_devices_t device,
-                String8 address,
+        audio_io_handle_t getInputForDevice(const sp<DeviceDescriptor> &device,
                 audio_session_t session,
                 audio_source_t inputSource,
                 const audio_config_base_t *config,
@@ -713,14 +736,15 @@
 
         // select input device corresponding to requested audio source and return associated policy
         // mix if any. Calls getDeviceForInputSource().
-        audio_devices_t getDeviceAndMixForInputSource(audio_source_t inputSource,
-                                                        AudioMix **policyMix = NULL);
+        sp<DeviceDescriptor> getDeviceAndMixForAttributes(const audio_attributes_t &attributes,
+                                                          AudioMix **policyMix = NULL);
 
         // Called by setDeviceConnectionState().
-        status_t setDeviceConnectionStateInt(audio_devices_t device,
-                                                          audio_policy_dev_state_t state,
-                                                          const char *device_address,
-                                                          const char *device_name);
+        status_t setDeviceConnectionStateInt(audio_devices_t deviceType,
+                                             audio_policy_dev_state_t state,
+                                             const char *device_address,
+                                             const char *device_name,
+                                             audio_format_t encodedFormat);
         void updateMono(audio_io_handle_t output) {
             AudioParameter param;
             param.addInt(String8(AudioParameter::keyMonoOutput), (int)mMasterMono);
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 2c904d9..49c541c 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -32,7 +32,8 @@
 status_t AudioPolicyService::setDeviceConnectionState(audio_devices_t device,
                                                   audio_policy_dev_state_t state,
                                                   const char *device_address,
-                                                  const char *device_name)
+                                                  const char *device_name,
+                                                  audio_format_t encodedFormat)
 {
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
@@ -49,7 +50,7 @@
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
     return mAudioPolicyManager->setDeviceConnectionState(device, state,
-                                                         device_address, device_name);
+                                                         device_address, device_name, encodedFormat);
 }
 
 audio_policy_dev_state_t AudioPolicyService::getDeviceConnectionState(
@@ -66,7 +67,8 @@
 
 status_t AudioPolicyService::handleDeviceConfigChange(audio_devices_t device,
                                                   const char *device_address,
-                                                  const char *device_name)
+                                                  const char *device_name,
+                                                  audio_format_t encodedFormat)
 {
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
@@ -79,7 +81,7 @@
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
     return mAudioPolicyManager->handleDeviceConfigChange(device, device_address,
-                                                         device_name);
+                                                         device_name, encodedFormat);
 }
 
 status_t AudioPolicyService::setPhoneState(audio_mode_t state)
@@ -1138,6 +1140,17 @@
                                                    surroundFormatsEnabled, reported);
 }
 
+status_t AudioPolicyService::getHwOffloadEncodingFormatsSupportedForA2DP(
+                                        std::vector<audio_format_t> *formats)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+    Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
+    return mAudioPolicyManager->getHwOffloadEncodingFormatsSupportedForA2DP(formats);
+}
+
 status_t AudioPolicyService::setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled)
 {
     if (mAudioPolicyManager == NULL) {
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 959e757..c073b7c 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -61,13 +61,15 @@
     virtual status_t setDeviceConnectionState(audio_devices_t device,
                                               audio_policy_dev_state_t state,
                                               const char *device_address,
-                                              const char *device_name);
+                                              const char *device_name,
+                                              audio_format_t encodedFormat);
     virtual audio_policy_dev_state_t getDeviceConnectionState(
                                                                 audio_devices_t device,
                                                                 const char *device_address);
     virtual status_t handleDeviceConfigChange(audio_devices_t device,
                                               const char *device_address,
-                                              const char *device_name);
+                                              const char *device_name,
+                                              audio_format_t encodedFormat);
     virtual status_t setPhoneState(audio_mode_t state);
     virtual status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
     virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
@@ -218,6 +220,8 @@
                                         audio_format_t *surroundFormats,
                                         bool *surroundFormatsEnabled,
                                         bool reported);
+    virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
+                                        std::vector<audio_format_t> *formats);
     virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled);
 
     virtual status_t setAssistantUid(uid_t uid);
diff --git a/services/audiopolicy/tests/Android.mk b/services/audiopolicy/tests/Android.mk
index 2ccb542..e4fba0f 100644
--- a/services/audiopolicy/tests/Android.mk
+++ b/services/audiopolicy/tests/Android.mk
@@ -29,6 +29,8 @@
 
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
+LOCAL_COMPATIBILITY_SUITE := device-tests
+
 include $(BUILD_NATIVE_TEST)
 
 # system/audio.h utilities test
@@ -55,4 +57,6 @@
 
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
+LOCAL_COMPATIBILITY_SUITE := device-tests
+
 include $(BUILD_NATIVE_TEST)
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index 24326bb..e9f4657 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -117,9 +117,14 @@
     explicit PatchCountCheck(AudioPolicyManagerTestClient *client)
             : mClient{client},
               mInitialCount{mClient->getActivePatchesCount()} {}
-    void assertDelta(int delta) const {
-        ASSERT_EQ(mInitialCount + delta, mClient->getActivePatchesCount()); }
-    void assertNoChange() const { assertDelta(0); }
+    int deltaFromSnapshot() const {
+        size_t currentCount = mClient->getActivePatchesCount();
+        if (mInitialCount <= currentCount) {
+            return currentCount - mInitialCount;
+        } else {
+            return -(static_cast<int>(mInitialCount - currentCount));
+        }
+    }
   private:
     const AudioPolicyManagerTestClient *mClient;
     const size_t mInitialCount;
@@ -139,7 +144,7 @@
             int sampleRate,
             audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
             audio_port_handle_t *portId = nullptr);
-    PatchCountCheck snapPatchCount() { return PatchCountCheck(mClient.get()); }
+    PatchCountCheck snapshotPatchCount() { return PatchCountCheck(mClient.get()); }
 
     std::unique_ptr<AudioPolicyManagerTestClient> mClient;
     std::unique_ptr<AudioPolicyTestManager> mManager;
@@ -225,7 +230,7 @@
 TEST_F(AudioPolicyManagerTest, CreateAudioPatchFailure) {
     audio_patch patch{};
     audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
-    const PatchCountCheck patchCount = snapPatchCount();
+    const PatchCountCheck patchCount = snapshotPatchCount();
     ASSERT_EQ(BAD_VALUE, mManager->createAudioPatch(nullptr, &handle, 0));
     ASSERT_EQ(BAD_VALUE, mManager->createAudioPatch(&patch, nullptr, 0));
     ASSERT_EQ(BAD_VALUE, mManager->createAudioPatch(&patch, &handle, 0));
@@ -252,20 +257,20 @@
     ASSERT_EQ(INVALID_OPERATION, mManager->createAudioPatch(&patch, &handle, 0));
     // Verify that the handle is left unchanged.
     ASSERT_EQ(AUDIO_PATCH_HANDLE_NONE, handle);
-    patchCount.assertNoChange();
+    ASSERT_EQ(0, patchCount.deltaFromSnapshot());
 }
 
 TEST_F(AudioPolicyManagerTest, CreateAudioPatchFromMix) {
     audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
     uid_t uid = 42;
-    const PatchCountCheck patchCount = snapPatchCount();
+    const PatchCountCheck patchCount = snapshotPatchCount();
     ASSERT_FALSE(mManager->getConfig().getAvailableInputDevices().isEmpty());
     PatchBuilder patchBuilder;
     patchBuilder.addSource(mManager->getConfig().getAvailableInputDevices()[0]).
             addSink(mManager->getConfig().getDefaultOutputDevice());
     ASSERT_EQ(NO_ERROR, mManager->createAudioPatch(patchBuilder.patch(), &handle, uid));
     ASSERT_NE(AUDIO_PATCH_HANDLE_NONE, handle);
-    patchCount.assertDelta(1);
+    ASSERT_EQ(1, patchCount.deltaFromSnapshot());
 }
 
 // TODO: Add patch creation tests that involve already existing patch
@@ -350,84 +355,82 @@
 }
 
 TEST_F(AudioPolicyManagerTestMsd, PatchCreationOnSetForceUse) {
-    const PatchCountCheck patchCount = snapPatchCount();
+    const PatchCountCheck patchCount = snapshotPatchCount();
     mManager->setForceUse(AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND,
             AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS);
-    patchCount.assertDelta(1);
+    ASSERT_EQ(1, patchCount.deltaFromSnapshot());
 }
 
 TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrEncodedRoutesToMsd) {
-    const PatchCountCheck patchCount = snapPatchCount();
+    const PatchCountCheck patchCount = snapshotPatchCount();
     audio_port_handle_t selectedDeviceId;
     getOutputForAttr(&selectedDeviceId,
             AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT);
     ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
-    patchCount.assertDelta(1);
+    ASSERT_EQ(1, patchCount.deltaFromSnapshot());
 }
 
 TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrPcmRoutesToMsd) {
-    const PatchCountCheck patchCount = snapPatchCount();
+    const PatchCountCheck patchCount = snapshotPatchCount();
     audio_port_handle_t selectedDeviceId;
     getOutputForAttr(&selectedDeviceId,
             AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 48000);
     ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
-    patchCount.assertDelta(1);
+    ASSERT_EQ(1, patchCount.deltaFromSnapshot());
 }
 
 TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrEncodedPlusPcmRoutesToMsd) {
-    const PatchCountCheck patchCount = snapPatchCount();
+    const PatchCountCheck patchCount = snapshotPatchCount();
     audio_port_handle_t selectedDeviceId;
     getOutputForAttr(&selectedDeviceId,
             AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT);
     ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
-    patchCount.assertDelta(1);
+    ASSERT_EQ(1, patchCount.deltaFromSnapshot());
     getOutputForAttr(&selectedDeviceId,
             AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 48000);
     ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
-    patchCount.assertDelta(1);
+    ASSERT_EQ(1, patchCount.deltaFromSnapshot());
 }
 
 TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrUnsupportedFormatBypassesMsd) {
-    const PatchCountCheck patchCount = snapPatchCount();
+    const PatchCountCheck patchCount = snapshotPatchCount();
     audio_port_handle_t selectedDeviceId;
     getOutputForAttr(&selectedDeviceId,
             AUDIO_FORMAT_DTS, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT);
     ASSERT_NE(selectedDeviceId, mMsdOutputDevice->getId());
-    patchCount.assertNoChange();
+    ASSERT_EQ(0, patchCount.deltaFromSnapshot());
 }
 
 TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrFormatSwitching) {
     // Switch between formats that are supported and not supported by MSD.
     {
-        const PatchCountCheck patchCount = snapPatchCount();
+        const PatchCountCheck patchCount = snapshotPatchCount();
         audio_port_handle_t selectedDeviceId, portId;
         getOutputForAttr(&selectedDeviceId,
                 AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT,
                 &portId);
         ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
-        patchCount.assertDelta(1);
+        ASSERT_EQ(1, patchCount.deltaFromSnapshot());
         mManager->releaseOutput(portId);
-        patchCount.assertDelta(1);  // compared to the state at the block entry
-        // TODO: make PatchCountCheck asserts more obvious. It's easy to
-        // miss the fact that it is immutable.
+        ASSERT_EQ(1, patchCount.deltaFromSnapshot());
     }
     {
-        const PatchCountCheck patchCount = snapPatchCount();
+        const PatchCountCheck patchCount = snapshotPatchCount();
         audio_port_handle_t selectedDeviceId, portId;
         getOutputForAttr(&selectedDeviceId,
                 AUDIO_FORMAT_DTS, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT,
                 &portId);
         ASSERT_NE(selectedDeviceId, mMsdOutputDevice->getId());
-        patchCount.assertDelta(-1);
+        ASSERT_EQ(-1, patchCount.deltaFromSnapshot());
         mManager->releaseOutput(portId);
-        patchCount.assertNoChange();
+        ASSERT_EQ(0, patchCount.deltaFromSnapshot());
     }
     {
-        const PatchCountCheck patchCount = snapPatchCount();
+        const PatchCountCheck patchCount = snapshotPatchCount();
         audio_port_handle_t selectedDeviceId;
         getOutputForAttr(&selectedDeviceId,
                 AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT);
         ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
-        patchCount.assertNoChange();
+        ASSERT_EQ(0, patchCount.deltaFromSnapshot());
     }
 }
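
Note on the test refactor above: the immutable assertion helpers are replaced by an explicit signed delta that each test asserts on. A minimal sketch of that snapshot-then-delta pattern outside the AOSP fixture follows; only getActivePatchesCount() and the delta arithmetic mirror the diff, FakeClient and PatchCountSnapshot are illustrative names.

    #include <cassert>
    #include <cstddef>

    // Hypothetical stand-in for AudioPolicyManagerTestClient.
    struct FakeClient {
        size_t patches = 0;
        size_t getActivePatchesCount() const { return patches; }
    };

    // Snapshot the count at construction; report a signed delta on demand.
    class PatchCountSnapshot {
      public:
        explicit PatchCountSnapshot(const FakeClient* client)
            : mClient(client), mInitial(client->getActivePatchesCount()) {}
        int delta() const {
            const size_t now = mClient->getActivePatchesCount();
            return now >= mInitial ? static_cast<int>(now - mInitial)
                                   : -static_cast<int>(mInitial - now);
        }
      private:
        const FakeClient* mClient;
        const size_t mInitial;
    };

    int main() {
        FakeClient client;
        PatchCountSnapshot snap(&client);
        client.patches = 1;          // e.g. a patch was created
        assert(snap.delta() == 1);   // the caller asserts on the delta explicitly
        client.patches = 0;          // e.g. the patch was released again
        assert(snap.delta() == 0);
        return 0;
    }
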
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index 3b6dc80..6a71d7d 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -69,9 +69,12 @@
 include $(CLEAR_VARS)
 # seccomp is not required for coverage build.
 ifneq ($(NATIVE_COVERAGE),true)
-LOCAL_REQUIRED_MODULES_arm := crash_dump.policy mediacodec.policy
-LOCAL_REQUIRED_MODULES_x86 := crash_dump.policy mediacodec.policy
+LOCAL_REQUIRED_MODULES_arm := crash_dump.policy mediaswcodec.policy
+LOCAL_REQUIRED_MODULES_arm64 := crash_dump.policy mediaswcodec.policy
+LOCAL_REQUIRED_MODULES_x86 := crash_dump.policy mediaswcodec.policy
+LOCAL_REQUIRED_MODULES_x86_64 := crash_dump.policy mediaswcodec.policy
 endif
+
 LOCAL_SRC_FILES := \
     main_swcodecservice.cpp \
     MediaCodecUpdateService.cpp \
@@ -107,8 +110,12 @@
 
 LOCAL_MODULE := mediaswcodec
 LOCAL_INIT_RC := mediaswcodec.rc
-LOCAL_32_BIT_ONLY := true
 LOCAL_SANITIZE := scudo
+ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), x86_64 arm64))
+  LOCAL_MULTILIB := both
+  LOCAL_MODULE_STEM_32 := $(LOCAL_MODULE)32
+  LOCAL_MODULE_STEM_64 := $(LOCAL_MODULE)
+endif
 
 sanitizer_runtime_libraries :=
 llndk_libraries :=
@@ -137,4 +144,16 @@
 include $(BUILD_PREBUILT)
 endif
 
+####################################################################
+
+# sw service seccomp policy
+ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), x86 x86_64 arm arm64))
+include $(CLEAR_VARS)
+LOCAL_MODULE := mediaswcodec.policy
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_PATH := $(TARGET_OUT)/etc/seccomp_policy
+LOCAL_SRC_FILES := seccomp_policy/mediaswcodec-$(TARGET_ARCH).policy
+include $(BUILD_PREBUILT)
+endif
+
 include $(call all-makefiles-under, $(LOCAL_PATH))
diff --git a/services/mediacodec/main_swcodecservice.cpp b/services/mediacodec/main_swcodecservice.cpp
index 1168825..05b5695 100644
--- a/services/mediacodec/main_swcodecservice.cpp
+++ b/services/mediacodec/main_swcodecservice.cpp
@@ -26,12 +26,10 @@
 
 using namespace android;
 
-// TODO: replace policy with software codec-only policies
-// Must match location in Android.mk.
 static const char kSystemSeccompPolicyPath[] =
-        "/system/etc/seccomp_policy/mediacodec.policy";
+        "/system/etc/seccomp_policy/mediaswcodec.policy";
 static const char kVendorSeccompPolicyPath[] =
-        "/vendor/etc/seccomp_policy/mediacodec.policy";
+        "/vendor/etc/seccomp_policy/mediaswcodec.policy";
 
 // Disable Scudo's mismatch allocation check, as it is being triggered
 // by some third party code.
@@ -47,8 +45,11 @@
 
     ::android::hardware::configureRpcThreadpool(64, false);
 
-    // codec libs are currently 32-bit only
+#ifdef __LP64__
+    loadFromApex("/apex/com.android.media.swcodec/lib64");
+#else
     loadFromApex("/apex/com.android.media.swcodec/lib");
+#endif
 
     ::android::hardware::joinRpcThreadpool();
 }
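
The hunk above picks the 32-bit or 64-bit codec directory inside the APEX at compile time. A minimal sketch of that __LP64__ selection, assuming nothing beyond the diff except the helper name codecLibDir() and the printing in main(), which are illustrative:

    #include <cstdio>

    static const char kApexRoot[] = "/apex/com.android.media.swcodec";

    static const char* codecLibDir() {
    #ifdef __LP64__
        return "lib64";   // a 64-bit process loads the 64-bit codec libraries
    #else
        return "lib";     // a 32-bit process keeps the original path
    #endif
    }

    int main() {
        std::printf("%s/%s\n", kApexRoot, codecLibDir());
        return 0;
    }
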
diff --git a/services/mediacodec/mediaswcodec.rc b/services/mediacodec/mediaswcodec.rc
index dfe3381..3549666 100644
--- a/services/mediacodec/mediaswcodec.rc
+++ b/services/mediacodec/mediaswcodec.rc
@@ -2,5 +2,6 @@
     class main
     user mediacodec
     group camera drmrpc mediadrm
+    updatable
     ioprio rt 4
     writepid /dev/cpuset/foreground/tasks
diff --git a/services/mediacodec/registrant/Android.bp b/services/mediacodec/registrant/Android.bp
index 8c40ad1..80d3630 100644
--- a/services/mediacodec/registrant/Android.bp
+++ b/services/mediacodec/registrant/Android.bp
@@ -38,6 +38,7 @@
         "libcodec2_soft_mp3dec",
         "libcodec2_soft_vorbisdec",
         "libcodec2_soft_opusdec",
+        "libcodec2_soft_opusenc",
         "libcodec2_soft_vp8dec",
         "libcodec2_soft_vp9dec",
         "libcodec2_soft_av1dec",
@@ -49,7 +50,5 @@
         "libcodec2_soft_gsmdec",
         "libcodec2_soft_xaacdec",
     ],
-
-    compile_multilib: "32",
 }
 
diff --git a/services/mediacodec/seccomp_policy/mediacodec-x86.policy b/services/mediacodec/seccomp_policy/mediacodec-x86.policy
index 966e214..6d88c84 100644
--- a/services/mediacodec/seccomp_policy/mediacodec-x86.policy
+++ b/services/mediacodec/seccomp_policy/mediacodec-x86.policy
@@ -18,15 +18,19 @@
 openat: 1
 open: 1
 getuid32: 1
+getuid: 1
+getrlimit: 1
 writev: 1
 ioctl: 1
 close: 1
 mmap2: 1
+mmap: 1
 fstat64: 1
 stat64: 1
 statfs64: 1
 madvise: 1
 fstatat64: 1
+newfstatat: 1
 futex: 1
 munmap: 1
 faccessat: 1
diff --git a/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy b/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy
new file mode 100644
index 0000000..588141a
--- /dev/null
+++ b/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy
@@ -0,0 +1,60 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+futex: 1
+# ioctl calls are filtered via the selinux policy.
+ioctl: 1
+sched_yield: 1
+close: 1
+dup: 1
+ppoll: 1
+mprotect: arg2 in ~PROT_EXEC || arg2 in ~PROT_WRITE
+mmap2: arg2 in ~PROT_EXEC || arg2 in ~PROT_WRITE
+
+# mremap: Ensure |flags| are (MREMAP_MAYMOVE | MREMAP_FIXED).
+# TODO: Once minijail parser support for '<' is in, this needs to be modified
+# to also prevent |old_address| and |new_address| from touching the exception
+# vector page, which on ARM is statically loaded at 0xffff 0000. See
+# http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0211h/Babfeega.html
+# for more details.
+mremap: arg3 == 3
+munmap: 1
+prctl: 1
+getuid32: 1
+writev: 1
+sigaltstack: 1
+clone: 1
+exit: 1
+lseek: 1
+rt_sigprocmask: 1
+openat: 1
+fstat64: 1
+write: 1
+nanosleep: 1
+setpriority: 1
+set_tid_address: 1
+getdents64: 1
+readlinkat: 1
+read: 1
+pread64: 1
+fstatfs64: 1
+gettimeofday: 1
+faccessat: 1
+_llseek: 1
+fstatat64: 1
+ugetrlimit: 1
+exit_group: 1
+restart_syscall: 1
+rt_sigreturn: 1
+getrandom: 1
diff --git a/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy b/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy
new file mode 100644
index 0000000..1bee1b5
--- /dev/null
+++ b/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy
@@ -0,0 +1,61 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+futex: 1
+# ioctl calls are filtered via the selinux policy.
+ioctl: 1
+sched_yield: 1
+close: 1
+dup: 1
+ppoll: 1
+mprotect: arg2 in ~PROT_EXEC || arg2 in ~PROT_WRITE
+mmap: arg2 in ~PROT_EXEC || arg2 in ~PROT_WRITE
+getuid: 1
+getrlimit: 1
+fstat: 1
+newfstatat: 1
+fstatfs: 1
+
+# mremap: Ensure |flags| are (MREMAP_MAYMOVE | MREMAP_FIXED).
+# TODO: Once minijail parser support for '<' is in, this needs to be modified
+# to also prevent |old_address| and |new_address| from touching the exception
+# vector page, which on ARM is statically loaded at 0xffff 0000. See
+# http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0211h/Babfeega.html
+# for more details.
+mremap: arg3 == 3
+munmap: 1
+prctl: 1
+writev: 1
+sigaltstack: 1
+clone: 1
+exit: 1
+lseek: 1
+rt_sigprocmask: 1
+openat: 1
+write: 1
+nanosleep: 1
+setpriority: 1
+set_tid_address: 1
+getdents64: 1
+readlinkat: 1
+read: 1
+pread64: 1
+gettimeofday: 1
+faccessat: 1
+exit_group: 1
+restart_syscall: 1
+rt_sigreturn: 1
+getrandom: 1
+madvise: 1
+
diff --git a/services/mediacodec/seccomp_policy/mediaswcodec-x86.policy b/services/mediacodec/seccomp_policy/mediaswcodec-x86.policy
new file mode 120000
index 0000000..ab2592a
--- /dev/null
+++ b/services/mediacodec/seccomp_policy/mediaswcodec-x86.policy
@@ -0,0 +1 @@
+mediacodec-x86.policy
\ No newline at end of file
diff --git a/services/mediacodec/seccomp_policy/mediaswcodec-x86_64.policy b/services/mediacodec/seccomp_policy/mediaswcodec-x86_64.policy
new file mode 120000
index 0000000..ab2592a
--- /dev/null
+++ b/services/mediacodec/seccomp_policy/mediaswcodec-x86_64.policy
@@ -0,0 +1 @@
+mediacodec-x86.policy
\ No newline at end of file
diff --git a/services/mediaextractor/mediaextractor.rc b/services/mediaextractor/mediaextractor.rc
index 5fc2941..6b2d0a5 100644
--- a/services/mediaextractor/mediaextractor.rc
+++ b/services/mediaextractor/mediaextractor.rc
@@ -2,5 +2,7 @@
     class main
     user mediaex
     group drmrpc mediadrm
+    # TODO(b/123275379): Remove updatable when http://aosp/878198 has landed
+    updatable
     ioprio rt 4
     writepid /dev/cpuset/foreground/tasks
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
index 43b0a37..3616fa2 100644
--- a/services/oboeservice/AAudioServiceEndpoint.h
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -121,7 +121,7 @@
     mutable std::mutex       mLockStreams;
     std::vector<android::sp<AAudioServiceStreamBase>> mRegisteredStreams;
 
-    SimpleDoubleBuffer<Timestamp>  mAtomicTimestamp;
+    SimpleDoubleBuffer<Timestamp>  mAtomicEndpointTimestamp;
 
     android::AudioClient     mMmapClient;   // set in open, used in open and startStream
 
diff --git a/services/oboeservice/AAudioServiceEndpointShared.cpp b/services/oboeservice/AAudioServiceEndpointShared.cpp
index 2f1ec7e..0a415fd 100644
--- a/services/oboeservice/AAudioServiceEndpointShared.cpp
+++ b/services/oboeservice/AAudioServiceEndpointShared.cpp
@@ -181,8 +181,8 @@
 // Get timestamp that was written by the real-time service thread, eg. mixer.
 aaudio_result_t AAudioServiceEndpointShared::getFreeRunningPosition(int64_t *positionFrames,
                                                                   int64_t *timeNanos) {
-    if (mAtomicTimestamp.isValid()) {
-        Timestamp timestamp = mAtomicTimestamp.read();
+    if (mAtomicEndpointTimestamp.isValid()) {
+        Timestamp timestamp = mAtomicEndpointTimestamp.read();
         *positionFrames = timestamp.getPosition();
         *timeNanos = timestamp.getNanoseconds();
         return AAUDIO_OK;
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index defbb7b..b16b5dc 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -43,7 +43,7 @@
 AAudioServiceStreamBase::AAudioServiceStreamBase(AAudioService &audioService)
         : mUpMessageQueue(nullptr)
         , mTimestampThread("AATime")
-        , mAtomicTimestamp()
+        , mAtomicStreamTimestamp()
         , mAudioService(audioService) {
     mMmapClient.clientUid = -1;
     mMmapClient.clientPid = -1;
@@ -182,7 +182,7 @@
     setSuspended(false);
 
     // Start with fresh presentation timestamps.
-    mAtomicTimestamp.clear();
+    mAtomicStreamTimestamp.clear();
 
     mClientHandle = AUDIO_PORT_HANDLE_NONE;
     result = startDevice();
@@ -291,16 +291,20 @@
 }
 
 // implement Runnable, periodically send timestamps to client
+__attribute__((no_sanitize("integer")))
 void AAudioServiceStreamBase::run() {
     ALOGD("%s() %s entering >>>>>>>>>>>>>> TIMESTAMPS", __func__, getTypeText());
     TimestampScheduler timestampScheduler;
     timestampScheduler.setBurstPeriod(mFramesPerBurst, getSampleRate());
     timestampScheduler.start(AudioClock::getNanoseconds());
     int64_t nextTime = timestampScheduler.nextAbsoluteTime();
+    int32_t loopCount = 0;
     while(mThreadEnabled.load()) {
+        loopCount++;
         if (AudioClock::getNanoseconds() >= nextTime) {
             aaudio_result_t result = sendCurrentTimestamp();
             if (result != AAUDIO_OK) {
+                ALOGE("%s() timestamp thread got result = %d", __func__, result);
                 break;
             }
             nextTime = timestampScheduler.nextAbsoluteTime();
@@ -310,7 +314,8 @@
             AudioClock::sleepUntilNanoTime(nextTime);
         }
     }
-    ALOGD("%s() %s exiting <<<<<<<<<<<<<< TIMESTAMPS", __func__, getTypeText());
+    ALOGD("%s() %s exiting after %d loops <<<<<<<<<<<<<< TIMESTAMPS",
+          __func__, getTypeText(), loopCount);
 }
 
 void AAudioServiceStreamBase::disconnect() {
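
The run() change above marks the timestamp loop with no_sanitize("integer") and counts iterations for the exit log. A small sketch of that attribute usage, assuming a Clang build as in AOSP; countLoops() and the numbers are illustrative, only the attribute-plus-counter shape mirrors the diff:

    #include <cstdint>
    #include <cstdio>

    // Clang extension: exempts just this function from the "integer" sanitizer
    // group (unsigned wraparound, implicit conversions, signed overflow checks)
    // so a free-running counter is not flagged in a sanitized build.
    __attribute__((no_sanitize("integer")))
    static int32_t countLoops(int32_t iterations) {
        int32_t loopCount = 0;
        for (int32_t i = 0; i < iterations; ++i) {
            loopCount++;  // left unchecked on purpose, like the run() loop counter
        }
        return loopCount;
    }

    int main() {
        std::printf("exiting after %d loops\n", countLoops(5));
        return 0;
    }
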
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 7904b25..ffc768b 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -301,7 +301,7 @@
     // TODO rename mClientHandle to mPortHandle to be more consistent with AudioFlinger.
     audio_port_handle_t     mClientHandle = AUDIO_PORT_HANDLE_NONE;
 
-    SimpleDoubleBuffer<Timestamp>  mAtomicTimestamp;
+    SimpleDoubleBuffer<Timestamp>  mAtomicStreamTimestamp;
 
     android::AAudioService &mAudioService;
 
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index 9377945..837b080 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -162,7 +162,7 @@
     aaudio_result_t result = serviceEndpointMMAP->getFreeRunningPosition(positionFrames, timeNanos);
     if (result == AAUDIO_OK) {
         Timestamp timestamp(*positionFrames, *timeNanos);
-        mAtomicTimestamp.write(timestamp);
+        mAtomicStreamTimestamp.write(timestamp);
         *positionFrames = timestamp.getPosition();
         *timeNanos = timestamp.getNanoseconds();
     } else if (result != AAUDIO_ERROR_UNAVAILABLE) {
@@ -184,8 +184,8 @@
             static_cast<AAudioServiceEndpointMMAP *>(endpoint.get());
 
     // TODO Get presentation timestamp from the HAL
-    if (mAtomicTimestamp.isValid()) {
-        Timestamp timestamp = mAtomicTimestamp.read();
+    if (mAtomicStreamTimestamp.isValid()) {
+        Timestamp timestamp = mAtomicStreamTimestamp.read();
         *positionFrames = timestamp.getPosition();
         *timeNanos = timestamp.getNanoseconds() + serviceEndpointMMAP->getHardwareTimeOffsetNanos();
         return AAUDIO_OK;
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index d5450fe..14742dd 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -238,15 +238,15 @@
 }
 
 void AAudioServiceStreamShared::markTransferTime(Timestamp &timestamp) {
-    mAtomicTimestamp.write(timestamp);
+    mAtomicStreamTimestamp.write(timestamp);
 }
 
 // Get timestamp that was written by mixer or distributor.
 aaudio_result_t AAudioServiceStreamShared::getFreeRunningPosition(int64_t *positionFrames,
                                                                   int64_t *timeNanos) {
     // TODO Get presentation timestamp from the HAL
-    if (mAtomicTimestamp.isValid()) {
-        Timestamp timestamp = mAtomicTimestamp.read();
+    if (mAtomicStreamTimestamp.isValid()) {
+        Timestamp timestamp = mAtomicStreamTimestamp.read();
         *positionFrames = timestamp.getPosition();
         *timeNanos = timestamp.getNanoseconds();
         return AAUDIO_OK;
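
The renames above all operate on the same double-buffered timestamp holder through write()/isValid()/read(). A single-writer, single-reader sketch of that surface follows; the real SimpleDoubleBuffer also has clear() and guards against the writer lapping the reader (e.g. via a sequence counter), so everything here beyond the method names and the position/nanoseconds fields is an assumption.

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-in for the AOSP Timestamp class used above.
    struct Timestamp {
        int64_t position = 0;
        int64_t nanoseconds = 0;
    };

    class DoubleBufferedTimestamp {
      public:
        void write(const Timestamp& t) {
            const int next = 1 - mIndex.load(std::memory_order_relaxed);
            mSlots[next] = t;                               // fill the inactive slot
            mIndex.store(next, std::memory_order_release);  // publish it
            mValid.store(true, std::memory_order_release);
        }
        bool isValid() const { return mValid.load(std::memory_order_acquire); }
        Timestamp read() const { return mSlots[mIndex.load(std::memory_order_acquire)]; }
      private:
        Timestamp mSlots[2];
        std::atomic<int> mIndex{0};
        std::atomic<bool> mValid{false};
    };

    int main() {
        DoubleBufferedTimestamp ts;
        assert(!ts.isValid());
        ts.write(Timestamp{480, 1000000});   // e.g. one burst at 48 kHz, ~1 ms later
        assert(ts.isValid());
        assert(ts.read().position == 480);
        return 0;
    }
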
diff --git a/services/soundtrigger/SoundTriggerHalHidl.cpp b/services/soundtrigger/SoundTriggerHalHidl.cpp
index 1d37a8e..68d54c7 100644
--- a/services/soundtrigger/SoundTriggerHalHidl.cpp
+++ b/services/soundtrigger/SoundTriggerHalHidl.cpp
@@ -168,18 +168,23 @@
     int ret;
     SoundModelHandle halHandle;
     sp<V2_1_ISoundTriggerHw> soundtrigger_2_1 = toService2_1(soundtrigger);
+    sp<V2_2_ISoundTriggerHw> soundtrigger_2_2 = toService2_2(soundtrigger);
     if (sound_model->type == SOUND_MODEL_TYPE_KEYPHRASE) {
-        if (!soundtrigger_2_1) {
-            ISoundTriggerHw::PhraseSoundModel halSoundModel;
-            convertPhraseSoundModelToHal(&halSoundModel, sound_model);
-            AutoMutex lock(mHalLock);
-            hidlReturn = soundtrigger->loadPhraseSoundModel(
-                    halSoundModel,
-                    this, modelId, [&](int32_t retval, auto res) {
-                        ret = retval;
-                        halHandle = res;
-                    });
-        } else {
+        if (soundtrigger_2_2) {
+            V2_2_ISoundTriggerHw::PhraseSoundModel halSoundModel;
+            auto result = convertPhraseSoundModelToHal(&halSoundModel, sound_model);
+            if (result.first) {
+                AutoMutex lock(mHalLock);
+                hidlReturn = soundtrigger_2_2->loadPhraseSoundModel_2_1(
+                        halSoundModel,
+                        this, modelId, [&](int32_t retval, auto res) {
+                            ret = retval;
+                            halHandle = res;
+                        });
+            } else {
+                return NO_MEMORY;
+            }
+        } else if (soundtrigger_2_1) {
             V2_1_ISoundTriggerHw::PhraseSoundModel halSoundModel;
             auto result = convertPhraseSoundModelToHal(&halSoundModel, sound_model);
             if (result.first) {
@@ -193,18 +198,32 @@
             } else {
                 return NO_MEMORY;
             }
-        }
-    } else {
-        if (!soundtrigger_2_1) {
-            ISoundTriggerHw::SoundModel halSoundModel;
-            convertSoundModelToHal(&halSoundModel, sound_model);
+        } else {
+            ISoundTriggerHw::PhraseSoundModel halSoundModel;
+            convertPhraseSoundModelToHal(&halSoundModel, sound_model);
             AutoMutex lock(mHalLock);
-            hidlReturn = soundtrigger->loadSoundModel(halSoundModel,
+            hidlReturn = soundtrigger->loadPhraseSoundModel(
+                    halSoundModel,
                     this, modelId, [&](int32_t retval, auto res) {
                         ret = retval;
                         halHandle = res;
                     });
-        } else {
+        }
+    } else {
+        if (soundtrigger_2_2) {
+            V2_2_ISoundTriggerHw::SoundModel halSoundModel;
+            auto result = convertSoundModelToHal(&halSoundModel, sound_model);
+            if (result.first) {
+                AutoMutex lock(mHalLock);
+                hidlReturn = soundtrigger_2_2->loadSoundModel_2_1(halSoundModel,
+                        this, modelId, [&](int32_t retval, auto res) {
+                            ret = retval;
+                            halHandle = res;
+                        });
+            } else {
+                return NO_MEMORY;
+            }
+        } else if (soundtrigger_2_1) {
             V2_1_ISoundTriggerHw::SoundModel halSoundModel;
             auto result = convertSoundModelToHal(&halSoundModel, sound_model);
             if (result.first) {
@@ -217,6 +236,15 @@
             } else {
                 return NO_MEMORY;
             }
+        } else {
+            ISoundTriggerHw::SoundModel halSoundModel;
+            convertSoundModelToHal(&halSoundModel, sound_model);
+            AutoMutex lock(mHalLock);
+            hidlReturn = soundtrigger->loadSoundModel(halSoundModel,
+                    this, modelId, [&](int32_t retval, auto res) {
+                        ret = retval;
+                        halHandle = res;
+                    });
         }
     }
 
@@ -282,16 +310,20 @@
     model->mRecognitionCookie = cookie;
 
     sp<V2_1_ISoundTriggerHw> soundtrigger_2_1 = toService2_1(soundtrigger);
+    sp<V2_2_ISoundTriggerHw> soundtrigger_2_2 = toService2_2(soundtrigger);
     Return<int32_t> hidlReturn(0);
 
-    if (!soundtrigger_2_1) {
-        ISoundTriggerHw::RecognitionConfig halConfig;
-        convertRecognitionConfigToHal(&halConfig, config);
-        {
+    if (soundtrigger_2_2) {
+        V2_2_ISoundTriggerHw::RecognitionConfig halConfig;
+        auto result = convertRecognitionConfigToHal(&halConfig, config);
+        if (result.first) {
             AutoMutex lock(mHalLock);
-            hidlReturn = soundtrigger->startRecognition(model->mHalHandle, halConfig, this, handle);
+            hidlReturn = soundtrigger_2_2->startRecognition_2_1(
+                    model->mHalHandle, halConfig, this, handle);
+        } else {
+            return NO_MEMORY;
         }
-    } else {
+    } else if (soundtrigger_2_1) {
         V2_1_ISoundTriggerHw::RecognitionConfig halConfig;
         auto result = convertRecognitionConfigToHal(&halConfig, config);
         if (result.first) {
@@ -301,6 +333,13 @@
         } else {
             return NO_MEMORY;
         }
+    } else {
+        ISoundTriggerHw::RecognitionConfig halConfig;
+        convertRecognitionConfigToHal(&halConfig, config);
+        {
+            AutoMutex lock(mHalLock);
+            hidlReturn = soundtrigger->startRecognition(model->mHalHandle, halConfig, this, handle);
+        }
     }
 
     if (!hidlReturn.isOk()) {