Merge "Don't send short URB packet when sending MtpDataPacket." into nyc-mr2-dev am: e3ebcffe41
am: be9322a021

Change-Id: Id0a1b2c57731bb0f442d4cfdadff81ae91f49144
diff --git a/camera/CameraBase.cpp b/camera/CameraBase.cpp
index 68e73f2..ece64fd 100644
--- a/camera/CameraBase.cpp
+++ b/camera/CameraBase.cpp
@@ -39,7 +39,7 @@
 
 namespace hardware {
 
-status_t CameraInfo::writeToParcel(Parcel* parcel) const {
+status_t CameraInfo::writeToParcel(android::Parcel* parcel) const {
     status_t res;
     res = parcel->writeInt32(facing);
     if (res != OK) return res;
@@ -47,7 +47,7 @@
     return res;
 }
 
-status_t CameraInfo::readFromParcel(const Parcel* parcel) {
+status_t CameraInfo::readFromParcel(const android::Parcel* parcel) {
     status_t res;
     res = parcel->readInt32(&facing);
     if (res != OK) return res;
@@ -55,8 +55,24 @@
     return res;
 }
 
+status_t CameraStatus::writeToParcel(android::Parcel* parcel) const {
+    status_t res;
+    res = parcel->writeString8(cameraId);
+    if (res != OK) return res;
+    res = parcel->writeInt32(status);
+    return res;
 }
 
+status_t CameraStatus::readFromParcel(const android::Parcel* parcel) {
+    status_t res;
+    res = parcel->readString8(&cameraId);
+    if (res != OK) return res;
+    res = parcel->readInt32(&status);
+    return res;
+}
+
+} // namespace hardware
+
 namespace {
     sp<::android::hardware::ICameraService> gCameraService;
     const int                 kCameraServicePollDelay = 500000; // 0.5s
@@ -239,24 +255,6 @@
     return res.isOk() ? OK : res.serviceSpecificErrorCode();
 }
 
-template <typename TCam, typename TCamTraits>
-status_t CameraBase<TCam, TCamTraits>::addServiceListener(
-        const sp<::android::hardware::ICameraServiceListener>& listener) {
-    const sp<::android::hardware::ICameraService>& cs = getCameraService();
-    if (cs == 0) return UNKNOWN_ERROR;
-    binder::Status res = cs->addListener(listener);
-    return res.isOk() ? OK : res.serviceSpecificErrorCode();
-}
-
-template <typename TCam, typename TCamTraits>
-status_t CameraBase<TCam, TCamTraits>::removeServiceListener(
-        const sp<::android::hardware::ICameraServiceListener>& listener) {
-    const sp<::android::hardware::ICameraService>& cs = getCameraService();
-    if (cs == 0) return UNKNOWN_ERROR;
-    binder::Status res = cs->removeListener(listener);
-    return res.isOk() ? OK : res.serviceSpecificErrorCode();
-}
-
 template class CameraBase<Camera>;
 
 } // namespace android
diff --git a/camera/CaptureResult.cpp b/camera/CaptureResult.cpp
index 0a447e7..e6c0d00 100644
--- a/camera/CaptureResult.cpp
+++ b/camera/CaptureResult.cpp
@@ -26,7 +26,7 @@
     return requestId >= 0;
 }
 
-status_t CaptureResultExtras::readFromParcel(const Parcel *parcel) {
+status_t CaptureResultExtras::readFromParcel(const android::Parcel *parcel) {
     if (parcel == NULL) {
         ALOGE("%s: Null parcel", __FUNCTION__);
         return BAD_VALUE;
@@ -43,7 +43,7 @@
     return OK;
 }
 
-status_t CaptureResultExtras::writeToParcel(Parcel *parcel) const {
+status_t CaptureResultExtras::writeToParcel(android::Parcel *parcel) const {
     if (parcel == NULL) {
         ALOGE("%s: Null parcel", __FUNCTION__);
         return BAD_VALUE;
@@ -69,7 +69,7 @@
     mMetadata = otherResult.mMetadata;
 }
 
-status_t CaptureResult::readFromParcel(Parcel *parcel) {
+status_t CaptureResult::readFromParcel(android::Parcel *parcel) {
 
     ALOGV("%s: parcel = %p", __FUNCTION__, parcel);
 
@@ -100,7 +100,7 @@
     return OK;
 }
 
-status_t CaptureResult::writeToParcel(Parcel *parcel) const {
+status_t CaptureResult::writeToParcel(android::Parcel *parcel) const {
 
     ALOGV("%s: parcel = %p", __FUNCTION__, parcel);
 
diff --git a/camera/VendorTagDescriptor.cpp b/camera/VendorTagDescriptor.cpp
index f3b3dbb..ed09b60 100644
--- a/camera/VendorTagDescriptor.cpp
+++ b/camera/VendorTagDescriptor.cpp
@@ -96,7 +96,7 @@
     mVendorOps = src.mVendorOps;
 }
 
-status_t VendorTagDescriptor::readFromParcel(const Parcel* parcel) {
+status_t VendorTagDescriptor::readFromParcel(const android::Parcel* parcel) {
     status_t res = OK;
     if (parcel == NULL) {
         ALOGE("%s: parcel argument was NULL.", __FUNCTION__);
@@ -244,7 +244,7 @@
     return mTagToTypeMap.valueFor(tag);
 }
 
-status_t VendorTagDescriptor::writeToParcel(Parcel* parcel) const {
+status_t VendorTagDescriptor::writeToParcel(android::Parcel* parcel) const {
     status_t res = OK;
     if (parcel == NULL) {
         ALOGE("%s: parcel argument was NULL.", __FUNCTION__);
diff --git a/camera/aidl/android/hardware/CameraStatus.aidl b/camera/aidl/android/hardware/CameraStatus.aidl
new file mode 100644
index 0000000..2089b8b
--- /dev/null
+++ b/camera/aidl/android/hardware/CameraStatus.aidl
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware;
+
+/** @hide */
+parcelable CameraStatus cpp_header "camera/CameraBase.h";
diff --git a/camera/aidl/android/hardware/ICameraService.aidl b/camera/aidl/android/hardware/ICameraService.aidl
index e94fd0c..99c479c 100644
--- a/camera/aidl/android/hardware/ICameraService.aidl
+++ b/camera/aidl/android/hardware/ICameraService.aidl
@@ -24,6 +24,7 @@
 import android.hardware.camera2.impl.CameraMetadataNative;
 import android.hardware.ICameraServiceListener;
 import android.hardware.CameraInfo;
+import android.hardware.CameraStatus;
 
 /**
  * Binder interface for the native camera service running in mediaserver.
@@ -83,7 +84,7 @@
      * Only supported for device HAL versions >= 3.2
      */
     ICameraDeviceUser connectDevice(ICameraDeviceCallbacks callbacks,
-            int cameraId,
+            String cameraId,
             String opPackageName,
             int clientUid);
 
@@ -102,16 +103,24 @@
             int clientUid);
 
     /**
-     * Add/remove listeners for changes to camera device and flashlight state
+     * Add listener for changes to camera device and flashlight state.
+     *
+     * Also returns the set of currently-known camera IDs and the state of each device.
+     * Adding a listener will trigger the torch status listener to fire for all
+     * devices that have a flash unit.
      */
-    void addListener(ICameraServiceListener listener);
+    CameraStatus[] addListener(ICameraServiceListener listener);
+
+    /**
+     * Remove listener for changes to camera device and flashlight state.
+     */
     void removeListener(ICameraServiceListener listener);
 
     /**
      * Read the static camera metadata for a camera device.
      * Only supported for device HAL versions >= 3.2
      */
-    CameraMetadataNative getCameraCharacteristics(int cameraId);
+    CameraMetadataNative getCameraCharacteristics(String cameraId);
 
     /**
      * Read in the vendor tag descriptors from the camera module HAL.
@@ -132,9 +141,9 @@
     const int API_VERSION_2 = 2;
 
     // Determines if a particular API version is supported directly
-    boolean supportsCameraApi(int cameraId, int apiVersion);
+    boolean supportsCameraApi(String cameraId, int apiVersion);
 
-    void setTorchMode(String CameraId, boolean enabled, IBinder clientBinder);
+    void setTorchMode(String cameraId, boolean enabled, IBinder clientBinder);
 
     /**
      * Notify the camera service of a system event.  Should only be called from system_server.
diff --git a/camera/aidl/android/hardware/ICameraServiceListener.aidl b/camera/aidl/android/hardware/ICameraServiceListener.aidl
index 4e2a8c7..f871ce4 100644
--- a/camera/aidl/android/hardware/ICameraServiceListener.aidl
+++ b/camera/aidl/android/hardware/ICameraServiceListener.aidl
@@ -40,7 +40,7 @@
      */
     // Device physically unplugged
     const int STATUS_NOT_PRESENT      = 0;
-    // Device physically has been plugged in and the camera can be used exlusively
+    // Device physically has been plugged in and the camera can be used exclusively
     const int STATUS_PRESENT          = 1;
     // Device physically has been plugged in but it will not be connect-able until enumeration is
     // complete
@@ -51,7 +51,7 @@
     // Use to initialize variables only
     const int STATUS_UNKNOWN          = -1;
 
-    oneway void onStatusChanged(int status, int cameraId);
+    oneway void onStatusChanged(int status, String cameraId);
 
     /**
      * The torch mode status of a camera.
diff --git a/camera/aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl b/camera/aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl
index 755ec8e..8308095 100644
--- a/camera/aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl
+++ b/camera/aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl
@@ -44,4 +44,5 @@
      * @param lastFrameNumber Frame number of the last frame of the streaming request.
      */
     oneway void onRepeatingRequestError(in long lastFrameNumber);
+    oneway void onRequestQueueEmpty();
 }
diff --git a/camera/camera2/CaptureRequest.cpp b/camera/camera2/CaptureRequest.cpp
index 0d689a6..4daf35b 100644
--- a/camera/camera2/CaptureRequest.cpp
+++ b/camera/camera2/CaptureRequest.cpp
@@ -28,7 +28,7 @@
 namespace hardware {
 namespace camera2 {
 
-status_t CaptureRequest::readFromParcel(const Parcel* parcel) {
+status_t CaptureRequest::readFromParcel(const android::Parcel* parcel) {
     if (parcel == NULL) {
         ALOGE("%s: Null parcel", __FUNCTION__);
         return BAD_VALUE;
@@ -90,7 +90,7 @@
     return OK;
 }
 
-status_t CaptureRequest::writeToParcel(Parcel* parcel) const {
+status_t CaptureRequest::writeToParcel(android::Parcel* parcel) const {
     if (parcel == NULL) {
         ALOGE("%s: Null parcel", __FUNCTION__);
         return BAD_VALUE;
diff --git a/camera/camera2/OutputConfiguration.cpp b/camera/camera2/OutputConfiguration.cpp
index 38e1c01..9cd3a47 100644
--- a/camera/camera2/OutputConfiguration.cpp
+++ b/camera/camera2/OutputConfiguration.cpp
@@ -21,8 +21,9 @@
 #include <utils/Log.h>
 
 #include <camera/camera2/OutputConfiguration.h>
-#include <gui/Surface.h>
 #include <binder/Parcel.h>
+#include <gui/Surface.h>
+#include <utils/String8.h>
 
 namespace android {
 
@@ -30,8 +31,9 @@
 const int OutputConfiguration::INVALID_ROTATION = -1;
 const int OutputConfiguration::INVALID_SET_ID = -1;
 
-sp<IGraphicBufferProducer> OutputConfiguration::getGraphicBufferProducer() const {
-    return mGbp;
+const std::vector<sp<IGraphicBufferProducer>>&
+        OutputConfiguration::getGraphicBufferProducers() const {
+    return mGbps;
 }
 
 int OutputConfiguration::getRotation() const {
@@ -62,13 +64,13 @@
         mHeight(0) {
 }
 
-OutputConfiguration::OutputConfiguration(const Parcel& parcel) :
+OutputConfiguration::OutputConfiguration(const android::Parcel& parcel) :
         mRotation(INVALID_ROTATION),
         mSurfaceSetID(INVALID_SET_ID) {
     readFromParcel(&parcel);
 }
 
-status_t OutputConfiguration::readFromParcel(const Parcel* parcel) {
+status_t OutputConfiguration::readFromParcel(const android::Parcel* parcel) {
     status_t err = OK;
     int rotation = 0;
 
@@ -103,42 +105,65 @@
         return err;
     }
 
-    view::Surface surfaceShim;
-    if ((err = surfaceShim.readFromParcel(parcel)) != OK) {
-        // Read surface failure for deferred surface configuration is expected.
-        if (surfaceType == SURFACE_TYPE_SURFACE_VIEW ||
-                surfaceType == SURFACE_TYPE_SURFACE_TEXTURE) {
-            ALOGV("%s: Get null surface from a deferred surface configuration (%dx%d)",
-                    __FUNCTION__, width, height);
-            err = OK;
-        } else {
-            ALOGE("%s: Failed to read surface from parcel", __FUNCTION__);
-            return err;
-        }
+    // numSurfaces is the total number of surfaces for this OutputConfiguration,
+    // regardless of whether each surface is deferred or not.
+    int numSurfaces = 0;
+    if ((err = parcel->readInt32(&numSurfaces)) != OK) {
+        ALOGE("%s: Failed to read maxSurfaces from parcel", __FUNCTION__);
+        return err;
+    }
+    if (numSurfaces < 1) {
+        ALOGE("%s: there has to be at least 1 surface per"
+              " outputConfiguration", __FUNCTION__);
+        return BAD_VALUE;
     }
 
-    mGbp = surfaceShim.graphicBufferProducer;
+    // Read all surfaces from the parcel. If a surface is deferred, readFromParcel
+    // returns an error, and a null surface is put into mGbps. We assume all
+    // deferred surfaces come after non-deferred surfaces in the parcel.
+    // TODO: Need a better way to detect deferred surfaces than relying on the
+    // error return from readFromParcel.
+    std::vector<sp<IGraphicBufferProducer>> gbps;
+    for (int i = 0; i < numSurfaces; i++) {
+        view::Surface surfaceShim;
+        if ((err = surfaceShim.readFromParcel(parcel)) != OK) {
+            // Read surface failure for deferred surface configuration is expected.
+            if ((surfaceType == SURFACE_TYPE_SURFACE_VIEW ||
+                    surfaceType == SURFACE_TYPE_SURFACE_TEXTURE)) {
+                ALOGV("%s: Get null surface from a deferred surface configuration (%dx%d)",
+                        __FUNCTION__, width, height);
+                err = OK;
+            } else {
+                ALOGE("%s: Failed to read surface from parcel", __FUNCTION__);
+                return err;
+            }
+        }
+        gbps.push_back(surfaceShim.graphicBufferProducer);
+        ALOGV("%s: OutputConfiguration: gbps[%d] : %p, name %s", __FUNCTION__,
+                i, gbps[i].get(), String8(surfaceShim.name).string());
+    }
+
     mRotation = rotation;
     mSurfaceSetID = setID;
     mSurfaceType = surfaceType;
     mWidth = width;
     mHeight = height;
+    mGbps = std::move(gbps);
 
-    ALOGV("%s: OutputConfiguration: bp = %p, name = %s, rotation = %d, setId = %d,"
-            "surfaceType = %d", __FUNCTION__, mGbp.get(), String8(surfaceShim.name).string(),
-            mRotation, mSurfaceSetID, mSurfaceType);
+    ALOGV("%s: OutputConfiguration: rotation = %d, setId = %d, surfaceType = %d",
+            __FUNCTION__, mRotation, mSurfaceSetID, mSurfaceType);
 
     return err;
 }
 
 OutputConfiguration::OutputConfiguration(sp<IGraphicBufferProducer>& gbp, int rotation,
         int surfaceSetID) {
-    mGbp = gbp;
+    mGbps.push_back(gbp);
     mRotation = rotation;
     mSurfaceSetID = surfaceSetID;
 }
 
-status_t OutputConfiguration::writeToParcel(Parcel* parcel) const {
+status_t OutputConfiguration::writeToParcel(android::Parcel* parcel) const {
 
     if (parcel == nullptr) return BAD_VALUE;
     status_t err = OK;
@@ -158,14 +183,53 @@
     err = parcel->writeInt32(mHeight);
     if (err != OK) return err;
 
-    view::Surface surfaceShim;
-    surfaceShim.name = String16("unknown_name"); // name of surface
-    surfaceShim.graphicBufferProducer = mGbp;
-
-    err = surfaceShim.writeToParcel(parcel);
+    int numSurfaces = mGbps.size();
+    err = parcel->writeInt32(numSurfaces);
     if (err != OK) return err;
 
+    for (int i = 0; i < numSurfaces; i++) {
+        view::Surface surfaceShim;
+        surfaceShim.name = String16("unknown_name"); // name of surface
+        surfaceShim.graphicBufferProducer = mGbps[i];
+
+        err = surfaceShim.writeToParcel(parcel);
+        if (err != OK) return err;
+    }
+
     return OK;
 }
 
+bool OutputConfiguration::gbpsEqual(const OutputConfiguration& other) const {
+    const std::vector<sp<IGraphicBufferProducer> >& otherGbps =
+            other.getGraphicBufferProducers();
+
+    if (mGbps.size() != otherGbps.size()) {
+        return false;
+    }
+
+    for (size_t i = 0; i < mGbps.size(); i++) {
+        if (mGbps[i] != otherGbps[i]) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+bool OutputConfiguration::gbpsLessThan(const OutputConfiguration& other) const {
+    const std::vector<sp<IGraphicBufferProducer> >& otherGbps =
+            other.getGraphicBufferProducers();
+
+    if (mGbps.size() !=  otherGbps.size()) {
+        return mGbps.size() < otherGbps.size();
+    }
+
+    for (size_t i = 0; i < mGbps.size(); i++) {
+        if (mGbps[i] != otherGbps[i]) {
+            return mGbps[i] < otherGbps[i];
+        }
+    }
+
+    return false;
+}
 }; // namespace android
diff --git a/camera/camera2/SubmitInfo.cpp b/camera/camera2/SubmitInfo.cpp
index d739c79..6ebd810 100644
--- a/camera/camera2/SubmitInfo.cpp
+++ b/camera/camera2/SubmitInfo.cpp
@@ -22,7 +22,7 @@
 namespace camera2 {
 namespace utils {
 
-status_t SubmitInfo::writeToParcel(Parcel *parcel) const {
+status_t SubmitInfo::writeToParcel(android::Parcel *parcel) const {
     status_t res;
     if (parcel == nullptr) return BAD_VALUE;
 
@@ -33,7 +33,7 @@
     return res;
 }
 
-status_t SubmitInfo::readFromParcel(const Parcel *parcel) {
+status_t SubmitInfo::readFromParcel(const android::Parcel *parcel) {
     status_t res;
     if (parcel == nullptr) return BAD_VALUE;
 
diff --git a/camera/cameraserver/Android.mk b/camera/cameraserver/Android.mk
index 888862a..bdafff1 100644
--- a/camera/cameraserver/Android.mk
+++ b/camera/cameraserver/Android.mk
@@ -22,9 +22,13 @@
 LOCAL_SHARED_LIBRARIES := \
 	libcameraservice \
 	liblog \
-	libcutils \
 	libutils \
 	libbinder \
+	android.hardware.camera.common@1.0 \
+	android.hardware.camera.provider@2.4 \
+	android.hardware.camera.device@1.0 \
+	android.hardware.camera.device@3.2 \
+	android.hidl.manager@1.0
 
 LOCAL_MODULE:= cameraserver
 LOCAL_32_BIT_ONLY := true
diff --git a/camera/ndk/Android.mk b/camera/ndk/Android.mk
index 40dbeef..591dfc2 100644
--- a/camera/ndk/Android.mk
+++ b/camera/ndk/Android.mk
@@ -51,8 +51,6 @@
     libcutils \
     libcamera_metadata
 
-LOCAL_CLANG := true
-
 include $(BUILD_SHARED_LIBRARY)
 
 endif
diff --git a/camera/ndk/impl/ACameraDevice.cpp b/camera/ndk/impl/ACameraDevice.cpp
index 7d78e2b..229b159 100644
--- a/camera/ndk/impl/ACameraDevice.cpp
+++ b/camera/ndk/impl/ACameraDevice.cpp
@@ -1347,6 +1347,12 @@
 }
 
 binder::Status
+CameraDevice::ServiceCallback::onRequestQueueEmpty() {
+    // onRequestQueueEmpty not yet implemented in NDK
+    return binder::Status::ok();
+}
+
+binder::Status
 CameraDevice::ServiceCallback::onRepeatingRequestError(int64_t lastFrameNumber) {
     binder::Status ret = binder::Status::ok();
 
diff --git a/camera/ndk/impl/ACameraDevice.h b/camera/ndk/impl/ACameraDevice.h
index 051462b..eb8028b 100644
--- a/camera/ndk/impl/ACameraDevice.h
+++ b/camera/ndk/impl/ACameraDevice.h
@@ -74,6 +74,7 @@
         binder::Status onResultReceived(const CameraMetadata& metadata,
                               const CaptureResultExtras& resultExtras) override;
         binder::Status onPrepared(int streamId) override;
+        binder::Status onRequestQueueEmpty() override;
         binder::Status onRepeatingRequestError(int64_t lastFrameNumber) override;
       private:
         const wp<CameraDevice> mDevice;
diff --git a/camera/ndk/impl/ACameraManager.cpp b/camera/ndk/impl/ACameraManager.cpp
index 35555ff..ba2100c 100644
--- a/camera/ndk/impl/ACameraManager.cpp
+++ b/camera/ndk/impl/ACameraManager.cpp
@@ -28,11 +28,6 @@
 
 using namespace android;
 
-//constants shared between ACameraManager and CameraManagerGlobal
-namespace {
-    const int kMaxCameraIdLen = 32;
-}
-
 namespace android {
 // Static member definitions
 const char* CameraManagerGlobal::kCameraIdKey   = "CameraId";
@@ -125,7 +120,11 @@
         if (mCameraServiceListener == nullptr) {
             mCameraServiceListener = new CameraServiceListener(this);
         }
-        mCameraService->addListener(mCameraServiceListener);
+        std::vector<hardware::CameraStatus> cameraStatuses{};
+        mCameraService->addListener(mCameraServiceListener, &cameraStatuses);
+        for (auto& c : cameraStatuses) {
+            onStatusChangedLocked(c.status, c.cameraId);
+        }
 
         // setup vendor tags
         sp<VendorTagDescriptor> desc = new VendorTagDescriptor();
@@ -157,8 +156,8 @@
     sp<CameraManagerGlobal> cm = mCameraManager.promote();
     if (cm != nullptr) {
         AutoMutex lock(cm->mLock);
-        for (auto pair : cm->mDeviceStatusMap) {
-            int32_t cameraId = pair.first;
+        for (auto& pair : cm->mDeviceStatusMap) {
+            const String8 &cameraId = pair.first;
             cm->onStatusChangedLocked(
                     CameraServiceListener::STATUS_NOT_PRESENT, cameraId);
         }
@@ -174,8 +173,8 @@
     auto pair = mCallbacks.insert(cb);
     // Send initial callbacks if callback is newly registered
     if (pair.second) {
-        for (auto pair : mDeviceStatusMap) {
-            int32_t cameraId = pair.first;
+        for (auto& pair : mDeviceStatusMap) {
+            const String8& cameraId = pair.first;
             int32_t status = pair.second;
 
             sp<AMessage> msg = new AMessage(kWhatSendSingleCallback, mHandler);
@@ -183,7 +182,7 @@
                     callback->onCameraAvailable : callback->onCameraUnavailable;
             msg->setPointer(kCallbackFpKey, (void *) cb);
             msg->setPointer(kContextKey, callback->context);
-            msg->setInt32(kCameraIdKey, cameraId);
+            msg->setString(kCameraIdKey, AString(cameraId));
             msg->post();
         }
     }
@@ -196,6 +195,26 @@
     mCallbacks.erase(cb);
 }
 
+void CameraManagerGlobal::getCameraIdList(std::vector<String8> *cameraIds) {
+    // Ensure that we have initialized/refreshed the list of available devices
+    auto cs = getCameraService();
+    Mutex::Autolock _l(mLock);
+
+    for(auto& deviceStatus : mDeviceStatusMap) {
+        if (deviceStatus.second == hardware::ICameraServiceListener::STATUS_NOT_PRESENT ||
+                deviceStatus.second == hardware::ICameraServiceListener::STATUS_ENUMERATING) {
+            continue;
+        }
+        bool camera2Support = false;
+        binder::Status serviceRet = cs->supportsCameraApi(String16(deviceStatus.first),
+                hardware::ICameraService::API_VERSION_2, &camera2Support);
+        if (!serviceRet.isOk() || !camera2Support) {
+            continue;
+        }
+        cameraIds->push_back(deviceStatus.first);
+    }
+}
+
 bool CameraManagerGlobal::validStatus(int32_t status) {
     switch (status) {
         case hardware::ICameraServiceListener::STATUS_NOT_PRESENT:
@@ -217,14 +236,6 @@
     }
 }
 
-void CameraManagerGlobal::CallbackHandler::sendSingleCallback(
-        int32_t cameraId, void* context,
-        ACameraManager_AvailabilityCallback cb) const {
-    char cameraIdStr[kMaxCameraIdLen];
-    snprintf(cameraIdStr, sizeof(cameraIdStr), "%d", cameraId);
-    (*cb)(context, cameraIdStr);
-}
-
 void CameraManagerGlobal::CallbackHandler::onMessageReceived(
         const sp<AMessage> &msg) {
     switch (msg->what()) {
@@ -232,7 +243,7 @@
         {
             ACameraManager_AvailabilityCallback cb;
             void* context;
-            int32_t cameraId;
+            AString cameraId;
             bool found = msg->findPointer(kCallbackFpKey, (void**) &cb);
             if (!found) {
                 ALOGE("%s: Cannot find camera callback fp!", __FUNCTION__);
@@ -243,12 +254,12 @@
                 ALOGE("%s: Cannot find callback context!", __FUNCTION__);
                 return;
             }
-            found = msg->findInt32(kCameraIdKey, &cameraId);
+            found = msg->findString(kCameraIdKey, &cameraId);
             if (!found) {
                 ALOGE("%s: Cannot find camera ID!", __FUNCTION__);
                 return;
             }
-            sendSingleCallback(cameraId, context, cb);
+            (*cb)(context, cameraId.c_str());
             break;
         }
         default:
@@ -258,10 +269,10 @@
 }
 
 binder::Status CameraManagerGlobal::CameraServiceListener::onStatusChanged(
-        int32_t status, int32_t cameraId) {
+        int32_t status, const String16& cameraId) {
     sp<CameraManagerGlobal> cm = mCameraManager.promote();
     if (cm != nullptr) {
-        cm->onStatusChanged(status, cameraId);
+        cm->onStatusChanged(status, String8(cameraId));
     } else {
         ALOGE("Cannot deliver status change. Global camera manager died");
     }
@@ -269,40 +280,40 @@
 }
 
 void CameraManagerGlobal::onStatusChanged(
-        int32_t status, int32_t cameraId) {
+        int32_t status, const String8& cameraId) {
     Mutex::Autolock _l(mLock);
     onStatusChangedLocked(status, cameraId);
 }
 
 void CameraManagerGlobal::onStatusChangedLocked(
-        int32_t status, int32_t cameraId) {
-        if (!validStatus(status)) {
-            ALOGE("%s: Invalid status %d", __FUNCTION__, status);
-            return;
-        }
+        int32_t status, const String8& cameraId) {
+    if (!validStatus(status)) {
+        ALOGE("%s: Invalid status %d", __FUNCTION__, status);
+        return;
+    }
 
-        bool firstStatus = (mDeviceStatusMap.count(cameraId) == 0);
-        int32_t oldStatus = firstStatus ?
-                status : // first status
-                mDeviceStatusMap[cameraId];
+    bool firstStatus = (mDeviceStatusMap.count(cameraId) == 0);
+    int32_t oldStatus = firstStatus ?
+            status : // first status
+            mDeviceStatusMap[cameraId];
 
-        if (!firstStatus &&
-                isStatusAvailable(status) == isStatusAvailable(oldStatus)) {
-            // No status update. No need to send callback
-            return;
-        }
+    if (!firstStatus &&
+            isStatusAvailable(status) == isStatusAvailable(oldStatus)) {
+        // No status update. No need to send callback
+        return;
+    }
 
-        // Iterate through all registered callbacks
-        mDeviceStatusMap[cameraId] = status;
-        for (auto cb : mCallbacks) {
-            sp<AMessage> msg = new AMessage(kWhatSendSingleCallback, mHandler);
-            ACameraManager_AvailabilityCallback cbFp = isStatusAvailable(status) ?
-                    cb.mAvailable : cb.mUnavailable;
-            msg->setPointer(kCallbackFpKey, (void *) cbFp);
-            msg->setPointer(kContextKey, cb.mContext);
-            msg->setInt32(kCameraIdKey, cameraId);
-            msg->post();
-        }
+    // Iterate through all registered callbacks
+    mDeviceStatusMap[cameraId] = status;
+    for (auto cb : mCallbacks) {
+        sp<AMessage> msg = new AMessage(kWhatSendSingleCallback, mHandler);
+        ACameraManager_AvailabilityCallback cbFp = isStatusAvailable(status) ?
+                cb.mAvailable : cb.mUnavailable;
+        msg->setPointer(kCallbackFpKey, (void *) cbFp);
+        msg->setPointer(kContextKey, cb.mContext);
+        msg->setString(kCameraIdKey, AString(cameraId));
+        msg->post();
+    }
 }
 
 } // namespace android
@@ -311,77 +322,13 @@
  * ACameraManger Implementation
  */
 camera_status_t
-ACameraManager::getOrCreateCameraIdListLocked(ACameraIdList** cameraIdList) {
-    if (mCachedCameraIdList.numCameras == kCameraIdListNotInit) {
-        if (isCameraServiceDisabled()) {
-            mCachedCameraIdList.numCameras = 0;
-            mCachedCameraIdList.cameraIds = new const char*[0];
-            *cameraIdList = &mCachedCameraIdList;
-            return ACAMERA_OK;
-        }
-
-        int numCameras = 0;
-        Vector<char *> cameraIds;
-        sp<hardware::ICameraService> cs = CameraManagerGlobal::getInstance().getCameraService();
-        if (cs == nullptr) {
-            ALOGE("%s: Cannot reach camera service!", __FUNCTION__);
-            return ACAMERA_ERROR_CAMERA_DISCONNECTED;
-        }
-        // Get number of cameras
-        int numAllCameras = 0;
-        binder::Status serviceRet = cs->getNumberOfCameras(hardware::ICameraService::CAMERA_TYPE_ALL,
-                &numAllCameras);
-        if (!serviceRet.isOk()) {
-            ALOGE("%s: Error getting camera count: %s", __FUNCTION__,
-                    serviceRet.toString8().string());
-            numAllCameras = 0;
-        }
-        // Filter API2 compatible cameras and push to cameraIds
-        for (int i = 0; i < numAllCameras; i++) {
-            // TODO: Only suppot HALs that supports API2 directly now
-            bool camera2Support = false;
-            serviceRet = cs->supportsCameraApi(i, hardware::ICameraService::API_VERSION_2,
-                    &camera2Support);
-            char buf[kMaxCameraIdLen];
-            if (camera2Support) {
-                numCameras++;
-                mCameraIds.insert(i);
-                snprintf(buf, sizeof(buf), "%d", i);
-                size_t cameraIdSize = strlen(buf) + 1;
-                char *cameraId = new char[cameraIdSize];
-                if (!cameraId) {
-                    ALOGE("Allocate memory for ACameraIdList failed!");
-                    return ACAMERA_ERROR_NOT_ENOUGH_MEMORY;
-                }
-                strlcpy(cameraId, buf, cameraIdSize);
-                cameraIds.push(cameraId);
-            }
-        }
-        mCachedCameraIdList.numCameras = numCameras;
-        mCachedCameraIdList.cameraIds = new const char*[numCameras];
-        if (!mCachedCameraIdList.cameraIds) {
-            ALOGE("Allocate memory for ACameraIdList failed!");
-            return ACAMERA_ERROR_NOT_ENOUGH_MEMORY;
-        }
-        for (int i = 0; i < numCameras; i++) {
-            mCachedCameraIdList.cameraIds[i] = cameraIds[i];
-        }
-    }
-    *cameraIdList = &mCachedCameraIdList;
-    return ACAMERA_OK;
-}
-
-camera_status_t
 ACameraManager::getCameraIdList(ACameraIdList** cameraIdList) {
     Mutex::Autolock _l(mLock);
-    ACameraIdList* cachedList;
-    camera_status_t ret = getOrCreateCameraIdListLocked(&cachedList);
-    if (ret != ACAMERA_OK) {
-        ALOGE("Get camera ID list failed! err: %d", ret);
-        return ret;
-    }
 
-    int numCameras = cachedList->numCameras;
+    std::vector<String8> idList;
+    CameraManagerGlobal::getInstance().getCameraIdList(&idList);
+
+    int numCameras = idList.size();
     ACameraIdList *out = new ACameraIdList;
     if (!out) {
         ALOGE("Allocate memory for ACameraIdList failed!");
@@ -391,14 +338,16 @@
     out->cameraIds = new const char*[numCameras];
     if (!out->cameraIds) {
         ALOGE("Allocate memory for ACameraIdList failed!");
+        deleteCameraIdList(out);
         return ACAMERA_ERROR_NOT_ENOUGH_MEMORY;
     }
     for (int i = 0; i < numCameras; i++) {
-        const char* src = cachedList->cameraIds[i];
+        const char* src = idList[i].string();
         size_t dstSize = strlen(src) + 1;
         char* dst = new char[dstSize];
         if (!dst) {
             ALOGE("Allocate memory for ACameraIdList failed!");
+            deleteCameraIdList(out);
             return ACAMERA_ERROR_NOT_ENOUGH_MEMORY;
         }
         strlcpy(dst, src, dstSize);
@@ -413,7 +362,9 @@
     if (cameraIdList != nullptr) {
         if (cameraIdList->cameraIds != nullptr) {
             for (int i = 0; i < cameraIdList->numCameras; i ++) {
-                delete[] cameraIdList->cameraIds[i];
+                if (cameraIdList->cameraIds[i] != nullptr) {
+                    delete[] cameraIdList->cameraIds[i];
+                }
             }
             delete[] cameraIdList->cameraIds;
         }
@@ -424,29 +375,27 @@
 camera_status_t ACameraManager::getCameraCharacteristics(
         const char *cameraIdStr, ACameraMetadata **characteristics) {
     Mutex::Autolock _l(mLock);
-    ACameraIdList* cachedList;
-    // Make sure mCameraIds is initialized
-    camera_status_t ret = getOrCreateCameraIdListLocked(&cachedList);
-    if (ret != ACAMERA_OK) {
-        ALOGE("%s: Get camera ID list failed! err: %d", __FUNCTION__, ret);
-        return ret;
-    }
-    int cameraId = atoi(cameraIdStr);
-    if (mCameraIds.count(cameraId) == 0) {
-        ALOGE("%s: Camera ID %s does not exist!", __FUNCTION__, cameraIdStr);
-        return ACAMERA_ERROR_INVALID_PARAMETER;
-    }
+
     sp<hardware::ICameraService> cs = CameraManagerGlobal::getInstance().getCameraService();
     if (cs == nullptr) {
         ALOGE("%s: Cannot reach camera service!", __FUNCTION__);
         return ACAMERA_ERROR_CAMERA_DISCONNECTED;
     }
     CameraMetadata rawMetadata;
-    binder::Status serviceRet = cs->getCameraCharacteristics(cameraId, &rawMetadata);
+    binder::Status serviceRet = cs->getCameraCharacteristics(String16(cameraIdStr), &rawMetadata);
     if (!serviceRet.isOk()) {
-        ALOGE("Get camera characteristics from camera service failed: %s",
-                serviceRet.toString8().string());
-        return ACAMERA_ERROR_UNKNOWN; // should not reach here
+        switch(serviceRet.serviceSpecificErrorCode()) {
+            case hardware::ICameraService::ERROR_DISCONNECTED:
+                ALOGE("%s: Camera %s has been disconnected", __FUNCTION__, cameraIdStr);
+                return ACAMERA_ERROR_CAMERA_DISCONNECTED;
+            case hardware::ICameraService::ERROR_ILLEGAL_ARGUMENT:
+                ALOGE("%s: Camera ID %s does not exist!", __FUNCTION__, cameraIdStr);
+                return ACAMERA_ERROR_INVALID_PARAMETER;
+            default:
+                ALOGE("Get camera characteristics from camera service failed: %s",
+                        serviceRet.toString8().string());
+                return ACAMERA_ERROR_UNKNOWN; // should not reach here
+        }
     }
 
     *characteristics = new ACameraMetadata(
@@ -475,16 +424,16 @@
     sp<hardware::ICameraService> cs = CameraManagerGlobal::getInstance().getCameraService();
     if (cs == nullptr) {
         ALOGE("%s: Cannot reach camera service!", __FUNCTION__);
+        delete device;
         return ACAMERA_ERROR_CAMERA_DISCONNECTED;
     }
 
-    int id = atoi(cameraId);
     sp<hardware::camera2::ICameraDeviceCallbacks> callbacks = device->getServiceCallback();
     sp<hardware::camera2::ICameraDeviceUser> deviceRemote;
     // No way to get package name from native.
     // Send a zero length package name and let camera service figure it out from UID
     binder::Status serviceRet = cs->connectDevice(
-            callbacks, id, String16(""),
+            callbacks, String16(cameraId), String16(""),
             hardware::ICameraService::USE_CALLING_UID, /*out*/&deviceRemote);
 
     if (!serviceRet.isOk()) {
@@ -533,11 +482,5 @@
 }
 
 ACameraManager::~ACameraManager() {
-    Mutex::Autolock _l(mLock);
-    if (mCachedCameraIdList.numCameras != kCameraIdListNotInit) {
-        for (int i = 0; i < mCachedCameraIdList.numCameras; i++) {
-            delete[] mCachedCameraIdList.cameraIds[i];
-        }
-        delete[] mCachedCameraIdList.cameraIds;
-    }
+
 }
diff --git a/camera/ndk/impl/ACameraManager.h b/camera/ndk/impl/ACameraManager.h
index 5b88904..97e4fd9 100644
--- a/camera/ndk/impl/ACameraManager.h
+++ b/camera/ndk/impl/ACameraManager.h
@@ -40,7 +40,7 @@
  * instances. Created when first ACameraManager is created and destroyed when
  * all ACameraManager instances are deleted.
  *
- * TODO: maybe CameraManagerGlobal is better sutied in libcameraclient?
+ * TODO: maybe CameraManagerGlobal is better suited in libcameraclient?
  */
 class CameraManagerGlobal final : public RefBase {
   public:
@@ -52,6 +52,11 @@
     void unregisterAvailabilityCallback(
             const ACameraManager_AvailabilityCallbacks *callback);
 
+    /**
+     * Return camera IDs that support camera2
+     */
+    void getCameraIdList(std::vector<String8> *cameraIds);
+
   private:
     sp<hardware::ICameraService> mCameraService;
     const int          kCameraServicePollDelay = 500000; // 0.5s
@@ -72,7 +77,7 @@
     class CameraServiceListener final : public hardware::BnCameraServiceListener {
       public:
         explicit CameraServiceListener(CameraManagerGlobal* cm) : mCameraManager(cm) {}
-        virtual binder::Status onStatusChanged(int32_t status, int32_t cameraId);
+        virtual binder::Status onStatusChanged(int32_t status, const String16& cameraId);
 
         // Torch API not implemented yet
         virtual binder::Status onTorchStatusChanged(int32_t, const String16&) {
@@ -125,22 +130,18 @@
       public:
         CallbackHandler() {}
         void onMessageReceived(const sp<AMessage> &msg) override;
-      private:
-        inline void sendSingleCallback(
-                int32_t cameraId, void* context,
-                ACameraManager_AvailabilityCallback cb) const;
     };
     sp<CallbackHandler> mHandler;
     sp<ALooper>         mCbLooper; // Looper thread where callbacks actually happen on
 
-    void onStatusChanged(int32_t status, int32_t cameraId);
-    void onStatusChangedLocked(int32_t status, int32_t cameraId);
+    void onStatusChanged(int32_t status, const String8& cameraId);
+    void onStatusChangedLocked(int32_t status, const String8& cameraId);
     // Utils for status
     static bool validStatus(int32_t status);
     static bool isStatusAvailable(int32_t status);
 
     // Map camera_id -> status
-    std::map<int32_t, int32_t> mDeviceStatusMap;
+    std::map<String8, int32_t> mDeviceStatusMap;
 
     // For the singleton instance
     static Mutex sLock;
@@ -157,7 +158,6 @@
  */
 struct ACameraManager {
     ACameraManager() :
-            mCachedCameraIdList({kCameraIdListNotInit, nullptr}),
             mGlobalManager(&(android::CameraManagerGlobal::getInstance())) {}
     ~ACameraManager();
     camera_status_t getCameraIdList(ACameraIdList** cameraIdList);
@@ -170,14 +170,10 @@
                                /*out*/ACameraDevice** device);
 
   private:
-    camera_status_t getOrCreateCameraIdListLocked(ACameraIdList** cameraIdList);
-
     enum {
         kCameraIdListNotInit = -1
     };
     android::Mutex         mLock;
-    std::set<int> mCameraIds;          // Init by getOrCreateCameraIdListLocked
-    ACameraIdList mCachedCameraIdList; // Init by getOrCreateCameraIdListLocked
     android::sp<android::CameraManagerGlobal> mGlobalManager;
 };
 
diff --git a/camera/tests/CameraBinderTests.cpp b/camera/tests/CameraBinderTests.cpp
index 828a758..946e3b8 100644
--- a/camera/tests/CameraBinderTests.cpp
+++ b/camera/tests/CameraBinderTests.cpp
@@ -65,14 +65,14 @@
 // Stub listener implementation
 class TestCameraServiceListener : public hardware::BnCameraServiceListener {
     std::map<String16, int32_t> mCameraTorchStatuses;
-    std::map<int32_t, int32_t> mCameraStatuses;
+    std::map<String16, int32_t> mCameraStatuses;
     mutable Mutex mLock;
     mutable Condition mCondition;
     mutable Condition mTorchCondition;
 public:
     virtual ~TestCameraServiceListener() {};
 
-    virtual binder::Status onStatusChanged(int32_t status, int32_t cameraId) {
+    virtual binder::Status onStatusChanged(int32_t status, const String16& cameraId) {
         Mutex::Autolock l(mLock);
         mCameraStatuses[cameraId] = status;
         mCondition.broadcast();
@@ -130,7 +130,7 @@
         return iter->second;
     };
 
-    int32_t getStatus(int32_t cameraId) const {
+    int32_t getStatus(const String16& cameraId) const {
         Mutex::Autolock l(mLock);
         const auto& iter = mCameraStatuses.find(cameraId);
         if (iter == mCameraStatuses.end()) {
@@ -151,6 +151,7 @@
         SENT_RESULT,
         UNINITIALIZED,
         REPEATING_REQUEST_ERROR,
+        REQUEST_QUEUE_EMPTY,
     };
 
 protected:
@@ -225,6 +226,14 @@
         return binder::Status::ok();
     }
 
+    virtual binder::Status onRequestQueueEmpty() {
+        Mutex::Autolock l(mLock);
+        mLastStatus = REQUEST_QUEUE_EMPTY;
+        mStatusesHit.push_back(mLastStatus);
+        mStatusCondition.broadcast();
+        return binder::Status::ok();
+    }
+
     // Test helper functions:
 
     bool hadError() const {
@@ -301,14 +310,16 @@
 
     // Check listener binder calls
     sp<TestCameraServiceListener> listener(new TestCameraServiceListener());
-    res = service->addListener(listener);
+    std::vector<hardware::CameraStatus> statuses;
+    res = service->addListener(listener, &statuses);
     EXPECT_TRUE(res.isOk()) << res;
 
-    EXPECT_TRUE(listener->waitForNumCameras(numCameras));
+    EXPECT_EQ(numCameras, static_cast<const int>(statuses.size()));
 
     for (int32_t i = 0; i < numCameras; i++) {
+        String16 cameraId = String16(String8::format("%d", i));
         bool isSupported = false;
-        res = service->supportsCameraApi(i,
+        res = service->supportsCameraApi(cameraId,
                 hardware::ICameraService::API_VERSION_2, &isSupported);
         EXPECT_TRUE(res.isOk()) << res;
 
@@ -319,12 +330,12 @@
 
         // Check metadata binder call
         CameraMetadata metadata;
-        res = service->getCameraCharacteristics(i, &metadata);
+        res = service->getCameraCharacteristics(cameraId, &metadata);
         EXPECT_TRUE(res.isOk()) << res;
         EXPECT_FALSE(metadata.isEmpty());
 
         // Make sure we're available, or skip device tests otherwise
-        int32_t s = listener->getStatus(i);
+        int32_t s = listener->getStatus(cameraId);
         EXPECT_EQ(::android::hardware::ICameraServiceListener::STATUS_PRESENT, s);
         if (s != ::android::hardware::ICameraServiceListener::STATUS_PRESENT) {
             continue;
@@ -333,7 +344,7 @@
         // Check connect binder calls
         sp<TestCameraDeviceCallbacks> callbacks(new TestCameraDeviceCallbacks());
         sp<hardware::camera2::ICameraDeviceUser> device;
-        res = service->connectDevice(callbacks, i, String16("meeeeeeeee!"),
+        res = service->connectDevice(callbacks, cameraId, String16("meeeeeeeee!"),
                 hardware::ICameraService::USE_CALLING_UID, /*out*/&device);
         EXPECT_TRUE(res.isOk()) << res;
         ASSERT_NE(nullptr, device.get());
@@ -343,12 +354,12 @@
         int32_t torchStatus = listener->getTorchStatus(i);
         if (torchStatus == hardware::ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF) {
             // Check torch calls
-            res = service->setTorchMode(String16(String8::format("%d", i)),
+            res = service->setTorchMode(cameraId,
                     /*enabled*/true, callbacks);
             EXPECT_TRUE(res.isOk()) << res;
             EXPECT_TRUE(listener->waitForTorchState(
                     hardware::ICameraServiceListener::TORCH_STATUS_AVAILABLE_ON, i));
-            res = service->setTorchMode(String16(String8::format("%d", i)),
+            res = service->setTorchMode(cameraId,
                     /*enabled*/false, callbacks);
             EXPECT_TRUE(res.isOk()) << res;
             EXPECT_TRUE(listener->waitForTorchState(
@@ -370,7 +381,7 @@
     sp<TestCameraServiceListener> serviceListener;
 
     std::pair<sp<TestCameraDeviceCallbacks>, sp<hardware::camera2::ICameraDeviceUser>>
-            openNewDevice(int deviceId) {
+            openNewDevice(const String16& deviceId) {
         sp<TestCameraDeviceCallbacks> callbacks(new TestCameraDeviceCallbacks());
         sp<hardware::camera2::ICameraDeviceUser> device;
         {
@@ -406,7 +417,8 @@
         sp<IBinder> binder = sm->getService(String16("media.camera"));
         service = interface_cast<hardware::ICameraService>(binder);
         serviceListener = new TestCameraServiceListener();
-        service->addListener(serviceListener);
+        std::vector<hardware::CameraStatus> statuses;
+        service->addListener(serviceListener, &statuses);
         service->getNumberOfCameras(hardware::ICameraService::CAMERA_TYPE_BACKWARD_COMPATIBLE,
                 &numCameras);
     }
@@ -426,14 +438,14 @@
     EXPECT_TRUE(serviceListener->waitForNumCameras(numCameras));
     for (int32_t i = 0; i < numCameras; i++) {
         // Make sure we're available, or skip device tests otherwise
-        int32_t s = serviceListener->getStatus(i);
+        String16 cameraId(String8::format("%d",i));
+        int32_t s = serviceListener->getStatus(cameraId);
         EXPECT_EQ(hardware::ICameraServiceListener::STATUS_PRESENT, s);
         if (s != hardware::ICameraServiceListener::STATUS_PRESENT) {
             continue;
         }
         binder::Status res;
-
-        auto p = openNewDevice(i);
+        auto p = openNewDevice(cameraId);
         sp<TestCameraDeviceCallbacks> callbacks = p.first;
         sp<hardware::camera2::ICameraDeviceUser> device = p.second;
 
diff --git a/cmds/screenrecord/Overlay.cpp b/cmds/screenrecord/Overlay.cpp
index 9fd192c..be993e0 100644
--- a/cmds/screenrecord/Overlay.cpp
+++ b/cmds/screenrecord/Overlay.cpp
@@ -259,6 +259,11 @@
     const char* format = "%T";
     struct tm tm;
 
+    if (mUseMonotonicTimestamps) {
+        snprintf(buf, bufLen, "%" PRId64, monotonicNsec);
+        return;
+    }
+
     // localtime/strftime is not the fastest way to do this, but a trivial
     // benchmark suggests that the cost is negligible.
     int64_t realTime = mStartRealtimeNsecs +
diff --git a/cmds/screenrecord/Overlay.h b/cmds/screenrecord/Overlay.h
index ee3444d..1d8a569 100644
--- a/cmds/screenrecord/Overlay.h
+++ b/cmds/screenrecord/Overlay.h
@@ -37,7 +37,7 @@
  */
 class Overlay : public GLConsumer::FrameAvailableListener, Thread {
 public:
-    Overlay() : Thread(false),
+    Overlay(bool monotonicTimestamps) : Thread(false),
         mThreadResult(UNKNOWN_ERROR),
         mState(UNINITIALIZED),
         mFrameAvailable(false),
@@ -45,7 +45,8 @@
         mStartMonotonicNsecs(0),
         mStartRealtimeNsecs(0),
         mLastFrameNumber(-1),
-        mTotalDroppedFrames(0)
+        mTotalDroppedFrames(0),
+        mUseMonotonicTimestamps(monotonicTimestamps)
         {}
 
     // Creates a thread that performs the overlay.  Pass in the surface that
@@ -151,6 +152,8 @@
     nsecs_t mLastFrameNumber;
     size_t mTotalDroppedFrames;
 
+    bool mUseMonotonicTimestamps;
+
     static const char* kPropertyNames[];
 };
 
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index 59d5661..6097f01 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -51,6 +51,7 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaMuxer.h>
 #include <media/ICrypto.h>
+#include <media/MediaCodecBuffer.h>
 
 #include "screenrecord.h"
 #include "Overlay.h"
@@ -68,6 +69,7 @@
 // Command-line parameters.
 static bool gVerbose = false;           // chatty on stdout
 static bool gRotate = false;            // rotate 90 degrees
+static bool gMonotonicTime = false;     // use system monotonic time for timestamps
 static enum {
     FORMAT_MP4, FORMAT_H264, FORMAT_FRAMES, FORMAT_RAW_FRAMES
 } gOutputFormat = FORMAT_MP4;           // data format for output
@@ -327,7 +329,7 @@
 
     assert((rawFp == NULL && muxer != NULL) || (rawFp != NULL && muxer == NULL));
 
-    Vector<sp<ABuffer> > buffers;
+    Vector<sp<MediaCodecBuffer> > buffers;
     err = encoder->getOutputBuffers(&buffers);
     if (err != NO_ERROR) {
         fprintf(stderr, "Unable to get output buffers (err=%d)\n", err);
@@ -410,7 +412,10 @@
                     // want to queue these up and do them on a different thread.
                     ATRACE_NAME("write sample");
                     assert(trackIdx != -1);
-                    err = muxer->writeSampleData(buffers[bufIndex], trackIdx,
+                    // TODO: MediaMuxer still takes an ABuffer, so wrap the codec buffer for now.
+                    sp<ABuffer> buffer = new ABuffer(
+                            buffers[bufIndex]->data(), buffers[bufIndex]->size());
+                    err = muxer->writeSampleData(buffer, trackIdx,
                             ptsUsec, flags);
                     if (err != NO_ERROR) {
                         fprintf(stderr,
@@ -609,7 +614,7 @@
     sp<Overlay> overlay;
     if (gWantFrameTime) {
         // Send virtual display frames to an external texture.
-        overlay = new Overlay();
+        overlay = new Overlay(gMonotonicTime);
         err = overlay->start(encoderInputSurface, &bufferProducer);
         if (err != NO_ERROR) {
             if (encoder != NULL) encoder->release();
@@ -892,6 +897,7 @@
         { "show-frame-time",    no_argument,        NULL, 'f' },
         { "rotate",             no_argument,        NULL, 'r' },
         { "output-format",      required_argument,  NULL, 'o' },
+        { "monotonic-time",     no_argument,        NULL, 'm' },
         { NULL,                 0,                  NULL, 0 }
     };
 
@@ -971,6 +977,9 @@
                 return 2;
             }
             break;
+        case 'm':
+            gMonotonicTime = true;
+            break;
         default:
             if (ic != '?') {
                 fprintf(stderr, "getopt_long returned unexpected value 0x%x\n", ic);
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
index f8c8d3d..9a236fc 100644
--- a/cmds/stagefright/Android.mk
+++ b/cmds/stagefright/Android.mk
@@ -18,7 +18,6 @@
 	external/jpeg \
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
@@ -43,7 +42,6 @@
 	$(TOP)/frameworks/native/include/media/hardware
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
@@ -68,7 +66,6 @@
 	$(TOP)/frameworks/native/include/media/hardware
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
@@ -93,7 +90,6 @@
 	$(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
@@ -117,7 +113,6 @@
 	$(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
@@ -129,30 +124,6 @@
 
 include $(CLEAR_VARS)
 
-LOCAL_SRC_FILES:=         \
-	sf2.cpp    \
-
-LOCAL_SHARED_LIBRARIES := \
-	libstagefright liblog libutils libbinder libstagefright_foundation \
-	libmedia libgui libcutils
-
-LOCAL_C_INCLUDES:= \
-	frameworks/av/media/libstagefright \
-	$(TOP)/frameworks/native/include/media/openmax
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-LOCAL_CLANG := true
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= sf2
-
-include $(BUILD_EXECUTABLE)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
 LOCAL_SRC_FILES:=               \
 	codec.cpp               \
 	SimplePlayer.cpp        \
@@ -166,7 +137,6 @@
 	$(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
@@ -208,7 +178,6 @@
 	libstagefright_mediafilter
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
@@ -232,7 +201,6 @@
 	$(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
diff --git a/cmds/stagefright/SimplePlayer.cpp b/cmds/stagefright/SimplePlayer.cpp
index 50913cd..afb7db3 100644
--- a/cmds/stagefright/SimplePlayer.cpp
+++ b/cmds/stagefright/SimplePlayer.cpp
@@ -25,6 +25,7 @@
 #include <media/AudioTrack.h>
 #include <media/ICrypto.h>
 #include <media/IMediaHTTPService.h>
+#include <media/MediaCodecBuffer.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -355,7 +356,7 @@
             err = state->mCodec->dequeueInputBuffer(&index, -1ll);
             CHECK_EQ(err, (status_t)OK);
 
-            const sp<ABuffer> &dstBuffer = state->mBuffers[0].itemAt(index);
+            const sp<MediaCodecBuffer> &dstBuffer = state->mBuffers[0].itemAt(index);
 
             CHECK_LE(srcBuffer->size(), dstBuffer->capacity());
             dstBuffer->setRange(0, srcBuffer->size());
@@ -482,11 +483,13 @@
             state->mAvailInputBufferIndices.erase(
                     state->mAvailInputBufferIndices.begin());
 
-            const sp<ABuffer> &dstBuffer =
+            const sp<MediaCodecBuffer> &dstBuffer =
                 state->mBuffers[0].itemAt(index);
+            sp<ABuffer> abuffer = new ABuffer(dstBuffer->base(), dstBuffer->capacity());
 
-            err = mExtractor->readSampleData(dstBuffer);
+            err = mExtractor->readSampleData(abuffer);
             CHECK_EQ(err, (status_t)OK);
+            dstBuffer->setRange(abuffer->offset(), abuffer->size());
 
             int64_t timeUs;
             CHECK_EQ(mExtractor->getSampleTime(&timeUs), (status_t)OK);
@@ -530,7 +533,7 @@
                     state->mCodec->releaseOutputBuffer(info->mIndex);
                 } else {
                     if (state->mAudioTrack != NULL) {
-                        const sp<ABuffer> &srcBuffer =
+                        const sp<MediaCodecBuffer> &srcBuffer =
                             state->mBuffers[1].itemAt(info->mIndex);
 
                         renderAudio(state, info, srcBuffer);
@@ -597,7 +600,7 @@
 }
 
 void SimplePlayer::renderAudio(
-        CodecState *state, BufferInfo *info, const sp<ABuffer> &buffer) {
+        CodecState *state, BufferInfo *info, const sp<MediaCodecBuffer> &buffer) {
     CHECK(state->mAudioTrack != NULL);
 
     if (state->mAudioTrack->stopped()) {
diff --git a/cmds/stagefright/SimplePlayer.h b/cmds/stagefright/SimplePlayer.h
index 0d8c74a..1269162 100644
--- a/cmds/stagefright/SimplePlayer.h
+++ b/cmds/stagefright/SimplePlayer.h
@@ -25,6 +25,7 @@
 class AudioTrack;
 class IGraphicBufferProducer;
 struct MediaCodec;
+class MediaCodecBuffer;
 struct NuMediaExtractor;
 class Surface;
 
@@ -73,7 +74,7 @@
     {
         sp<MediaCodec> mCodec;
         Vector<sp<ABuffer> > mCSD;
-        Vector<sp<ABuffer> > mBuffers[2];
+        Vector<sp<MediaCodecBuffer> > mBuffers[2];
 
         List<size_t> mAvailInputBufferIndices;
         List<BufferInfo> mAvailOutputBufferInfos;
@@ -101,7 +102,7 @@
     status_t onOutputFormatChanged(size_t trackIndex, CodecState *state);
 
     void renderAudio(
-            CodecState *state, BufferInfo *info, const sp<ABuffer> &buffer);
+            CodecState *state, BufferInfo *info, const sp<MediaCodecBuffer> &buffer);
 
     DISALLOW_EVIL_CONSTRUCTORS(SimplePlayer);
 };
diff --git a/cmds/stagefright/codec.cpp b/cmds/stagefright/codec.cpp
index dae9bbe..3108a67 100644
--- a/cmds/stagefright/codec.cpp
+++ b/cmds/stagefright/codec.cpp
@@ -26,12 +26,12 @@
 #include <media/ICrypto.h>
 #include <media/IMediaHTTPService.h>
 #include <media/IMediaPlayerService.h>
+#include <media/MediaCodecBuffer.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ALooper.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/foundation/AString.h>
-#include <media/stagefright/DataSource.h>
 #include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaCodecList.h>
 #include <media/stagefright/MediaDefs.h>
@@ -56,8 +56,8 @@
 
 struct CodecState {
     sp<MediaCodec> mCodec;
-    Vector<sp<ABuffer> > mInBuffers;
-    Vector<sp<ABuffer> > mOutBuffers;
+    Vector<sp<MediaCodecBuffer> > mInBuffers;
+    Vector<sp<MediaCodecBuffer> > mOutBuffers;
     bool mSignalledInputEOS;
     bool mSawOutputEOS;
     int64_t mNumBuffersDecoded;
@@ -174,10 +174,12 @@
                 if (err == OK) {
                     ALOGV("filling input buffer %zu", index);
 
-                    const sp<ABuffer> &buffer = state->mInBuffers.itemAt(index);
+                    const sp<MediaCodecBuffer> &buffer = state->mInBuffers.itemAt(index);
+                    sp<ABuffer> abuffer = new ABuffer(buffer->base(), buffer->capacity());
 
-                    err = extractor->readSampleData(buffer);
+                    err = extractor->readSampleData(abuffer);
                     CHECK_EQ(err, (status_t)OK);
+                    buffer->setRange(abuffer->offset(), abuffer->size());
 
                     int64_t timeUs;
                     err = extractor->getSampleTime(&timeUs);
@@ -398,8 +400,6 @@
 
     ProcessState::self()->startThreadPool();
 
-    DataSource::RegisterDefaultSniffers();
-
     sp<ALooper> looper = new ALooper;
     looper->start();
 
diff --git a/cmds/stagefright/mediafilter.cpp b/cmds/stagefright/mediafilter.cpp
index d829df0..f219e69 100644
--- a/cmds/stagefright/mediafilter.cpp
+++ b/cmds/stagefright/mediafilter.cpp
@@ -26,10 +26,10 @@
 #include <gui/Surface.h>
 #include <media/ICrypto.h>
 #include <media/IMediaHTTPService.h>
+#include <media/MediaCodecBuffer.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/DataSource.h>
 #include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/NuMediaExtractor.h>
 #include <media/stagefright/RenderScriptWrapper.h>
@@ -131,8 +131,8 @@
 
 struct CodecState {
     sp<MediaCodec> mCodec;
-    Vector<sp<ABuffer> > mInBuffers;
-    Vector<sp<ABuffer> > mOutBuffers;
+    Vector<sp<MediaCodecBuffer> > mInBuffers;
+    Vector<sp<MediaCodecBuffer> > mOutBuffers;
     bool mSignalledInputEOS;
     bool mSawOutputEOS;
     int64_t mNumBuffersDecoded;
@@ -183,9 +183,9 @@
     }
     size_t outIndex = frame.index;
 
-    const sp<ABuffer> &srcBuffer =
+    const sp<MediaCodecBuffer> &srcBuffer =
         vidState->mOutBuffers.itemAt(outIndex);
-    const sp<ABuffer> &destBuffer =
+    const sp<MediaCodecBuffer> &destBuffer =
         filterState->mInBuffers.itemAt(filterIndex);
 
     sp<AMessage> srcFormat, destFormat;
@@ -532,10 +532,12 @@
                 if (err == OK) {
                     ALOGV("filling input buffer %zu", index);
 
-                    const sp<ABuffer> &buffer = state->mInBuffers.itemAt(index);
+                    const sp<MediaCodecBuffer> &buffer = state->mInBuffers.itemAt(index);
+                    sp<ABuffer> abuffer = new ABuffer(buffer->base(), buffer->capacity());
 
-                    err = extractor->readSampleData(buffer);
+                    err = extractor->readSampleData(abuffer);
                     CHECK(err == OK);
+                    buffer->setRange(abuffer->offset(), abuffer->size());
 
                     int64_t timeUs;
                     err = extractor->getSampleTime(&timeUs);
@@ -735,8 +737,6 @@
 
     ProcessState::self()->startThreadPool();
 
-    DataSource::RegisterDefaultSniffers();
-
     android::sp<ALooper> looper = new ALooper;
     looper->start();
 
diff --git a/cmds/stagefright/muxer.cpp b/cmds/stagefright/muxer.cpp
index 0a3bdf3..4a83a4a 100644
--- a/cmds/stagefright/muxer.cpp
+++ b/cmds/stagefright/muxer.cpp
@@ -29,7 +29,6 @@
 #include <media/stagefright/foundation/ALooper.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/foundation/AString.h>
-#include <media/stagefright/DataSource.h>
 #include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaMuxer.h>
@@ -319,9 +318,6 @@
     }
     ProcessState::self()->startThreadPool();
 
-    // Make sure setDataSource() works.
-    DataSource::RegisterDefaultSniffers();
-
     sp<ALooper> looper = new ALooper;
     looper->start();
 
diff --git a/cmds/stagefright/record.cpp b/cmds/stagefright/record.cpp
index 9aa0156..94c2e96 100644
--- a/cmds/stagefright/record.cpp
+++ b/cmds/stagefright/record.cpp
@@ -170,8 +170,6 @@
 int main(int argc, char **argv) {
     android::ProcessState::self()->startThreadPool();
 
-    DataSource::RegisterDefaultSniffers();
-
 #if 1
     if (argc != 3) {
         fprintf(stderr, "usage: %s <filename> <input_color_format>\n", argv[0]);
diff --git a/cmds/stagefright/sf2.cpp b/cmds/stagefright/sf2.cpp
deleted file mode 100644
index 1a4bf08..0000000
--- a/cmds/stagefright/sf2.cpp
+++ /dev/null
@@ -1,682 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "sf2"
-#include <inttypes.h>
-#include <utils/Log.h>
-
-#include <signal.h>
-
-#include <binder/ProcessState.h>
-
-#include <media/IMediaHTTPService.h>
-
-#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/ALooper.h>
-#include <media/stagefright/foundation/AMessage.h>
-
-#include <media/stagefright/ACodec.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
-
-#include <gui/SurfaceComposerClient.h>
-#include <gui/Surface.h>
-
-#include "include/ESDS.h"
-
-using namespace android;
-
-volatile static bool ctrlc = false;
-
-static sighandler_t oldhandler = NULL;
-
-static void mysighandler(int signum) {
-    if (signum == SIGINT) {
-        ctrlc = true;
-        return;
-    }
-    oldhandler(signum);
-}
-
-struct Controller : public AHandler {
-    Controller(const char *uri, bool decodeAudio,
-               const sp<Surface> &surface, bool renderToSurface)
-        : mURI(uri),
-          mDecodeAudio(decodeAudio),
-          mSurface(surface),
-          mRenderToSurface(renderToSurface),
-          mCodec(new ACodec),
-          mIsVorbis(false) {
-        CHECK(!mDecodeAudio || mSurface == NULL);
-    }
-
-    void startAsync() {
-        (new AMessage(kWhatStart, this))->post();
-    }
-
-protected:
-    virtual ~Controller() {
-    }
-
-    virtual void printStatistics() {
-        int64_t delayUs = ALooper::GetNowUs() - mStartTimeUs;
-
-        if (mDecodeAudio) {
-            printf("%" PRId64 " bytes received. %.2f KB/sec\n",
-            mTotalBytesReceived,
-            mTotalBytesReceived * 1E6 / 1024 / delayUs);
-        } else {
-            printf("%d frames decoded, %.2f fps. %" PRId64 " bytes "
-                    "received. %.2f KB/sec\n",
-            mNumOutputBuffersReceived,
-            mNumOutputBuffersReceived * 1E6 / delayUs,
-            mTotalBytesReceived,
-            mTotalBytesReceived * 1E6 / 1024 / delayUs);
-        }
-    }
-
-    virtual void onMessageReceived(const sp<AMessage> &msg) {
-        if (ctrlc) {
-            printf("\n");
-            printStatistics();
-            (new AMessage(kWhatStop, this))->post();
-            ctrlc = false;
-        }
-        switch (msg->what()) {
-            case kWhatStart:
-            {
-#if 1
-                mDecodeLooper = looper();
-#else
-                mDecodeLooper = new ALooper;
-                mDecodeLooper->setName("sf2 decode looper");
-                mDecodeLooper->start();
-#endif
-
-                sp<DataSource> dataSource =
-                    DataSource::CreateFromURI(
-                            NULL /* httpService */, mURI.c_str());
-
-                sp<IMediaExtractor> extractor =
-                    MediaExtractor::Create(dataSource);
-
-                for (size_t i = 0; i < extractor->countTracks(); ++i) {
-                    sp<MetaData> meta = extractor->getTrackMetaData(i);
-
-                    const char *mime;
-                    CHECK(meta->findCString(kKeyMIMEType, &mime));
-
-                    if (!strncasecmp(mDecodeAudio ? "audio/" : "video/",
-                                     mime, 6)) {
-                        mSource = extractor->getTrack(i);
-
-                        if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS)) {
-                            mIsVorbis = true;
-                        } else {
-                            mIsVorbis = false;
-                        }
-                        break;
-                    }
-                }
-                if (mSource == NULL) {
-                    printf("no %s track found\n", mDecodeAudio ? "audio" : "video");
-                    exit (1);
-                }
-
-                CHECK_EQ(mSource->start(), (status_t)OK);
-
-                mDecodeLooper->registerHandler(mCodec);
-
-                mCodec->setNotificationMessage(
-                        new AMessage(kWhatCodecNotify, this));
-
-                sp<AMessage> format = makeFormat(mSource->getFormat());
-
-                if (mSurface != NULL) {
-                    format->setObject("surface", mSurface);
-                }
-
-                mCodec->initiateSetup(format);
-
-                mCSDIndex = 0;
-                mStartTimeUs = ALooper::GetNowUs();
-                mNumOutputBuffersReceived = 0;
-                mTotalBytesReceived = 0;
-                mLeftOverBuffer = NULL;
-                mFinalResult = OK;
-                mSeekState = SEEK_NONE;
-
-                // (new AMessage(kWhatSeek, this))->post(5000000ll);
-                break;
-            }
-
-            case kWhatSeek:
-            {
-                printf("+");
-                fflush(stdout);
-
-                CHECK(mSeekState == SEEK_NONE
-                        || mSeekState == SEEK_FLUSH_COMPLETED);
-
-                if (mLeftOverBuffer != NULL) {
-                    mLeftOverBuffer->release();
-                    mLeftOverBuffer = NULL;
-                }
-
-                mSeekState = SEEK_FLUSHING;
-                mSeekTimeUs = 30000000ll;
-
-                mCodec->signalFlush();
-                break;
-            }
-
-            case kWhatStop:
-            {
-                if (mLeftOverBuffer != NULL) {
-                    mLeftOverBuffer->release();
-                    mLeftOverBuffer = NULL;
-                }
-
-                CHECK_EQ(mSource->stop(), (status_t)OK);
-                mSource.clear();
-
-                mCodec->initiateShutdown();
-                break;
-            }
-
-            case kWhatCodecNotify:
-            {
-                int32_t what;
-                CHECK(msg->findInt32("what", &what));
-
-                if (what == CodecBase::kWhatFillThisBuffer) {
-                    onFillThisBuffer(msg);
-                } else if (what == CodecBase::kWhatDrainThisBuffer) {
-                    if ((mNumOutputBuffersReceived++ % 16) == 0) {
-                        printf(".");
-                        fflush(stdout);
-                    }
-
-                    onDrainThisBuffer(msg);
-                } else if (what == CodecBase::kWhatEOS
-                        || what == CodecBase::kWhatError) {
-                    printf((what == CodecBase::kWhatEOS) ? "$\n" : "E\n");
-
-                    printStatistics();
-                    (new AMessage(kWhatStop, this))->post();
-                } else if (what == CodecBase::kWhatFlushCompleted) {
-                    mSeekState = SEEK_FLUSH_COMPLETED;
-                    mCodec->signalResume();
-
-                    (new AMessage(kWhatSeek, this))->post(5000000ll);
-                } else if (what == CodecBase::kWhatOutputFormatChanged) {
-                } else if (what == CodecBase::kWhatShutdownCompleted) {
-                    mDecodeLooper->unregisterHandler(mCodec->id());
-
-                    if (mDecodeLooper != looper()) {
-                        mDecodeLooper->stop();
-                    }
-
-                    looper()->stop();
-                }
-                break;
-            }
-
-            default:
-                TRESPASS();
-                break;
-        }
-    }
-
-private:
-    enum {
-        kWhatStart             = 'strt',
-        kWhatStop              = 'stop',
-        kWhatCodecNotify       = 'noti',
-        kWhatSeek              = 'seek',
-    };
-
-    sp<ALooper> mDecodeLooper;
-
-    AString mURI;
-    bool mDecodeAudio;
-    sp<Surface> mSurface;
-    bool mRenderToSurface;
-    sp<ACodec> mCodec;
-    sp<IMediaSource> mSource;
-    bool mIsVorbis;
-
-    Vector<sp<ABuffer> > mCSD;
-    size_t mCSDIndex;
-
-    MediaBuffer *mLeftOverBuffer;
-    status_t mFinalResult;
-
-    int64_t mStartTimeUs;
-    int32_t mNumOutputBuffersReceived;
-    int64_t mTotalBytesReceived;
-
-    enum SeekState {
-        SEEK_NONE,
-        SEEK_FLUSHING,
-        SEEK_FLUSH_COMPLETED,
-    };
-    SeekState mSeekState;
-    int64_t mSeekTimeUs;
-
-    sp<AMessage> makeFormat(const sp<MetaData> &meta) {
-        CHECK(mCSD.isEmpty());
-
-        const char *mime;
-        CHECK(meta->findCString(kKeyMIMEType, &mime));
-
-        sp<AMessage> msg = new AMessage;
-        msg->setString("mime", mime);
-
-        if (!strncasecmp("video/", mime, 6)) {
-            int32_t width, height;
-            CHECK(meta->findInt32(kKeyWidth, &width));
-            CHECK(meta->findInt32(kKeyHeight, &height));
-
-            msg->setInt32("width", width);
-            msg->setInt32("height", height);
-        } else {
-            CHECK(!strncasecmp("audio/", mime, 6));
-
-            int32_t numChannels, sampleRate;
-            CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
-            CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
-
-            msg->setInt32("channel-count", numChannels);
-            msg->setInt32("sample-rate", sampleRate);
-
-            int32_t isADTS;
-            if (meta->findInt32(kKeyIsADTS, &isADTS) && isADTS != 0) {
-                msg->setInt32("is-adts", true);
-            }
-        }
-
-        uint32_t type;
-        const void *data;
-        size_t size;
-        if (meta->findData(kKeyAVCC, &type, &data, &size)) {
-            // Parse the AVCDecoderConfigurationRecord
-
-            const uint8_t *ptr = (const uint8_t *)data;
-
-            CHECK(size >= 7);
-            CHECK_EQ((unsigned)ptr[0], 1u);  // configurationVersion == 1
-            uint8_t profile __unused = ptr[1];
-            uint8_t level __unused = ptr[3];
-
-            // There is decodable content out there that fails the following
-            // assertion, let's be lenient for now...
-            // CHECK((ptr[4] >> 2) == 0x3f);  // reserved
-
-            size_t lengthSize __unused = 1 + (ptr[4] & 3);
-
-            // commented out check below as H264_QVGA_500_NO_AUDIO.3gp
-            // violates it...
-            // CHECK((ptr[5] >> 5) == 7);  // reserved
-
-            size_t numSeqParameterSets = ptr[5] & 31;
-
-            ptr += 6;
-            size -= 6;
-
-            sp<ABuffer> buffer = new ABuffer(1024);
-            buffer->setRange(0, 0);
-
-            for (size_t i = 0; i < numSeqParameterSets; ++i) {
-                CHECK(size >= 2);
-                size_t length = U16_AT(ptr);
-
-                ptr += 2;
-                size -= 2;
-
-                CHECK(size >= length);
-
-                memcpy(buffer->data() + buffer->size(), "\x00\x00\x00\x01", 4);
-                memcpy(buffer->data() + buffer->size() + 4, ptr, length);
-                buffer->setRange(0, buffer->size() + 4 + length);
-
-                ptr += length;
-                size -= length;
-            }
-
-            buffer->meta()->setInt32("csd", true);
-            mCSD.push(buffer);
-
-            buffer = new ABuffer(1024);
-            buffer->setRange(0, 0);
-
-            CHECK(size >= 1);
-            size_t numPictureParameterSets = *ptr;
-            ++ptr;
-            --size;
-
-            for (size_t i = 0; i < numPictureParameterSets; ++i) {
-                CHECK(size >= 2);
-                size_t length = U16_AT(ptr);
-
-                ptr += 2;
-                size -= 2;
-
-                CHECK(size >= length);
-
-                memcpy(buffer->data() + buffer->size(), "\x00\x00\x00\x01", 4);
-                memcpy(buffer->data() + buffer->size() + 4, ptr, length);
-                buffer->setRange(0, buffer->size() + 4 + length);
-
-                ptr += length;
-                size -= length;
-            }
-
-            buffer->meta()->setInt32("csd", true);
-            mCSD.push(buffer);
-
-            msg->setBuffer("csd", buffer);
-        } else if (meta->findData(kKeyESDS, &type, &data, &size)) {
-            ESDS esds((const char *)data, size);
-            CHECK_EQ(esds.InitCheck(), (status_t)OK);
-
-            const void *codec_specific_data;
-            size_t codec_specific_data_size;
-            esds.getCodecSpecificInfo(
-                    &codec_specific_data, &codec_specific_data_size);
-
-            sp<ABuffer> buffer = new ABuffer(codec_specific_data_size);
-
-            memcpy(buffer->data(), codec_specific_data,
-                   codec_specific_data_size);
-
-            buffer->meta()->setInt32("csd", true);
-            mCSD.push(buffer);
-        } else if (meta->findData(kKeyVorbisInfo, &type, &data, &size)) {
-            sp<ABuffer> buffer = new ABuffer(size);
-            memcpy(buffer->data(), data, size);
-
-            buffer->meta()->setInt32("csd", true);
-            mCSD.push(buffer);
-
-            CHECK(meta->findData(kKeyVorbisBooks, &type, &data, &size));
-
-            buffer = new ABuffer(size);
-            memcpy(buffer->data(), data, size);
-
-            buffer->meta()->setInt32("csd", true);
-            mCSD.push(buffer);
-        }
-
-        int32_t maxInputSize;
-        if (meta->findInt32(kKeyMaxInputSize, &maxInputSize)) {
-            msg->setInt32("max-input-size", maxInputSize);
-        }
-
-        return msg;
-    }
-
-    void onFillThisBuffer(const sp<AMessage> &msg) {
-        sp<AMessage> reply;
-        CHECK(msg->findMessage("reply", &reply));
-
-        if (mSource == NULL || mSeekState == SEEK_FLUSHING) {
-            reply->setInt32("err", ERROR_END_OF_STREAM);
-            reply->post();
-            return;
-        }
-
-        sp<ABuffer> outBuffer;
-        CHECK(msg->findBuffer("buffer", &outBuffer));
-
-        if (mCSDIndex < mCSD.size()) {
-            outBuffer = mCSD.editItemAt(mCSDIndex++);
-            outBuffer->meta()->setInt64("timeUs", 0);
-        } else {
-            size_t sizeLeft = outBuffer->capacity();
-            outBuffer->setRange(0, 0);
-
-            int32_t n = 0;
-
-            for (;;) {
-                MediaBuffer *inBuffer;
-
-                if (mLeftOverBuffer != NULL) {
-                    inBuffer = mLeftOverBuffer;
-                    mLeftOverBuffer = NULL;
-                } else if (mFinalResult != OK) {
-                    break;
-                } else {
-                    MediaSource::ReadOptions options;
-                    if (mSeekState == SEEK_FLUSH_COMPLETED) {
-                        options.setSeekTo(mSeekTimeUs);
-                        mSeekState = SEEK_NONE;
-                    }
-                    status_t err = mSource->read(&inBuffer, &options);
-
-                    if (err != OK) {
-                        mFinalResult = err;
-                        break;
-                    }
-                }
-
-                size_t sizeNeeded = inBuffer->range_length();
-                if (mIsVorbis) {
-                    // Vorbis data is suffixed with the number of
-                    // valid samples on the page.
-                    sizeNeeded += sizeof(int32_t);
-                }
-
-                if (sizeNeeded > sizeLeft) {
-                    if (outBuffer->size() == 0) {
-                        ALOGE("Unable to fit even a single input buffer of size %zu.",
-                             sizeNeeded);
-                    }
-                    CHECK_GT(outBuffer->size(), 0u);
-
-                    mLeftOverBuffer = inBuffer;
-                    break;
-                }
-
-                ++n;
-
-                if (outBuffer->size() == 0) {
-                    int64_t timeUs;
-                    CHECK(inBuffer->meta_data()->findInt64(kKeyTime, &timeUs));
-
-                    outBuffer->meta()->setInt64("timeUs", timeUs);
-                }
-
-                memcpy(outBuffer->data() + outBuffer->size(),
-                       (const uint8_t *)inBuffer->data()
-                        + inBuffer->range_offset(),
-                       inBuffer->range_length());
-
-                if (mIsVorbis) {
-                    int32_t numPageSamples;
-                    if (!inBuffer->meta_data()->findInt32(
-                                kKeyValidSamples, &numPageSamples)) {
-                        numPageSamples = -1;
-                    }
-
-                    memcpy(outBuffer->data()
-                            + outBuffer->size() + inBuffer->range_length(),
-                           &numPageSamples, sizeof(numPageSamples));
-                }
-
-                outBuffer->setRange(
-                        0, outBuffer->size() + sizeNeeded);
-
-                sizeLeft -= sizeNeeded;
-
-                inBuffer->release();
-                inBuffer = NULL;
-
-                break;  // Don't coalesce
-            }
-
-            ALOGV("coalesced %d input buffers", n);
-
-            if (outBuffer->size() == 0) {
-                CHECK_NE(mFinalResult, (status_t)OK);
-
-                reply->setInt32("err", mFinalResult);
-                reply->post();
-                return;
-            }
-        }
-
-        reply->setBuffer("buffer", outBuffer);
-        reply->post();
-    }
-
-    void onDrainThisBuffer(const sp<AMessage> &msg) {
-        sp<ABuffer> buffer;
-        CHECK(msg->findBuffer("buffer", &buffer));
-
-        mTotalBytesReceived += buffer->size();
-
-        sp<AMessage> reply;
-        CHECK(msg->findMessage("reply", &reply));
-
-        if (mRenderToSurface) {
-            reply->setInt32("render", 1);
-        }
-
-        reply->post();
-    }
-
-    DISALLOW_EVIL_CONSTRUCTORS(Controller);
-};
-
-static void usage(const char *me) {
-    fprintf(stderr, "usage: %s\n", me);
-    fprintf(stderr, "       -h(elp)\n");
-    fprintf(stderr, "       -a(udio)\n");
-
-    fprintf(stderr,
-            "       -S(urface) Allocate output buffers on a surface.\n"
-            "       -R(ender)  Render surface-allocated buffers.\n");
-}
-
-int main(int argc, char **argv) {
-    android::ProcessState::self()->startThreadPool();
-
-    bool decodeAudio = false;
-    bool useSurface = false;
-    bool renderToSurface = false;
-
-    int res;
-    while ((res = getopt(argc, argv, "haSR")) >= 0) {
-        switch (res) {
-            case 'a':
-                decodeAudio = true;
-                break;
-
-            case 'S':
-                useSurface = true;
-                break;
-
-            case 'R':
-                renderToSurface = true;
-                break;
-
-            case '?':
-            case 'h':
-            default:
-            {
-                usage(argv[0]);
-                return 1;
-            }
-        }
-    }
-
-    argc -= optind;
-    argv += optind;
-
-    if (argc != 1) {
-        usage(argv[-optind]);
-        return 1;
-    }
-
-    DataSource::RegisterDefaultSniffers();
-
-    sp<ALooper> looper = new ALooper;
-    looper->setName("sf2");
-
-    sp<SurfaceComposerClient> composerClient;
-    sp<SurfaceControl> control;
-    sp<Surface> surface;
-
-    if (!decodeAudio && useSurface) {
-        composerClient = new SurfaceComposerClient;
-        CHECK_EQ(composerClient->initCheck(), (status_t)OK);
-
-        control = composerClient->createSurface(
-                String8("A Surface"),
-                1280,
-                800,
-                PIXEL_FORMAT_RGB_565,
-                0);
-
-        CHECK(control != NULL);
-        CHECK(control->isValid());
-
-        SurfaceComposerClient::openGlobalTransaction();
-        CHECK_EQ(control->setLayer(INT_MAX), (status_t)OK);
-        CHECK_EQ(control->show(), (status_t)OK);
-        SurfaceComposerClient::closeGlobalTransaction();
-
-        surface = control->getSurface();
-        CHECK(surface != NULL);
-
-        CHECK_EQ((status_t)OK,
-                 native_window_api_connect(
-                     surface.get(), NATIVE_WINDOW_API_MEDIA));
-    }
-
-    sp<Controller> controller =
-        new Controller(argv[0], decodeAudio, surface, renderToSurface);
-
-    looper->registerHandler(controller);
-
-    signal(SIGINT, mysighandler);
-
-    controller->startAsync();
-
-    CHECK_EQ(looper->start(true /* runOnCallingThread */), (status_t)OK);
-
-    looper->unregisterHandler(controller->id());
-
-    if (!decodeAudio && useSurface) {
-        CHECK_EQ((status_t)OK,
-                 native_window_api_disconnect(
-                     surface.get(), NATIVE_WINDOW_API_MEDIA));
-
-        composerClient->dispose();
-    }
-
-    return 0;
-}
-
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index 2bb35cb..5e3a859 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -965,8 +965,6 @@
         }
     }
 
-    DataSource::RegisterDefaultSniffers();
-
     status_t err = OK;
 
     for (int k = 0; k < argc && err == OK; ++k) {
diff --git a/cmds/stagefright/stream.cpp b/cmds/stagefright/stream.cpp
index 16ff39d..8f9333a 100644
--- a/cmds/stagefright/stream.cpp
+++ b/cmds/stagefright/stream.cpp
@@ -301,8 +301,6 @@
 int main(int argc, char **argv) {
     android::ProcessState::self()->startThreadPool();
 
-    DataSource::RegisterDefaultSniffers();
-
     if (argc != 2) {
         fprintf(stderr, "Usage: %s filename\n", argv[0]);
         return 1;
@@ -349,9 +347,7 @@
 
     sp<IStreamSource> source;
 
-    char prop[PROPERTY_VALUE_MAX];
-    bool usemp4 = property_get("media.stagefright.use-mp4source", prop, NULL) &&
-            (!strcmp(prop, "1") || !strcasecmp(prop, "true"));
+    bool usemp4 = property_get_bool("media.stagefright.use-mp4source", false);
 
     size_t len = strlen(argv[1]);
     if ((!usemp4 && len >= 3 && !strcasecmp(".ts", &argv[1][len - 3])) ||
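The stream.cpp hunk above collapses the manual property_get()/strcmp check into a single property_get_bool() call. A tiny sketch of the simplified check, assuming libcutils is on the include path:

    #include <cutils/properties.h>

    // property_get_bool() treats "1" and "true" as true, so the previous
    // strcmp/strcasecmp handling of the raw property string is no longer needed.
    static bool useMp4Source() {
        return property_get_bool("media.stagefright.use-mp4source", false /* default */);
    }
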
diff --git a/drm/libdrmframework/plugins/forward-lock/internal-format/doc/FwdLock.html b/drm/libdrmframework/plugins/forward-lock/internal-format/doc/FwdLock.html
old mode 100755
new mode 100644
diff --git a/drm/libmediadrm/Android.mk b/drm/libmediadrm/Android.mk
index 3be1d60..3f0e663 100644
--- a/drm/libmediadrm/Android.mk
+++ b/drm/libmediadrm/Android.mk
@@ -7,22 +7,25 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:= \
-	Crypto.cpp \
-	Drm.cpp \
-	DrmSessionManager.cpp \
-	SharedLibrary.cpp
+    Crypto.cpp \
+    Drm.cpp \
+    DrmSessionManager.cpp \
+    ICrypto.cpp \
+    IDrm.cpp \
+    IDrmClient.cpp \
+    IMediaDrmService.cpp \
+    SharedLibrary.cpp
 
 LOCAL_SHARED_LIBRARIES := \
-	libbinder \
-	libcutils \
-	libdl \
-	liblog \
-	libmedia \
-	libstagefright \
-	libutils
+    libbinder \
+    libcutils \
+    libdl \
+    liblog \
+    libmediautils \
+    libstagefright_foundation \
+    libutils
 
 LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
-LOCAL_CLANG := true
 
 LOCAL_MODULE:= libmediadrm
 
diff --git a/drm/libmediadrm/Drm.cpp b/drm/libmediadrm/Drm.cpp
index 9ab08db..07e9414 100644
--- a/drm/libmediadrm/Drm.cpp
+++ b/drm/libmediadrm/Drm.cpp
@@ -334,6 +334,7 @@
         return -EINVAL;
     }
 
+    setListener(NULL);
     delete mPlugin;
     mPlugin = NULL;
 
diff --git a/media/libmedia/ICrypto.cpp b/drm/libmediadrm/ICrypto.cpp
similarity index 98%
rename from media/libmedia/ICrypto.cpp
rename to drm/libmediadrm/ICrypto.cpp
index 7b261be..8ba80c6 100644
--- a/media/libmedia/ICrypto.cpp
+++ b/drm/libmediadrm/ICrypto.cpp
@@ -302,6 +302,10 @@
             int32_t offset = data.readInt32();
 
             int32_t numSubSamples = data.readInt32();
+            if (numSubSamples < 0 || numSubSamples > 0xffff) {
+                reply->writeInt32(BAD_VALUE);
+                return OK;
+            }
 
             CryptoPlugin::SubSample *subSamples =
                 new CryptoPlugin::SubSample[numSubSamples];
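The ICrypto hunk above range-checks numSubSamples before the SubSample array is allocated, because the count arrives in an untrusted Parcel. A condensed sketch of the same defensive pattern; the helper and the kMaxSubSamples name are illustrative, with the 0xffff ceiling taken from the hunk:

    #include <binder/Parcel.h>
    #include <utils/Errors.h>

    using namespace android;

    static status_t readSubSampleCount(const Parcel &data, int32_t *outCount) {
        const int32_t kMaxSubSamples = 0xffff;   // same ceiling as in the hunk above
        int32_t numSubSamples = data.readInt32();
        if (numSubSamples < 0 || numSubSamples > kMaxSubSamples) {
            // Reject before new[] is asked for an attacker-chosen size.
            return BAD_VALUE;
        }
        *outCount = numSubSamples;
        return OK;
    }
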
diff --git a/media/libmedia/IDrm.cpp b/drm/libmediadrm/IDrm.cpp
similarity index 100%
rename from media/libmedia/IDrm.cpp
rename to drm/libmediadrm/IDrm.cpp
diff --git a/media/libmedia/IDrmClient.cpp b/drm/libmediadrm/IDrmClient.cpp
similarity index 100%
rename from media/libmedia/IDrmClient.cpp
rename to drm/libmediadrm/IDrmClient.cpp
diff --git a/media/libmedia/IMediaDrmService.cpp b/drm/libmediadrm/IMediaDrmService.cpp
similarity index 97%
rename from media/libmedia/IMediaDrmService.cpp
rename to drm/libmediadrm/IMediaDrmService.cpp
index 9b6ecfd..84812dc 100644
--- a/media/libmedia/IMediaDrmService.cpp
+++ b/drm/libmediadrm/IMediaDrmService.cpp
@@ -37,7 +37,7 @@
 class BpMediaDrmService: public BpInterface<IMediaDrmService>
 {
 public:
-    BpMediaDrmService(const sp<IBinder>& impl)
+    explicit BpMediaDrmService(const sp<IBinder>& impl)
         : BpInterface<IMediaDrmService>(impl)
     {
     }
diff --git a/include/camera/CameraBase.h b/include/camera/CameraBase.h
index 0692a27..03fbdfd 100644
--- a/include/camera/CameraBase.h
+++ b/include/camera/CameraBase.h
@@ -17,7 +17,10 @@
 #ifndef ANDROID_HARDWARE_CAMERA_BASE_H
 #define ANDROID_HARDWARE_CAMERA_BASE_H
 
+#include <android/hardware/ICameraServiceListener.h>
+
 #include <utils/Mutex.h>
+#include <binder/BinderService.h>
 
 struct camera_frame_metadata;
 
@@ -29,6 +32,13 @@
 class ICameraService;
 class ICameraServiceListener;
 
+enum {
+    /** The facing of the camera is opposite to that of the screen. */
+    CAMERA_FACING_BACK = 0,
+    /** The facing of the camera is the same as that of the screen. */
+    CAMERA_FACING_FRONT = 1,
+};
+
 struct CameraInfo : public android::Parcelable {
     /**
      * The direction that the camera faces to. It should be CAMERA_FACING_BACK
@@ -50,11 +60,33 @@
      */
     int orientation;
 
-    virtual status_t writeToParcel(Parcel* parcel) const;
-    virtual status_t readFromParcel(const Parcel* parcel);
+    virtual status_t writeToParcel(android::Parcel* parcel) const;
+    virtual status_t readFromParcel(const android::Parcel* parcel);
 
 };
 
+/**
+ * Basic status information about a camera device - its name and its current
+ * state.
+ */
+struct CameraStatus : public android::Parcelable {
+    /**
+     * The name of the camera device
+     */
+    String8 cameraId;
+
+    /**
+     * Its current status, one of the ICameraService::STATUS_* fields
+     */
+    int32_t status;
+
+    virtual status_t writeToParcel(android::Parcel* parcel) const;
+    virtual status_t readFromParcel(const android::Parcel* parcel);
+
+    CameraStatus(String8 id, int32_t s) : cameraId(id), status(s) {}
+    CameraStatus() : status(ICameraServiceListener::STATUS_PRESENT) {}
+};
+
 } // namespace hardware
 
 using hardware::CameraInfo;
@@ -86,12 +118,6 @@
                                        /*out*/
                                        struct hardware::CameraInfo* cameraInfo);
 
-    static status_t      addServiceListener(
-        const sp<::android::hardware::ICameraServiceListener>& listener);
-
-    static status_t      removeServiceListener(
-        const sp<::android::hardware::ICameraServiceListener>& listener);
-
     sp<TCamUser>         remote();
 
     // Status is set to 'UNKNOWN_ERROR' after successful (re)connection
diff --git a/include/camera/CaptureResult.h b/include/camera/CaptureResult.h
index 45e4518..917d953 100644
--- a/include/camera/CaptureResult.h
+++ b/include/camera/CaptureResult.h
@@ -88,8 +88,8 @@
      */
     bool isValid();
 
-    virtual status_t                readFromParcel(const Parcel* parcel) override;
-    virtual status_t                writeToParcel(Parcel* parcel) const override;
+    virtual status_t                readFromParcel(const android::Parcel* parcel) override;
+    virtual status_t                writeToParcel(android::Parcel* parcel) const override;
 };
 } // namespace impl
 } // namespace camera2
@@ -105,8 +105,8 @@
 
     CaptureResult(const CaptureResult& otherResult);
 
-    status_t                readFromParcel(Parcel* parcel);
-    status_t                writeToParcel(Parcel* parcel) const;
+    status_t                readFromParcel(android::Parcel* parcel);
+    status_t                writeToParcel(android::Parcel* parcel) const;
 };
 
 }
diff --git a/include/camera/VendorTagDescriptor.h b/include/camera/VendorTagDescriptor.h
index bfc8c96..adfc8c7 100644
--- a/include/camera/VendorTagDescriptor.h
+++ b/include/camera/VendorTagDescriptor.h
@@ -76,7 +76,7 @@
          */
         virtual status_t writeToParcel(
                 /*out*/
-                Parcel* parcel) const override;
+                android::Parcel* parcel) const override;
 
         /**
          * Convenience method to get a vector containing all vendor tag
@@ -103,7 +103,7 @@
          *
          * Returns OK on success, or a negative error code.
          */
-        virtual status_t readFromParcel(const Parcel* parcel) override;
+        virtual status_t readFromParcel(const android::Parcel* parcel) override;
 
     protected:
         KeyedVector<String8, KeyedVector<String8, uint32_t>*> mReverseMapping;
diff --git a/include/camera/android/hardware/ICamera.h b/include/camera/android/hardware/ICamera.h
index 3b12afe..315669e 100644
--- a/include/camera/android/hardware/ICamera.h
+++ b/include/camera/android/hardware/ICamera.h
@@ -33,7 +33,7 @@
 
 class ICameraClient;
 
-class ICamera: public IInterface
+class ICamera: public android::IInterface
 {
     /**
      * Keep up-to-date with ICamera.aidl in frameworks/base
@@ -139,7 +139,7 @@
 
 // ----------------------------------------------------------------------------
 
-class BnCamera: public BnInterface<ICamera>
+class BnCamera: public android::BnInterface<ICamera>
 {
 public:
     virtual status_t    onTransact( uint32_t code,
diff --git a/include/camera/android/hardware/ICameraClient.h b/include/camera/android/hardware/ICameraClient.h
index 3f835a9..f6ee311 100644
--- a/include/camera/android/hardware/ICameraClient.h
+++ b/include/camera/android/hardware/ICameraClient.h
@@ -27,7 +27,7 @@
 namespace android {
 namespace hardware {
 
-class ICameraClient: public IInterface
+class ICameraClient: public android::IInterface
 {
 public:
     DECLARE_META_INTERFACE(CameraClient);
@@ -45,7 +45,7 @@
 
 // ----------------------------------------------------------------------------
 
-class BnCameraClient: public BnInterface<ICameraClient>
+class BnCameraClient: public android::BnInterface<ICameraClient>
 {
 public:
     virtual status_t    onTransact( uint32_t code,
diff --git a/include/camera/camera2/CaptureRequest.h b/include/camera/camera2/CaptureRequest.h
index c989f26..978f48d 100644
--- a/include/camera/camera2/CaptureRequest.h
+++ b/include/camera/camera2/CaptureRequest.h
@@ -37,8 +37,8 @@
     /**
      * Keep impl up-to-date with CaptureRequest.java in frameworks/base
      */
-    status_t                readFromParcel(const Parcel* parcel) override;
-    status_t                writeToParcel(Parcel* parcel) const override;
+    status_t                readFromParcel(const android::Parcel* parcel) override;
+    status_t                writeToParcel(android::Parcel* parcel) const override;
 };
 
 } // namespace camera2
diff --git a/include/camera/camera2/OutputConfiguration.h b/include/camera/camera2/OutputConfiguration.h
index cf8f3c6..2961e2a 100644
--- a/include/camera/camera2/OutputConfiguration.h
+++ b/include/camera/camera2/OutputConfiguration.h
@@ -38,7 +38,7 @@
         SURFACE_TYPE_SURFACE_VIEW = 0,
         SURFACE_TYPE_SURFACE_TEXTURE = 1
     };
-    sp<IGraphicBufferProducer> getGraphicBufferProducer() const;
+    const std::vector<sp<IGraphicBufferProducer>>& getGraphicBufferProducers() const;
     int                        getRotation() const;
     int                        getSurfaceSetID() const;
     int                        getSurfaceType() const;
@@ -47,9 +47,9 @@
     /**
      * Keep impl up-to-date with OutputConfiguration.java in frameworks/base
      */
-    virtual status_t           writeToParcel(Parcel* parcel) const override;
+    virtual status_t           writeToParcel(android::Parcel* parcel) const override;
 
-    virtual status_t           readFromParcel(const Parcel* parcel) override;
+    virtual status_t           readFromParcel(const android::Parcel* parcel) override;
 
     // getGraphicBufferProducer will be NULL
     // getRotation will be INVALID_ROTATION
@@ -59,25 +59,24 @@
     // getGraphicBufferProducer will be NULL if error occurred
     // getRotation will be INVALID_ROTATION if error occurred
     // getSurfaceSetID will be INVALID_SET_ID if error occurred
-    OutputConfiguration(const Parcel& parcel);
+    OutputConfiguration(const android::Parcel& parcel);
 
     OutputConfiguration(sp<IGraphicBufferProducer>& gbp, int rotation,
             int surfaceSetID = INVALID_SET_ID);
 
     bool operator == (const OutputConfiguration& other) const {
-        return (mGbp == other.mGbp &&
-                mRotation == other.mRotation &&
+        return ( mRotation == other.mRotation &&
                 mSurfaceSetID == other.mSurfaceSetID &&
                 mSurfaceType == other.mSurfaceType &&
                 mWidth == other.mWidth &&
-                mHeight == other.mHeight);
+                mHeight == other.mHeight &&
+                gbpsEqual(other));
     }
     bool operator != (const OutputConfiguration& other) const {
         return !(*this == other);
     }
     bool operator < (const OutputConfiguration& other) const {
         if (*this == other) return false;
-        if (mGbp != other.mGbp) return mGbp < other.mGbp;
         if (mSurfaceSetID != other.mSurfaceSetID) {
             return mSurfaceSetID < other.mSurfaceSetID;
         }
@@ -90,22 +89,27 @@
         if (mHeight != other.mHeight) {
             return mHeight < other.mHeight;
         }
+        if (mRotation != other.mRotation) {
+            return mRotation < other.mRotation;
+        }
 
-        return mRotation < other.mRotation;
+        return gbpsLessThan(other);
     }
     bool operator > (const OutputConfiguration& other) const {
         return (*this != other && !(*this < other));
     }
 
+    bool gbpsEqual(const OutputConfiguration& other) const;
+    bool gbpsLessThan(const OutputConfiguration& other) const;
 private:
-    sp<IGraphicBufferProducer> mGbp;
+    std::vector<sp<IGraphicBufferProducer>> mGbps;
     int                        mRotation;
     int                        mSurfaceSetID;
     int                        mSurfaceType;
     int                        mWidth;
     int                        mHeight;
     // helper function
-    static String16 readMaybeEmptyString16(const Parcel* parcel);
+    static String16 readMaybeEmptyString16(const android::Parcel* parcel);
 };
 } // namespace params
 } // namespace camera2
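The OutputConfiguration change above replaces the single mGbp member with a vector and defers equality/ordering to the new gbpsEqual()/gbpsLessThan() helpers, whose bodies are not part of this hunk. One plausible element-wise implementation, sketched here only to show the intent (the real code lives in OutputConfiguration.cpp):

    #include <vector>
    #include <gui/IGraphicBufferProducer.h>
    #include <utils/StrongPointer.h>

    using android::sp;
    using android::IGraphicBufferProducer;

    typedef std::vector<sp<IGraphicBufferProducer>> GbpVector;

    // Equal only if both configurations reference the same producers, in order.
    static bool gbpsEqualSketch(const GbpVector &a, const GbpVector &b) {
        if (a.size() != b.size()) return false;
        for (size_t i = 0; i < a.size(); i++) {
            if (a[i] != b[i]) return false;      // sp<> compares the wrapped pointers
        }
        return true;
    }

    // Order by size first, then by the first differing producer, which mirrors
    // the old "mGbp < other.mGbp" tie-break.
    static bool gbpsLessThanSketch(const GbpVector &a, const GbpVector &b) {
        if (a.size() != b.size()) return a.size() < b.size();
        for (size_t i = 0; i < a.size(); i++) {
            if (a[i] != b[i]) return a[i] < b[i];
        }
        return false;
    }
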
diff --git a/include/camera/camera2/SubmitInfo.h b/include/camera/camera2/SubmitInfo.h
index 3b47b32..8f271c0 100644
--- a/include/camera/camera2/SubmitInfo.h
+++ b/include/camera/camera2/SubmitInfo.h
@@ -31,8 +31,8 @@
     int32_t mRequestId;
     int64_t mLastFrameNumber;
 
-    virtual status_t writeToParcel(Parcel *parcel) const override;
-    virtual status_t readFromParcel(const Parcel* parcel) override;
+    virtual status_t writeToParcel(android::Parcel *parcel) const override;
+    virtual status_t readFromParcel(const android::Parcel* parcel) override;
 
 };
 
diff --git a/include/drm/drm_framework_common.h b/include/drm/drm_framework_common.h
index 0750406..d75f71c 100644
--- a/include/drm/drm_framework_common.h
+++ b/include/drm/drm_framework_common.h
@@ -234,10 +234,6 @@
      * POSIX based Decrypt API set for container based DRM
      */
     static const int CONTAINER_BASED = 0x02;
-    /**
-     * Decrypt API for Widevine streams
-     */
-    static const int WV_BASED = 0x3;
 };
 
 /**
diff --git a/include/media/AudioEffect.h b/include/media/AudioEffect.h
index 7f6ccac..bfc068b 100644
--- a/include/media/AudioEffect.h
+++ b/include/media/AudioEffect.h
@@ -24,8 +24,8 @@
 #include <media/IAudioPolicyService.h>
 #include <media/IEffect.h>
 #include <media/IEffectClient.h>
-#include <hardware/audio_effect.h>
 #include <media/AudioSystem.h>
+#include <system/audio_effect.h>
 
 #include <utils/RefBase.h>
 #include <utils/Errors.h>
diff --git a/services/audioflinger/AudioMixer.h b/include/media/AudioMixer.h
similarity index 98%
rename from services/audioflinger/AudioMixer.h
rename to include/media/AudioMixer.h
index e788ac3..87ada76 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/include/media/AudioMixer.h
@@ -21,17 +21,15 @@
 #include <stdint.h>
 #include <sys/types.h>
 
-#include <hardware/audio_effect.h>
 #include <media/AudioBufferProvider.h>
+#include <media/AudioResampler.h>
 #include <media/AudioResamplerPublic.h>
+#include <media/BufferProviders.h>
 #include <media/nbaio/NBLog.h>
 #include <system/audio.h>
 #include <utils/Compat.h>
 #include <utils/threads.h>
 
-#include "AudioResampler.h"
-#include "BufferProviders.h"
-
 // FIXME This is actually unity gain, which might not be max in future, expressed in U.12
 #define MAX_GAIN_INT AudioMixer::UNITY_GAIN_INT
 
diff --git a/include/media/AudioParameter.h b/include/media/AudioParameter.h
index 891bc4b..1ace607 100644
--- a/include/media/AudioParameter.h
+++ b/include/media/AudioParameter.h
@@ -49,24 +49,55 @@
     static const char * const keyInputSource;
     static const char * const keyScreenState;
 
-    String8 toString();
+    //  keyBtNrec: BT SCO Noise Reduction + Echo Cancellation parameters
+    //  keyHwAvSync: get HW synchronization source identifier from a device
+    //  keyMonoOutput: Enable mono audio playback
+    //  keyStreamHwAvSync: set HW synchronization source identifier on a stream
+    static const char * const keyBtNrec;
+    static const char * const keyHwAvSync;
+    static const char * const keyMonoOutput;
+    static const char * const keyStreamHwAvSync;
+
+    //  keyStreamConnect / Disconnect: value is an int in audio_devices_t
+    static const char * const keyStreamConnect;
+    static const char * const keyStreamDisconnect;
+
+    // For querying stream capabilities. All the returned values are lists.
+    //   keyStreamSupportedFormats: audio_format_t
+    //   keyStreamSupportedChannels: audio_channel_mask_t
+    //   keyStreamSupportedSamplingRates: sampling rate values
+    static const char * const keyStreamSupportedFormats;
+    static const char * const keyStreamSupportedChannels;
+    static const char * const keyStreamSupportedSamplingRates;
+
+    static const char * const valueOn;
+    static const char * const valueOff;
+
+    static const char * const valueListSeparator;
+
+    String8 toString() const { return toStringImpl(true); }
+    String8 keysToString() const { return toStringImpl(false); }
 
     status_t add(const String8& key, const String8& value);
     status_t addInt(const String8& key, const int value);
+    status_t addKey(const String8& key);
     status_t addFloat(const String8& key, const float value);
 
     status_t remove(const String8& key);
 
-    status_t get(const String8& key, String8& value);
-    status_t getInt(const String8& key, int& value);
-    status_t getFloat(const String8& key, float& value);
-    status_t getAt(size_t index, String8& key, String8& value);
+    status_t get(const String8& key, String8& value) const;
+    status_t getInt(const String8& key, int& value) const;
+    status_t getFloat(const String8& key, float& value) const;
+    status_t getAt(size_t index, String8& key) const;
+    status_t getAt(size_t index, String8& key, String8& value) const;
 
-    size_t size() { return mParameters.size(); }
+    size_t size() const { return mParameters.size(); }
 
 private:
     String8 mKeyValuePairs;
     KeyedVector <String8, String8> mParameters;
+
+    String8 toStringImpl(bool useValues) const;
 };
 
 };  // namespace android
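The AudioParameter additions above introduce value-less keys (addKey()) and a keys-only serialization (keysToString()), which lets a caller ask a HAL stream about capabilities such as supported formats without supplying values. A small usage sketch, assuming the class's existing default constructor:

    #include <media/AudioParameter.h>

    using namespace android;

    static String8 buildCapabilityQuery() {
        AudioParameter param;
        // Keys only, no values: the HAL fills the values in its reply.
        param.addKey(String8(AudioParameter::keyStreamSupportedFormats));
        param.addKey(String8(AudioParameter::keyStreamSupportedSamplingRates));
        return param.keysToString();   // serializes just the keys
    }
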
diff --git a/include/media/AudioPolicyHelper.h b/include/media/AudioPolicyHelper.h
index 79231be..04f6a20 100644
--- a/include/media/AudioPolicyHelper.h
+++ b/include/media/AudioPolicyHelper.h
@@ -32,9 +32,11 @@
     switch (attr->usage) {
     case AUDIO_USAGE_MEDIA:
     case AUDIO_USAGE_GAME:
-    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
     case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
+    case AUDIO_USAGE_ASSISTANT:
         return AUDIO_STREAM_MUSIC;
+    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+        return AUDIO_STREAM_ACCESSIBILITY;
     case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
         return AUDIO_STREAM_SYSTEM;
     case AUDIO_USAGE_VOICE_COMMUNICATION:
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index 63076e9..1c8746f 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -180,7 +180,7 @@
                                     audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
                                     transfer_type transferType = TRANSFER_DEFAULT,
                                     audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
-                                    int uid = -1,
+                                    uid_t uid = AUDIO_UID_INVALID,
                                     pid_t pid = -1,
                                     const audio_attributes_t* pAttributes = NULL);
 
@@ -218,7 +218,7 @@
                             audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
                             transfer_type transferType = TRANSFER_DEFAULT,
                             audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
-                            int uid = -1,
+                            uid_t uid = AUDIO_UID_INVALID,
                             pid_t pid = -1,
                             const audio_attributes_t* pAttributes = NULL);
 
@@ -642,7 +642,7 @@
 
     sp<DeathNotifier>       mDeathNotifier;
     uint32_t                mSequence;              // incremented for each new IAudioRecord attempt
-    int                     mClientUid;
+    uid_t                   mClientUid;
     pid_t                   mClientPid;
     audio_attributes_t      mAttributes;
 
@@ -650,6 +650,8 @@
     //  a value of AUDIO_PORT_HANDLE_NONE indicated default (AudioPolicyManager) routing.
     audio_port_handle_t    mSelectedDeviceId;
     sp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
+    audio_port_handle_t    mPortId;  // unique ID allocated by audio policy
+
 };
 
 }; // namespace android
diff --git a/services/audioflinger/AudioResampler.h b/include/media/AudioResampler.h
similarity index 100%
rename from services/audioflinger/AudioResampler.h
rename to include/media/AudioResampler.h
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index d67ad44..4c64242 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -17,12 +17,12 @@
 #ifndef ANDROID_AUDIOSYSTEM_H_
 #define ANDROID_AUDIOSYSTEM_H_
 
-#include <hardware/audio_effect.h>
 #include <media/AudioPolicy.h>
 #include <media/AudioIoDescriptor.h>
 #include <media/IAudioFlingerClient.h>
 #include <media/IAudioPolicyServiceClient.h>
 #include <system/audio.h>
+#include <system/audio_effect.h>
 #include <system/audio_policy.h>
 #include <utils/Errors.h>
 #include <utils/Mutex.h>
@@ -220,12 +220,10 @@
                                      audio_session_t session,
                                      audio_stream_type_t *stream,
                                      uid_t uid,
-                                     uint32_t samplingRate = 0,
-                                     audio_format_t format = AUDIO_FORMAT_DEFAULT,
-                                     audio_channel_mask_t channelMask = AUDIO_CHANNEL_OUT_STEREO,
-                                     audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
-                                     audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
-                                     const audio_offload_info_t *offloadInfo = NULL);
+                                     const audio_config_t *config,
+                                     audio_output_flags_t flags,
+                                     audio_port_handle_t selectedDeviceId,
+                                     audio_port_handle_t *portId);
     static status_t startOutput(audio_io_handle_t output,
                                 audio_stream_type_t stream,
                                 audio_session_t session);
@@ -243,11 +241,10 @@
                                     audio_session_t session,
                                     pid_t pid,
                                     uid_t uid,
-                                    uint32_t samplingRate,
-                                    audio_format_t format,
-                                    audio_channel_mask_t channelMask,
+                                    const audio_config_base_t *config,
                                     audio_input_flags_t flags,
-                                    audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
+                                    audio_port_handle_t selectedDeviceId,
+                                    audio_port_handle_t *portId);
 
     static status_t startInput(audio_io_handle_t input,
                                audio_session_t session);
@@ -333,8 +330,8 @@
 
     static status_t startAudioSource(const struct audio_port_config *source,
                                       const audio_attributes_t *attributes,
-                                      audio_io_handle_t *handle);
-    static status_t stopAudioSource(audio_io_handle_t handle);
+                                      audio_patch_handle_t *handle);
+    static status_t stopAudioSource(audio_patch_handle_t handle);
 
     static status_t setMasterMono(bool mono);
     static status_t getMasterMono(bool *mono);
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 88c4e61..cd33a44 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -233,7 +233,7 @@
                                     audio_session_t sessionId  = AUDIO_SESSION_ALLOCATE,
                                     transfer_type transferType = TRANSFER_DEFAULT,
                                     const audio_offload_info_t *offloadInfo = NULL,
-                                    int uid = -1,
+                                    uid_t uid = AUDIO_UID_INVALID,
                                     pid_t pid = -1,
                                     const audio_attributes_t* pAttributes = NULL,
                                     bool doNotReconnect = false,
@@ -263,7 +263,7 @@
                                     audio_session_t sessionId   = AUDIO_SESSION_ALLOCATE,
                                     transfer_type transferType = TRANSFER_DEFAULT,
                                     const audio_offload_info_t *offloadInfo = NULL,
-                                    int uid = -1,
+                                    uid_t uid = AUDIO_UID_INVALID,
                                     pid_t pid = -1,
                                     const audio_attributes_t* pAttributes = NULL,
                                     bool doNotReconnect = false,
@@ -309,7 +309,7 @@
                             audio_session_t sessionId  = AUDIO_SESSION_ALLOCATE,
                             transfer_type transferType = TRANSFER_DEFAULT,
                             const audio_offload_info_t *offloadInfo = NULL,
-                            int uid = -1,
+                            uid_t uid = AUDIO_UID_INVALID,
                             pid_t pid = -1,
                             const audio_attributes_t* pAttributes = NULL,
                             bool doNotReconnect = false,
@@ -758,6 +758,9 @@
      * The timestamp parameter is undefined on return, if status is not NO_ERROR.
      */
             status_t    getTimestamp(AudioTimestamp& timestamp);
+private:
+            status_t    getTimestamp_l(AudioTimestamp& timestamp);
+public:
 
     /* Return the extended timestamp, with additional timebase info and improved drain behavior.
      *
@@ -840,6 +843,24 @@
             status_t pendingDuration(int32_t *msec,
                     ExtendedTimestamp::Location location = ExtendedTimestamp::LOCATION_SERVER);
 
+    /* hasStarted() is used to determine if audio is now audible at the device after
+     * a start() command. The underlying implementation checks a nonzero timestamp position
+     * or increment for the audible assumption.
+     *
+     * hasStarted() returns true if the track has been started() and audio is audible
+     * and no subsequent pause() or flush() has been called.  Immediately after pause() or
+     * flush() hasStarted() will return false.
+     *
+     * If stop() has been called, hasStarted() will return true if audio is still being
+     * delivered or has finished delivery (even if no audio was written) for both offloaded
+     * and normal tracks. This property removes a race condition in checking hasStarted()
+     * for very short clips, where stop() must be called to finish drain.
+     *
+     * In all cases, hasStarted() may turn false briefly after a subsequent start() is called
+     * until audio becomes audible again.
+     */
+            bool hasStarted(); // not const
+
 protected:
     /* copying audio tracks is not allowed */
                         AudioTrack(const AudioTrack& other);
@@ -1041,6 +1062,10 @@
                                                     // and could be easily widened to uint64_t
     int64_t                 mStartUs;               // the start time after flush or stop.
                                                     // only used for offloaded and direct tracks.
+    ExtendedTimestamp       mStartEts;              // Extended timestamp at start for normal
+                                                    // AudioTracks.
+    AudioTimestamp          mStartTs;               // Timestamp at start for offloaded or direct
+                                                    // AudioTracks.
 
     bool                    mPreviousTimestampValid;// true if mPreviousTimestamp is valid
     bool                    mTimestampStartupGlitchReported; // reduce log spam
@@ -1056,6 +1081,10 @@
                                                     // after flush.
     int64_t                 mFramesWrittenServerOffset; // An offset to server frames due to
                                                     // restoring AudioTrack, or stop/start.
+                                                    // This offset is also used for static tracks.
+    int64_t                 mFramesWrittenAtRestore; // Frames written at restore point (or frames
+                                                    // delivered for static tracks).
+                                                    // -1 indicates no previous restore point.
 
     audio_output_flags_t    mFlags;                 // same as mOrigFlags, except for bits that may
                                                     // be denied by client or server, such as
@@ -1101,10 +1130,11 @@
 
     sp<DeathNotifier>       mDeathNotifier;
     uint32_t                mSequence;              // incremented for each new IAudioTrack attempt
-    int                     mClientUid;
+    uid_t                   mClientUid;
     pid_t                   mClientPid;
 
     sp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
+    audio_port_handle_t     mPortId;  // unique ID allocated by audio policy
 };
 
 }; // namespace android
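The new AudioTrack::hasStarted() query above reports whether audio has actually become audible after start(). A usage sketch; the polling loop is illustrative only, and a real client would typically rely on callbacks instead:

    #include <media/AudioTrack.h>
    #include <unistd.h>

    using namespace android;

    static void startAndWaitUntilAudible(const sp<AudioTrack> &track) {
        track->start();
        // hasStarted() flips to true once the timestamp position or increment
        // shows output at the device; it drops back to false after pause()/flush().
        while (!track->hasStarted()) {
            usleep(10 * 1000);   // poll every 10 ms
        }
    }
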
diff --git a/services/audioflinger/BufferProviders.h b/include/media/BufferProviders.h
similarity index 92%
rename from services/audioflinger/BufferProviders.h
rename to include/media/BufferProviders.h
index abd43c6..d5899ea 100644
--- a/services/audioflinger/BufferProviders.h
+++ b/include/media/BufferProviders.h
@@ -20,13 +20,22 @@
 #include <stdint.h>
 #include <sys/types.h>
 
-#include <hardware/audio_effect.h>
 #include <media/AudioBufferProvider.h>
+#include <media/AudioResamplerPublic.h>
 #include <system/audio.h>
-#include <sonic.h>
+#include <system/audio_effect.h>
+#include <utils/StrongPointer.h>
+
+// external forward declaration from external/sonic/sonic.h
+struct sonicStreamStruct;
+typedef struct sonicStreamStruct *sonicStream;
 
 namespace android {
 
+class EffectBufferHalInterface;
+class EffectHalInterface;
+class EffectsFactoryHalInterface;
+
 // ----------------------------------------------------------------------------
 
 class PassthruBufferProvider : public AudioBufferProvider {
@@ -97,12 +106,15 @@
     //Overrides
     virtual void copyFrames(void *dst, const void *src, size_t frames);
 
-    bool isValid() const { return mDownmixHandle != NULL; }
+    bool isValid() const { return mDownmixInterface.get() != NULL; }
     static status_t init();
     static bool isMultichannelCapable() { return sIsMultichannelCapable; }
 
 protected:
-    effect_handle_t    mDownmixHandle;
+    sp<EffectsFactoryHalInterface> mEffectsFactory;
+    sp<EffectHalInterface> mDownmixInterface;
+    sp<EffectBufferHalInterface> mInBuffer;
+    sp<EffectBufferHalInterface> mOutBuffer;
     effect_config_t    mDownmixConfig;
 
     // effect descriptor for the downmixer used by the mixer
diff --git a/include/media/BufferingSettings.h b/include/media/BufferingSettings.h
new file mode 100644
index 0000000..e812d2a
--- /dev/null
+++ b/include/media/BufferingSettings.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_BUFFERING_SETTINGS_H
+#define ANDROID_BUFFERING_SETTINGS_H
+
+#include <binder/Parcelable.h>
+
+namespace android {
+
+enum BufferingMode : int {
+    // Do not support buffering.
+    BUFFERING_MODE_NONE             = 0,
+    // Support only time based buffering.
+    BUFFERING_MODE_TIME_ONLY        = 1,
+    // Support only size based buffering.
+    BUFFERING_MODE_SIZE_ONLY        = 2,
+    // Support both time and size based buffering, time based calculation precedes size based.
+    // Size based calculation will be used only when time information is not available for
+    // the stream.
+    BUFFERING_MODE_TIME_THEN_SIZE   = 3,
+    // Number of modes.
+    BUFFERING_MODE_COUNT            = 4,
+};
+
+struct BufferingSettings : public Parcelable {
+    static const int kNoWatermark = -1;
+
+    static bool IsValidBufferingMode(int mode);
+    static bool IsTimeBasedBufferingMode(int mode);
+    static bool IsSizeBasedBufferingMode(int mode);
+
+    BufferingMode mInitialBufferingMode;  // for prepare
+    BufferingMode mRebufferingMode;  // for playback
+
+    int mInitialWatermarkMs;  // time based
+    int mInitialWatermarkKB;  // size based
+
+    // When cached data is below this mark, playback will be paused for buffering
+    // until data reaches |mRebufferingWatermarkHighMs| or end of stream.
+    int mRebufferingWatermarkLowMs;
+    // When cached data is above this mark, buffering will be paused.
+    int mRebufferingWatermarkHighMs;
+
+    // When cached data is below this mark, playback will be paused for buffering
+    // until data reaches |mRebufferingWatermarkHighKB| or end of stream.
+    int mRebufferingWatermarkLowKB;
+    // When cached data is above this mark, buffering will be paused.
+    int mRebufferingWatermarkHighKB;
+
+    BufferingSettings();
+
+    status_t writeToParcel(Parcel* parcel) const override;
+    status_t readFromParcel(const Parcel* parcel) override;
+
+    String8 toString() const;
+};
+
+} // namespace android
+
+// ---------------------------------------------------------------------------
+
+#endif // ANDROID_BUFFERING_SETTINGS_H
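A minimal caller-side sketch of how the fields above fit together; the watermark values are arbitrary, chosen only for illustration.

    #include <media/BufferingSettings.h>

    using android::BufferingSettings;

    // Sketch: buffer by time when duration info exists, fall back to size otherwise.
    static BufferingSettings makeExampleSettings() {
        BufferingSettings s;
        s.mInitialBufferingMode = android::BUFFERING_MODE_TIME_THEN_SIZE;
        s.mRebufferingMode      = android::BUFFERING_MODE_TIME_THEN_SIZE;
        s.mInitialWatermarkMs   = 2000;   // start playback once ~2s is cached
        s.mInitialWatermarkKB   = 512;    // or ~512KB when duration is unknown
        s.mRebufferingWatermarkLowMs  = 1000;  // pause playback and rebuffer below 1s
        s.mRebufferingWatermarkHighMs = 5000;  // stop buffering above 5s
        s.mRebufferingWatermarkLowKB  = 256;
        s.mRebufferingWatermarkHighKB = 2048;
        return s;
    }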
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index 096f7ef..8c5e61a 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -28,9 +28,8 @@
 #include <media/IAudioRecord.h>
 #include <media/IAudioFlingerClient.h>
 #include <system/audio.h>
+#include <system/audio_effect.h>
 #include <system/audio_policy.h>
-#include <hardware/audio_policy.h>
-#include <hardware/audio_effect.h>
 #include <media/IEffect.h>
 #include <media/IEffectClient.h>
 #include <utils/String8.h>
@@ -67,7 +66,8 @@
                                 pid_t tid,  // -1 means unused, otherwise must be valid non-0
                                 audio_session_t *sessionId,
                                 int clientUid,
-                                status_t *status) = 0;
+                                status_t *status,
+                                audio_port_handle_t portId) = 0;
 
     virtual sp<IAudioRecord> openRecord(
                                 // On successful return, AudioFlinger takes over the handle
@@ -87,7 +87,8 @@
                                 size_t *notificationFrames,
                                 sp<IMemory>& cblk,
                                 sp<IMemory>& buffers,   // return value 0 means it follows cblk
-                                status_t *status) = 0;
+                                status_t *status,
+                                audio_port_handle_t portId) = 0;
 
     // FIXME Surprisingly, format/latency don't work for input handles
 
@@ -197,6 +198,7 @@
                                     audio_io_handle_t output,
                                     audio_session_t sessionId,
                                     const String16& callingPackage,
+                                    pid_t pid,
                                     status_t *status,
                                     int *id,
                                     int *enabled) = 0;
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index f9dcbea..d111fd2 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -66,12 +66,10 @@
                                       audio_session_t session,
                                       audio_stream_type_t *stream,
                                       uid_t uid,
-                                      uint32_t samplingRate = 0,
-                                      audio_format_t format = AUDIO_FORMAT_DEFAULT,
-                                      audio_channel_mask_t channelMask = 0,
-                                      audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
-                                      audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
-                                      const audio_offload_info_t *offloadInfo = NULL) = 0;
+                                      const audio_config_t *config,
+                                      audio_output_flags_t flags,
+                                      audio_port_handle_t selectedDeviceId,
+                                      audio_port_handle_t *portId) = 0;
     virtual status_t startOutput(audio_io_handle_t output,
                                  audio_stream_type_t stream,
                                  audio_session_t session) = 0;
@@ -86,11 +84,10 @@
                               audio_session_t session,
                               pid_t pid,
                               uid_t uid,
-                              uint32_t samplingRate,
-                              audio_format_t format,
-                              audio_channel_mask_t channelMask,
+                              const audio_config_base_t *config,
                               audio_input_flags_t flags,
-                              audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE) = 0;
+                              audio_port_handle_t selectedDeviceId,
+                              audio_port_handle_t *portId) = 0;
     virtual status_t startInput(audio_io_handle_t input,
                                 audio_session_t session) = 0;
     virtual status_t stopInput(audio_io_handle_t input,
@@ -167,8 +164,8 @@
 
     virtual status_t startAudioSource(const struct audio_port_config *source,
                                       const audio_attributes_t *attributes,
-                                      audio_io_handle_t *handle) = 0;
-    virtual status_t stopAudioSource(audio_io_handle_t handle) = 0;
+                                      audio_patch_handle_t *handle) = 0;
+    virtual status_t stopAudioSource(audio_patch_handle_t handle) = 0;
 
     virtual status_t setMasterMono(bool mono) = 0;
     virtual status_t getMasterMono(bool *mono) = 0;
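For orientation, a hedged caller-side sketch of the reshaped getOutputForAttr() contract: the per-field arguments are folded into an audio_config_t and the policy manager now returns a port ID. The leading parameters (attributes pointer, output handle) sit above the hunk shown here and are assumed; 'aps', 'attr', 'session' and 'uid' are placeholders supplied by the caller.

    #include <media/IAudioPolicyService.h>
    #include <system/audio.h>

    // Sketch only, not a drop-in snippet.
    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
    config.sample_rate  = 48000;
    config.format       = AUDIO_FORMAT_PCM_16_BIT;
    config.channel_mask = AUDIO_CHANNEL_OUT_STEREO;

    audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
    audio_stream_type_t stream = AUDIO_STREAM_MUSIC;
    audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;   // filled in by the policy manager

    status_t err = aps->getOutputForAttr(&attr, &output, session, &stream, uid,
                                         &config, AUDIO_OUTPUT_FLAG_NONE,
                                         AUDIO_PORT_HANDLE_NONE /* selectedDeviceId */,
                                         &portId);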
diff --git a/include/media/IMediaAnalyticsService.h b/include/media/IMediaAnalyticsService.h
new file mode 100644
index 0000000..9213637
--- /dev/null
+++ b/include/media/IMediaAnalyticsService.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IMEDIAANALYTICSSERVICE_H
+#define ANDROID_IMEDIAANALYTICSSERVICE_H
+
+#include <utils/String8.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+
+#include <sys/types.h>
+#include <utils/Errors.h>
+#include <utils/Log.h>
+#include <utils/RefBase.h>
+#include <utils/List.h>
+
+#include <binder/IServiceManager.h>
+
+#include <media/MediaAnalyticsItem.h>
+// nope...#include <media/MediaAnalytics.h>
+
+namespace android {
+
+class IMediaAnalyticsService: public IInterface
+{
+public:
+    DECLARE_META_INTERFACE(MediaAnalyticsService);
+
+    // generate a unique sessionID to use across multiple requests
+    // 'unique' is within this device, since last reboot
+    virtual MediaAnalyticsItem::SessionID_t generateUniqueSessionID() = 0;
+
+    // submit the indicated record to the mediaanalytics service, where
+    // it will be merged (if appropriate) with incomplete records that
+    // share the same key and sessionid.
+    // 'forcenew' marks any matching incomplete record as complete before
+    // inserting this new record.
+    // returns the sessionID associated with that item.
+    // caller continues to own the passed item
+    virtual MediaAnalyticsItem::SessionID_t submit(MediaAnalyticsItem *item, bool forcenew) = 0;
+
+
+    // return lists of records that match the supplied parameters.
+    // finished [or not] records since time 'ts' with key 'key'
+    // timestamp 'ts' is nanoseconds, unix time.
+    // caller responsible for deallocating returned data structures
+    virtual List<MediaAnalyticsItem *> *getMediaAnalyticsItemList(bool finished, int64_t ts) = 0;
+    virtual List<MediaAnalyticsItem *> *getMediaAnalyticsItemList(bool finished, int64_t ts, MediaAnalyticsItem::Key key) = 0;
+
+};
+
+// ----------------------------------------------------------------------------
+
+class BnMediaAnalyticsService: public BnInterface<IMediaAnalyticsService>
+{
+public:
+    virtual status_t    onTransact( uint32_t code,
+                                    const Parcel& data,
+                                    Parcel* reply,
+                                    uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif // ANDROID_IMEDIAANALYTICSSERVICE_H
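A hedged usage sketch of the submission path above, assuming the caller already holds an IMediaAnalyticsService proxy and builds the record with the MediaAnalyticsItem API declared later in this change; the key and attribute names are invented for the example.

    #include <media/IMediaAnalyticsService.h>
    #include <media/MediaAnalyticsItem.h>

    using namespace android;

    void reportExample(const sp<IMediaAnalyticsService>& svc) {
        MediaAnalyticsItem item("video");          // illustrative key
        item.setInt32("width", 1920);
        item.setInt32("height", 1080);
        item.setCString("codec", "avc");

        // false: allow merging with an existing incomplete record for this key.
        MediaAnalyticsItem::SessionID_t id = svc->submit(&item, false /* forcenew */);
        (void)id;                                  // caller still owns 'item'
    }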
diff --git a/include/media/IMediaExtractor.h b/include/media/IMediaExtractor.h
index 34b15e9..e0a81f1 100644
--- a/include/media/IMediaExtractor.h
+++ b/include/media/IMediaExtractor.h
@@ -54,8 +54,6 @@
     virtual uint32_t flags() const = 0;
 
     // for DRM
-    virtual void setDrmFlag(bool flag) = 0;
-    virtual bool getDrmFlag() = 0;
     virtual char* getDrmTrackInfo(size_t trackID, int *len)  = 0;
     virtual void setUID(uid_t uid)  = 0;
 
diff --git a/include/media/IMediaPlayer.h b/include/media/IMediaPlayer.h
index 0fd8933..f642373 100644
--- a/include/media/IMediaPlayer.h
+++ b/include/media/IMediaPlayer.h
@@ -23,6 +23,8 @@
 #include <utils/KeyedVector.h>
 #include <system/audio.h>
 
+#include <media/IMediaSource.h>
+
 // Fwd decl to make sure everyone agrees that the scope of struct sockaddr_in is
 // global, and not in android::
 struct sockaddr_in;
@@ -37,6 +39,9 @@
 struct IMediaHTTPService;
 struct AudioPlaybackRate;
 struct AVSyncSettings;
+struct BufferingSettings;
+
+typedef IMediaSource::ReadOptions::SeekMode MediaPlayerSeekMode;
 
 class IMediaPlayer: public IInterface
 {
@@ -55,6 +60,9 @@
     virtual status_t        setDataSource(const sp<IDataSource>& source) = 0;
     virtual status_t        setVideoSurfaceTexture(
                                     const sp<IGraphicBufferProducer>& bufferProducer) = 0;
+    virtual status_t        getDefaultBufferingSettings(
+                                    BufferingSettings* buffering /* nonnull */) = 0;
+    virtual status_t        setBufferingSettings(const BufferingSettings& buffering) = 0;
     virtual status_t        prepareAsync() = 0;
     virtual status_t        start() = 0;
     virtual status_t        stop() = 0;
@@ -65,7 +73,9 @@
     virtual status_t        setSyncSettings(const AVSyncSettings& sync, float videoFpsHint) = 0;
     virtual status_t        getSyncSettings(AVSyncSettings* sync /* nonnull */,
                                             float* videoFps /* nonnull */) = 0;
-    virtual status_t        seekTo(int msec) = 0;
+    virtual status_t        seekTo(
+            int msec,
+            MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) = 0;
     virtual status_t        getCurrentPosition(int* msec) = 0;
     virtual status_t        getDuration(int* msec) = 0;
     virtual status_t        reset() = 0;
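A short sketch of the widened seek contract, assuming a held IMediaPlayer proxy 'player'; the SEEK_CLOSEST_SYNC value is taken from IMediaSource::ReadOptions::SeekMode and is assumed here rather than shown in this diff.

    // Seek to 30s. The default mode snaps to the previous sync frame,
    // matching the old single-argument behaviour.
    status_t err = player->seekTo(30000);

    // Explicitly ask for the nearest sync frame instead.
    err = player->seekTo(30000, MediaPlayerSeekMode::SEEK_CLOSEST_SYNC);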
diff --git a/include/media/IMediaRecorder.h b/include/media/IMediaRecorder.h
index 68a65f0..3e05532 100644
--- a/include/media/IMediaRecorder.h
+++ b/include/media/IMediaRecorder.h
@@ -28,8 +28,8 @@
 }
 class ICameraRecordingProxy;
 class IMediaRecorderClient;
-class IGraphicBufferConsumer;
 class IGraphicBufferProducer;
+struct PersistentSurface;
 
 class IMediaRecorder: public IInterface
 {
@@ -60,7 +60,7 @@
     virtual status_t init() = 0;
     virtual status_t close() = 0;
     virtual status_t release() = 0;
-    virtual status_t setInputSurface(const sp<IGraphicBufferConsumer>& surface) = 0;
+    virtual status_t setInputSurface(const sp<PersistentSurface>& surface) = 0;
     virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() = 0;
 };
 
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
index ffa6d6d..ec1d4b6 100644
--- a/include/media/IOMX.h
+++ b/include/media/IOMX.h
@@ -20,8 +20,6 @@
 
 #include <binder/IInterface.h>
 #include <gui/IGraphicBufferProducer.h>
-#include <gui/IGraphicBufferConsumer.h>
-#include <ui/GraphicBuffer.h>
 #include <utils/List.h>
 #include <utils/String8.h>
 
@@ -34,23 +32,39 @@
 
 namespace android {
 
+class IGraphicBufferProducer;
+class IGraphicBufferSource;
 class IMemory;
+class IOMXBufferSource;
+class IOMXNode;
 class IOMXObserver;
-class IOMXRenderer;
 class NativeHandle;
-class Surface;
+class OMXBuffer;
+struct omx_message;
 
 class IOMX : public IInterface {
 public:
     DECLARE_META_INTERFACE(OMX);
 
     typedef uint32_t buffer_id;
-    typedef uint32_t node_id;
 
-    // Given a node_id and the calling process' pid, returns true iff
-    // the implementation of the OMX interface lives in the same
-    // process.
-    virtual bool livesLocally(node_id node, pid_t pid) = 0;
+    enum {
+        kFenceTimeoutMs = 1000
+    };
+
+    enum PortMode {
+        kPortModePresetStart = 0,
+        kPortModePresetByteBuffer,
+        kPortModePresetANWBuffer,
+        kPortModePresetSecureBuffer,
+        kPortModePresetEnd,
+
+        kPortModeDynamicStart = 100,
+        kPortModeDynamicANWBuffer,      // uses metadata mode kMetadataBufferTypeANWBuffer
+                                        // or kMetadataBufferTypeGrallocSource
+        kPortModeDynamicNativeHandle,   // uses metadata mode kMetadataBufferTypeNativeHandleSource
+        kPortModeDynamicEnd,
+    };
 
     struct ComponentInfo {
         String8 mName;
@@ -60,88 +74,52 @@
 
     virtual status_t allocateNode(
             const char *name, const sp<IOMXObserver> &observer,
-            sp<IBinder> *nodeBinder,
-            node_id *node) = 0;
+            sp<IOMXNode> *omxNode) = 0;
 
-    virtual status_t freeNode(node_id node) = 0;
+    virtual status_t createInputSurface(
+            sp<IGraphicBufferProducer> *bufferProducer,
+            sp<IGraphicBufferSource> *bufferSource) = 0;
+};
+
+class IOMXNode : public IInterface {
+public:
+    DECLARE_META_INTERFACE(OMXNode);
+
+    typedef IOMX::buffer_id buffer_id;
+
+    virtual status_t freeNode() = 0;
 
     virtual status_t sendCommand(
-            node_id node, OMX_COMMANDTYPE cmd, OMX_S32 param) = 0;
+            OMX_COMMANDTYPE cmd, OMX_S32 param) = 0;
 
     virtual status_t getParameter(
-            node_id node, OMX_INDEXTYPE index,
-            void *params, size_t size) = 0;
+            OMX_INDEXTYPE index, void *params, size_t size) = 0;
 
     virtual status_t setParameter(
-            node_id node, OMX_INDEXTYPE index,
-            const void *params, size_t size) = 0;
+            OMX_INDEXTYPE index, const void *params, size_t size) = 0;
 
     virtual status_t getConfig(
-            node_id node, OMX_INDEXTYPE index,
-            void *params, size_t size) = 0;
+            OMX_INDEXTYPE index, void *params, size_t size) = 0;
 
     virtual status_t setConfig(
-            node_id node, OMX_INDEXTYPE index,
-            const void *params, size_t size) = 0;
+            OMX_INDEXTYPE index, const void *params, size_t size) = 0;
 
-    virtual status_t getState(
-            node_id node, OMX_STATETYPE* state) = 0;
-
-    // This will set *type to previous metadata buffer type on OMX error (not on binder error), and
-    // new metadata buffer type on success.
-    virtual status_t storeMetaDataInBuffers(
-            node_id node, OMX_U32 port_index, OMX_BOOL enable, MetadataBufferType *type = NULL) = 0;
+    virtual status_t setPortMode(
+            OMX_U32 port_index, IOMX::PortMode mode) = 0;
 
     virtual status_t prepareForAdaptivePlayback(
-            node_id node, OMX_U32 portIndex, OMX_BOOL enable,
+            OMX_U32 portIndex, OMX_BOOL enable,
             OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight) = 0;
 
     virtual status_t configureVideoTunnelMode(
-            node_id node, OMX_U32 portIndex, OMX_BOOL tunneled,
+            OMX_U32 portIndex, OMX_BOOL tunneled,
             OMX_U32 audioHwSync, native_handle_t **sidebandHandle) = 0;
 
-    virtual status_t enableNativeBuffers(
-            node_id node, OMX_U32 port_index, OMX_BOOL graphic, OMX_BOOL enable) = 0;
-
     virtual status_t getGraphicBufferUsage(
-            node_id node, OMX_U32 port_index, OMX_U32* usage) = 0;
+            OMX_U32 port_index, OMX_U32* usage) = 0;
 
-    // Use |params| as an OMX buffer, but limit the size of the OMX buffer to |allottedSize|.
-    virtual status_t useBuffer(
-            node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-            buffer_id *buffer, OMX_U32 allottedSize) = 0;
-
-    virtual status_t useGraphicBuffer(
-            node_id node, OMX_U32 port_index,
-            const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer) = 0;
-
-    virtual status_t updateGraphicBufferInMeta(
-            node_id node, OMX_U32 port_index,
-            const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer) = 0;
-
-    virtual status_t updateNativeHandleInMeta(
-            node_id node, OMX_U32 port_index,
-            const sp<NativeHandle> &nativeHandle, buffer_id buffer) = 0;
-
-    // This will set *type to resulting metadata buffer type on OMX error (not on binder error) as
-    // well as on success.
-    virtual status_t createInputSurface(
-            node_id node, OMX_U32 port_index, android_dataspace dataSpace,
-            sp<IGraphicBufferProducer> *bufferProducer,
-            MetadataBufferType *type = NULL) = 0;
-
-    virtual status_t createPersistentInputSurface(
-            sp<IGraphicBufferProducer> *bufferProducer,
-            sp<IGraphicBufferConsumer> *bufferConsumer) = 0;
-
-    // This will set *type to resulting metadata buffer type on OMX error (not on binder error) as
-    // well as on success.
     virtual status_t setInputSurface(
-            node_id node, OMX_U32 port_index,
-            const sp<IGraphicBufferConsumer> &bufferConsumer,
-            MetadataBufferType *type) = 0;
-
-    virtual status_t signalEndOfInputStream(node_id node) = 0;
+            const sp<IOMXBufferSource> &bufferSource) = 0;
 
     // Allocate an opaque buffer as a native handle. If component supports returning native
     // handles, those are returned in *native_handle. Otherwise, the allocated buffer is
@@ -149,57 +127,48 @@
     // same process as the callee, i.e. is the media_server, as the returned "buffer_data"
     // pointer is just that, a pointer into local address space.
     virtual status_t allocateSecureBuffer(
-            node_id node, OMX_U32 port_index, size_t size,
-            buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle) = 0;
+            OMX_U32 port_index, size_t size, buffer_id *buffer,
+            void **buffer_data, sp<NativeHandle> *native_handle) = 0;
 
-    // Allocate an OMX buffer of size |allotedSize|. Use |params| as the backup buffer, which
-    // may be larger.
-    virtual status_t allocateBufferWithBackup(
-            node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-            buffer_id *buffer, OMX_U32 allottedSize) = 0;
+    // Instructs the component to use the buffer passed in via |omxBuf| on the
+    // specified port. Returns in |*buffer| the buffer id that the component
+    // assigns to this buffer. |omxBuf| must be one of:
+    // 1) OMXBuffer::sPreset for meta-mode,
+    // 2) type kBufferTypeANWBuffer for non-meta-graphic buffer mode,
+    // 3) type kBufferTypeSharedMem for bytebuffer mode.
+    virtual status_t useBuffer(
+            OMX_U32 port_index, const OMXBuffer &omxBuf, buffer_id *buffer) = 0;
 
+    // Frees the buffer on the specified port with buffer id |buffer|.
     virtual status_t freeBuffer(
-            node_id node, OMX_U32 port_index, buffer_id buffer) = 0;
+            OMX_U32 port_index, buffer_id buffer) = 0;
 
-    enum {
-        kFenceTimeoutMs = 1000
-    };
-    // Calls OMX_FillBuffer on buffer, and passes |fenceFd| to component if it supports
-    // fences. Otherwise, it waits on |fenceFd| before calling OMX_FillBuffer.
-    // Takes ownership of |fenceFd| even if this call fails.
-    virtual status_t fillBuffer(node_id node, buffer_id buffer, int fenceFd = -1) = 0;
+    // Calls OMX_FillBuffer on buffer. Passes |fenceFd| to component if it
+    // supports fences. Otherwise, it waits on |fenceFd| before calling
+    // OMX_FillBuffer. Takes ownership of |fenceFd| even if this call fails.
+    // If the port is in metadata mode, the buffer will be updated to point
+    // to the new buffer passed in via |omxBuf| before OMX_FillBuffer is called.
+    // Otherwise info in the |omxBuf| is not used.
+    virtual status_t fillBuffer(
+            buffer_id buffer, const OMXBuffer &omxBuf, int fenceFd = -1) = 0;
 
-    // Calls OMX_EmptyBuffer on buffer (after updating buffer header with |range_offset|,
-    // |range_length|, |flags| and |timestamp|). Passes |fenceFd| to component if it
-    // supports fences. Otherwise, it waits on |fenceFd| before calling OMX_EmptyBuffer.
-    // Takes ownership of |fenceFd| even if this call fails.
+    // Calls OMX_EmptyBuffer on buffer. Passes |fenceFd| to component if it
+    // supports fences. Otherwise, it waits on |fenceFd| before calling
+    // OMX_EmptyBuffer. Takes ownership of |fenceFd| even if this call fails.
+    // If the port is in metadata mode, the buffer will be updated to point
+    // to the new buffer passed in via |omxBuf| before OMX_EmptyBuffer is called.
     virtual status_t emptyBuffer(
-            node_id node,
-            buffer_id buffer,
-            OMX_U32 range_offset, OMX_U32 range_length,
+            buffer_id buffer, const OMXBuffer &omxBuf,
             OMX_U32 flags, OMX_TICKS timestamp, int fenceFd = -1) = 0;
 
     virtual status_t getExtensionIndex(
-            node_id node,
             const char *parameter_name,
             OMX_INDEXTYPE *index) = 0;
 
-    enum InternalOptionType {
-        INTERNAL_OPTION_SUSPEND,  // data is a bool
-        INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY,  // data is an int64_t
-        INTERNAL_OPTION_MAX_TIMESTAMP_GAP, // data is int64_t
-        INTERNAL_OPTION_MAX_FPS, // data is float
-        INTERNAL_OPTION_START_TIME, // data is an int64_t
-        INTERNAL_OPTION_TIME_LAPSE, // data is an int64_t[2]
-        INTERNAL_OPTION_COLOR_ASPECTS, // data is ColorAspects
-        INTERNAL_OPTION_TIME_OFFSET, // data is an int64_t
-    };
-    virtual status_t setInternalOption(
-            node_id node,
-            OMX_U32 port_index,
-            InternalOptionType type,
-            const void *data,
-            size_t size) = 0;
+    virtual status_t dispatchMessage(const omx_message &msg) = 0;
+
+    // TODO: this is temporary, will be removed when quirks move to OMX side
+    virtual status_t setQuirks(OMX_U32 quirks) = 0;
 };
 
 struct omx_message {
@@ -210,7 +179,6 @@
         FRAME_RENDERED,
     } type;
 
-    IOMX::node_id node;
     int fenceFd; // used for EMPTY_BUFFER_DONE and FILL_BUFFER_DONE; client must close this
 
     union {
@@ -219,6 +187,8 @@
             OMX_EVENTTYPE event;
             OMX_U32 data1;
             OMX_U32 data2;
+            OMX_U32 data3;
+            OMX_U32 data4;
         } event_data;
 
         // if type == EMPTY_BUFFER_DONE
@@ -258,10 +228,17 @@
     virtual status_t onTransact(
             uint32_t code, const Parcel &data, Parcel *reply,
             uint32_t flags = 0);
+};
+
+class BnOMXNode : public BnInterface<IOMXNode> {
+public:
+    virtual status_t onTransact(
+            uint32_t code, const Parcel &data, Parcel *reply,
+            uint32_t flags = 0);
 
 protected:
     // check if the codec is secure.
-    virtual bool isSecure(IOMX::node_id /*node*/) {
+    virtual bool isSecure() const {
         return false;
     }
 };
@@ -273,23 +250,6 @@
             uint32_t flags = 0);
 };
 
-struct CodecProfileLevel {
-    OMX_U32 mProfile;
-    OMX_U32 mLevel;
-};
-
-inline static const char *asString(MetadataBufferType i, const char *def = "??") {
-    using namespace android;
-    switch (i) {
-        case kMetadataBufferTypeCameraSource:   return "CameraSource";
-        case kMetadataBufferTypeGrallocSource:  return "GrallocSource";
-        case kMetadataBufferTypeANWBuffer:      return "ANWBuffer";
-        case kMetadataBufferTypeNativeHandleSource: return "NativeHandleSource";
-        case kMetadataBufferTypeInvalid:        return "Invalid";
-        default:                                return def;
-    }
-}
-
 }  // namespace android
 
 #endif  // ANDROID_IOMX_H_
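To make the node-centric refactor above concrete, a hedged sketch of the new calling pattern: allocateNode() hands back an IOMXNode and the per-node calls drop the old node_id argument. The component name, observer, port index, buffer and timestamp are placeholders.

    #include <media/IOMX.h>
    #include <media/OMXBuffer.h>

    using namespace android;

    void decodeSketch(const sp<IOMX>& omx, const sp<IOMXObserver>& observer,
                      const sp<IMemory>& inputMem, OMX_TICKS timestamp) {
        sp<IOMXNode> node;
        if (omx->allocateNode("OMX.google.h264.decoder", observer, &node) != OK) {
            return;
        }
        node->sendCommand(OMX_CommandStateSet, OMX_StateIdle);

        // Register a pre-allocated shared-memory buffer on the input port; the
        // component returns its own buffer_id for it.
        IOMXNode::buffer_id inputId;
        node->useBuffer(0 /* port_index, illustrative */, OMXBuffer(inputMem), &inputId);

        // Queue data: OMXBuffer now carries the backing/range info that used to
        // be separate emptyBuffer() arguments.
        node->emptyBuffer(inputId, OMXBuffer(inputMem),
                          OMX_BUFFERFLAG_ENDOFFRAME, timestamp);

        node->freeNode();   // no node_id argument any more
    }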
diff --git a/services/audioflinger/LinearMap.h b/include/media/LinearMap.h
similarity index 100%
rename from services/audioflinger/LinearMap.h
rename to include/media/LinearMap.h
diff --git a/include/media/MediaAnalyticsItem.h b/include/media/MediaAnalyticsItem.h
new file mode 100644
index 0000000..a78aa8b
--- /dev/null
+++ b/include/media/MediaAnalyticsItem.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_MEDIAANALYTICSITEM_H
+#define ANDROID_MEDIA_MEDIAANALYTICSITEM_H
+
+#include <cutils/properties.h>
+#include <sys/types.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/RefBase.h>
+#include <utils/StrongPointer.h>
+#include <utils/Timers.h>
+
+#include <media/stagefright/foundation/AString.h>
+
+namespace android {
+
+
+
+class IMediaAnalyticsService;
+
+// the class interface
+//
+
+class MediaAnalyticsItem {
+
+    friend class MediaAnalyticsService;
+    friend class IMediaAnalyticsService;
+
+    public:
+
+            enum Type {
+                kTypeNone = 0,
+                kTypeInt32 = 1,
+                kTypeInt64 = 2,
+                kTypeDouble = 3,
+                kTypeCString = 4,
+                kTypeRate = 5,
+            };
+
+        // sessionid
+        // unique within device, within boot,
+        typedef int64_t SessionID_t;
+        static constexpr SessionID_t SessionIDInvalid = -1;
+        static constexpr SessionID_t SessionIDNone = 0;
+
+        // Key: the record discriminator
+        // values for the record discriminator
+        // values can be "component/component"
+        // basic values: "video", "audio", "drm"
+        // XXX: need to better define the format
+        typedef AString Key;
+        static const Key kKeyNone;              // ""
+        static const Key kKeyAny;               // "*"
+
+        // Attr: names for attributes within a record
+        // format "prop1" or "prop/subprop"
+        // XXX: need to better define the format
+        typedef const char *Attr;
+
+
+    public:
+
+        // access functions for the class
+        MediaAnalyticsItem();
+        MediaAnalyticsItem(Key);
+        ~MediaAnalyticsItem();
+
+        // so clients can send intermediate values to be overlaid later
+        MediaAnalyticsItem &setFinalized(bool);
+        bool getFinalized() const;
+
+        // SessionID ties multiple submissions for same key together
+        // so that if video "height" and "width" are known at one point
+        // and "framerate" is only known later, they can be be brought
+        // together.
+        MediaAnalyticsItem &setSessionID(SessionID_t);
+        MediaAnalyticsItem &clearSessionID();
+        SessionID_t getSessionID() const;
+        // generates and stores a new ID iff mSessionID == SessionIDNone
+        SessionID_t generateSessionID();
+
+        // reset all contents, discarding any extra data
+        void clear();
+        MediaAnalyticsItem *dup();
+
+        // set the key discriminator for the record.
+        // most often initialized as part of the constructor
+        MediaAnalyticsItem &setKey(MediaAnalyticsItem::Key);
+        MediaAnalyticsItem::Key getKey();
+
+        // # of attributes in the record
+        int32_t count() const;
+
+        // set values appropriately
+        void setInt32(Attr, int32_t value);
+        void setInt64(Attr, int64_t value);
+        void setDouble(Attr, double value);
+        void setRate(Attr, int64_t count, int64_t duration);
+        void setCString(Attr, const char *value);
+
+        // fused get/add/set; if attr wasn't there, it's a simple set.
+        // type-mismatch counts as "wasn't there".
+        void addInt32(Attr, int32_t value);
+        void addInt64(Attr, int64_t value);
+        void addDouble(Attr, double value);
+        void addRate(Attr, int64_t count, int64_t duration);
+
+        // find & extract values
+        // return indicates whether attr exists (and thus value filled in)
+        // NULL parameter value suppresses storage of value.
+        bool getInt32(Attr, int32_t *value);
+        bool getInt64(Attr, int64_t *value);
+        bool getDouble(Attr, double *value);
+        bool getRate(Attr, int64_t *count, int64_t *duration, double *rate);
+        // Caller owns the returned string
+        bool getCString(Attr, char **value);
+
+        // parameter indicates whether to close any existing open
+        // record with same key before establishing a new record
+        // caller retains ownership of 'this'.
+        bool selfrecord(bool);
+        bool selfrecord();
+
+        // remove indicated attributes and their values
+        // filterNot() could also be called keepOnly()
+        // return value is # attributes removed
+        // XXX: perhaps 'remove' instead of 'filter'
+        // XXX: filterNot would become 'keep'
+        int32_t filter(int count, Attr attrs[]);
+        int32_t filterNot(int count, Attr attrs[]);
+        int32_t filter(Attr attr);
+
+        // below here are used on server side or to talk to server
+        // clients need not worry about these.
+
+        // timestamp, pid, and uid only used on server side
+        // timestamp is in 'nanoseconds, unix time'
+        MediaAnalyticsItem &setTimestamp(nsecs_t);
+        nsecs_t getTimestamp() const;
+
+        MediaAnalyticsItem &setPid(pid_t);
+        pid_t getPid() const;
+
+        MediaAnalyticsItem &setUid(uid_t);
+        uid_t getUid() const;
+
+        // our serialization code for binder calls
+        int32_t writeToParcel(Parcel *);
+        int32_t readFromParcel(const Parcel&);
+
+        AString toString();
+
+        // are we collecting analytics data
+        static bool isEnabled();
+
+    protected:
+
+        // merge fields from arg into this
+        // with rules for first/last/add, etc
+        // XXX: document semantics and how they are indicated
+        // caller continues to own 'incoming'
+        bool merge(MediaAnalyticsItem *incoming);
+
+        // enabled 1, disabled 0
+        static const char * const EnabledProperty;
+        static const char * const EnabledPropertyPersist;
+        static const int   EnabledProperty_default;
+
+    private:
+
+        // to help validate that A doesn't mess with B's records
+        pid_t     mPid;
+        uid_t     mUid;
+
+        // let's reuse a binder connection
+        static sp<IMediaAnalyticsService> sAnalyticsService;
+        static sp<IMediaAnalyticsService> getInstance();
+
+        // tracking information
+        SessionID_t mSessionID;         // grouping similar records
+        nsecs_t mTimestamp;             // ns, system_time_monotonic
+
+        // will this record accept further updates
+        bool mFinalized;
+
+        Key mKey;
+
+        struct Prop {
+
+            Type mType;
+            const char *mName;
+            size_t mNameLen;    // the strlen(), doesn't include the null
+            union {
+                    int32_t int32Value;
+                    int64_t int64Value;
+                    double doubleValue;
+                    char *CStringValue;
+                    struct { int64_t count, duration; } rate;
+            } u;
+            void setName(const char *name, size_t len);
+        };
+
+        void initProp(Prop *item);
+        void clearProp(Prop *item);
+        void clearPropValue(Prop *item);
+        void copyProp(Prop *dst, const Prop *src);
+        enum {
+            kGrowProps = 10
+        };
+        void growProps(int increment = kGrowProps);
+        size_t findPropIndex(const char *name, size_t len);
+        Prop *findProp(const char *name);
+        Prop *allocateProp(const char *name);
+
+        size_t mPropCount;
+        size_t mPropSize;
+        Prop *mProps;
+
+};
+
+} // namespace android
+
+#endif
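A small sketch of the attribute accessors declared above; the key, attribute names and values are invented for the example.

    using android::MediaAnalyticsItem;

    MediaAnalyticsItem item("audiotrack");      // illustrative key
    item.setInt64("frames", 48000);
    item.addInt64("frames", 48000);             // fused get/add/set -> 96000
    item.setRate("underruns", 3 /* count */, 60 /* duration */);

    int64_t frames = 0;
    if (item.getInt64("frames", &frames)) {
        // frames == 96000 at this point
    }

    // Hand the record to the service over the item's own connection; 'true'
    // closes any open record with the same key before inserting this one.
    item.selfrecord(true);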
diff --git a/include/media/MediaCodecBuffer.h b/include/media/MediaCodecBuffer.h
new file mode 100644
index 0000000..501c00b
--- /dev/null
+++ b/include/media/MediaCodecBuffer.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_CODEC_BUFFER_H_
+
+#define MEDIA_CODEC_BUFFER_H_
+
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <utils/StrongPointer.h>
+
+namespace android {
+
+struct ABuffer;
+struct AMessage;
+class MediaBufferBase;
+
+/**
+ * Buffers used by MediaCodec.
+ */
+class MediaCodecBuffer : public RefBase {
+public:
+    MediaCodecBuffer(const sp<AMessage> &format, const sp<ABuffer> &buffer);
+
+    /**
+     * MediaCodec will release all references to the buffer when it's done using
+     * it, so the destructor should return the buffer to the owner, such as OMX
+     * components, buffer allocators, surfaces, etc.
+     */
+    virtual ~MediaCodecBuffer() = default;
+
+    // ABuffer-like interface
+    uint8_t *base();
+    uint8_t *data();
+    size_t capacity() const;
+    size_t size() const;
+    size_t offset() const;
+    // Default implementation calls ABuffer::setRange() and returns OK.
+    virtual status_t setRange(size_t offset, size_t size);
+    // TODO: These can be removed if we finish replacing all MediaBuffer's.
+    MediaBufferBase *getMediaBufferBase();
+    void setMediaBufferBase(MediaBufferBase *mediaBuffer);
+
+    // TODO: Specify each field for meta/format.
+    sp<AMessage> meta();
+    sp<AMessage> format();
+
+    void setFormat(const sp<AMessage> &format);
+
+private:
+    MediaCodecBuffer() = delete;
+
+    const sp<AMessage> mMeta;
+    sp<AMessage> mFormat;
+    const sp<ABuffer> mBuffer;
+    MediaBufferBase *mMediaBufferBase;
+};
+
+}  // namespace android
+
+#endif  // MEDIA_CODEC_BUFFER_H_
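A brief sketch of wrapping an ABuffer with the class above; the format contents are placeholders.

    #include <media/MediaCodecBuffer.h>
    #include <media/stagefright/foundation/ABuffer.h>
    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    sp<AMessage> format = new AMessage;
    format->setString("mime", "audio/raw");     // illustrative only
    sp<ABuffer> backing = new ABuffer(4096);    // 4KB backing store
    sp<MediaCodecBuffer> buffer = new MediaCodecBuffer(format, backing);

    // Mark the first 1024 bytes as the valid payload; the default setRange()
    // forwards to ABuffer::setRange().
    buffer->setRange(0 /* offset */, 1024 /* size */);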
diff --git a/include/media/MediaDefs.h b/include/media/MediaDefs.h
new file mode 100644
index 0000000..0682413
--- /dev/null
+++ b/include/media/MediaDefs.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_DEFS_H_
+
+#define MEDIA_DEFS_H_
+
+namespace android {
+
+extern const char *MEDIA_MIMETYPE_IMAGE_JPEG;
+
+extern const char *MEDIA_MIMETYPE_VIDEO_VP8;
+extern const char *MEDIA_MIMETYPE_VIDEO_VP9;
+extern const char *MEDIA_MIMETYPE_VIDEO_AVC;
+extern const char *MEDIA_MIMETYPE_VIDEO_HEVC;
+extern const char *MEDIA_MIMETYPE_VIDEO_MPEG4;
+extern const char *MEDIA_MIMETYPE_VIDEO_H263;
+extern const char *MEDIA_MIMETYPE_VIDEO_MPEG2;
+extern const char *MEDIA_MIMETYPE_VIDEO_RAW;
+extern const char *MEDIA_MIMETYPE_VIDEO_DOLBY_VISION;
+
+extern const char *MEDIA_MIMETYPE_AUDIO_AMR_NB;
+extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEG;           // layer III
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II;
+extern const char *MEDIA_MIMETYPE_AUDIO_MIDI;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC;
+extern const char *MEDIA_MIMETYPE_AUDIO_QCELP;
+extern const char *MEDIA_MIMETYPE_AUDIO_VORBIS;
+extern const char *MEDIA_MIMETYPE_AUDIO_OPUS;
+extern const char *MEDIA_MIMETYPE_AUDIO_G711_ALAW;
+extern const char *MEDIA_MIMETYPE_AUDIO_G711_MLAW;
+extern const char *MEDIA_MIMETYPE_AUDIO_RAW;
+extern const char *MEDIA_MIMETYPE_AUDIO_FLAC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS;
+extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM;
+extern const char *MEDIA_MIMETYPE_AUDIO_AC3;
+extern const char *MEDIA_MIMETYPE_AUDIO_EAC3;
+
+extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
+extern const char *MEDIA_MIMETYPE_CONTAINER_WAV;
+extern const char *MEDIA_MIMETYPE_CONTAINER_OGG;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MATROSKA;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2TS;
+extern const char *MEDIA_MIMETYPE_CONTAINER_AVI;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2PS;
+
+extern const char *MEDIA_MIMETYPE_TEXT_3GPP;
+extern const char *MEDIA_MIMETYPE_TEXT_SUBRIP;
+extern const char *MEDIA_MIMETYPE_TEXT_VTT;
+extern const char *MEDIA_MIMETYPE_TEXT_CEA_608;
+extern const char *MEDIA_MIMETYPE_TEXT_CEA_708;
+extern const char *MEDIA_MIMETYPE_DATA_TIMED_ID3;
+
+// These are values exported to JAVA API that need to be in sync with
+// frameworks/base/media/java/android/media/AudioFormat.java. Unfortunately,
+// they are not defined in frameworks/av, so defining them here.
+enum AudioEncoding {
+    kAudioEncodingPcm16bit = 2,
+    kAudioEncodingPcm8bit = 3,
+    kAudioEncodingPcmFloat = 4,
+};
+
+}  // namespace android
+
+#endif  // MEDIA_DEFS_H_
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index 4977efd..0e815cb 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -30,6 +30,7 @@
 #include <media/AudioSystem.h>
 #include <media/AudioTimestamp.h>
 #include <media/AVSyncSettings.h>
+#include <media/BufferingSettings.h>
 #include <media/Metadata.h>
 
 // Fwd decl to make sure everyone agrees that the scope of struct sockaddr_in is
@@ -174,6 +175,15 @@
     virtual status_t    setVideoSurfaceTexture(
                                 const sp<IGraphicBufferProducer>& bufferProducer) = 0;
 
+    virtual status_t    getDefaultBufferingSettings(
+                                BufferingSettings* buffering /* nonnull */) {
+        *buffering = BufferingSettings();
+        return OK;
+    }
+    virtual status_t    setBufferingSettings(const BufferingSettings& /* buffering */) {
+        return OK;
+    }
+
     virtual status_t    prepare() = 0;
     virtual status_t    prepareAsync() = 0;
     virtual status_t    start() = 0;
@@ -205,7 +215,8 @@
         *videoFps = -1.f;
         return OK;
     }
-    virtual status_t    seekTo(int msec) = 0;
+    virtual status_t    seekTo(
+            int msec, MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) = 0;
     virtual status_t    getCurrentPosition(int *msec) = 0;
     virtual status_t    getDuration(int *msec) = 0;
     virtual status_t    reset() = 0;
diff --git a/include/media/MediaRecorderBase.h b/include/media/MediaRecorderBase.h
index 42151ea..b9105b1 100644
--- a/include/media/MediaRecorderBase.h
+++ b/include/media/MediaRecorderBase.h
@@ -25,9 +25,8 @@
 namespace android {
 
 class ICameraRecordingProxy;
-class Surface;
-class IGraphicBufferConsumer;
 class IGraphicBufferProducer;
+struct PersistentSurface;
 
 struct MediaRecorderBase {
     MediaRecorderBase(const String16 &opPackageName)
@@ -59,7 +58,7 @@
     virtual status_t reset() = 0;
     virtual status_t getMaxAmplitude(int *max) = 0;
     virtual status_t dump(int fd, const Vector<String16>& args) const = 0;
-    virtual status_t setInputSurface(const sp<IGraphicBufferConsumer>& surface) = 0;
+    virtual status_t setInputSurface(const sp<PersistentSurface>& surface) = 0;
     virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() const = 0;
 
 
diff --git a/include/media/MidiDeviceInfo.h b/include/media/MidiDeviceInfo.h
new file mode 100644
index 0000000..5b4a241
--- /dev/null
+++ b/include/media/MidiDeviceInfo.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_MIDI_DEVICE_INFO_H
+#define ANDROID_MEDIA_MIDI_DEVICE_INFO_H
+
+#include <binder/Parcelable.h>
+#include <binder/PersistableBundle.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+
+namespace android {
+namespace media {
+namespace midi {
+
+class MidiDeviceInfo : public Parcelable {
+public:
+    MidiDeviceInfo() = default;
+    virtual ~MidiDeviceInfo() = default;
+    MidiDeviceInfo(const MidiDeviceInfo& midiDeviceInfo) = default;
+
+    status_t writeToParcel(Parcel* parcel) const override;
+    status_t readFromParcel(const Parcel* parcel) override;
+
+    int getType() const { return mType; }
+    int getUid() const { return mId; }
+    bool isPrivate() const { return mIsPrivate; }
+    const Vector<String16>& getInputPortNames() const { return mInputPortNames; }
+    const Vector<String16>&  getOutputPortNames() const { return mOutputPortNames; }
+    String16 getProperty(const char* propertyName);
+
+    // The constants need to be kept in sync with MidiDeviceInfo.java
+    enum {
+        TYPE_USB = 1,
+        TYPE_VIRTUAL = 2,
+        TYPE_BLUETOOTH = 3,
+    };
+    static const char* const PROPERTY_NAME;
+    static const char* const PROPERTY_MANUFACTURER;
+    static const char* const PROPERTY_PRODUCT;
+    static const char* const PROPERTY_VERSION;
+    static const char* const PROPERTY_SERIAL_NUMBER;
+    static const char* const PROPERTY_ALSA_CARD;
+    static const char* const PROPERTY_ALSA_DEVICE;
+
+    friend bool operator==(const MidiDeviceInfo& lhs, const MidiDeviceInfo& rhs);
+    friend bool operator!=(const MidiDeviceInfo& lhs, const MidiDeviceInfo& rhs) {
+        return !(lhs == rhs);
+    }
+
+private:
+    status_t readStringVector(
+            const Parcel* parcel, Vector<String16> *vectorPtr, size_t defaultLength);
+    status_t writeStringVector(Parcel* parcel, const Vector<String16>& vector) const;
+
+    int32_t mType;
+    int32_t mId;
+    Vector<String16> mInputPortNames;
+    Vector<String16> mOutputPortNames;
+    os::PersistableBundle mProperties;
+    bool mIsPrivate;
+};
+
+}  // namespace midi
+}  // namespace media
+}  // namespace android
+
+#endif  // ANDROID_MEDIA_MIDI_DEVICE_INFO_H
diff --git a/include/media/OMXBuffer.h b/include/media/OMXBuffer.h
new file mode 100644
index 0000000..697823f
--- /dev/null
+++ b/include/media/OMXBuffer.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _OMXBUFFER_H_
+#define _OMXBUFFER_H_
+
+#include <cutils/native_handle.h>
+#include <media/IOMX.h>
+#include <system/window.h>
+#include <utils/StrongPointer.h>
+
+namespace android {
+
+class OMXBuffer;
+
+// This is needed temporarily for the OMX HIDL transition.
+namespace hardware { namespace media { namespace omx {
+namespace V1_0 {
+    struct CodecBuffer;
+namespace implementation {
+    inline bool wrapAs(::android::hardware::media::omx::V1_0::CodecBuffer* t,
+            ::android::OMXBuffer const& l);
+    inline bool convertTo(::android::OMXBuffer* l,
+            ::android::hardware::media::omx::V1_0::CodecBuffer const& t);
+}}}}}
+
+class GraphicBuffer;
+class IMemory;
+class MediaCodecBuffer;
+class NativeHandle;
+struct OMXNodeInstance;
+
+// TODO: After complete HIDL transition, this class would be replaced by
+// CodecBuffer.
+class OMXBuffer {
+public:
+    // sPreset is used in places where we are referring to a pre-registered
+    // buffer on a port. It has type kBufferTypePreset and mRangeLength of 0.
+    static OMXBuffer sPreset;
+
+    // Default constructor, constructs a buffer of type kBufferTypeInvalid.
+    OMXBuffer();
+
+    // Constructs a buffer of type kBufferTypePreset with mRangeLength set to
+    // |codecBuffer|'s size (or 0 if |codecBuffer| is NULL).
+    OMXBuffer(const sp<MediaCodecBuffer> &codecBuffer);
+
+    // Constructs a buffer of type kBufferTypeSharedMem.
+    OMXBuffer(const sp<IMemory> &mem);
+
+    // Constructs a buffer of type kBufferTypeANWBuffer.
+    OMXBuffer(const sp<GraphicBuffer> &gbuf);
+
+    // Constructs a buffer of type kBufferTypeNativeHandle.
+    OMXBuffer(const sp<NativeHandle> &handle);
+
+    // Parcelling/Un-parcelling.
+    status_t writeToParcel(Parcel *parcel) const;
+    status_t readFromParcel(const Parcel *parcel);
+
+    ~OMXBuffer();
+
+private:
+    friend struct OMXNodeInstance;
+
+    // This is needed temporarily for OMX HIDL transition.
+    friend inline bool (::android::hardware::media::omx::V1_0::implementation::
+            wrapAs)(::android::hardware::media::omx::V1_0::CodecBuffer* t,
+            OMXBuffer const& l);
+    friend inline bool (::android::hardware::media::omx::V1_0::implementation::
+            convertTo)(OMXBuffer* l,
+            ::android::hardware::media::omx::V1_0::CodecBuffer const& t);
+
+    enum BufferType {
+        kBufferTypeInvalid = 0,
+        kBufferTypePreset,
+        kBufferTypeSharedMem,
+        kBufferTypeANWBuffer,
+        kBufferTypeNativeHandle,
+    };
+
+    BufferType mBufferType;
+
+    // kBufferTypePreset
+    // If the port is operating in byte buffer mode, mRangeLength is the valid
+    // range length. Otherwise the range info should also be ignored.
+    OMX_U32 mRangeOffset;
+    OMX_U32 mRangeLength;
+
+    // kBufferTypeSharedMem
+    sp<IMemory> mMem;
+
+    // kBufferTypeANWBuffer
+    sp<GraphicBuffer> mGraphicBuffer;
+
+    // kBufferTypeNativeHandle
+    sp<NativeHandle> mNativeHandle;
+
+    // Move assignment
+    OMXBuffer &operator=(OMXBuffer&&);
+
+    // Deleted copy constructor and assignment.
+    OMXBuffer(const OMXBuffer&) = delete;
+    OMXBuffer& operator=(const OMXBuffer&) = delete;
+};
+
+}  // namespace android
+
+#endif  // _OMXBUFFER_H_
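Tying the constructors above back to the IOMXNode calls earlier in this change, a hedged sketch of which OMXBuffer flavour goes where; 'node', 'portIndex', 'sharedMem' and 'graphicBuffer' are assumed to exist.

    // Byte-buffer (shared memory) port: register the IMemory region itself.
    IOMXNode::buffer_id id;
    node->useBuffer(portIndex, OMXBuffer(sharedMem /* sp<IMemory> */), &id);

    // Meta/preset mode: refer to the pre-registered buffer, no new payload attached.
    node->fillBuffer(id, OMXBuffer::sPreset);

    // ANW (graphic buffer) mode: hand the component a GraphicBuffer directly.
    node->emptyBuffer(id, OMXBuffer(graphicBuffer /* sp<GraphicBuffer> */),
                      0 /* flags */, 0 /* timestamp */);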
diff --git a/include/media/OMXFenceParcelable.h b/include/media/OMXFenceParcelable.h
new file mode 100644
index 0000000..f529301
--- /dev/null
+++ b/include/media/OMXFenceParcelable.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _OMX_FENCE_PARCELABLE_
+#define _OMX_FENCE_PARCELABLE_
+
+#include <binder/Parcel.h>
+
+namespace android {
+
+struct OMXFenceParcelable;
+
+// This is needed temporarily for the OMX HIDL transition.
+namespace hardware {
+    struct hidl_handle;
+namespace media { namespace omx { namespace V1_0 { namespace implementation {
+    void wrapAs(::android::OMXFenceParcelable* l,
+            ::android::hardware::hidl_handle const& t);
+    bool convertTo(::android::OMXFenceParcelable* l,
+            ::android::hardware::hidl_handle const& t);
+}}}}}
+
+struct OMXFenceParcelable : public Parcelable {
+    OMXFenceParcelable() : mFenceFd(-1) {}
+    OMXFenceParcelable(int fenceFd) : mFenceFd(fenceFd) {}
+
+    int get() const { return mFenceFd; }
+
+    status_t readFromParcel(const Parcel* parcel) override;
+    status_t writeToParcel(Parcel* parcel) const override;
+
+private:
+    // Disable copy ctor and operator=
+    OMXFenceParcelable(const OMXFenceParcelable &);
+    OMXFenceParcelable &operator=(const OMXFenceParcelable &);
+
+    int mFenceFd;
+
+    // This is needed temporarily for OMX HIDL transition.
+    friend void (::android::hardware::media::omx::V1_0::implementation::
+            wrapAs)(OMXFenceParcelable* l,
+            ::android::hardware::hidl_handle const& t);
+    friend bool (::android::hardware::media::omx::V1_0::implementation::
+            convertTo)(OMXFenceParcelable* l,
+            ::android::hardware::hidl_handle const& t);
+};
+
+inline status_t OMXFenceParcelable::readFromParcel(const Parcel* parcel) {
+    int32_t haveFence;
+    status_t err = parcel->readInt32(&haveFence);
+    if (err == OK && haveFence) {
+        int fd = ::dup(parcel->readFileDescriptor());
+        if (fd < 0) {
+            return fd;
+        }
+        mFenceFd = fd;
+    }
+    return err;
+}
+
+inline status_t OMXFenceParcelable::writeToParcel(Parcel* parcel) const {
+    status_t err = parcel->writeInt32(mFenceFd >= 0);
+    if (err == OK && mFenceFd >= 0) {
+        err = parcel->writeFileDescriptor(mFenceFd, true /* takeOwnership */);
+    }
+    return err;
+}
+
+} // namespace android
+
+#endif // _OMX_FENCE_PARCELABLE_
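For completeness, a minimal round-trip sketch of the fence parcelable defined above; the fence fd is whatever the caller happens to hold.

    #include <media/OMXFenceParcelable.h>

    using namespace android;

    void roundTripExample(int fenceFd) {
        Parcel p;
        OMXFenceParcelable out(fenceFd);
        out.writeToParcel(&p);          // writes a "have fence" flag, then the fd

        p.setDataPosition(0);
        OMXFenceParcelable in;          // starts with mFenceFd == -1
        in.readFromParcel(&p);          // dups the fd back out of the parcel
        // in.get() now holds a duplicate of fenceFd (or -1 if none was written).
    }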
diff --git a/include/media/PluginLoader.h b/include/media/PluginLoader.h
new file mode 100644
index 0000000..360af2d
--- /dev/null
+++ b/include/media/PluginLoader.h
@@ -0,0 +1,97 @@
+/**
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef PLUGIN_LOADER_H_
+#define PLUGIN_LOADER_H_
+
+#include "SharedLibrary.h"
+#include <utils/Log.h>
+#include <utils/String8.h>
+#include <utils/Vector.h>
+
+namespace android {
+
+template <class T>
+class PluginLoader {
+
+  public:
+    PluginLoader(const char *dir, const char *entry) {
+        /**
+         * scan all plugins in the plugin directory and add them to the
+         * factories list.
+         */
+        String8 pluginDir(dir);
+
+        DIR* pDir = opendir(pluginDir.string());
+        if (pDir == NULL) {
+            ALOGE("Failed to find plugin directory %s", pluginDir.string());
+        } else {
+            struct dirent* pEntry;
+            while ((pEntry = readdir(pDir))) {
+                String8 file(pEntry->d_name);
+                if (file.getPathExtension() == ".so") {
+                    String8 path = pluginDir + "/" + pEntry->d_name;
+                    T *plugin = loadOne(path, entry);
+                    if (plugin) {
+                        factories.push(plugin);
+                    }
+                }
+            }
+            closedir(pDir);
+        }
+    }
+
+    ~PluginLoader() {
+        for (size_t i = 0; i < factories.size(); i++) {
+            delete factories[i];
+        }
+    }
+
+    T *getFactory(size_t i) const {return factories[i];}
+    size_t factoryCount() const {return factories.size();}
+
+  private:
+    T* loadOne(const char *path, const char *entry) {
+        sp<SharedLibrary> library = new SharedLibrary(String8(path));
+        if (!library.get()) {
+            ALOGE("Failed to open plugin library %s: %s", path,
+                    library->lastError());
+        } else {
+            typedef T *(*CreateFactoryFunc)();
+            CreateFactoryFunc createFactoryFunc =
+                    (CreateFactoryFunc)library->lookup(entry);
+            if (createFactoryFunc) {
+                libraries.push(library);
+                return createFactoryFunc();
+            } else {
+                ALOGE("Failed to create plugin factory from %s at entry %s: %s",
+                        path, entry, library->lastError());
+            }
+        }
+        return NULL;
+    }
+
+    Vector<T *> factories;
+    Vector<sp<SharedLibrary> > libraries;
+
+    PluginLoader(const PluginLoader &) = delete;
+    void operator=(const PluginLoader &) = delete;
+};
+
+} // namespace android
+
+#endif // PLUGIN_LOADER_H_
+
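A usage sketch for the template above; the factory type, plugin directory and entry-point symbol are hypothetical stand-ins for whatever a real plugin host would use.

    // Hypothetical factory interface exported by each plugin .so.
    struct ExampleFactory {
        virtual ~ExampleFactory() {}
        virtual const char *name() const = 0;
    };

    // Scan the directory once; every .so exporting "createExampleFactory" is
    // loaded and its factory kept alive for the lifetime of the loader.
    android::PluginLoader<ExampleFactory> loader("/vendor/lib/example_plugins",
                                                 "createExampleFactory");
    for (size_t i = 0; i < loader.factoryCount(); ++i) {
        ALOGI("found plugin factory: %s", loader.getFactory(i)->name());
    }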
diff --git a/include/media/RecordBufferConverter.h b/include/media/RecordBufferConverter.h
new file mode 100644
index 0000000..2abc45e
--- /dev/null
+++ b/include/media/RecordBufferConverter.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_RECORD_BUFFER_CONVERTER_H
+#define ANDROID_RECORD_BUFFER_CONVERTER_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <media/AudioBufferProvider.h>
+#include <system/audio.h>
+
+class AudioResampler;
+class PassthruBufferProvider;
+
+namespace android {
+
+/* The RecordBufferConverter is used for format, channel, and sample rate
+ * conversion for a RecordTrack.
+ *
+ * RecordBufferConverter uses the convert() method rather than exposing a
+ * buffer provider interface; this is to save a memory copy.
+ *
+ * There are legacy conversion requirements for this converter, specifically
+ * due to mono handling, so be careful about modifying.
+ *
+ * Original source audioflinger/Threads.{h,cpp}
+ */
+class RecordBufferConverter
+{
+public:
+    RecordBufferConverter(
+            audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
+            uint32_t srcSampleRate,
+            audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
+            uint32_t dstSampleRate);
+
+    ~RecordBufferConverter();
+
+    /* Converts input data from an AudioBufferProvider by format, channelMask,
+     * and sampleRate to a destination buffer.
+     *
+     * Parameters
+     *      dst:  buffer to place the converted data.
+     * provider:  buffer provider to obtain source data.
+     *   frames:  number of frames to convert
+     *
+     * Returns the number of frames converted.
+     */
+    size_t convert(void *dst, AudioBufferProvider *provider, size_t frames);
+
+    // returns NO_ERROR if constructor was successful
+    status_t initCheck() const {
+        // mSrcChannelMask set on successful updateParameters
+        return mSrcChannelMask != AUDIO_CHANNEL_INVALID ? NO_ERROR : NO_INIT;
+    }
+
+    // allows dynamic reconfigure of all parameters
+    status_t updateParameters(
+            audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
+            uint32_t srcSampleRate,
+            audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
+            uint32_t dstSampleRate);
+
+    // called to reset resampler buffers on record track discontinuity
+    void reset();
+
+private:
+    // format conversion when not using resampler
+    void convertNoResampler(void *dst, const void *src, size_t frames);
+
+    // format conversion when using resampler; modifies src in-place
+    void convertResampler(void *dst, /*not-a-const*/ void *src, size_t frames);
+
+    // user provided information
+    audio_channel_mask_t mSrcChannelMask;
+    audio_format_t       mSrcFormat;
+    uint32_t             mSrcSampleRate;
+    audio_channel_mask_t mDstChannelMask;
+    audio_format_t       mDstFormat;
+    uint32_t             mDstSampleRate;
+
+    // derived information
+    uint32_t             mSrcChannelCount;
+    uint32_t             mDstChannelCount;
+    size_t               mDstFrameSize;
+
+    // format conversion buffer
+    void                *mBuf;
+    size_t               mBufFrames;
+    size_t               mBufFrameSize;
+
+    // resampler info
+    AudioResampler      *mResampler;
+
+    bool                 mIsLegacyDownmix;  // legacy stereo to mono conversion needed
+    bool                 mIsLegacyUpmix;    // legacy mono to stereo conversion needed
+    bool                 mRequiresFloat;    // data processing requires float (e.g. resampler)
+    PassthruBufferProvider *mInputConverterProvider;    // converts input to float
+    int8_t               mIdxAry[sizeof(uint32_t) * 8]; // used for channel mask conversion
+};
+
+// ----------------------------------------------------------------------------
+} // namespace android
+
+#endif // ANDROID_RECORD_BUFFER_CONVERTER_H
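A minimal usage sketch, not part of the change: it assumes an AudioBufferProvider that delivers mono float frames at 48 kHz and converts them to interleaved 16-bit stereo at the same rate, so no resampler is involved.

    #include <media/AudioBufferProvider.h>
    #include <media/RecordBufferConverter.h>

    using namespace android;

    size_t captureStereo16(AudioBufferProvider *provider, void *dst, size_t frames) {
        RecordBufferConverter converter(
                AUDIO_CHANNEL_IN_MONO,   AUDIO_FORMAT_PCM_FLOAT,  48000 /* srcSampleRate */,
                AUDIO_CHANNEL_IN_STEREO, AUDIO_FORMAT_PCM_16_BIT, 48000 /* dstSampleRate */);
        if (converter.initCheck() != NO_ERROR) {
            return 0;  // construction failed, e.g. unsupported parameter combination
        }
        // convert() pulls up to 'frames' frames from the provider and writes the
        // converted data into 'dst', returning the number of frames produced.
        return converter.convert(dst, provider, frames);
    }
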
diff --git a/include/media/TypeConverter.h b/include/media/TypeConverter.h
new file mode 100644
index 0000000..e262eef
--- /dev/null
+++ b/include/media/TypeConverter.h
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_TYPE_CONVERTER_H_
+#define ANDROID_TYPE_CONVERTER_H_
+
+#include <string>
+#include <string.h>
+
+#include <system/audio.h>
+#include <utils/Log.h>
+#include <utils/Vector.h>
+#include <utils/SortedVector.h>
+
+#include "convert.h"
+#include "AudioParameter.h"
+
+namespace android {
+
+struct SampleRateTraits
+{
+    typedef uint32_t Type;
+    typedef SortedVector<Type> Collection;
+};
+struct DeviceTraits
+{
+    typedef audio_devices_t Type;
+    typedef Vector<Type> Collection;
+};
+struct OutputDeviceTraits : public DeviceTraits {};
+struct InputDeviceTraits : public DeviceTraits {};
+struct OutputFlagTraits
+{
+    typedef audio_output_flags_t Type;
+    typedef Vector<Type> Collection;
+};
+struct InputFlagTraits
+{
+    typedef audio_input_flags_t Type;
+    typedef Vector<Type> Collection;
+};
+struct FormatTraits
+{
+    typedef audio_format_t Type;
+    typedef Vector<Type> Collection;
+};
+struct ChannelTraits
+{
+    typedef audio_channel_mask_t Type;
+    typedef SortedVector<Type> Collection;
+};
+struct OutputChannelTraits : public ChannelTraits {};
+struct InputChannelTraits : public ChannelTraits {};
+struct ChannelIndexTraits : public ChannelTraits {};
+struct GainModeTraits
+{
+    typedef audio_gain_mode_t Type;
+    typedef Vector<Type> Collection;
+};
+struct StreamTraits
+{
+    typedef audio_stream_type_t Type;
+    typedef Vector<Type> Collection;
+};
+struct AudioModeTraits
+{
+    typedef audio_mode_t Type;
+    typedef Vector<Type> Collection;
+};
+struct UsageTraits
+{
+    typedef audio_usage_t Type;
+    typedef Vector<Type> Collection;
+};
+struct SourceTraits
+{
+    typedef audio_source_t Type;
+    typedef Vector<Type> Collection;
+};
+template <typename T>
+struct DefaultTraits
+{
+    typedef T Type;
+    typedef Vector<Type> Collection;
+};
+
+template <class Traits>
+static void collectionFromString(const std::string &str, typename Traits::Collection &collection,
+                                 const char *del = AudioParameter::valueListSeparator)
+{
+    char *literal = strdup(str.c_str());
+    for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
+        typename Traits::Type value;
+        if (utilities::convertTo<std::string, typename Traits::Type >(cstr, value)) {
+            collection.add(value);
+        }
+    }
+    free(literal);
+}
+
+template <class Traits>
+class TypeConverter
+{
+public:
+    static bool toString(const typename Traits::Type &value, std::string &str);
+
+    static bool fromString(const std::string &str, typename Traits::Type &result);
+
+    static void collectionFromString(const std::string &str,
+                                     typename Traits::Collection &collection,
+                                     const char *del = AudioParameter::valueListSeparator);
+
+    static uint32_t maskFromString(
+            const std::string &str, const char *del = AudioParameter::valueListSeparator);
+
+    static void maskToString(
+            uint32_t mask, std::string &str, const char *del = AudioParameter::valueListSeparator);
+
+protected:
+    struct Table {
+        const char *literal;
+        typename Traits::Type value;
+    };
+
+    static const Table mTable[];
+};
+
+template <class Traits>
+inline bool TypeConverter<Traits>::toString(const typename Traits::Type &value, std::string &str)
+{
+    for (size_t i = 0; mTable[i].literal; i++) {
+        if (mTable[i].value == value) {
+            str = mTable[i].literal;
+            return true;
+        }
+    }
+    char result[64];
+    snprintf(result, sizeof(result), "Unknown enum value %d", value);
+    str = result;
+    return false;
+}
+
+template <class Traits>
+inline bool TypeConverter<Traits>::fromString(const std::string &str, typename Traits::Type &result)
+{
+    for (size_t i = 0; mTable[i].literal; i++) {
+        if (strcmp(mTable[i].literal, str.c_str()) == 0) {
+            ALOGV("stringToEnum() found %s", mTable[i].literal);
+            result = mTable[i].value;
+            return true;
+        }
+    }
+    return false;
+}
+
+template <class Traits>
+inline void TypeConverter<Traits>::collectionFromString(const std::string &str,
+        typename Traits::Collection &collection,
+        const char *del)
+{
+    char *literal = strdup(str.c_str());
+
+    for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
+        typename Traits::Type value;
+        if (fromString(cstr, value)) {
+            collection.add(value);
+        }
+    }
+    free(literal);
+}
+
+template <class Traits>
+inline uint32_t TypeConverter<Traits>::maskFromString(const std::string &str, const char *del)
+{
+    char *literal = strdup(str.c_str());
+    uint32_t value = 0;
+    for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
+        typename Traits::Type type;
+        if (fromString(cstr, type)) {
+            value |= static_cast<uint32_t>(type);
+        }
+    }
+    free(literal);
+    return value;
+}
+
+template <class Traits>
+inline void TypeConverter<Traits>::maskToString(uint32_t mask, std::string &str, const char *del)
+{
+    if (mask != 0) {
+        bool first_flag = true;
+        for (size_t i = 0; mTable[i].literal; i++) {
+            if (mTable[i].value != 0 && (mask & mTable[i].value) == mTable[i].value) {
+                if (!first_flag) str += del;
+                first_flag = false;
+                str += mTable[i].literal;
+            }
+        }
+    } else {
+        toString(static_cast<typename Traits::Type>(0), str);
+    }
+}
+
+typedef TypeConverter<OutputDeviceTraits> OutputDeviceConverter;
+typedef TypeConverter<InputDeviceTraits> InputDeviceConverter;
+typedef TypeConverter<OutputFlagTraits> OutputFlagConverter;
+typedef TypeConverter<InputFlagTraits> InputFlagConverter;
+typedef TypeConverter<FormatTraits> FormatConverter;
+typedef TypeConverter<OutputChannelTraits> OutputChannelConverter;
+typedef TypeConverter<InputChannelTraits> InputChannelConverter;
+typedef TypeConverter<ChannelIndexTraits> ChannelIndexConverter;
+typedef TypeConverter<GainModeTraits> GainModeConverter;
+typedef TypeConverter<StreamTraits> StreamTypeConverter;
+typedef TypeConverter<AudioModeTraits> AudioModeConverter;
+typedef TypeConverter<UsageTraits> UsageTypeConverter;
+typedef TypeConverter<SourceTraits> SourceTypeConverter;
+
+bool deviceFromString(const std::string& literalDevice, audio_devices_t& device);
+
+bool deviceToString(audio_devices_t device, std::string& literalDevice);
+
+SampleRateTraits::Collection samplingRatesFromString(
+        const std::string &samplingRates, const char *del = AudioParameter::valueListSeparator);
+
+FormatTraits::Collection formatsFromString(
+        const std::string &formats, const char *del = AudioParameter::valueListSeparator);
+
+audio_format_t formatFromString(
+        const std::string &literalFormat, audio_format_t defaultFormat = AUDIO_FORMAT_DEFAULT);
+
+audio_channel_mask_t channelMaskFromString(const std::string &literalChannels);
+
+ChannelTraits::Collection channelMasksFromString(
+        const std::string &channels, const char *del = AudioParameter::valueListSeparator);
+
+InputChannelTraits::Collection inputChannelMasksFromString(
+        const std::string &inChannels, const char *del = AudioParameter::valueListSeparator);
+
+OutputChannelTraits::Collection outputChannelMasksFromString(
+        const std::string &outChannels, const char *del = AudioParameter::valueListSeparator);
+
+}; // namespace android
+
+#endif  /*ANDROID_TYPE_CONVERTER_H_*/
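The conversion tables themselves are defined in the accompanying TypeConverter.cpp (not shown in this hunk); assuming that translation unit is linked in, usage looks roughly like the following sketch.

    #include <media/TypeConverter.h>

    using namespace android;

    void typeConverterExamples() {
        // Enum value <-> string literal, via the per-type converter typedefs.
        std::string s;
        FormatConverter::toString(AUDIO_FORMAT_PCM_16_BIT, s);

        audio_format_t format;
        if (FormatConverter::fromString("AUDIO_FORMAT_PCM_FLOAT", format)) {
            // 'format' now holds AUDIO_FORMAT_PCM_FLOAT.
        }

        // A separator-delimited list becomes a collection (the delimiter defaults
        // to AudioParameter::valueListSeparator).
        SampleRateTraits::Collection rates = samplingRatesFromString("44100|48000");

        // Flag-style enums can be folded into a bitmask and expanded back.
        uint32_t mask = OutputFlagConverter::maskFromString(
                "AUDIO_OUTPUT_FLAG_FAST|AUDIO_OUTPUT_FLAG_DEEP_BUFFER");
        std::string flags;
        OutputFlagConverter::maskToString(mask, flags);
    }
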
diff --git a/include/media/Visualizer.h b/include/media/Visualizer.h
index 7bb9e8b..f8f4f50 100644
--- a/include/media/Visualizer.h
+++ b/include/media/Visualizer.h
@@ -18,7 +18,7 @@
 #define ANDROID_MEDIA_VISUALIZER_H
 
 #include <media/AudioEffect.h>
-#include <audio_effects/effect_visualizer.h>
+#include <system/audio_effects/effect_visualizer.h>
 #include <utils/Thread.h>
 
 /**
diff --git a/include/media/audiohal/DeviceHalInterface.h b/include/media/audiohal/DeviceHalInterface.h
new file mode 100644
index 0000000..caf01be
--- /dev/null
+++ b/include/media/audiohal/DeviceHalInterface.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
+#define ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
+
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+
+namespace android {
+
+class StreamInHalInterface;
+class StreamOutHalInterface;
+
+class DeviceHalInterface : public RefBase
+{
+  public:
+    // Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
+    virtual status_t getSupportedDevices(uint32_t *devices) = 0;
+
+    // Check to see if the audio hardware interface has been initialized.
+    virtual status_t initCheck() = 0;
+
+    // Set the audio volume of a voice call. Range is between 0.0 and 1.0.
+    virtual status_t setVoiceVolume(float volume) = 0;
+
+    // Set the audio volume for all audio activities other than voice call.
+    virtual status_t setMasterVolume(float volume) = 0;
+
+    // Get the current master volume value for the HAL.
+    virtual status_t getMasterVolume(float *volume) = 0;
+
+    // Called when the audio mode changes.
+    virtual status_t setMode(audio_mode_t mode) = 0;
+
+    // Muting control.
+    virtual status_t setMicMute(bool state) = 0;
+    virtual status_t getMicMute(bool *state) = 0;
+    virtual status_t setMasterMute(bool state) = 0;
+    virtual status_t getMasterMute(bool *state) = 0;
+
+    // Set global audio parameters.
+    virtual status_t setParameters(const String8& kvPairs) = 0;
+
+    // Get global audio parameters.
+    virtual status_t getParameters(const String8& keys, String8 *values) = 0;
+
+    // Returns audio input buffer size according to parameters passed.
+    virtual status_t getInputBufferSize(const struct audio_config *config,
+            size_t *size) = 0;
+
+    // Creates and opens the audio hardware output stream. The stream is closed
+    // by releasing all references to the returned object.
+    virtual status_t openOutputStream(
+            audio_io_handle_t handle,
+            audio_devices_t devices,
+            audio_output_flags_t flags,
+            struct audio_config *config,
+            const char *address,
+            sp<StreamOutHalInterface> *outStream) = 0;
+
+    // Creates and opens the audio hardware input stream. The stream is closed
+    // by releasing all references to the returned object.
+    virtual status_t openInputStream(
+            audio_io_handle_t handle,
+            audio_devices_t devices,
+            struct audio_config *config,
+            audio_input_flags_t flags,
+            const char *address,
+            audio_source_t source,
+            sp<StreamInHalInterface> *inStream) = 0;
+
+    // Returns whether createAudioPatch and releaseAudioPatch operations are supported.
+    virtual status_t supportsAudioPatches(bool *supportsPatches) = 0;
+
+    // Creates an audio patch between several source and sink ports.
+    virtual status_t createAudioPatch(
+            unsigned int num_sources,
+            const struct audio_port_config *sources,
+            unsigned int num_sinks,
+            const struct audio_port_config *sinks,
+            audio_patch_handle_t *patch) = 0;
+
+    // Releases an audio patch.
+    virtual status_t releaseAudioPatch(audio_patch_handle_t patch) = 0;
+
+    // Fills the list of supported attributes for a given audio port.
+    virtual status_t getAudioPort(struct audio_port *port) = 0;
+
+    // Set audio port configuration.
+    virtual status_t setAudioPortConfig(const struct audio_port_config *config) = 0;
+
+    virtual status_t dump(int fd) = 0;
+
+  protected:
+    // Subclasses can not be constructed directly by clients.
+    DeviceHalInterface() {}
+
+    // The destructor automatically closes the device.
+    virtual ~DeviceHalInterface() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
diff --git a/include/media/audiohal/DevicesFactoryHalInterface.h b/include/media/audiohal/DevicesFactoryHalInterface.h
new file mode 100644
index 0000000..14af384
--- /dev/null
+++ b/include/media/audiohal/DevicesFactoryHalInterface.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_INTERFACE_H
+#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_INTERFACE_H
+
+#include <media/audiohal/DeviceHalInterface.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class DevicesFactoryHalInterface : public RefBase
+{
+  public:
+    // Opens a device with the specified name. To close the device, it is
+    // necessary to release references to the returned object.
+    virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device) = 0;
+
+    static sp<DevicesFactoryHalInterface> create();
+
+  protected:
+    // Subclasses can not be constructed directly by clients.
+    DevicesFactoryHalInterface() {}
+
+    virtual ~DevicesFactoryHalInterface() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_INTERFACE_H
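A hedged sketch of how AudioFlinger-style code might use the factory together with DeviceHalInterface above; the "primary" module name and the stream parameters are illustrative assumptions.

    #include <media/audiohal/DevicesFactoryHalInterface.h>
    #include <media/audiohal/StreamHalInterface.h>

    using namespace android;

    sp<StreamOutHalInterface> openPrimaryOutput() {
        sp<DevicesFactoryHalInterface> factory = DevicesFactoryHalInterface::create();
        sp<DeviceHalInterface> device;
        if (factory == nullptr ||
                factory->openDevice("primary", &device) != OK ||
                device->initCheck() != OK) {
            return nullptr;
        }

        audio_config_t config = AUDIO_CONFIG_INITIALIZER;
        config.sample_rate = 48000;
        config.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
        config.format = AUDIO_FORMAT_PCM_16_BIT;

        sp<StreamOutHalInterface> stream;
        status_t status = device->openOutputStream(
                AUDIO_IO_HANDLE_NONE /* handle, normally allocated by AudioFlinger */,
                AUDIO_DEVICE_OUT_SPEAKER,
                AUDIO_OUTPUT_FLAG_PRIMARY,
                &config,
                "" /* address */,
                &stream);
        if (status != OK) return nullptr;
        return stream;
    }
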
diff --git a/include/media/audiohal/EffectBufferHalInterface.h b/include/media/audiohal/EffectBufferHalInterface.h
new file mode 100644
index 0000000..102ec56
--- /dev/null
+++ b/include/media/audiohal/EffectBufferHalInterface.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECT_BUFFER_HAL_INTERFACE_H
+#define ANDROID_HARDWARE_EFFECT_BUFFER_HAL_INTERFACE_H
+
+#include <system/audio_effect.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+// Abstraction for an audio buffer. It may be a "mirror" for
+// a buffer that the effect chain doesn't own, or a buffer owned by
+// the effect chain.
+class EffectBufferHalInterface : public RefBase
+{
+  public:
+    virtual audio_buffer_t* audioBuffer() = 0;
+    virtual void* externalData() const = 0;
+    // To be used when interacting with the code that doesn't know about
+    // "mirrored" buffers.
+    virtual void* ptr() {
+        return externalData() != nullptr ? externalData() : audioBuffer()->raw;
+    }
+
+    virtual void setExternalData(void* external) = 0;
+    virtual void setFrameCount(size_t frameCount) = 0;
+
+    virtual void update() = 0;  // copies data from the external buffer, noop for allocated buffers
+    virtual void commit() = 0;  // copies data to the external buffer, noop for allocated buffers
+
+    static status_t allocate(size_t size, sp<EffectBufferHalInterface>* buffer);
+    static status_t mirror(void* external, size_t size, sp<EffectBufferHalInterface>* buffer);
+
+  protected:
+    // Subclasses can not be constructed directly by clients.
+    EffectBufferHalInterface() {}
+
+    virtual ~EffectBufferHalInterface() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECT_BUFFER_HAL_INTERFACE_H
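A short sketch of the intended flow for a "mirrored" buffer: update() pulls the caller's data into the HAL-visible storage before processing, commit() pushes results back out afterwards. The external buffer and the processing step in between are assumptions for the example.

    #include <media/audiohal/EffectBufferHalInterface.h>

    using namespace android;

    status_t mirrorExample(void *external, size_t size) {
        sp<EffectBufferHalInterface> buffer;
        status_t status = EffectBufferHalInterface::mirror(external, size, &buffer);
        if (status != OK) return status;

        // Copy the caller's data into the HAL-visible storage (a no-op for
        // buffers obtained via allocate()).
        buffer->update();

        // ... an effect configured with this buffer would process() here ...

        // Copy the processed data back into the external buffer.
        buffer->commit();
        return OK;
    }
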
diff --git a/include/media/audiohal/EffectHalInterface.h b/include/media/audiohal/EffectHalInterface.h
new file mode 100644
index 0000000..7f9a6fd
--- /dev/null
+++ b/include/media/audiohal/EffectHalInterface.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECT_HAL_INTERFACE_H
+#define ANDROID_HARDWARE_EFFECT_HAL_INTERFACE_H
+
+#include <media/audiohal/EffectBufferHalInterface.h>
+#include <system/audio_effect.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class EffectHalInterface : public RefBase
+{
+  public:
+    // Set the input buffer.
+    virtual status_t setInBuffer(const sp<EffectBufferHalInterface>& buffer) = 0;
+
+    // Set the output buffer.
+    virtual status_t setOutBuffer(const sp<EffectBufferHalInterface>& buffer) = 0;
+
+    // Effect process function. Takes input samples as specified
+    // in input buffer descriptor and output processed samples as specified
+    // in output buffer descriptor.
+    virtual status_t process() = 0;
+
+    // Process reverse stream function. This function is used to pass
+    // a reference stream to the effect engine.
+    virtual status_t processReverse() = 0;
+
+    // Send a command and receive a response to/from effect engine.
+    virtual status_t command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
+            uint32_t *replySize, void *pReplyData) = 0;
+
+    // Returns the effect descriptor.
+    virtual status_t getDescriptor(effect_descriptor_t *pDescriptor) = 0;
+
+    // Free resources on the remote side.
+    virtual status_t close() = 0;
+
+  protected:
+    // Subclasses can not be constructed directly by clients.
+    EffectHalInterface() {}
+
+    // The destructor automatically releases the effect.
+    virtual ~EffectHalInterface() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECT_HAL_INTERFACE_H
diff --git a/include/media/audiohal/EffectsFactoryHalInterface.h b/include/media/audiohal/EffectsFactoryHalInterface.h
new file mode 100644
index 0000000..a616e86
--- /dev/null
+++ b/include/media/audiohal/EffectsFactoryHalInterface.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_INTERFACE_H
+#define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_INTERFACE_H
+
+#include <media/audiohal/EffectHalInterface.h>
+#include <system/audio_effect.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class EffectsFactoryHalInterface : public RefBase
+{
+  public:
+    // Returns the number of different effects in all loaded libraries.
+    virtual status_t queryNumberEffects(uint32_t *pNumEffects) = 0;
+
+    // Returns a descriptor of the next available effect.
+    virtual status_t getDescriptor(uint32_t index,
+            effect_descriptor_t *pDescriptor) = 0;
+
+    virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
+            effect_descriptor_t *pDescriptor) = 0;
+
+    // Creates an effect engine of the specified type.
+    // To release the effect engine, it is necessary to release references
+    // to the returned effect object.
+    virtual status_t createEffect(const effect_uuid_t *pEffectUuid,
+            int32_t sessionId, int32_t ioId,
+            sp<EffectHalInterface> *effect) = 0;
+
+    virtual status_t dumpEffects(int fd) = 0;
+
+    static sp<EffectsFactoryHalInterface> create();
+
+    // Helper function to compare effect uuid to EFFECT_UUID_NULL.
+    static bool isNullUuid(const effect_uuid_t *pEffectUuid);
+
+  protected:
+    // Subclasses can not be constructed directly by clients.
+    EffectsFactoryHalInterface() {}
+
+    virtual ~EffectsFactoryHalInterface() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_INTERFACE_H
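Putting the factory and effect interfaces together, a hedged sketch of creating an engine and running a single processing pass; the session/io identifiers and buffer handling are illustrative.

    #include <media/audiohal/EffectBufferHalInterface.h>
    #include <media/audiohal/EffectsFactoryHalInterface.h>

    using namespace android;

    status_t runOneEffectPass(const effect_uuid_t *uuid, size_t bufferSize) {
        sp<EffectsFactoryHalInterface> factory = EffectsFactoryHalInterface::create();
        if (factory == nullptr) return NO_INIT;

        sp<EffectHalInterface> effect;
        status_t status = factory->createEffect(
                uuid, 0 /* sessionId */, 0 /* ioId */, &effect);
        if (status != OK) return status;

        // Allocate HAL-side input/output buffers and attach them to the engine.
        sp<EffectBufferHalInterface> in, out;
        if ((status = EffectBufferHalInterface::allocate(bufferSize, &in)) != OK) return status;
        if ((status = EffectBufferHalInterface::allocate(bufferSize, &out)) != OK) return status;
        effect->setInBuffer(in);
        effect->setOutBuffer(out);

        // ... fill in->ptr() with input samples ...
        status = effect->process();  // one pass over the configured buffers

        effect->close();             // frees resources on the remote side
        return status;
    }
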
diff --git a/include/media/audiohal/StreamHalInterface.h b/include/media/audiohal/StreamHalInterface.h
new file mode 100644
index 0000000..7419c34
--- /dev/null
+++ b/include/media/audiohal/StreamHalInterface.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_STREAM_HAL_INTERFACE_H
+#define ANDROID_HARDWARE_STREAM_HAL_INTERFACE_H
+
+#include <media/audiohal/EffectHalInterface.h>
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+
+namespace android {
+
+class StreamHalInterface : public virtual RefBase
+{
+  public:
+    // Return the sampling rate in Hz - eg. 44100.
+    virtual status_t getSampleRate(uint32_t *rate) = 0;
+
+    // Return size of input/output buffer in bytes for this stream - eg. 4800.
+    virtual status_t getBufferSize(size_t *size) = 0;
+
+    // Return the channel mask.
+    virtual status_t getChannelMask(audio_channel_mask_t *mask) = 0;
+
+    // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
+    virtual status_t getFormat(audio_format_t *format) = 0;
+
+    // Convenience method.
+    virtual status_t getAudioProperties(
+            uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) = 0;
+
+    // Set audio stream parameters.
+    virtual status_t setParameters(const String8& kvPairs) = 0;
+
+    // Get audio stream parameters.
+    virtual status_t getParameters(const String8& keys, String8 *values) = 0;
+
+    // Return the frame size (number of bytes per sample) of a stream.
+    virtual status_t getFrameSize(size_t *size) = 0;
+
+    // Add or remove the effect on the stream.
+    virtual status_t addEffect(sp<EffectHalInterface> effect) = 0;
+    virtual status_t removeEffect(sp<EffectHalInterface> effect) = 0;
+
+    // Put the audio hardware input/output into standby mode.
+    virtual status_t standby() = 0;
+
+    virtual status_t dump(int fd) = 0;
+
+    // Start a stream operating in mmap mode.
+    virtual status_t start() = 0;
+
+    // Stop a stream operating in mmap mode.
+    virtual status_t stop() = 0;
+
+    // Retrieve information on the data buffer in mmap mode.
+    virtual status_t createMmapBuffer(int32_t minSizeFrames,
+                                      struct audio_mmap_buffer_info *info) = 0;
+
+    // Get current read/write position in the mmap buffer
+    virtual status_t getMmapPosition(struct audio_mmap_position *position) = 0;
+
+    // Set the priority of the thread that interacts with the HAL
+    // (must match the priority of the audioflinger's thread that calls 'read' / 'write')
+    virtual status_t setHalThreadPriority(int priority) = 0;
+
+  protected:
+    // Subclasses can not be constructed directly by clients.
+    StreamHalInterface() {}
+
+    // The destructor automatically closes the stream.
+    virtual ~StreamHalInterface() {}
+};
+
+class StreamOutHalInterfaceCallback : public virtual RefBase {
+  public:
+    virtual void onWriteReady() {}
+    virtual void onDrainReady() {}
+    virtual void onError() {}
+
+  protected:
+    StreamOutHalInterfaceCallback() {}
+    virtual ~StreamOutHalInterfaceCallback() {}
+};
+
+class StreamOutHalInterface : public virtual StreamHalInterface {
+  public:
+    // Return the audio hardware driver estimated latency in milliseconds.
+    virtual status_t getLatency(uint32_t *latency) = 0;
+
+    // Use this method in situations where audio mixing is done in the hardware.
+    virtual status_t setVolume(float left, float right) = 0;
+
+    // Write audio buffer to driver.
+    virtual status_t write(const void *buffer, size_t bytes, size_t *written) = 0;
+
+    // Return the number of audio frames written by the audio dsp to DAC since
+    // the output has exited standby.
+    virtual status_t getRenderPosition(uint32_t *dspFrames) = 0;
+
+    // Get the local time at which the next write to the audio driver will be presented.
+    virtual status_t getNextWriteTimestamp(int64_t *timestamp) = 0;
+
+    // Set the callback for notifying completion of non-blocking write and drain.
+    // The callback must be owned by someone else. The output stream does not own it
+    // to avoid strong pointer loops.
+    virtual status_t setCallback(wp<StreamOutHalInterfaceCallback> callback) = 0;
+
+    // Returns whether pause and resume operations are supported.
+    virtual status_t supportsPauseAndResume(bool *supportsPause, bool *supportsResume) = 0;
+
+    // Notifies to the audio driver to pause playback; it can be resumed later with resume().
+    virtual status_t pause() = 0;
+
+    // Notifies to the audio driver to resume playback following a pause.
+    virtual status_t resume() = 0;
+
+    // Returns whether drain operation is supported.
+    virtual status_t supportsDrain(bool *supportsDrain) = 0;
+
+    // Requests notification when data buffered by the driver/hardware has been played.
+    virtual status_t drain(bool earlyNotify) = 0;
+
+    // Notifies to the audio driver to flush the queued data.
+    virtual status_t flush() = 0;
+
+    // Return a recent count of the number of audio frames presented to an external observer.
+    virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp) = 0;
+
+  protected:
+    virtual ~StreamOutHalInterface() {}
+};
+
+class StreamInHalInterface : public virtual StreamHalInterface {
+  public:
+    // Set the input gain for the audio driver.
+    virtual status_t setGain(float gain) = 0;
+
+    // Read audio buffer in from driver.
+    virtual status_t read(void *buffer, size_t bytes, size_t *read) = 0;
+
+    // Return the amount of input frames lost in the audio driver.
+    virtual status_t getInputFramesLost(uint32_t *framesLost) = 0;
+
+    // Return a recent count of the number of audio frames received and
+    // the clock time associated with that frame count.
+    virtual status_t getCapturePosition(int64_t *frames, int64_t *time) = 0;
+
+  protected:
+    virtual ~StreamInHalInterface() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_STREAM_HAL_INTERFACE_H
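A minimal write-loop sketch against StreamOutHalInterface; the stream is assumed to have been opened through DeviceHalInterface::openOutputStream as in the earlier sketch.

    #include <media/audiohal/StreamHalInterface.h>

    using namespace android;

    // Writes a whole buffer, retrying on partial writes; returns the first error.
    status_t writeFully(const sp<StreamOutHalInterface> &stream,
            const uint8_t *data, size_t bytes) {
        while (bytes > 0) {
            size_t written = 0;
            status_t status = stream->write(data, bytes, &written);
            if (status != OK) return status;
            if (written == 0) break;  // avoid spinning if the HAL made no progress
            data += written;
            bytes -= written;
        }
        // Optionally query how far presentation has progressed.
        uint64_t frames;
        struct timespec ts;
        (void)stream->getPresentationPosition(&frames, &ts);
        return OK;
    }
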
diff --git a/include/media/audiohal/hidl/HalDeathHandler.h b/include/media/audiohal/hidl/HalDeathHandler.h
new file mode 100644
index 0000000..c9b7084
--- /dev/null
+++ b/include/media/audiohal/hidl/HalDeathHandler.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_HIDL_HAL_DEATH_HANDLER_H
+#define ANDROID_HARDWARE_HIDL_HAL_DEATH_HANDLER_H
+
+#include <functional>
+#include <mutex>
+#include <unordered_map>
+
+#include <hidl/HidlSupport.h>
+#include <utils/Singleton.h>
+
+using android::hardware::hidl_death_recipient;
+using android::hidl::base::V1_0::IBase;
+
+namespace android {
+
+class HalDeathHandler : public hidl_death_recipient, private Singleton<HalDeathHandler> {
+  public:
+    typedef std::function<void()> AtExitHandler;
+
+    // Note that the exit handler gets called on a thread from the
+    // RPC thread pool, so it needs to be thread-safe.
+    void registerAtExitHandler(void* cookie, AtExitHandler handler);
+    void unregisterAtExitHandler(void* cookie);
+
+    // hidl_death_recipient
+    virtual void serviceDied(uint64_t cookie, const wp<IBase>& who);
+
+    // Used both for (un)registering handlers, and for passing to
+    // '(un)linkToDeath'.
+    static sp<HalDeathHandler> getInstance();
+
+  private:
+    friend class Singleton<HalDeathHandler>;
+    typedef std::unordered_map<void*, AtExitHandler> Handlers;
+
+    HalDeathHandler();
+    virtual ~HalDeathHandler();
+
+    sp<HalDeathHandler> mSelf;  // Allows the singleton instance to live forever.
+    std::mutex mHandlersLock;
+    Handlers mHandlers;
+};
+
+}  // namespace android
+
+#endif // ANDROID_HARDWARE_HIDL_HAL_DEATH_HANDLER_H
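A sketch of how a HAL client object might hook into this handler; the client class and its HIDL proxy member are assumptions, and linkToDeath() is the standard HIDL IBase method.

    #include <media/audiohal/hidl/HalDeathHandler.h>

    using namespace android;

    class MyHalClient {
      public:
        explicit MyHalClient(const sp<IBase> &hal) : mHal(hal) {
            // Register a per-client cleanup callback keyed by 'this', then ask the
            // HIDL runtime to deliver death notifications to the shared handler.
            HalDeathHandler::getInstance()->registerAtExitHandler(
                    this, [this]() { onHalDied(); });
            mHal->linkToDeath(HalDeathHandler::getInstance(), 0 /* cookie */);
        }

        ~MyHalClient() {
            HalDeathHandler::getInstance()->unregisterAtExitHandler(this);
        }

      private:
        void onHalDied() {
            // Runs on an RPC-threadpool thread, so it must be thread-safe.
        }

        sp<IBase> mHal;
    };
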
diff --git a/services/audiopolicy/utilities/convert/convert.h b/include/media/convert.h
similarity index 100%
rename from services/audiopolicy/utilities/convert/convert.h
rename to include/media/convert.h
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index 389ec01..9130159 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -219,6 +219,8 @@
             status_t        setVideoSurfaceTexture(
                                     const sp<IGraphicBufferProducer>& bufferProducer);
             status_t        setListener(const sp<MediaPlayerListener>& listener);
+            status_t        getDefaultBufferingSettings(BufferingSettings* buffering /* nonnull */);
+            status_t        setBufferingSettings(const BufferingSettings& buffering);
             status_t        prepare();
             status_t        prepareAsync();
             status_t        start();
@@ -233,7 +235,9 @@
                                     float* videoFps /* nonnull */);
             status_t        getVideoWidth(int *w);
             status_t        getVideoHeight(int *h);
-            status_t        seekTo(int msec);
+            status_t        seekTo(
+                    int msec,
+                    MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC);
             status_t        getCurrentPosition(int *msec);
             status_t        getDuration(int *msec);
             status_t        reset();
@@ -257,7 +261,7 @@
 
 private:
             void            clear_l();
-            status_t        seekTo_l(int msec);
+            status_t        seekTo_l(int msec, MediaPlayerSeekMode mode);
             status_t        prepareAsync_l();
             status_t        getDuration_l(int *msec);
             status_t        attachNewPlayer(const sp<IMediaPlayer>& player);
@@ -274,7 +278,9 @@
     void*                       mCookie;
     media_player_states         mCurrentState;
     int                         mCurrentPosition;
+    MediaPlayerSeekMode         mCurrentSeekMode;
     int                         mSeekPosition;
+    MediaPlayerSeekMode         mSeekMode;
     bool                        mPrepareSync;
     status_t                    mPrepareStatus;
     audio_stream_type_t         mStreamType;
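The seekTo() change above makes the seek mode an explicit, optional argument; a caller-side sketch (MediaPlayerSeekMode enumerators other than SEEK_PREVIOUS_SYNC are assumed from the media headers and are not part of this hunk):

    #include <media/mediaplayer.h>

    using namespace android;

    void seekExamples(const sp<MediaPlayer> &player) {
        // Default behaviour is unchanged: seek to the previous sync frame.
        player->seekTo(30000 /* msec */);

        // Explicitly request a mode, e.g. the closest (possibly non-sync) frame.
        player->seekTo(30000, MediaPlayerSeekMode::SEEK_CLOSEST);
    }
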
diff --git a/include/media/nbaio/AudioStreamInSource.h b/include/media/nbaio/AudioStreamInSource.h
index a6e7992..508e0fe 100644
--- a/include/media/nbaio/AudioStreamInSource.h
+++ b/include/media/nbaio/AudioStreamInSource.h
@@ -17,16 +17,17 @@
 #ifndef ANDROID_AUDIO_STREAM_IN_SOURCE_H
 #define ANDROID_AUDIO_STREAM_IN_SOURCE_H
 
-#include <hardware/audio.h>
 #include "NBAIO.h"
 
 namespace android {
 
+class StreamInHalInterface;
+
 // not multi-thread safe
 class AudioStreamInSource : public NBAIO_Source {
 
 public:
-    AudioStreamInSource(audio_stream_in *stream);
+    AudioStreamInSource(sp<StreamInHalInterface> stream);
     virtual ~AudioStreamInSource();
 
     // NBAIO_Port interface
@@ -50,11 +51,11 @@
     // NBAIO_Sink end
 
 #if 0   // until necessary
-    audio_stream_in *stream() const { return mStream; }
+    sp<StreamInHalInterface> stream() const { return mStream; }
 #endif
 
 private:
-    audio_stream_in * const mStream;
+    sp<StreamInHalInterface> mStream;
     size_t              mStreamBufferSizeBytes; // as reported by get_buffer_size()
     int64_t             mFramesOverrun;
     int64_t             mOverruns;
diff --git a/include/media/nbaio/AudioStreamOutSink.h b/include/media/nbaio/AudioStreamOutSink.h
index e86b018..56a2a38 100644
--- a/include/media/nbaio/AudioStreamOutSink.h
+++ b/include/media/nbaio/AudioStreamOutSink.h
@@ -17,16 +17,17 @@
 #ifndef ANDROID_AUDIO_STREAM_OUT_SINK_H
 #define ANDROID_AUDIO_STREAM_OUT_SINK_H
 
-#include <hardware/audio.h>
 #include "NBAIO.h"
 
 namespace android {
 
+class StreamOutHalInterface;
+
 // not multi-thread safe
 class AudioStreamOutSink : public NBAIO_Sink {
 
 public:
-    AudioStreamOutSink(audio_stream_out *stream);
+    AudioStreamOutSink(sp<StreamOutHalInterface> stream);
     virtual ~AudioStreamOutSink();
 
     // NBAIO_Port interface
@@ -43,7 +44,7 @@
 
     // This is an over-estimate, and could dupe the caller into making a blocking write()
     // FIXME Use an audio HAL API to query the buffer emptying status when it's available.
-    virtual ssize_t availableToWrite() const { return mStreamBufferSizeBytes / mFrameSize; }
+    virtual ssize_t availableToWrite() { return mStreamBufferSizeBytes / mFrameSize; }
 
     virtual ssize_t write(const void *buffer, size_t count);
 
@@ -52,11 +53,11 @@
     // NBAIO_Sink end
 
 #if 0   // until necessary
-    audio_stream_out *stream() const { return mStream; }
+    sp<StreamOutHalInterface> stream() const { return mStream; }
 #endif
 
 private:
-    audio_stream_out * const mStream;
+    sp<StreamOutHalInterface> mStream;
     size_t              mStreamBufferSizeBytes; // as reported by get_buffer_size()
 };
 
diff --git a/include/media/nbaio/LibsndfileSink.h b/include/media/nbaio/LibsndfileSink.h
index f5d53d5..97a57e0 100644
--- a/include/media/nbaio/LibsndfileSink.h
+++ b/include/media/nbaio/LibsndfileSink.h
@@ -41,7 +41,7 @@
     //virtual size_t framesWritten() const;
     //virtual size_t framesUnderrun() const;
     //virtual size_t underruns() const;
-    //virtual ssize_t availableToWrite() const;
+    //virtual ssize_t availableToWrite();
     virtual ssize_t write(const void *buffer, size_t count);
     //virtual ssize_t writeVia(writeVia_t via, size_t total, void *user, size_t block);
 
diff --git a/include/media/nbaio/MonoPipe.h b/include/media/nbaio/MonoPipe.h
index d2cd218..60ae92e 100644
--- a/include/media/nbaio/MonoPipe.h
+++ b/include/media/nbaio/MonoPipe.h
@@ -18,8 +18,9 @@
 #define ANDROID_AUDIO_MONO_PIPE_H
 
 #include <time.h>
-#include "NBAIO.h"
+#include <audio_utils/fifo.h>
 #include <media/SingleStateQueue.h>
+#include "NBAIO.h"
 
 namespace android {
 
@@ -55,7 +56,10 @@
     //virtual int64_t framesUnderrun() const;
     //virtual int64_t underruns() const;
 
-    virtual ssize_t availableToWrite() const;
+    // returns n where 0 <= n <= mMaxFrames, or a negative status_t
+    // including the private status codes in NBAIO.h
+    virtual ssize_t availableToWrite();
+
     virtual ssize_t write(const void *buffer, size_t count);
     //virtual ssize_t writeVia(writeVia_t via, size_t total, void *user, size_t block);
 
@@ -80,16 +84,10 @@
             status_t getTimestamp(ExtendedTimestamp &timestamp);
 
 private:
-    const size_t    mReqFrames;     // as requested in constructor, unrounded
-    const size_t    mMaxFrames;     // always a power of 2
+    const size_t    mMaxFrames;     // as requested in constructor, rounded up to a power of 2
     void * const    mBuffer;
-    // mFront and mRear will never be separated by more than mMaxFrames.
-    // 32-bit overflow is possible if the pipe is active for a long time, but if that happens it's
-    // safe because we "&" with (mMaxFrames-1) at end of computations to calculate a buffer index.
-    volatile int32_t mFront;        // written by reader with android_atomic_release_store,
-                                    // read by writer with android_atomic_acquire_load
-    volatile int32_t mRear;         // written by writer with android_atomic_release_store,
-                                    // read by reader with android_atomic_acquire_load
+    audio_utils_fifo        mFifo;
+    audio_utils_fifo_writer mFifoWriter;
     bool            mWriteTsValid;  // whether mWriteTs is valid
     struct timespec mWriteTs;       // time that the previous write() completed
     size_t          mSetpoint;      // target value for pipe fill depth
diff --git a/include/media/nbaio/MonoPipeReader.h b/include/media/nbaio/MonoPipeReader.h
index b3c891d..0776ecd 100644
--- a/include/media/nbaio/MonoPipeReader.h
+++ b/include/media/nbaio/MonoPipeReader.h
@@ -27,7 +27,7 @@
 public:
 
     // Construct a MonoPipeReader and associate it with a MonoPipe;
-    // any data already in the pipe is visible to this PipeReader.
+    // any data already in the pipe is visible to this MonoPipeReader.
     // There can be only a single MonoPipeReader per MonoPipe.
     // FIXME make this constructor a factory method of MonoPipe.
     MonoPipeReader(MonoPipe* pipe);
@@ -59,6 +59,7 @@
 
 private:
     MonoPipe * const mPipe;
+    audio_utils_fifo_reader mFifoReader;
 };
 
 }   // namespace android
diff --git a/include/media/nbaio/NBAIO.h b/include/media/nbaio/NBAIO.h
index 212f8e8..f8ec38b 100644
--- a/include/media/nbaio/NBAIO.h
+++ b/include/media/nbaio/NBAIO.h
@@ -35,13 +35,16 @@
 
 // In addition to the usual status_t
 enum {
-    NEGOTIATE    = 0x80000010,  // Must (re-)negotiate format.  For negotiate() only, the offeree
-                                // doesn't accept offers, and proposes counter-offers
-    OVERRUN      = 0x80000011,  // availableToRead(), read(), or readVia() detected lost input due
-                                // to overrun; an event is counted and the caller should re-try
-    UNDERRUN     = 0x80000012,  // availableToWrite(), write(), or writeVia() detected a gap in
-                                // output due to underrun (not being called often enough, or with
-                                // enough data); an event is counted and the caller should re-try
+    NEGOTIATE    = (UNKNOWN_ERROR + 0x100),  // Must (re-)negotiate format.  For negotiate() only,
+                                             // the offeree doesn't accept offers, and proposes
+                                             // counter-offers
+    OVERRUN      = (UNKNOWN_ERROR + 0x101),  // availableToRead(), read(), or readVia() detected
+                                             // lost input due to overrun; an event is counted and
+                                             // the caller should re-try
+    UNDERRUN     = (UNKNOWN_ERROR + 0x102),  // availableToWrite(), write(), or writeVia() detected
+                                             // a gap in output due to underrun (not being called
+                                             // often enough, or with enough data); an event is
+                                             // counted and the caller should re-try
 };
 
 // Negotiation of format is based on the data provider and data sink, or the data consumer and
@@ -161,7 +164,12 @@
     //  UNDERRUN    write() has not been called frequently enough, or with enough frames to keep up.
     //              An underrun event is counted, and the caller should re-try this operation.
     //  WOULD_BLOCK Determining how many frames can be written without blocking would itself block.
-    virtual ssize_t availableToWrite() const { return SSIZE_MAX; }
+    virtual ssize_t availableToWrite() {
+        if (!mNegotiated) {
+            return NEGOTIATE;
+        }
+        return SSIZE_MAX;
+    }
 
     // Transfer data to sink from single input buffer.  Implies a copy.
     // Inputs:
@@ -266,6 +274,17 @@
     //              One or more frames were lost due to overrun, try again to read more recent data.
     virtual ssize_t read(void *buffer, size_t count) = 0;
 
+    // Flush data from buffer.  There is no notion of overrun as all data is dropped.
+    // Flushed frames also count towards frames read.
+    //
+    // Return value:
+    //  >= 0    Number of frames successfully flushed
+    //  < 0     status_t error occurred
+    // Errors:
+    //  NEGOTIATE         (Re-)negotiation is needed.
+    //  INVALID_OPERATION Not implemented
+    virtual ssize_t flush() { return INVALID_OPERATION; }
+
     // Transfer data from source using a series of callbacks.  More suitable for zero-fill,
     // synthesis, and non-contiguous transfers (e.g. circular buffer or readv).
     // Inputs:
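The new NBAIO_Source::flush() defaults to INVALID_OPERATION, so callers should treat flushing as optional; a small caller-side sketch under that assumption:

    #include <media/nbaio/NBAIO.h>

    // Drops whatever is currently buffered in 'source' if the concrete source
    // (e.g. PipeReader) implements flush(); harmless for sources that don't.
    static void maybeFlush(android::NBAIO_Source *source) {
        ssize_t result = source->flush();
        if (result >= 0) {
            // 'result' frames were discarded; they still count as frames read.
        } else if (result == android::INVALID_OPERATION) {
            // This source does not implement flush(); nothing to do.
        } else {
            // NEGOTIATE or another error: handle as for read().
        }
    }
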
diff --git a/include/media/nbaio/NBLog.h b/include/media/nbaio/NBLog.h
index 1297b51..acf2d31 100644
--- a/include/media/nbaio/NBLog.h
+++ b/include/media/nbaio/NBLog.h
@@ -21,7 +21,7 @@
 
 #include <binder/IMemory.h>
 #include <utils/Mutex.h>
-#include <audio_utils/roundup.h>
+#include <audio_utils/fifo.h>
 
 namespace android {
 
@@ -55,8 +55,11 @@
 private:
     friend class Writer;
     Event       mEvent;     // event type
-    size_t      mLength;    // length of additional data, 0 <= mLength <= 255
+    uint8_t     mLength;    // length of additional data, 0 <= mLength <= kMaxLength
     const void *mData;      // event type-specific data
+    static const size_t kMaxLength = 255;
+public:
+    static const size_t kOverhead = 3;  // mEvent, mLength, mData[...], duplicate mLength
 };
 
 // representation of a single log entry in shared memory
@@ -70,13 +73,17 @@
 //  byte[2+mLength]     duplicate copy of mLength to permit reverse scan
 //  byte[3+mLength]     start of next log entry
 
-// located in shared memory
+public:
+
+// Located in shared memory, must be POD.
+// Exactly one process must explicitly call the constructor or use placement new.
+// Since this is a POD, the destructor is empty and need not be called explicitly.
 struct Shared {
-    Shared() : mRear(0) { }
+    Shared() /* mRear initialized via default constructor */ { }
     /*virtual*/ ~Shared() { }
 
-    volatile int32_t mRear;     // index one byte past the end of most recent Entry
-    char    mBuffer[0];         // circular buffer for entries
+    audio_utils_fifo_index  mRear;  // index one byte past the end of most recent Entry
+    char    mBuffer[0];             // circular buffer for entries
 };
 
 public:
@@ -117,10 +124,10 @@
 
     // Input parameter 'size' is the desired size of the timeline in byte units.
     // The size of the shared memory must be at least Timeline::sharedSize(size).
-    Writer(size_t size, void *shared);
-    Writer(size_t size, const sp<IMemory>& iMemory);
+    Writer(void *shared, size_t size);
+    Writer(const sp<IMemory>& iMemory, size_t size);
 
-    virtual ~Writer() { }
+    virtual ~Writer();
 
     virtual void    log(const char *string);
     virtual void    logf(const char *fmt, ...) __attribute__ ((format (printf, 2, 3)));
@@ -138,13 +145,16 @@
     sp<IMemory>     getIMemory() const  { return mIMemory; }
 
 private:
+    // 0 <= length <= kMaxLength
     void    log(Event event, const void *data, size_t length);
     void    log(const Entry *entry, bool trusted = false);
 
-    const size_t    mSize;      // circular buffer size in bytes, must be a power of 2
     Shared* const   mShared;    // raw pointer to shared memory
-    const sp<IMemory> mIMemory; // ref-counted version
-    int32_t         mRear;      // my private copy of mShared->mRear
+    sp<IMemory>     mIMemory;   // ref-counted version, initialized in constructor and then const
+    audio_utils_fifo * const mFifo;                 // FIFO itself,
+                                                    // non-NULL unless constructor fails
+    audio_utils_fifo_writer * const mFifoWriter;    // used to write to FIFO,
+                                                    // non-NULL unless dummy constructor used
     bool            mEnabled;   // whether to actually log
 };
 
@@ -154,7 +164,7 @@
 class LockedWriter : public Writer {
 public:
     LockedWriter();
-    LockedWriter(size_t size, void *shared);
+    LockedWriter(void *shared, size_t size);
 
     virtual void    log(const char *string);
     virtual void    logf(const char *fmt, ...) __attribute__ ((format (printf, 2, 3)));
@@ -176,21 +186,24 @@
 
     // Input parameter 'size' is the desired size of the timeline in byte units.
     // The size of the shared memory must be at least Timeline::sharedSize(size).
-    Reader(size_t size, const void *shared);
-    Reader(size_t size, const sp<IMemory>& iMemory);
+    Reader(const void *shared, size_t size);
+    Reader(const sp<IMemory>& iMemory, size_t size);
 
-    virtual ~Reader() { }
+    virtual ~Reader();
 
     void    dump(int fd, size_t indent = 0);
     bool    isIMemory(const sp<IMemory>& iMemory) const;
 
 private:
-    const size_t    mSize;      // circular buffer size in bytes, must be a power of 2
-    const Shared* const mShared; // raw pointer to shared memory
-    const sp<IMemory> mIMemory; // ref-counted version
-    int32_t     mFront;         // index of oldest acknowledged Entry
+    /*const*/ Shared* const mShared;    // raw pointer to shared memory, actually const but not
+                                        // declared as const because the audio_utils_fifo() constructor needs a non-const pointer
+    sp<IMemory> mIMemory;       // ref-counted version, assigned only in constructor
     int     mFd;                // file descriptor
     int     mIndent;            // indentation level
+    audio_utils_fifo * const mFifo;                 // FIFO itself,
+                                                    // non-NULL unless constructor fails
+    audio_utils_fifo_reader * const mFifoReader;    // used to read from FIFO,
+                                                    // non-NULL unless constructor fails
 
     void    dumpLine(const String8& timestamp, String8& body);
 
diff --git a/include/media/nbaio/Pipe.h b/include/media/nbaio/Pipe.h
index cc95ff7..58b9750 100644
--- a/include/media/nbaio/Pipe.h
+++ b/include/media/nbaio/Pipe.h
@@ -17,6 +17,7 @@
 #ifndef ANDROID_AUDIO_PIPE_H
 #define ANDROID_AUDIO_PIPE_H
 
+#include <audio_utils/fifo.h>
 #include "NBAIO.h"
 
 namespace android {
@@ -51,7 +52,7 @@
 
     // The write side of a pipe permits overruns; flow control is the caller's responsibility.
     // It doesn't return +infinity because that would guarantee an overrun.
-    virtual ssize_t availableToWrite() const { return mMaxFrames; }
+    virtual ssize_t availableToWrite() { return mMaxFrames; }
 
     virtual ssize_t write(const void *buffer, size_t count);
     //virtual ssize_t writeVia(writeVia_t via, size_t total, void *user, size_t block);
@@ -59,7 +60,8 @@
 private:
     const size_t    mMaxFrames;     // always a power of 2
     void * const    mBuffer;
-    volatile int32_t mRear;         // written by android_atomic_release_store
+    audio_utils_fifo        mFifo;
+    audio_utils_fifo_writer mFifoWriter;
     volatile int32_t mReaders;      // number of PipeReader clients currently attached to this Pipe
     const bool      mFreeBufferInDestructor;
 };
diff --git a/include/media/nbaio/PipeReader.h b/include/media/nbaio/PipeReader.h
index 7c733ad..70ecb34 100644
--- a/include/media/nbaio/PipeReader.h
+++ b/include/media/nbaio/PipeReader.h
@@ -47,6 +47,8 @@
 
     virtual ssize_t read(void *buffer, size_t count);
 
+    virtual ssize_t flush();
+
     // NBAIO_Source end
 
 #if 0   // until necessary
@@ -55,7 +57,7 @@
 
 private:
     Pipe&       mPipe;
-    int32_t     mFront;         // follows behind mPipe.mRear
+    audio_utils_fifo_reader mFifoReader;
     int64_t     mFramesOverrun;
     int64_t     mOverruns;
 };
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index b59319c..3420617 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -35,6 +35,8 @@
 namespace android {
 
 struct ABuffer;
+class ACodecBufferChannel;
+class MediaCodecBuffer;
 class MemoryDealer;
 struct DescribeColorFormat2Params;
 struct DataConverter;
@@ -42,10 +44,9 @@
 struct ACodec : public AHierarchicalStateMachine, public CodecBase {
     ACodec();
 
-    virtual void setNotificationMessage(const sp<AMessage> &msg);
-
     void initiateSetup(const sp<AMessage> &msg);
 
+    virtual std::shared_ptr<BufferChannelBase> getBufferChannel() override;
     virtual void initiateAllocateComponent(const sp<AMessage> &msg);
     virtual void initiateConfigureComponent(const sp<AMessage> &msg);
     virtual void initiateCreateInputSurface();
@@ -71,33 +72,6 @@
         handleMessage(msg);
     }
 
-    struct PortDescription : public CodecBase::PortDescription {
-        size_t countBuffers();
-        IOMX::buffer_id bufferIDAt(size_t index) const;
-        sp<ABuffer> bufferAt(size_t index) const;
-        sp<NativeHandle> handleAt(size_t index) const;
-        sp<RefBase> memRefAt(size_t index) const;
-
-    private:
-        friend struct ACodec;
-
-        Vector<IOMX::buffer_id> mBufferIDs;
-        Vector<sp<ABuffer> > mBuffers;
-        Vector<sp<NativeHandle> > mHandles;
-        Vector<sp<RefBase> > mMemRefs;
-
-        PortDescription();
-        void addBuffer(
-                IOMX::buffer_id id, const sp<ABuffer> &buffer,
-                const sp<NativeHandle> &handle, const sp<RefBase> &memRef);
-
-        DISALLOW_EVIL_CONSTRUCTORS(PortDescription);
-    };
-
-    static bool isFlexibleColorFormat(
-            const sp<IOMX> &omx, IOMX::node_id node,
-            uint32_t colorFormat, bool usingNativeBuffers, OMX_U32 *flexibleEquivalent);
-
     // Returns 0 if configuration is not supported.  NOTE: this is treated by
     // some OMX components as auto level, and by others as invalid level.
     static int /* OMX_VIDEO_AVCLEVELTYPE */ getAVCLevelFor(
@@ -190,13 +164,14 @@
         Status mStatus;
         unsigned mDequeuedAt;
 
-        sp<ABuffer> mData;      // the client's buffer; if not using data conversion, this is the
-                                // codec buffer; otherwise, it is allocated separately
-        sp<RefBase> mMemRef;    // and a reference to the IMemory, so it does not go away
-        sp<ABuffer> mCodecData; // the codec's buffer
-        sp<RefBase> mCodecRef;  // and a reference to the IMemory
+        sp<MediaCodecBuffer> mData;  // the client's buffer; if not using data conversion, this is
+                                     // the codec buffer; otherwise, it is allocated separately
+        sp<RefBase> mMemRef;         // and a reference to the IMemory, so it does not go away
+        sp<MediaCodecBuffer> mCodecData;  // the codec's buffer
+        sp<RefBase> mCodecRef;            // and a reference to the IMemory
+
         sp<GraphicBuffer> mGraphicBuffer;
-        sp<NativeHandle> mNativeHandle;
+        bool mNewGraphicBuffer;
         int mFenceFd;
         FrameRenderTracker::Info *mRenderInfo;
 
@@ -226,8 +201,6 @@
     KeyedVector<int64_t, BufferStats> mBufferStats;
 #endif
 
-    sp<AMessage> mNotify;
-
     sp<UninitializedState> mUninitializedState;
     sp<LoadedState> mLoadedState;
     sp<LoadedToIdleState> mLoadedToIdleState;
@@ -242,10 +215,9 @@
 
     AString mComponentName;
     uint32_t mFlags;
-    uint32_t mQuirks;
     sp<IOMX> mOMX;
-    sp<IBinder> mNodeBinder;
-    IOMX::node_id mNode;
+    sp<IOMXNode> mOMXNode;
+    int32_t mNodeGeneration;
     sp<MemoryDealer> mDealer[2];
 
     bool mUsingNativeWindow;
@@ -287,20 +259,17 @@
     bool mChannelMaskPresent;
     int32_t mChannelMask;
     unsigned mDequeueCounter;
-    MetadataBufferType mInputMetadataType;
-    MetadataBufferType mOutputMetadataType;
-    bool mLegacyAdaptiveExperiment;
+    IOMX::PortMode mPortMode[2];
     int32_t mMetadataBuffersToSubmit;
     size_t mNumUndequeuedBuffers;
     sp<DataConverter> mConverter[2];
 
+    sp<IGraphicBufferSource> mGraphicBufferSource;
     int64_t mRepeatFrameDelayUs;
     int64_t mMaxPtsGapUs;
     float mMaxFps;
-
     int64_t mTimePerFrameUs;
     int64_t mTimePerCaptureUs;
-
     bool mCreateInputBuffersSuspended;
 
     bool mTunneled;
@@ -308,12 +277,15 @@
     OMX_INDEXTYPE mDescribeColorAspectsIndex;
     OMX_INDEXTYPE mDescribeHDRStaticInfoIndex;
 
+    std::shared_ptr<ACodecBufferChannel> mBufferChannel;
+
     status_t setCyclicIntraMacroblockRefresh(const sp<AMessage> &msg, int32_t mode);
     status_t allocateBuffersOnPort(OMX_U32 portIndex);
     status_t freeBuffersOnPort(OMX_U32 portIndex);
     status_t freeBuffer(OMX_U32 portIndex, size_t i);
 
     status_t handleSetSurface(const sp<Surface> &surface);
+    status_t setPortMode(int32_t portIndex, IOMX::PortMode mode);
     status_t setupNativeWindowSizeFormatAndUsage(
             ANativeWindow *nativeWindow /* nonnull */, int *finalUsage /* nonnull */,
             bool reconnect);
@@ -330,21 +302,20 @@
     BufferInfo *dequeueBufferFromNativeWindow();
 
     inline bool storingMetadataInDecodedBuffers() {
-        return mOutputMetadataType >= 0 && !mIsEncoder;
+        return (mPortMode[kPortIndexOutput] == IOMX::kPortModeDynamicANWBuffer) && !mIsEncoder;
     }
 
-    inline bool usingMetadataOnEncoderOutput() {
-        return mOutputMetadataType >= 0 && mIsEncoder;
+    inline bool usingSecureBufferOnEncoderOutput() {
+        return (mPortMode[kPortIndexOutput] == IOMX::kPortModePresetSecureBuffer) && mIsEncoder;
     }
 
     BufferInfo *findBufferByID(
             uint32_t portIndex, IOMX::buffer_id bufferID,
             ssize_t *index = NULL);
 
+    status_t fillBuffer(BufferInfo *info);
+
     status_t setComponentRole(bool isEncoder, const char *mime);
-    static const char *getComponentRole(bool isEncoder, const char *mime);
-    static status_t setComponentRole(
-            const sp<IOMX> &omx, IOMX::node_id node, const char *role);
 
     status_t configureCodec(const char *mime, const sp<AMessage> &msg);
 
@@ -506,8 +477,6 @@
 
     status_t setupErrorCorrectionParameters();
 
-    status_t initNativeWindow();
-
     // Returns true iff all buffers on the given port have status
     // OWNED_BY_US or OWNED_BY_NATIVE_WINDOW.
     bool allYourBuffersAreBelongToUs(OMX_U32 portIndex);
@@ -552,11 +521,6 @@
             OMX_ERRORTYPE error = OMX_ErrorUndefined,
             status_t internalError = UNKNOWN_ERROR);
 
-    static bool describeDefaultColorFormat(DescribeColorFormat2Params &describeParams);
-    static bool describeColorFormat(
-        const sp<IOMX> &omx, IOMX::node_id node,
-        DescribeColorFormat2Params &describeParams);
-
     status_t requestIDRFrame();
     status_t setParameters(const sp<AMessage> &params);
 
diff --git a/include/media/stagefright/AudioSource.h b/include/media/stagefright/AudioSource.h
index 2ec89a4..f20c2cd 100644
--- a/include/media/stagefright/AudioSource.h
+++ b/include/media/stagefright/AudioSource.h
@@ -89,6 +89,8 @@
     int64_t mPrevSampleTimeUs;
     int64_t mInitialReadTimeUs;
     int64_t mNumFramesReceived;
+    int64_t mNumFramesSkipped;
+    int64_t mNumFramesLost;
     int64_t mNumClientOwnedBuffers;
 
     List<MediaBuffer * > mBuffersReceived;
diff --git a/include/media/stagefright/CodecBase.h b/include/media/stagefright/CodecBase.h
index e057075..cfbaea4 100644
--- a/include/media/stagefright/CodecBase.h
+++ b/include/media/stagefright/CodecBase.h
@@ -18,12 +18,16 @@
 
 #define CODEC_BASE_H_
 
+#include <memory>
+
 #include <stdint.h>
 
 #define STRINGIFY_ENUMS
 
+#include <media/ICrypto.h>
 #include <media/IOMX.h>
 #include <media/MediaCodecInfo.h>
+#include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/foundation/AHandler.h>
 #include <media/stagefright/foundation/ColorUtils.h>
 #include <media/hardware/HardwareAPI.h>
@@ -34,32 +38,164 @@
 
 namespace android {
 
-struct ABuffer;
+class BufferChannelBase;
+class BufferProducerWrapper;
+class MediaCodecBuffer;
 struct PersistentSurface;
+struct RenderedFrameInfo;
+class Surface;
 
 struct CodecBase : public AHandler, /* static */ ColorUtils {
-    enum {
-        kWhatFillThisBuffer      = 'fill',
-        kWhatDrainThisBuffer     = 'drai',
-        kWhatEOS                 = 'eos ',
-        kWhatShutdownCompleted   = 'scom',
-        kWhatFlushCompleted      = 'fcom',
-        kWhatOutputFormatChanged = 'outC',
-        kWhatError               = 'erro',
-        kWhatComponentAllocated  = 'cAll',
-        kWhatComponentConfigured = 'cCon',
-        kWhatInputSurfaceCreated = 'isfc',
-        kWhatInputSurfaceAccepted = 'isfa',
-        kWhatSignaledInputEOS    = 'seos',
-        kWhatBuffersAllocated    = 'allc',
-        kWhatOutputFramesRendered = 'outR',
+    /**
+     * This interface defines events firing from CodecBase back to MediaCodec.
+     * All methods must not block.
+     */
+    class CodecCallback {
+    public:
+        virtual ~CodecCallback() = default;
+
+        /**
+         * Notify MediaCodec that an output EOS has been seen.
+         *
+         * @param err the underlying cause of the EOS. If the value is neither
+         *            OK nor ERROR_END_OF_STREAM, the EOS is declared
+         *            prematurely for that error.
+         */
+        virtual void onEos(status_t err) = 0;
+        /**
+         * Notify MediaCodec that the start operation is complete.
+         */
+        virtual void onStartCompleted() = 0;
+        /**
+         * Notify MediaCodec that the stop operation is complete.
+         */
+        virtual void onStopCompleted() = 0;
+        /**
+         * Notify MediaCodec that the release operation is complete.
+         */
+        virtual void onReleaseCompleted() = 0;
+        /**
+         * Notify MediaCodec that the flush operation is complete.
+         */
+        virtual void onFlushCompleted() = 0;
+        /**
+         * Notify MediaCodec that an error has occurred.
+         *
+         * @param err         an error code for the error that occurred.
+         * @param actionCode  an action code indicating the severity of the error.
+         */
+        virtual void onError(status_t err, enum ActionCode actionCode) = 0;
+        /**
+         * Notify MediaCodec that the underlying component is allocated.
+         *
+         * @param componentName the unique name of the component specified in
+         *                      MediaCodecList.
+         */
+        virtual void onComponentAllocated(const char *componentName) = 0;
+        /**
+         * Notify MediaCodec that the underlying component is configured.
+         *
+         * @param inputFormat   an input format at configure time.
+         * @param outputFormat  an output format at configure time.
+         */
+        virtual void onComponentConfigured(
+                const sp<AMessage> &inputFormat, const sp<AMessage> &outputFormat) = 0;
+        /**
+         * Notify MediaCodec that the input surface is created.
+         *
+         * @param inputFormat   an input format at surface creation. Formats
+         *                      could change from the previous state as a result
+         *                      of creating a surface.
+         * @param outputFormat  an output format at surface creation.
+         * @param inputSurface  the created surface.
+         */
+        virtual void onInputSurfaceCreated(
+                const sp<AMessage> &inputFormat,
+                const sp<AMessage> &outputFormat,
+                const sp<BufferProducerWrapper> &inputSurface) = 0;
+        /**
+         * Notify MediaCodec that the input surface creation failed.
+         *
+         * @param err an error code of the cause.
+         */
+        virtual void onInputSurfaceCreationFailed(status_t err) = 0;
+        /**
+         * Notify MediaCodec that the component accepted the provided input
+         * surface.
+         *
+         * @param inputFormat   an input format at surface assignment. Formats
+         *                      could change from the previous state as a result
+         *                      of assigning a surface.
+         * @param outputFormat  an output format at surface assignment.
+         */
+        virtual void onInputSurfaceAccepted(
+                const sp<AMessage> &inputFormat,
+                const sp<AMessage> &outputFormat) = 0;
+        /**
+         * Notify MediaCodec that the component declined the provided input
+         * surface.
+         *
+         * @param err an error code of the cause.
+         */
+        virtual void onInputSurfaceDeclined(status_t err) = 0;
+        /**
+         * Notify MediaCodec that the requested input EOS has been sent to the
+         * input surface.
+         *
+         * @param err an error code returned from the surface. If there is no
+         *            input surface, the value is INVALID_OPERATION.
+         */
+        virtual void onSignaledInputEOS(status_t err) = 0;
+        /**
+         * Notify MediaCodec that output frames are rendered with information on
+         * those frames.
+         *
+         * @param done  a list of rendered frames.
+         */
+        virtual void onOutputFramesRendered(const std::list<RenderedFrameInfo> &done) = 0;
+        /**
+         * Notify MediaCodec that output buffers are changed.
+         */
+        virtual void onOutputBuffersChanged() = 0;
     };
 
+    /**
+     * This interface defines events firing from BufferChannelBase back to MediaCodec.
+     * All methods must not block.
+     */
+    class BufferCallback {
+    public:
+        virtual ~BufferCallback() = default;
+
+        /**
+         * Notify MediaCodec that an input buffer is available with given index.
+         * When BufferChannelBase::getInputBufferArray() is not called,
+         * BufferChannelBase may report different buffers with the same index if
+         * MediaCodec already queued/discarded the buffer. After calling
+         * BufferChannelBase::getInputBufferArray(), the buffer and index match the
+         * returned array.
+         */
+        virtual void onInputBufferAvailable(
+                size_t index, const sp<MediaCodecBuffer> &buffer) = 0;
+        /**
+         * Notify MediaCodec that an output buffer is available with given index.
+         * When BufferChannelBase::getOutputBufferArray() is not called,
+         * BufferChannelBase may report different buffers with the same index if
+         * MediaCodec already queued/discarded the buffer. After calling
+         * BufferChannelBase::getOutputBufferArray(), the buffer and index match the
+         * returned array.
+         */
+        virtual void onOutputBufferAvailable(
+                size_t index, const sp<MediaCodecBuffer> &buffer) = 0;
+    };
     enum {
         kMaxCodecBufferSize = 8192 * 4096 * 4, // 8K RGBA
     };
 
-    virtual void setNotificationMessage(const sp<AMessage> &msg) = 0;
+    inline void setCallback(std::unique_ptr<CodecCallback> &&callback) {
+        mCallback = std::move(callback);
+    }
+    virtual std::shared_ptr<BufferChannelBase> getBufferChannel() = 0;
 
     virtual void initiateAllocateComponent(const sp<AMessage> &msg) = 0;
     virtual void initiateConfigureComponent(const sp<AMessage> &msg) = 0;
@@ -85,33 +221,104 @@
     virtual void signalSetParameters(const sp<AMessage> &msg) = 0;
     virtual void signalEndOfInputStream() = 0;
 
-    struct PortDescription : public RefBase {
-        virtual size_t countBuffers() = 0;
-        virtual IOMX::buffer_id bufferIDAt(size_t index) const = 0;
-        virtual sp<ABuffer> bufferAt(size_t index) const = 0;
-        virtual sp<NativeHandle> handleAt(size_t /*index*/) const { return NULL; };
-        virtual sp<RefBase> memRefAt(size_t /*index*/) const { return NULL; }
-
-    protected:
-        PortDescription();
-        virtual ~PortDescription();
-
-    private:
-        DISALLOW_EVIL_CONSTRUCTORS(PortDescription);
-    };
-
     /*
      * Codec-related defines
      */
 
 protected:
-    CodecBase();
-    virtual ~CodecBase();
+    CodecBase() = default;
+    virtual ~CodecBase() = default;
+
+    std::unique_ptr<CodecCallback> mCallback;
 
 private:
     DISALLOW_EVIL_CONSTRUCTORS(CodecBase);
 };
 
+/**
+ * A channel between MediaCodec and CodecBase object which manages buffer
+ * passing. Only MediaCodec is expected to call these methods, and
+ * underlying CodecBase implementation should define its own interface
+ * separately for itself.
+ *
+ * Concurrency assumptions:
+ *
+ * 1) Clients may access the object from multiple threads concurrently.
+ * 2) No method calls the underlying CodecBase object while holding a lock.
+ * 3) Code inside a critical section executes within 1ms.
+ */
+class BufferChannelBase {
+public:
+    virtual ~BufferChannelBase() = default;
+
+    inline void setCallback(std::unique_ptr<CodecBase::BufferCallback> &&callback) {
+        mCallback = std::move(callback);
+    }
+
+    inline void setCrypto(const sp<ICrypto> &crypto) {
+        mCrypto = crypto;
+    }
+
+    /**
+     * Queue an input buffer into the buffer channel.
+     *
+     * @return    OK if successful;
+     *            -ENOENT if the buffer is not known (TODO: this should be
+     *            handled gracefully in the future, here and below).
+     */
+    virtual status_t queueInputBuffer(const sp<MediaCodecBuffer> &buffer) = 0;
+    /**
+     * Queue a secure input buffer into the buffer channel.
+     *
+     * @return    OK if successful;
+     *            -ENOENT if the buffer is not known;
+     *            -ENOSYS if mCrypto is not set so that decryption is not
+     *            possible;
+     *            other errors if decryption failed.
+     */
+    virtual status_t queueSecureInputBuffer(
+            const sp<MediaCodecBuffer> &buffer,
+            bool secure,
+            const uint8_t *key,
+            const uint8_t *iv,
+            CryptoPlugin::Mode mode,
+            CryptoPlugin::Pattern pattern,
+            const CryptoPlugin::SubSample *subSamples,
+            size_t numSubSamples,
+            AString *errorDetailMsg) = 0;
+    /**
+     * Request buffer rendering at specified time.
+     *
+     * @param     timestampNs   nanosecond timestamp for rendering time.
+     * @return    OK if successful;
+     *            -ENOENT if the buffer is not known.
+     */
+    virtual status_t renderOutputBuffer(
+            const sp<MediaCodecBuffer> &buffer, int64_t timestampNs) = 0;
+    /**
+     * Discard a buffer to the underlying CodecBase object.
+     *
+     * TODO: remove once this operation can be handled by just clearing the
+     * reference.
+     *
+     * @return    OK if successful;
+     *            -ENOENT if the buffer is not known.
+     */
+    virtual status_t discardBuffer(const sp<MediaCodecBuffer> &buffer) = 0;
+    /**
+     * Clear and fill array with input buffers.
+     */
+    virtual void getInputBufferArray(Vector<sp<MediaCodecBuffer>> *array) = 0;
+    /**
+     * Clear and fill array with output buffers.
+     */
+    virtual void getOutputBufferArray(Vector<sp<MediaCodecBuffer>> *array) = 0;
+
+protected:
+    std::unique_ptr<CodecBase::BufferCallback> mCallback;
+    sp<ICrypto> mCrypto;
+};
+
 }  // namespace android
 
 #endif  // CODEC_BASE_H_
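
As a rough, non-authoritative sketch of the split documented above: a MediaCodec-side
client only needs to implement the two pure virtuals of CodecBase::BufferCallback and
attach it to the codec's BufferChannelBase. The class name and counters below are
illustrative, not part of this change.

#include <memory>

#include <media/MediaCodecBuffer.h>
#include <media/stagefright/CodecBase.h>

using namespace android;

// Minimal BufferCallback: both methods must return quickly and must not block,
// per the contract documented in CodecBase.h above.
struct CountingBufferCallback : public CodecBase::BufferCallback {
    size_t inputSeen = 0;
    size_t outputSeen = 0;

    void onInputBufferAvailable(
            size_t /*index*/, const sp<MediaCodecBuffer> & /*buffer*/) override {
        ++inputSeen;
    }
    void onOutputBufferAvailable(
            size_t /*index*/, const sp<MediaCodecBuffer> & /*buffer*/) override {
        ++outputSeen;
    }
};

// Wiring, roughly as MediaCodec is expected to do it: fetch the channel once and
// install the callback before any buffers can flow.
void attach(const sp<CodecBase> &codec) {
    std::shared_ptr<BufferChannelBase> channel = codec->getBufferChannel();
    channel->setCallback(std::make_unique<CountingBufferCallback>());
}
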
diff --git a/include/media/stagefright/DataSource.h b/include/media/stagefright/DataSource.h
index 052b9b7..e7135a2 100644
--- a/include/media/stagefright/DataSource.h
+++ b/include/media/stagefright/DataSource.h
@@ -44,6 +44,7 @@
         kStreamedFromLocalHost = 2,
         kIsCachingDataSource   = 4,
         kIsHTTPBasedSource     = 8,
+        kIsLocalFileSource     = 16,
     };
 
     static sp<DataSource> CreateFromURI(
@@ -102,17 +103,6 @@
 
     ////////////////////////////////////////////////////////////////////////////
 
-    bool sniff(String8 *mimeType, float *confidence, sp<AMessage> *meta);
-
-    // The sniffer can optionally fill in "meta" with an AMessage containing
-    // a dictionary of values that helps the corresponding extractor initialize
-    // its state without duplicating effort already exerted by the sniffer.
-    typedef bool (*SnifferFunc)(
-            const sp<DataSource> &source, String8 *mimeType,
-            float *confidence, sp<AMessage> *meta);
-
-    static void RegisterDefaultSniffers();
-
     // for DRM
     virtual sp<DecryptHandle> DrmInitialization(const char * /*mime*/ = NULL) {
         return NULL;
@@ -131,12 +121,6 @@
     virtual ~DataSource() {}
 
 private:
-    static Mutex gSnifferMutex;
-    static List<SnifferFunc> gSniffers;
-    static bool gSniffersRegistered;
-
-    static void RegisterSniffer_l(SnifferFunc func);
-
     DataSource(const DataSource &);
     DataSource &operator=(const DataSource &);
 };
diff --git a/include/media/stagefright/FileSource.h b/include/media/stagefright/FileSource.h
index b6349e0..9f3bb5e 100644
--- a/include/media/stagefright/FileSource.h
+++ b/include/media/stagefright/FileSource.h
@@ -39,6 +39,10 @@
 
     virtual status_t getSize(off64_t *size);
 
+    virtual uint32_t flags() {
+        return kIsLocalFileSource;
+    }
+
     virtual sp<DecryptHandle> DrmInitialization(const char *mime);
 
     virtual void getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client);
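
A small sketch of how a caller might use the new kIsLocalFileSource flag; the helper
name is hypothetical.

#include <media/stagefright/DataSource.h>

using namespace android;

// Hypothetical helper: true when the source is a plain local file, as reported
// by the new flag that FileSource::flags() now returns.
static bool isLocalFile(const sp<DataSource> &source) {
    return (source->flags() & DataSource::kIsLocalFileSource) != 0;
}
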
diff --git a/include/media/stagefright/FrameRenderTracker.h b/include/media/stagefright/FrameRenderTracker.h
index 327d260..6cbf85d 100644
--- a/include/media/stagefright/FrameRenderTracker.h
+++ b/include/media/stagefright/FrameRenderTracker.h
@@ -32,58 +32,61 @@
 class Fence;
 class GraphicBuffer;
 
+// Tracks the render information about a frame. Frames go through several states while
+// the render information is tracked:
+//
+// 1. queued frame: mMediaTime and mGraphicBuffer are set for the frame. mFence is the
+// queue fence (read fence). mIndex is negative, and mRenderTimeNs is invalid.
+// Key characteristics: mFence is not NULL and mIndex is negative.
+//
+// 2. dequeued frame: mFence is updated with the dequeue fence (write fence). mIndex is set.
+// Key characteristics: mFence is not NULL and mIndex is non-negative. mRenderTime is still
+// invalid.
+//
+// 3. rendered frame or frame: mFence is cleared, mRenderTimeNs is set.
+// Key characteristics: mFence is NULL.
+//
+struct RenderedFrameInfo {
+    // set by client during onFrameQueued or onFrameRendered
+    int64_t getMediaTimeUs() const  { return mMediaTimeUs; }
+
+    // -1 if frame is not yet rendered
+    nsecs_t getRenderTimeNs() const { return mRenderTimeNs; }
+
+    // set by client during updateRenderInfoForDequeuedBuffer; -1 otherwise
+    ssize_t getIndex() const        { return mIndex; }
+
+    // creates information for a queued frame
+    RenderedFrameInfo(int64_t mediaTimeUs, const sp<GraphicBuffer> &graphicBuffer,
+            const sp<Fence> &fence)
+        : mMediaTimeUs(mediaTimeUs),
+          mRenderTimeNs(-1),
+          mIndex(-1),
+          mGraphicBuffer(graphicBuffer),
+          mFence(fence) {
+    }
+
+    // creates information for a frame rendered on a tunneled surface
+    RenderedFrameInfo(int64_t mediaTimeUs, nsecs_t renderTimeNs)
+        : mMediaTimeUs(mediaTimeUs),
+          mRenderTimeNs(renderTimeNs),
+          mIndex(-1),
+          mGraphicBuffer(NULL),
+          mFence(NULL) {
+    }
+
+private:
+    int64_t mMediaTimeUs;
+    nsecs_t mRenderTimeNs;
+    ssize_t mIndex;         // to be used by client
+    sp<GraphicBuffer> mGraphicBuffer;
+    sp<Fence> mFence;
+
+    friend struct FrameRenderTracker;
+};
+
 struct FrameRenderTracker {
-    // Tracks the render information about a frame. Frames go through several states while
-    // the render information is tracked:
-    //
-    // 1. queued frame: mMediaTime and mGraphicBuffer are set for the frame. mFence is the
-    // queue fence (read fence). mIndex is negative, and mRenderTimeNs is invalid.
-    // Key characteristics: mFence is not NULL and mIndex is negative.
-    //
-    // 2. dequeued frame: mFence is updated with the dequeue fence (write fence). mIndex is set.
-    // Key characteristics: mFence is not NULL and mIndex is non-negative. mRenderTime is still
-    // invalid.
-    //
-    // 3. rendered frame or frame: mFence is cleared, mRenderTimeNs is set.
-    // Key characteristics: mFence is NULL.
-    //
-    struct Info {
-        // set by client during onFrameQueued or onFrameRendered
-        int64_t getMediaTimeUs() const  { return mMediaTimeUs; }
-
-        // -1 if frame is not yet rendered
-        nsecs_t getRenderTimeNs() const { return mRenderTimeNs; }
-
-        // set by client during updateRenderInfoForDequeuedBuffer; -1 otherwise
-        ssize_t getIndex() const        { return mIndex; }
-
-        // creates information for a queued frame
-        Info(int64_t mediaTimeUs, const sp<GraphicBuffer> &graphicBuffer, const sp<Fence> &fence)
-            : mMediaTimeUs(mediaTimeUs),
-              mRenderTimeNs(-1),
-              mIndex(-1),
-              mGraphicBuffer(graphicBuffer),
-              mFence(fence) {
-        }
-
-        // creates information for a frame rendered on a tunneled surface
-        Info(int64_t mediaTimeUs, nsecs_t renderTimeNs)
-            : mMediaTimeUs(mediaTimeUs),
-              mRenderTimeNs(renderTimeNs),
-              mIndex(-1),
-              mGraphicBuffer(NULL),
-              mFence(NULL) {
-        }
-
-    private:
-        int64_t mMediaTimeUs;
-        nsecs_t mRenderTimeNs;
-        ssize_t mIndex;         // to be used by client
-        sp<GraphicBuffer> mGraphicBuffer;
-        sp<Fence> mFence;
-
-        friend struct FrameRenderTracker;
-    };
+    typedef RenderedFrameInfo Info;
 
     FrameRenderTracker();
 
diff --git a/include/media/stagefright/MPEG4Writer.h b/include/media/stagefright/MPEG4Writer.h
index a8ba095..d415b8b 100644
--- a/include/media/stagefright/MPEG4Writer.h
+++ b/include/media/stagefright/MPEG4Writer.h
@@ -36,9 +36,8 @@
     MPEG4Writer(int fd);
 
     // Limitations
-    // 1. No more than 2 tracks can be added
-    // 2. Only video or audio source can be added
-    // 3. No more than one video and/or one audio source can be added.
+    // No more than one video and/or one audio source can be added, but
+    // multiple metadata sources can be added.
     virtual status_t addSource(const sp<IMediaSource> &source);
 
     // Returns INVALID_OPERATION if there is no source or track.
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index d18aad8..2e367bf 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -18,6 +18,9 @@
 
 #define MEDIA_CODEC_H_
 
+#include <memory>
+#include <vector>
+
 #include <gui/IGraphicBufferProducer.h>
 #include <media/hardware/CryptoAPI.h>
 #include <media/MediaCodecInfo.h>
@@ -32,11 +35,12 @@
 struct AMessage;
 struct AReplyToken;
 struct AString;
+class BufferChannelBase;
 struct CodecBase;
 class IBatteryStats;
 struct ICrypto;
+class MediaCodecBuffer;
 class IMemory;
-class MemoryDealer;
 class IResourceManagerClient;
 class IResourceManagerService;
 struct PersistentSurface;
@@ -63,14 +67,15 @@
     };
 
     static const pid_t kNoPid = -1;
+    static const uid_t kNoUid = -1;
 
     static sp<MediaCodec> CreateByType(
             const sp<ALooper> &looper, const AString &mime, bool encoder, status_t *err = NULL,
-            pid_t pid = kNoPid);
+            pid_t pid = kNoPid, uid_t uid = kNoUid);
 
     static sp<MediaCodec> CreateByComponentName(
             const sp<ALooper> &looper, const AString &name, status_t *err = NULL,
-            pid_t pid = kNoPid);
+            pid_t pid = kNoPid, uid_t uid = kNoUid);
 
     static sp<PersistentSurface> CreatePersistentInputSurface();
 
@@ -149,14 +154,12 @@
     status_t getOutputFormat(sp<AMessage> *format) const;
     status_t getInputFormat(sp<AMessage> *format) const;
 
-    status_t getWidevineLegacyBuffers(Vector<sp<ABuffer> > *buffers) const;
+    status_t getInputBuffers(Vector<sp<MediaCodecBuffer> > *buffers) const;
+    status_t getOutputBuffers(Vector<sp<MediaCodecBuffer> > *buffers) const;
 
-    status_t getInputBuffers(Vector<sp<ABuffer> > *buffers) const;
-    status_t getOutputBuffers(Vector<sp<ABuffer> > *buffers) const;
-
-    status_t getOutputBuffer(size_t index, sp<ABuffer> *buffer);
+    status_t getOutputBuffer(size_t index, sp<MediaCodecBuffer> *buffer);
     status_t getOutputFormat(size_t index, sp<AMessage> *format);
-    status_t getInputBuffer(size_t index, sp<ABuffer> *buffer);
+    status_t getInputBuffer(size_t index, sp<MediaCodecBuffer> *buffer);
 
     status_t setSurface(const sp<Surface> &nativeWindow);
 
@@ -245,21 +248,16 @@
         kFlagIsSecure                   = 64,
         kFlagSawMediaServerDie          = 128,
         kFlagIsEncoder                  = 256,
-        kFlagGatherCodecSpecificData    = 512,
+        // 512 skipped
         kFlagIsAsync                    = 1024,
         kFlagIsComponentAllocated       = 2048,
         kFlagPushBlankBuffersOnShutdown = 4096,
     };
 
     struct BufferInfo {
-        uint32_t mBufferID;
-        sp<ABuffer> mData;
-        sp<NativeHandle> mNativeHandle;
-        sp<RefBase> mMemRef;
-        sp<ABuffer> mEncryptedData;
-        sp<IMemory> mSharedEncryptedBuffer;
-        sp<AMessage> mNotify;
-        sp<AMessage> mFormat;
+        BufferInfo();
+
+        sp<MediaCodecBuffer> mData;
         bool mOwnedByClient;
     };
 
@@ -288,6 +286,7 @@
     };
 
     State mState;
+    uid_t mUid;
     bool mReleasedByResourceManager;
     sp<ALooper> mLooper;
     sp<ALooper> mCodecLooper;
@@ -303,7 +302,6 @@
     sp<AMessage> mInputFormat;
     sp<AMessage> mCallback;
     sp<AMessage> mOnFrameRenderedNotification;
-    sp<MemoryDealer> mDealer;
 
     sp<IResourceManagerClient> mResourceManagerClient;
     sp<ResourceManagerServiceProxy> mResourceManagerService;
@@ -329,7 +327,7 @@
     Mutex mBufferLock;
 
     List<size_t> mAvailPortBuffers[2];
-    Vector<BufferInfo> mPortBuffers[2];
+    std::vector<BufferInfo> mPortBuffers[2];
 
     int32_t mDequeueInputTimeoutGeneration;
     sp<AReplyToken> mDequeueInputReplyID;
@@ -346,7 +344,9 @@
     bool mHaveInputSurface;
     bool mHavePendingInputBuffers;
 
-    MediaCodec(const sp<ALooper> &looper, pid_t pid);
+    std::shared_ptr<BufferChannelBase> mBufferChannel;
+
+    MediaCodec(const sp<ALooper> &looper, pid_t pid, uid_t uid);
 
     static sp<CodecBase> GetCodecBase(const AString &name, bool nameIsType = false);
 
@@ -367,7 +367,7 @@
 
     status_t getBufferAndFormat(
             size_t portIndex, size_t index,
-            sp<ABuffer> *buffer, sp<AMessage> *format);
+            sp<MediaCodecBuffer> *buffer, sp<AMessage> *format);
 
     bool handleDequeueInputBuffer(const sp<AReplyToken> &replyID, bool newRequest = false);
     bool handleDequeueOutputBuffer(const sp<AReplyToken> &replyID, bool newRequest = false);
@@ -389,7 +389,7 @@
 
     status_t onSetParameters(const sp<AMessage> &params);
 
-    status_t amendOutputFormatWithCodecSpecificData(const sp<ABuffer> &buffer);
+    status_t amendOutputFormatWithCodecSpecificData(const sp<MediaCodecBuffer> &buffer);
     void updateBatteryStat();
     bool isExecuting() const;
 
diff --git a/include/media/stagefright/MediaCodecList.h b/include/media/stagefright/MediaCodecList.h
index 44dbde0..430bc16 100644
--- a/include/media/stagefright/MediaCodecList.h
+++ b/include/media/stagefright/MediaCodecList.h
@@ -115,7 +115,6 @@
 
     Vector<sp<MediaCodecInfo> > mCodecInfos;
     sp<MediaCodecInfo> mCurrentInfo;
-    sp<IOMX> mOMX;
 
     MediaCodecList();
     ~MediaCodecList();
diff --git a/include/media/stagefright/MediaCodecSource.h b/include/media/stagefright/MediaCodecSource.h
index 18b1955..f9a46a9 100644
--- a/include/media/stagefright/MediaCodecSource.h
+++ b/include/media/stagefright/MediaCodecSource.h
@@ -21,8 +21,7 @@
 #include <media/stagefright/foundation/AHandlerReflector.h>
 #include <media/stagefright/foundation/Mutexed.h>
 #include <media/stagefright/MediaSource.h>
-
-#include <gui/IGraphicBufferConsumer.h>
+#include <media/stagefright/PersistentSurface.h>
 
 namespace android {
 
@@ -44,7 +43,7 @@
             const sp<ALooper> &looper,
             const sp<AMessage> &format,
             const sp<MediaSource> &source,
-            const sp<IGraphicBufferConsumer> &consumer = NULL,
+            const sp<PersistentSurface> &persistentSurface = NULL,
             uint32_t flags = 0);
 
     bool isVideo() const { return mIsVideo; }
@@ -88,7 +87,7 @@
             const sp<ALooper> &looper,
             const sp<AMessage> &outputFormat,
             const sp<MediaSource> &source,
-            const sp<IGraphicBufferConsumer> &consumer,
+            const sp<PersistentSurface> &persistentSurface,
             uint32_t flags = 0);
 
     status_t onStart(MetaData *params);
@@ -121,7 +120,7 @@
     int32_t mEncoderDataSpace;
     sp<AMessage> mEncoderActivityNotify;
     sp<IGraphicBufferProducer> mGraphicBufferProducer;
-    sp<IGraphicBufferConsumer> mGraphicBufferConsumer;
+    sp<PersistentSurface> mPersistentSurface;
     List<MediaBuffer *> mInputBufferQueue;
     List<size_t> mAvailEncoderInputIndices;
     List<int64_t> mDecodingTimeQueue; // decoding time (us) for video
diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h
index 5f2a32d..359fb69 100644
--- a/include/media/stagefright/MediaDefs.h
+++ b/include/media/stagefright/MediaDefs.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009 The Android Open Source Project
+ * Copyright (C) 2016 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,69 +14,18 @@
  * limitations under the License.
  */
 
-#ifndef MEDIA_DEFS_H_
 
-#define MEDIA_DEFS_H_
+#ifndef STAGEFRIGHT_MEDIA_DEFS_H_
+#define STAGEFRIGHT_MEDIA_DEFS_H_
 
-namespace android {
+/*
+ * Please, DO NOT USE!
+ *
+ * This file is here only for legacy reasons. Instead, include directly
+ * the header below.
+ *
+ */
 
-extern const char *MEDIA_MIMETYPE_IMAGE_JPEG;
+#include <media/MediaDefs.h>
 
-extern const char *MEDIA_MIMETYPE_VIDEO_VP8;
-extern const char *MEDIA_MIMETYPE_VIDEO_VP9;
-extern const char *MEDIA_MIMETYPE_VIDEO_AVC;
-extern const char *MEDIA_MIMETYPE_VIDEO_HEVC;
-extern const char *MEDIA_MIMETYPE_VIDEO_MPEG4;
-extern const char *MEDIA_MIMETYPE_VIDEO_H263;
-extern const char *MEDIA_MIMETYPE_VIDEO_MPEG2;
-extern const char *MEDIA_MIMETYPE_VIDEO_RAW;
-extern const char *MEDIA_MIMETYPE_VIDEO_DOLBY_VISION;
-
-extern const char *MEDIA_MIMETYPE_AUDIO_AMR_NB;
-extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB;
-extern const char *MEDIA_MIMETYPE_AUDIO_MPEG;           // layer III
-extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I;
-extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II;
-extern const char *MEDIA_MIMETYPE_AUDIO_MIDI;
-extern const char *MEDIA_MIMETYPE_AUDIO_AAC;
-extern const char *MEDIA_MIMETYPE_AUDIO_QCELP;
-extern const char *MEDIA_MIMETYPE_AUDIO_VORBIS;
-extern const char *MEDIA_MIMETYPE_AUDIO_OPUS;
-extern const char *MEDIA_MIMETYPE_AUDIO_G711_ALAW;
-extern const char *MEDIA_MIMETYPE_AUDIO_G711_MLAW;
-extern const char *MEDIA_MIMETYPE_AUDIO_RAW;
-extern const char *MEDIA_MIMETYPE_AUDIO_FLAC;
-extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS;
-extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM;
-extern const char *MEDIA_MIMETYPE_AUDIO_AC3;
-extern const char *MEDIA_MIMETYPE_AUDIO_EAC3;
-
-extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
-extern const char *MEDIA_MIMETYPE_CONTAINER_WAV;
-extern const char *MEDIA_MIMETYPE_CONTAINER_OGG;
-extern const char *MEDIA_MIMETYPE_CONTAINER_MATROSKA;
-extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2TS;
-extern const char *MEDIA_MIMETYPE_CONTAINER_AVI;
-extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2PS;
-
-extern const char *MEDIA_MIMETYPE_CONTAINER_WVM;
-
-extern const char *MEDIA_MIMETYPE_TEXT_3GPP;
-extern const char *MEDIA_MIMETYPE_TEXT_SUBRIP;
-extern const char *MEDIA_MIMETYPE_TEXT_VTT;
-extern const char *MEDIA_MIMETYPE_TEXT_CEA_608;
-extern const char *MEDIA_MIMETYPE_TEXT_CEA_708;
-extern const char *MEDIA_MIMETYPE_DATA_TIMED_ID3;
-
-// These are values exported to JAVA API that need to be in sync with
-// frameworks/base/media/java/android/media/AudioFormat.java. Unfortunately,
-// they are not defined in frameworks/av, so defining them here.
-enum AudioEncoding {
-    kAudioEncodingPcm16bit = 2,
-    kAudioEncodingPcm8bit = 3,
-    kAudioEncodingPcmFloat = 4,
-};
-
-}  // namespace android
-
-#endif  // MEDIA_DEFS_H_
+#endif  // STAGEFRIGHT_MEDIA_DEFS_H_
diff --git a/include/media/stagefright/MediaExtractor.h b/include/media/stagefright/MediaExtractor.h
index f383e72..b460ef7 100644
--- a/include/media/stagefright/MediaExtractor.h
+++ b/include/media/stagefright/MediaExtractor.h
@@ -59,12 +59,6 @@
     virtual uint32_t flags() const;
 
     // for DRM
-    void setDrmFlag(bool flag) {
-        mIsDrm = flag;
-    };
-    bool getDrmFlag() {
-        return mIsDrm;
-    }
     virtual char* getDrmTrackInfo(size_t /*trackID*/, int * /*len*/) {
         return NULL;
     }
@@ -78,7 +72,24 @@
     virtual ~MediaExtractor() {}
 
 private:
-    bool mIsDrm;
+
+    typedef bool (*SnifferFunc)(
+            const sp<DataSource> &source, String8 *mimeType,
+            float *confidence, sp<AMessage> *meta);
+
+    static Mutex gSnifferMutex;
+    static List<SnifferFunc> gSniffers;
+    static bool gSniffersRegistered;
+
+    // The sniffer can optionally fill in "meta" with an AMessage containing
+    // a dictionary of values that helps the corresponding extractor initialize
+    // its state without duplicating effort already exerted by the sniffer.
+    static void RegisterSniffer_l(SnifferFunc func);
+
+    static bool sniff(const sp<DataSource> &source,
+            String8 *mimeType, float *confidence, sp<AMessage> *meta);
+
+    static void RegisterDefaultSniffers();
 
     MediaExtractor(const MediaExtractor &);
     MediaExtractor &operator=(const MediaExtractor &);
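
For reference, a sniffer matching the (now private) SnifferFunc shape above might look
like the sketch below; the format name, magic bytes, and MIME type are purely
hypothetical.

#include <string.h>

#include <media/stagefright/DataSource.h>
#include <media/stagefright/foundation/AMessage.h>
#include <utils/String8.h>

using namespace android;

// Hypothetical sniffer: inspects the first bytes of the source and reports a
// MIME type plus a confidence score, optionally filling "meta" for the extractor.
static bool SniffMyFormat(
        const sp<DataSource> &source, String8 *mimeType,
        float *confidence, sp<AMessage> * /*meta*/) {
    uint8_t header[4];
    if (source->readAt(0, header, sizeof(header)) != (ssize_t)sizeof(header)) {
        return false;
    }
    if (memcmp(header, "MYFM", 4) != 0) {  // hypothetical magic bytes
        return false;
    }
    *mimeType = "application/x-myformat";  // hypothetical MIME type
    *confidence = 0.6f;
    return true;
}
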
diff --git a/include/media/stagefright/MediaFilter.h b/include/media/stagefright/MediaFilter.h
index 5725f88..0c10d11 100644
--- a/include/media/stagefright/MediaFilter.h
+++ b/include/media/stagefright/MediaFilter.h
@@ -21,7 +21,7 @@
 
 namespace android {
 
-struct ABuffer;
+class ACodecBufferChannel;
 struct GraphicBufferListener;
 class MemoryDealer;
 struct SimpleFilter;
@@ -29,8 +29,7 @@
 struct MediaFilter : public CodecBase {
     MediaFilter();
 
-    virtual void setNotificationMessage(const sp<AMessage> &msg);
-
+    virtual std::shared_ptr<BufferChannelBase> getBufferChannel() override;
     virtual void initiateAllocateComponent(const sp<AMessage> &msg);
     virtual void initiateConfigureComponent(const sp<AMessage> &msg);
     virtual void initiateCreateInputSurface();
@@ -48,25 +47,6 @@
 
     virtual void onMessageReceived(const sp<AMessage> &msg);
 
-    struct PortDescription : public CodecBase::PortDescription {
-        virtual size_t countBuffers();
-        virtual IOMX::buffer_id bufferIDAt(size_t index) const;
-        virtual sp<ABuffer> bufferAt(size_t index) const;
-
-    protected:
-        PortDescription();
-
-    private:
-        friend struct MediaFilter;
-
-        Vector<IOMX::buffer_id> mBufferIDs;
-        Vector<sp<ABuffer> > mBuffers;
-
-        void addBuffer(IOMX::buffer_id id, const sp<ABuffer> &buffer);
-
-        DISALLOW_EVIL_CONSTRUCTORS(PortDescription);
-    };
-
 protected:
     virtual ~MediaFilter();
 
@@ -82,7 +62,7 @@
         int32_t mOutputFlags;
         Status mStatus;
 
-        sp<ABuffer> mData;
+        sp<MediaCodecBuffer> mData;
     };
 
     enum State {
@@ -121,7 +101,6 @@
     int32_t mColorFormatIn, mColorFormatOut;
     size_t mMaxInputSize, mMaxOutputSize;
     int32_t mGeneration;
-    sp<AMessage> mNotify;
     sp<AMessage> mInputFormat;
     sp<AMessage> mOutputFormat;
 
@@ -134,6 +113,8 @@
     sp<SimpleFilter> mFilter;
     sp<GraphicBufferListener> mGraphicBufferListener;
 
+    std::shared_ptr<ACodecBufferChannel> mBufferChannel;
+
     // helper functions
     void signalProcessBuffers();
     void signalError(status_t error);
@@ -145,7 +126,6 @@
     void postFillThisBuffer(BufferInfo *info);
     void postDrainThisBuffer(BufferInfo *info);
     void postEOS();
-    void sendFormatChange();
     void requestFillEmptyInput();
     void processBuffers();
 
diff --git a/include/media/stagefright/NuMediaExtractor.h b/include/media/stagefright/NuMediaExtractor.h
index dd31447..e414757 100644
--- a/include/media/stagefright/NuMediaExtractor.h
+++ b/include/media/stagefright/NuMediaExtractor.h
@@ -108,7 +108,6 @@
     sp<DataSource> mDataSource;
 
     sp<IMediaExtractor> mImpl;
-    bool mIsWidevineExtractor;
 
     Vector<TrackInfo> mSelectedTracks;
     int64_t mTotalBitrate;  // in bits/sec
diff --git a/include/media/stagefright/OMXClient.h b/include/media/stagefright/OMXClient.h
index 2f14d06..6973405 100644
--- a/include/media/stagefright/OMXClient.h
+++ b/include/media/stagefright/OMXClient.h
@@ -18,10 +18,10 @@
 
 #define OMX_CLIENT_H_
 
-#include <media/IOMX.h>
-
 namespace android {
 
+class IOMX;
+
 class OMXClient {
 public:
     OMXClient();
diff --git a/include/media/stagefright/PersistentSurface.h b/include/media/stagefright/PersistentSurface.h
index a35b9f1..d8b75a2 100644
--- a/include/media/stagefright/PersistentSurface.h
+++ b/include/media/stagefright/PersistentSurface.h
@@ -19,29 +19,46 @@
 #define PERSISTENT_SURFACE_H_
 
 #include <gui/IGraphicBufferProducer.h>
-#include <gui/IGraphicBufferConsumer.h>
+#include <android/IGraphicBufferSource.h>
 #include <media/stagefright/foundation/ABase.h>
+#include <binder/Parcel.h>
 
 namespace android {
 
 struct PersistentSurface : public RefBase {
+    PersistentSurface() {}
+
     PersistentSurface(
             const sp<IGraphicBufferProducer>& bufferProducer,
-            const sp<IGraphicBufferConsumer>& bufferConsumer) :
+            const sp<IGraphicBufferSource>& bufferSource) :
         mBufferProducer(bufferProducer),
-        mBufferConsumer(bufferConsumer) { }
+        mBufferSource(bufferSource) { }
 
     sp<IGraphicBufferProducer> getBufferProducer() const {
         return mBufferProducer;
     }
 
-    sp<IGraphicBufferConsumer> getBufferConsumer() const {
-        return mBufferConsumer;
+    sp<IGraphicBufferSource> getBufferSource() const {
+        return mBufferSource;
+    }
+
+    status_t writeToParcel(Parcel *parcel) const {
+        parcel->writeStrongBinder(IInterface::asBinder(mBufferProducer));
+        parcel->writeStrongBinder(IInterface::asBinder(mBufferSource));
+        return NO_ERROR;
+    }
+
+    status_t readFromParcel(const Parcel *parcel) {
+        mBufferProducer = interface_cast<IGraphicBufferProducer>(
+                parcel->readStrongBinder());
+        mBufferSource = interface_cast<IGraphicBufferSource>(
+                parcel->readStrongBinder());
+        return NO_ERROR;
     }
 
 private:
-    const sp<IGraphicBufferProducer> mBufferProducer;
-    const sp<IGraphicBufferConsumer> mBufferConsumer;
+    sp<IGraphicBufferProducer> mBufferProducer;
+    sp<IGraphicBufferSource> mBufferSource;
 
     DISALLOW_EVIL_CONSTRUCTORS(PersistentSurface);
 };
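
A short sketch of the new parceling helpers in use, round-tripping a PersistentSurface
through a Parcel.

#include <binder/Parcel.h>
#include <media/stagefright/PersistentSurface.h>

using namespace android;

void roundTrip(const sp<PersistentSurface> &in, sp<PersistentSurface> *out) {
    Parcel parcel;
    in->writeToParcel(&parcel);      // writes the producer and source binders

    parcel.setDataPosition(0);       // rewind before reading back
    *out = new PersistentSurface();  // default constructor added by this change
    (*out)->readFromParcel(&parcel);
}
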
diff --git a/include/media/stagefright/SkipCutBuffer.h b/include/media/stagefright/SkipCutBuffer.h
index 61f9949..0fb5690 100644
--- a/include/media/stagefright/SkipCutBuffer.h
+++ b/include/media/stagefright/SkipCutBuffer.h
@@ -18,6 +18,7 @@
 
 #define SKIP_CUT_BUFFER_H_
 
+#include <media/MediaCodecBuffer.h>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/foundation/ABuffer.h>
 
@@ -39,6 +40,7 @@
     // After this, the caller should continue processing the buffer as usual.
     void submit(MediaBuffer *buffer);
     void submit(const sp<ABuffer>& buffer);    // same as above, but with an ABuffer
+    void submit(const sp<MediaCodecBuffer>& buffer);    // same as above, but with a MediaCodecBuffer
     void clear();
     size_t size(); // how many bytes are currently stored in the buffer
 
@@ -48,6 +50,8 @@
  private:
     void write(const char *src, size_t num);
     size_t read(char *dst, size_t num);
+    template <typename T>
+    void submitInternal(const sp<T>& buffer);
     int32_t mSkip;
     int32_t mFrontPadding;
     int32_t mBackPadding;
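
One plausible way (a sketch, not necessarily the actual implementation) for the two
submit() overloads to share code via the new submitInternal<T> template: ABuffer and
MediaCodecBuffer expose the same data()/size()/setRange() surface, so a single template
body can serve both.

#include <media/stagefright/SkipCutBuffer.h>

using namespace android;

// Sketch only: both public overloads forward to the shared template helper.
void SkipCutBuffer::submit(const sp<ABuffer> &buffer) {
    submitInternal(buffer);
}

void SkipCutBuffer::submit(const sp<MediaCodecBuffer> &buffer) {
    submitInternal(buffer);
}
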
diff --git a/include/media/stagefright/Utils.h b/include/media/stagefright/Utils.h
index 8eff914..88a416a 100644
--- a/include/media/stagefright/Utils.h
+++ b/include/media/stagefright/Utils.h
@@ -23,6 +23,7 @@
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
 #include <system/audio.h>
+#include <media/BufferingSettings.h>
 #include <media/MediaPlayerInterface.h>
 
 namespace android {
@@ -90,6 +91,9 @@
 void readFromAMessage(
         const sp<AMessage> &msg, AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */);
 
+void writeToAMessage(const sp<AMessage> &msg, const BufferingSettings &buffering);
+void readFromAMessage(const sp<AMessage> &msg, BufferingSettings *buffering /* nonnull */);
+
 AString nameForFd(int fd);
 
 }  // namespace android
diff --git a/include/media/stagefright/foundation/AData.h b/include/media/stagefright/foundation/AData.h
new file mode 100644
index 0000000..49aa0dc
--- /dev/null
+++ b/include/media/stagefright/foundation/AData.h
@@ -0,0 +1,843 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef STAGEFRIGHT_FOUNDATION_A_DATA_H_
+#define STAGEFRIGHT_FOUNDATION_A_DATA_H_
+
+#include <memory> // for std::shared_ptr, weak_ptr and unique_ptr
+#include <type_traits> // for std::aligned_union
+
+#include <utils/StrongPointer.h> // for android::sp and wp
+
+#include <media/stagefright/foundation/TypeTraits.h>
+#include <media/stagefright/foundation/Flagged.h>
+
+namespace android {
+
+/**
+ * AData is a flexible union type that supports non-POD members. It supports arbitrary types as long
+ * as they are either moveable or copyable.
+ *
+ * Internally, AData uses AUnion - a structure providing the union support. AUnion should not
+ * be used by generic code as it is very unsafe - it opens type aliasing errors where an object of
+ * one type can be easily accessed as an object of another type. AData prevents this.
+ *
+ * AData allows a custom type flagger to be used for future extensions (e.g. allowing automatic
+ * type conversion). A strict and a relaxed flagger are provided as internal types.
+ *
+ * Use as follows:
+ *
+ * AData<int, float>::Basic data; // strict type support
+ * int i = 1;
+ * float f = 7.0f;
+ *
+ * data.set(5);
+ * EXPECT_TRUE(data.find(&i));
+ * EXPECT_FALSE(data.find(&f));
+ * EXPECT_EQ(i, 5);
+ *
+ * data.set(6.0f);
+ * EXPECT_FALSE(data.find(&i));
+ * EXPECT_TRUE(data.find(&f));
+ * EXPECT_EQ(f, 6.0f);
+ *
+ * AData<int, sp<RefBase>>::RelaxedBasic objdata; // relaxed type support
+ * sp<ABuffer> buf = new ABuffer(16), buf2;
+ * sp<RefBase> obj;
+ *
+ * objdata.set(buf);
+ * EXPECT_TRUE(objdata.find(&buf2));
+ * EXPECT_EQ(buf, buf2);
+ * EXPECT_FALSE(objdata.find(&i));
+ * EXPECT_TRUE(objdata.find(&obj));
+ * EXPECT_TRUE(obj == buf);
+ *
+ * obj = buf;
+ * objdata.set(obj); // storing as sp<RefBase>
+ * EXPECT_FALSE(objdata.find(&buf2));  // not stored as ABuffer(!)
+ * EXPECT_TRUE(objdata.find(&obj));
+ */
+
+/// \cond Internal
+
+/**
+ * Helper class to call constructor and destructor for a specific type in AUnion.
+ * This class is needed as member function specialization is not allowed for a
+ * templated class.
+ */
+struct _AUnion_impl {
+    /**
+     * Calls placement constructor for type T with arbitrary arguments for storage at an address.
+     * Storage MUST be large enough to contain T.
+     * Also clears the slack space after type T. \todo This is not technically needed, so we may
+     * choose to do this just for debugging.
+     *
+     * \param totalSize size of the storage
+     * \param addr      pointer to where object T should be constructed
+     * \param args      arbitrary arguments for constructor
+     */
+    template<typename T, typename ...Args>
+    inline static void emplace(size_t totalSize, T *addr, Args&&... args) {
+        new(addr)T(std::forward<Args>(args)...);
+        // clear slack space - this is not technically required
+        constexpr size_t size = sizeof(T);
+        memset(reinterpret_cast<uint8_t*>(addr) + size, 0, totalSize - size);
+    }
+
+    /**
+     * Calls destructor for an object of type T located at a specific address.
+     *
+     * \note we do not clear the storage in this case as the storage should not be used
+     * until another object is placed there, at which point the storage will be cleared.
+     *
+     * \param addr    pointer to where object T is stored
+     */
+    template<typename T>
+    inline static void del(T *addr) {
+        addr->~T();
+    }
+};
+
+/** Constructor specialization for void type */
+template<>
+inline void _AUnion_impl::emplace<void>(size_t totalSize, void *addr) {
+    memset(addr, 0, totalSize);
+}
+
+/** Destructor specialization for void type */
+template<>
+inline void _AUnion_impl::del<void>(void *) {
+}
+
+/// \endcond
+
+/**
+ * A templated union class that can contain specific types of data, and provides
+ * constructors, destructor and access methods strictly for those types.
+ *
+ * \note This class is VERY UNSAFE compared to a union, but it allows non-POD unions.
+ * In particular care must be taken that methods are called in a careful order to
+ * prevent accessing objects of one type as another type. This class provides no
+ * facilities to help with this ordering. This is meant to be wrapped by safer
+ * utility classes that do that.
+ *
+ * \param Ts types stored in this union.
+ */
+template<typename ...Ts>
+struct AUnion {
+private:
+    using _type = typename std::aligned_union<0, Ts...>::type; ///< storage type
+    _type mValue;                                              ///< storage
+
+public:
+    /**
+     * Constructs an object of type T with arbitrary arguments in this union. After this call,
+     * this union will contain this object.
+     *
+     * This method MUST be called only when either 1) no object or 2) a void object (equivalent to
+     * no object) is contained in this union.
+     *
+     * \param T     type of object to be constructed. This must be one of the template parameters of
+     *              the union class with the same cv-qualification, or void.
+     * \param args  arbitrary arguments for the constructor
+     */
+    template<
+            typename T, typename ...Args,
+            typename=typename std::enable_if<is_one_of<T, void, Ts...>::value>::type>
+    inline void emplace(Args&&... args) {
+        _AUnion_impl::emplace(
+                sizeof(_type), reinterpret_cast<T*>(&mValue), std::forward<Args>(args)...);
+    }
+
+    /**
+     * Destructs an object of type T in this union. After this call, this union will contain no
+     * object.
+     *
+     * This method MUST be called only when this union contains an object of type T.
+     *
+     * \param T     type of object to be destructed. This must be one of the template parameters of
+     *              the union class with the same cv-qualification, or void.
+     */
+    template<
+            typename T,
+            typename=typename std::enable_if<is_one_of<T, void, Ts...>::value>::type>
+    inline void del() {
+        _AUnion_impl::del(reinterpret_cast<T*>(&mValue));
+    }
+
+    /**
+     * Returns a const reference to the object of type T in this union.
+     *
+     * This method MUST be called only when this union contains an object of type T.
+     *
+     * \param T     type of object to be returned. This must be one of the template parameters of
+     *              the union class with the same cv-qualification.
+     */
+    template<
+            typename T,
+            typename=typename std::enable_if<is_one_of<T, Ts...>::value>::type>
+    inline const T &get() const {
+        return *reinterpret_cast<const T*>(&mValue);
+    }
+
+    /**
+     * Returns a reference to the object of type T in this union.
+     *
+     * This method MUST be called only when this union contains an object of type T.
+     *
+     * \param T     type of object to be returned. This must be one of the template parameters of
+     *              the union class with the same cv-qualification.
+     */
+    template<typename T>
+    inline T &get() {
+        return *reinterpret_cast<T*>(&mValue);
+    }
+};
+
+/**
+ * Helper utility class that copies an object of type T to a destination.
+ *
+ * T must be copy assignable or copy constructible.
+ *
+ * It provides:
+ *
+ * void assign(T*, const U&) // for copiable types - this leaves the source unchanged, hence const.
+ *
+ * \param T type of object to assign to
+ */
+template<
+        typename T,
+        bool=std::is_copy_assignable<T>::value>
+struct _AData_copier {
+    static_assert(std::is_copy_assignable<T>::value, "T must be copy assignable here");
+
+    /**
+     * Copies src to data without modifying data.
+     *
+     * \param data pointer to destination
+     * \param src source object
+     */
+    inline static void assign(T *data, const T &src) {
+        *data = src;
+    }
+
+    template<typename U>
+    using enable_if_T_is_same_as = typename std::enable_if<std::is_same<U, T>::value>::type;
+
+    /**
+     * Downcast specializations for sp<>, shared_ptr<> and weak_ptr<>
+     */
+    template<typename Tp, typename U, typename=enable_if_T_is_same_as<sp<Tp>>>
+    inline static void assign(sp<Tp> *data, const sp<U> &src) {
+        *data = static_cast<Tp*>(src.get());
+    }
+
+    template<typename Tp, typename U, typename=enable_if_T_is_same_as<wp<Tp>>>
+    inline static void assign(wp<Tp> *data, const wp<U> &src) {
+        sp<U> __tmp = src.promote();
+        *data = static_cast<Tp*>(__tmp.get());
+    }
+
+    template<typename Tp, typename U, typename=enable_if_T_is_same_as<sp<Tp>>>
+    inline static void assign(sp<Tp> *data, sp<U> &&src) {
+        sp<U> __tmp = std::move(src); // move src out as get cannot
+        *data = static_cast<Tp*>(__tmp.get());
+    }
+
+    template<typename Tp, typename U, typename=enable_if_T_is_same_as<std::shared_ptr<Tp>>>
+    inline static void assign(std::shared_ptr<Tp> *data, const std::shared_ptr<U> &src) {
+        *data = std::static_pointer_cast<Tp>(src);
+    }
+
+    template<typename Tp, typename U, typename=enable_if_T_is_same_as<std::shared_ptr<Tp>>>
+    inline static void assign(std::shared_ptr<Tp> *data, std::shared_ptr<U> &&src) {
+        std::shared_ptr<U> __tmp = std::move(src); // move src out as static_pointer_cast cannot
+        *data = std::static_pointer_cast<Tp>(__tmp);
+    }
+
+    template<typename Tp, typename U, typename=enable_if_T_is_same_as<std::weak_ptr<Tp>>>
+    inline static void assign(std::weak_ptr<Tp> *data, const std::weak_ptr<U> &src) {
+        *data = std::static_pointer_cast<Tp>(src.lock());
+    }
+
+    // shared_ptrs are implicitly convertible to weak_ptrs but not vice versa, but picking the
+    // first compatible type in Ts requires having shared_ptr types before weak_ptr types, so that
+    // they are stored as shared_ptrs.
+    /**
+     * Provide sensible error message if encountering shared_ptr/weak_ptr ambiguity. This method
+     * is not enough to detect this, only if someone is trying to find the shared_ptr.
+     */
+    template<typename Tp, typename U>
+    inline static void assign(std::shared_ptr<Tp> *, const std::weak_ptr<U> &) {
+        static_assert(std::is_same<Tp, void>::value,
+                      "shared/weak pointer ambiguity. move shared ptr types before weak_ptrs");
+    }
+};
+
+/**
+ * Template specialization for non copy assignable, but copy constructible types.
+ *
+ * \todo Test this. No basic classes are copy constructible but not assignable.
+ *
+ */
+template<typename T>
+struct _AData_copier<T, false> {
+    static_assert(!std::is_copy_assignable<T>::value, "T must not be copy assignable here");
+    static_assert(std::is_copy_constructible<T>::value, "T must be copy constructible here");
+
+    inline static void copy(T *data, const T &src) {
+        data->~T();
+        new(data)T(src);
+    }
+};
+
+/**
+ * Helper utility class that moves an object of type T to a destination.
+ *
+ * T must be move assignable or move constructible.
+ *
+ * It provides multiple methods:
+ *
+ * void assign(T*, T&&)
+ *
+ * \param T type of object to assign
+ */
+template<
+        typename T,
+        bool=std::is_move_assignable<T>::value>
+struct _AData_mover {
+    static_assert(std::is_move_assignable<T>::value, "T must be move assignable here");
+
+    /**
+     * Moves src to data while likely modifying it.
+     *
+     * \param data pointer to destination
+     * \param src source object
+     */
+    inline static void assign(T *data, T &&src) {
+        *data = std::move(src);
+    }
+
+    template<typename U>
+    using enable_if_T_is_same_as = typename std::enable_if<std::is_same<U, T>::value>::type;
+
+    /**
+     * Downcast specializations for sp<>, shared_ptr<> and weak_ptr<>
+     */
+    template<typename Tp, typename U, typename=enable_if_T_is_same_as<sp<Tp>>>
+    inline static void assign(sp<Tp> *data, sp<U> &&src) {
+        sp<U> __tmp = std::move(src); // move src out as get cannot
+        *data = static_cast<Tp*>(__tmp.get());
+    }
+
+    template<typename Tp, typename U, typename=enable_if_T_is_same_as<std::shared_ptr<Tp>>>
+    inline static void assign(std::shared_ptr<Tp> *data, std::shared_ptr<U> &&src) {
+        std::shared_ptr<U> __tmp = std::move(src); // move src out as static_pointer_cast cannot
+        *data = std::static_pointer_cast<Tp>(__tmp);
+    }
+
+    template<typename Tp, typename Td, typename U, typename Ud,
+            typename=enable_if_T_is_same_as<std::unique_ptr<Tp, Td>>>
+    inline static void assign(std::unique_ptr<Tp, Td> *data, std::unique_ptr<U, Ud> &&src) {
+        *data = std::unique_ptr<Tp, Td>(static_cast<Tp*>(src.release()));
+    }
+
+    // shared_ptrs are implicitly convertible to weak_ptrs but not vice versa, but picking the
+    // first compatible type in Ts requires having shared_ptr types before weak_ptr types, so that
+    // they are stored as shared_ptrs.
+    /**
+     * Provide a sensible error message if encountering shared_ptr/weak_ptr ambiguity. This method
+     * alone cannot detect the ambiguity; it only triggers if someone is trying to remove the
+     * shared_ptr.
+     */
+    template<typename Tp, typename U>
+    inline static void assign(std::shared_ptr<Tp> *, std::weak_ptr<U> &&) {
+        static_assert(std::is_same<Tp, void>::value,
+                      "shared/weak pointer ambiguity. move shared ptr types before weak_ptrs");
+    }
+
+    // unique_ptrs are implicitly convertible to shared_ptrs but not vice versa, but picking the
+    // first compatible type in Ts requires having unique_ptrs types before shared_ptrs types, so
+    // that they are stored as unique_ptrs.
+    /**
+     * Provide a sensible error message if encountering shared_ptr/unique_ptr ambiguity. This
+     * method alone cannot detect the ambiguity; it only triggers if someone is trying to remove
+     * the unique_ptr.
+     */
+    template<typename Tp, typename U>
+    inline static void assign(std::unique_ptr<Tp> *, std::shared_ptr<U> &&) {
+        static_assert(std::is_same<Tp, void>::value,
+                      "unique/shared pointer ambiguity. move unique ptr types before shared_ptrs");
+    }
+};
+
+/**
+ * Template specialization for types that are not move assignable, but are move constructible.
+ *
+ * \todo Test this. No basic classes are move constructible but not assignable.
+ *
+ */
+template<typename T>
+struct _AData_mover<T, false> {
+    static_assert(!std::is_move_assignable<T>::value, "T must not be move assignable here");
+    static_assert(std::is_move_constructible<T>::value, "T must be move constructible here");
+
+    inline static void assign(T *data, T &&src) {
+        data->~T();
+        new(data)T(std::move(src));
+    }
+};
+
+/**
+ * Helper template that deletes an object of a specific type (member) in an AUnion.
+ *
+ * \param Flagger type flagger class (see AData)
+ * \param U AUnion type in which the member should be deleted
+ * \param Ts types to consider for the member
+ */
+template<typename Flagger, typename U, typename ...Ts>
+struct _AData_deleter;
+
+/**
+ * Template specialization when there are still types to consider (T and rest)
+ */
+template<typename Flagger, typename U, typename T, typename ...Ts>
+struct _AData_deleter<Flagger, U, T, Ts...> {
+    static bool del(typename Flagger::type flags, U &data) {
+        if (Flagger::canDeleteAs(flags, Flagger::flagFor((T*)0))) {
+            data.template del<T>();
+            return true;
+        }
+        return _AData_deleter<Flagger, U, Ts...>::del(flags, data);
+    }
+};
+
+/**
+ * Template specialization when there are no more types to consider.
+ */
+template<typename Flagger, typename U>
+struct _AData_deleter<Flagger, U> {
+    inline static bool del(typename Flagger::type, U &) {
+        return false;
+    }
+};
+
+/**
+ * Container that can store an arbitrary object of a set of specified types.
+ *
+ * This struct is an outer class that contains various inner classes based on desired type
+ * strictness. The following inner classes are supported:
+ *
+ * AData<types...>::Basic   - strict type support using uint32_t flag.
+ *
+ * AData<types...>::Strict<Flag> - strict type support using custom flag.
+ * AData<types...>::Relaxed<Flag, MaxSize, Align>
+ *                          - relaxed type support with compatible (usually derived) class support
+ *                            for pointer types with added size checking for minimal additional
+ *                            safety.
+ *
+ * AData<types...>::RelaxedBasic - relaxed type support using uint32_t flag.
+ *
+ * AData<types...>::Custom<flagger> - custom type support (flaggers determine the supported types
+ *                                    and the base type to use for each)
+ *
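+ * A minimal usage sketch (with illustrative types):
+ *
+ * AData<int, sp<RefBase>>::Basic data;  // empty container
+ * data.set(5);                          // stores an int
+ * int value = 0;
+ * EXPECT_TRUE(data.find(&value));       // value == 5; the int remains stored
+ * sp<RefBase> obj;
+ * EXPECT_FALSE(data.remove(&obj));      // an int is stored, not an sp<RefBase>
+ *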
+ */
+template<typename ...Ts>
+struct AData {
+private:
+    static_assert(are_unique<Ts...>::value, "types must be unique");
+
+    static constexpr size_t num_types = sizeof...(Ts); ///< number of types to support
+
+public:
+    /**
+     * Default (strict) type flagger provided.
+     *
+     * The default flagger simply returns the index of the type within Ts, or 0 for void.
+     *
+     * Type flaggers return a flag for a supported type.
+     *
+     * They must provide:
+     *
+     * - a flagFor(T*) method for supported types _and_ for T=void. T=void is used to mark that no
+     *   object is stored in the container. For this, an arbitrary unique value may be returned.
+     * - a mask field that contains the flag mask.
+     * - a canDeleteAs(Flag, Flag) flag comparison method that checks if an object stored with one
+     *   flag can be deleted as the type corresponding to another flag.
+     *
+     * \param Flag the underlying unsigned integral to use for the flags.
+     */
+    template<typename Flag>
+    struct flagger {
+    private:
+        static_assert(std::is_unsigned<Flag>::value, "Flag must be unsigned");
+        static_assert(std::is_integral<Flag>::value, "Flag must be an integral type");
+
+        static constexpr Flag count = num_types + 1;
+
+    public:
+        typedef Flag type; ///< flag type
+
+        static constexpr Flag mask = _Flagged_helper::minMask<Flag>(count); ///< flag mask
+
+        /**
+         * Return the stored type for T. This is itself.
+         */
+        template<typename T>
+        struct store {
+            typedef T as_type; ///< the base type that T is stored as
+        };
+
+        /**
+         * Constexpr method that returns if two flags are compatible for deletion.
+         *
+         * \param objectFlag flag for object to be deleted
+         * \param deleteFlag flag for type that object is to be deleted as
+         */
+        static constexpr bool canDeleteAs(Flag objectFlag, Flag deleteFlag) {
+            // default flagger requires strict type equality
+            return objectFlag == deleteFlag;
+        }
+
+        /**
+         * Constexpr method that returns the flag to use for a given type.
+         *
+         * Function overload for void*.
+         */
+        static constexpr Flag flagFor(void*) {
+            return 0u;
+        }
+
+        /**
+         * Constexpr method that returns the flag to use for a given supported type (T).
+         */
+        template<typename T, typename=typename std::enable_if<is_one_of<T, Ts...>::value>::type>
+        static constexpr Flag flagFor(T*) {
+            return find_first<T, Ts...>::index;
+        }
+    };
+
+    /**
+     * Relaxed flagger returns the index of the type within Ts. However, for pointers T* it returns
+     * the first type in Ts that T* can be converted into (this is normally a base type, but also
+     * works for sp<>, shared_ptr<> or unique_ptr<>). For a bit more strictness, the flag also
+     * contains the size of the class to avoid finding objects that were stored as a different
+     * derived class of the same base class.
+     *
+     * Flag is basically the index of the (base) type in Ts multiplied by the max size stored plus
+     * the size of the type (divided by alignment) for derived pointer types.
+     *
+     * \param MaxSize max supported size for derived class pointers
+     * \param Align alignment to assume for derived class pointers
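+     *
+     * For illustration (assuming the defaults MaxSize=1024 and Align=4, and hypothetical classes
+     * Base and Derived : public Base with sizeof(Derived) == 8): max_size_stored is
+     * 1024 / 4 + 1 = 257, so for AData<Base*, ...> the flag for a stored Base* is 1 * 257 = 257,
+     * while the flag for a Derived* stored as a Base* is 1 * 257 + 8 / 4 = 259.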
+     */
+    template<typename Flag, size_t MaxSize=1024, size_t Align=4>
+    struct relaxed_flagger {
+    private:
+        static_assert(std::is_unsigned<Flag>::value, "Flag must be unsigned");
+        static_assert(std::is_integral<Flag>::value, "Flag must be an integral type");
+
+        static constexpr Flag count = num_types + 1;
+        static_assert(std::numeric_limits<Flag>::max() / count > (MaxSize / Align),
+                      "not enough bits to fit into flag");
+
+        static constexpr Flag max_size_stored = MaxSize / Align + 1;
+
+        // T can be converted if its size is <= MaxSize and it can be converted to one of the Ts
+        template<typename T, size_t size>
+        using enable_if_can_be_converted = typename std::enable_if<
+                (size / Align < max_size_stored
+                        && find_first_convertible_to<T, Ts...>::index)>::type;
+
+
+        template<typename W, typename T, typename=enable_if_can_be_converted<W, sizeof(T)>>
+        static constexpr Flag relaxedFlagFor(W*, T*) {
+            return find_first_convertible_to<W, Ts...>::index * max_size_stored
+                    + (is_one_of<W, Ts...>::value ? 0 : (sizeof(T) / Align));
+        }
+
+    public:
+        typedef Flag type; ///< flag type
+
+        static constexpr Flag mask =
+            _Flagged_helper::minMask<Flag>(count * max_size_stored); ///< flag mask
+
+        /**
+         * Constexpr method that returns if two flags are compatible for deletion.
+         *
+         * \param objectFlag flag for object to be deleted
+         * \param deleteFlag flag for type that object is to be deleted as
+         */
+        static constexpr bool canDeleteAs(Flag objectFlag, Flag deleteFlag) {
+            // can delete if objects have the same base type
+            return
+                objectFlag / max_size_stored == deleteFlag / max_size_stored &&
+                (deleteFlag % max_size_stored) == 0;
+        }
+
+        /**
+         * Constexpr method that returns the flag to use for a given type.
+         *
+         * Function overload for void*.
+         */
+        static constexpr Flag flagFor(void*) {
+            return 0u;
+        }
+
+        /**
+         * Constexpr method that returns the flag to use for a given supported type (T).
+         *
+         * This is a member method to enable both overloading as well as template specialization.
+         */
+        template<typename T, typename=typename std::enable_if<is_one_of<T, Ts...>::value>::type>
+        static constexpr Flag flagFor(T*) {
+            return find_first<T, Ts...>::index * max_size_stored;
+        }
+
+        /**
+         * As a precaution, we only consider converting pointers to their base classes.
+         */
+
+        /**
+         * Template specialization for derived class pointers and managed pointers.
+         */
+        template<typename T>
+        static constexpr Flag flagFor(T**p) { return relaxedFlagFor(p, (T*)0); }
+        template<typename T>
+        static constexpr Flag flagFor(std::shared_ptr<T>*p) { return relaxedFlagFor(p, (T*)0); }
+        template<typename T>
+        static constexpr Flag flagFor(std::unique_ptr<T>*p) { return relaxedFlagFor(p, (T*)0); }
+        template<typename T>
+        static constexpr Flag flagFor(std::weak_ptr<T>*p) { return relaxedFlagFor(p, (T*)0); }
+        template<typename T>
+        static constexpr Flag flagFor(sp<T>*p) { return relaxedFlagFor(p, (T*)0); }
+        template<typename T>
+        static constexpr Flag flagFor(wp<T>*p) { return relaxedFlagFor(p, (T*)0); }
+
+        /**
+         * Type support template that provides the stored type for T.
+         * This is itself if it is one of Ts, or the first type in Ts that T is convertible to.
+         *
+         * NOTE: This template may provide a base class for an unsupported type. Support is
+         * determined by flagFor().
+         */
+        template<typename T>
+        struct store {
+            typedef typename std::conditional<
+                    is_one_of<T, Ts...>::value,
+                    T,
+                    typename find_first_convertible_to<T, Ts...>::type>::type as_type;
+        };
+    };
+
+    /**
+     * Implementation of AData.
+     */
+    template<typename Flagger>
+    struct Custom : protected Flagged<AUnion<Ts...>, typename Flagger::type, Flagger::mask> {
+        using data_t = AUnion<Ts...>;
+        using base_t = Flagged<AUnion<Ts...>, typename Flagger::type, Flagger::mask>;
+
+        /**
+         * Constructor. Initializes this to a container that does not contain any object.
+         */
+        Custom() : base_t(Flagger::flagFor((void*)0)) { }
+
+        /**
+         * Removes the contained object, if any.
+         */
+        ~Custom() {
+            if (!this->clear()) {
+                __builtin_trap();
+                // std::cerr << "could not delete data of type " << this->flags() << std::endl;
+            }
+        }
+
+        /**
+         * Returns whether there is any object contained.
+         */
+        inline bool used() const {
+            return this->flags() != Flagger::flagFor((void*)0);
+        }
+
+        /**
+         * Removes the contained object, if any. Returns true if no object is contained after the
+         * call, or false on any error (this is highly unexpected).
+         */
+        bool clear() {
+            if (this->used()) {
+                if (_AData_deleter<Flagger, data_t, Ts...>::del(this->flags(), this->get())) {
+                    this->setFlags(Flagger::flagFor((void*)0));
+                    return true;
+                }
+                return false;
+            }
+            return true;
+        }
+
+        template<typename T>
+        using is_supported_by_flagger =
+            typename std::enable_if<Flagger::flagFor((T*)0) != Flagger::flagFor((void*)0)>::type;
+
+        /**
+         * Checks if there is a copiable object of type T in this container. If there is, it copies
+         * that object into the provided address and returns true. Otherwise, it does nothing and
+         * returns false.
+         *
+         * This method normally requires a flag equality between the stored and retrieved types.
+         * However, it also allows retrieving the stored object as the stored type
+         * (usually base type).
+         *
+         * \param T type of the object sought
+         * \param data address at which the object should be retrieved
+         *
+         * \return true if the object was retrieved. false if it was not.
+         */
+        template<
+                typename T,
+                typename=is_supported_by_flagger<T>>
+        bool find(T *data) const {
+            using B = typename Flagger::template store<T>::as_type;
+            if (this->flags() == Flagger::flagFor((T*)0) ||
+                Flagger::canDeleteAs(this->flags(), Flagger::flagFor((T*)0))) {
+                _AData_copier<T>::assign(data, this->get().template get<B>());
+                return true;
+            }
+            return false;
+        }
+
+        /**
+         * Checks if there is an object of type T in this container. If there is, it moves that
+         * object into the provided address and returns true. Otherwise, it does nothing and returns
+         * false.
+         *
+         * This method normally requires a flag equality between the stored and retrieved types.
+         * However, it also allows retrieving the stored object as the stored type
+         * (usually base type).
+         *
+         * \param T type of the object sought
+         * \param data address at which the object should be retrieved.
+         *
+         * \return true if the object was retrieved. false if it was not.
+         */
+        template<
+                typename T,
+                typename=is_supported_by_flagger<T>>
+        bool remove(T *data) {
+            using B = typename Flagger::template store<T>::as_type;
+            if (this->flags() == Flagger::flagFor((T*)0) ||
+                Flagger::canDeleteAs(this->flags(), Flagger::flagFor((T*)0))) {
+                _AData_mover<T>::assign(data, std::move(this->get().template get<B>()));
+                return true;
+            }
+            return false;
+        }
+
+        /**
+         * Stores an object into this container by copying. If it was successful, returns true.
+         * Otherwise (e.g. if it could not destroy the already stored object), it returns false;
+         * the latter would be highly unexpected.
+         *
+         * \param T type of object to store
+         * \param data object to store
+         *
+         * \return true if the object was stored. false if it was not.
+         */
+        template<
+                typename T,
+                typename=is_supported_by_flagger<T>,
+                typename=typename std::enable_if<
+                        std::is_copy_constructible<T>::value ||
+                        (std::is_default_constructible<T>::value &&
+                                std::is_copy_assignable<T>::value)>::type>
+        bool set(const T &data) {
+            using B = typename Flagger::template store<T>::as_type;
+
+            // if already contains an object of this type, simply assign
+            if (this->flags() == Flagger::flagFor((T*)0) && std::is_same<T, B>::value) {
+                _AData_copier<B>::assign(&this->get().template get<B>(), data);
+                return true;
+            } else if (this->used()) {
+                // destroy previous object
+                if (!this->clear()) {
+                    return false;
+                }
+            }
+            this->get().template emplace<B>(data);
+            this->setFlags(Flagger::flagFor((T *)0));
+            return true;
+        }
+
+        /**
+         * Moves an object into this container. If it was successful, returns true. Otherwise
+         * (e.g. if it could not destroy the already stored object), it returns false; the latter
+         * would be highly unexpected.
+         *
+         * \param T type of object to store
+         * \param data object to store
+         *
+         * \return true if the object was stored. false if it was not.
+         */
+        template<
+                typename T,
+                typename=is_supported_by_flagger<T>>
+        bool set(T &&data) {
+            using B = typename Flagger::template store<T>::as_type;
+
+            // if already contains an object of this type, simply assign
+            if (this->flags() == Flagger::flagFor((T*)0) && std::is_same<T, B>::value) {
+                _AData_mover<B>::assign(&this->get().template get<B>(), std::forward<T&&>(data));
+                return true;
+            } else if (this->used()) {
+                // destroy previous object
+                if (!this->clear()) {
+                    return false;
+                }
+            }
+            this->get().template emplace<B>(std::forward<T&&>(data));
+            this->setFlags(Flagger::flagFor((T *)0));
+            return true;
+        }
+    };
+
+    /**
+     * Basic AData using the default type flagger and requested flag type.
+     *
+     * \param Flag desired flag type to use. Must be an unsigned and std::integral type.
+     */
+    template<typename Flag>
+    using Strict = Custom<flagger<Flag>>;
+
+    /**
+     * Basic AData using the default type flagger and uint32_t flag.
+     */
+    using Basic = Strict<uint32_t>;
+
+    /**
+     * AData using the relaxed type flagger for max size and requested flag type.
+     *
+     * \param Flag desired flag type to use. Must be an unsigned and std::integral type.
+     */
+    template<typename Flag, size_t MaxSize = 1024, size_t Align = 4>
+    using Relaxed = Custom<relaxed_flagger<Flag, MaxSize, Align>>;
+
+    /**
+     * Basic AData using the relaxed type flagger and uint32_t flag.
+     */
+    using RelaxedBasic = Relaxed<uint32_t>;
+};
+
+}  // namespace android
+
+#endif  // STAGEFRIGHT_FOUNDATION_A_DATA_H_
+
diff --git a/include/media/stagefright/foundation/ColorUtils.h b/include/media/stagefright/foundation/ColorUtils.h
index 2368b82..b889a02 100644
--- a/include/media/stagefright/foundation/ColorUtils.h
+++ b/include/media/stagefright/foundation/ColorUtils.h
@@ -138,6 +138,12 @@
             int32_t primaries, int32_t transfer, int32_t coeffs, bool fullRange,
             ColorAspects &aspects);
 
+    // unpack a uint32_t to a full ColorAspects struct
+    static ColorAspects unpackToColorAspects(uint32_t packed);
+
+    // pack a full ColorAspects struct into a uint32_t
+    static uint32_t packToU32(const ColorAspects &aspects);
+
     // updates Unspecified color aspects to their defaults based on the video size
     static void setDefaultCodecColorAspectsIfNeeded(
             ColorAspects &aspects, int32_t width, int32_t height);
diff --git a/include/media/stagefright/foundation/Flagged.h b/include/media/stagefright/foundation/Flagged.h
new file mode 100644
index 0000000..bf0afbf
--- /dev/null
+++ b/include/media/stagefright/foundation/Flagged.h
@@ -0,0 +1,513 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef STAGEFRIGHT_FOUNDATION_FLAGGED_H_
+#define STAGEFRIGHT_FOUNDATION_FLAGGED_H_
+
+#include <media/stagefright/foundation/TypeTraits.h>
+
+namespace android {
+
+/**
+ * Flagged<T, Flag> is basically a specialized std::pair<Flag, T> that automatically optimizes out
+ * the flag if the wrapped type T is already flagged and we can combine the outer and inner flags.
+ *
+ * Flags can be queried/manipulated via flags() and setFlags(Flags). The wrapped value can be
+ * accessed via get(). This template is meant to be inherited by other utility/wrapper classes
+ * that need to store integral information along with the value.
+ *
+ * Users must specify the used bits (MASK) in the flags. Flag getters and setters will enforce this
+ * mask. _Flagged_helper::minMask<Flag> is provided to easily calculate a mask for a max value.
+ *
+ * E.g. adding a safe flag can be achieved like this:
+ *
+ *
+ * enum SafeFlags : uint32_t {
+ *   kUnsafe,
+ *   kSafe,
+ *   kSafeMask = _Flagged_helper::minMask(kSafe),
+ * };
+ * typedef Flagged<int32_t, SafeFlags, kSafeMask> safeInt32;
+ *
+ * safeInt32 a;
+ * a.setFlags(kSafe);
+ * a.get() = 15;
+ * EXPECT_EQ(a.flags(), kSafe);
+ * EXPECT_EQ(a.get(), 15);
+ *
+ *
+ * Flagged also supports lazy or calculated wrapping of already flagged types. Lazy wrapping is
+ * provided automatically (flags are automatically shared if possible, e.g. the mask is shifted
+ * automatically so it does not overlap with the used bits of the wrapped type's flags, and falls
+ * back to the unshared version of the template):
+ *
+ * enum OriginFlags : uint32_t {
+ *   kUnknown,
+ *   kConst,
+ *   kCalculated,
+ *   kComponent,
+ *   kApplication,
+ *   kFile,
+ *   kBinder,
+ *   kOriginMask = _Flagged_helper::minMask(kBinder),
+ * };
+ * typedef Flagged<safeInt32, OriginFlags, kOriginMask>
+ *          trackedSafeInt32;
+ *
+ * static_assert(sizeof(trackedSafeInt32) == sizeof(safeInt32), "");
+ *
+ * trackedSafeInt32 b(kConst, kSafe, 1);
+ * EXPECT_EQ(b.flags(), kConst);
+ * EXPECT_EQ(b.get().flags(), kSafe);
+ * EXPECT_EQ(b.get().get(), 1);
+ * b.setFlags(kCalculated);
+ * b.get().setFlags(overflow ? kUnsafe : kSafe);
+ *
+ * One can also choose to share some flag-bits with the wrapped class:
+ *
+ * enum ValidatedFlags : uint32_t {
+ *   kUnsafeV = kUnsafe,
+ *   kSafeV = kSafe,
+ *   kValidated = kSafe | 2,
+ *   kSharedMaskV = kSafeMask,
+ *   kValidatedMask = _Flagged_helper::minMask(kValidated),
+ * };
+ * typedef Flagged<safeInt32, ValidatedFlags, kValidatedMask, kSharedMaskV> validatedInt32;
+ *
+ * validatedInt32 v(kUnsafeV, kSafe, 10);
+ * EXPECT_EQ(v.flags(), kUnsafeV);
+ * EXPECT_EQ(v.get().flags(), kUnsafe);  // !kUnsafeV overrides kSafe
+ * EXPECT_EQ(v.get().get(), 10);
+ * v.setFlags(kValidated);
+ * EXPECT_EQ(v.flags(), kValidated);
+ * EXPECT_EQ(v.get().flags(), kSafe);
+ * v.get().setFlags(kUnsafe);
+ * EXPECT_EQ(v.flags(), 2);  // NOTE: sharing masks with enums allows strange situations to occur
+ */
+
+/**
+ * Helper class for Flagged support. Encapsulates common utilities used by all
+ * templated classes.
+ */
+struct _Flagged_helper {
+    /**
+     * Calculates the value with a given number of top-most bits set.
+     *
+     * This method may be called with a signed flag.
+     *
+     * \param num number of bits to set. This must be between 0 and the number of bits in Flag.
+     *
+     * \return the value where only the given number of top-most bits are set.
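+     *
+     * For example (illustrative): topBits<uint8_t>(3) is 0xE0 (only the top 3 bits set), and
+     * topBits<uint8_t>(0) is 0.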
+     */
+    template<typename Flag>
+    static constexpr Flag topBits(int num) {
+        return Flag(num > 0 ?
+                    ~((Flag(1) << (sizeof(Flag) * 8 - is_signed_integral<Flag>::value - num)) - 1) :
+                    0);
+    }
+
+    /**
+     * Calculates the minimum mask required to cover a value. Used with the maximum enum value for
+     * an unsigned flag.
+     *
+     * \param maxValue maximum value to cover
+     * \param shift DO NOT USE. Used internally.
+     *
+     * \return mask that can be used that covers the maximum value.
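+     *
+     * For example (illustrative): minMask<uint32_t>(6u) is 7, the smallest all-ones value that
+     * covers 6 (0b110).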
+     */
+    template<typename Flag>
+    static constexpr Flag minMask(Flag maxValue, int shift=sizeof(Flag) * 4) {
+        static_assert(is_unsigned_integral<Flag>::value,
+                      "this method only makes sense for unsigned flags");
+        return shift ? minMask<Flag>(Flag(maxValue | (maxValue >> shift)), shift >> 1) : maxValue;
+    }
+
+    /**
+     * Returns a value left-shifted by an argument as a potential constexpr.
+     *
+     * This method works around the C-language limitation that a left shift of a negative value
+     * (even by 0) cannot be a constant expression.
+     *
+     * \param value value to shift
+     * \param shift amount of shift
+     * \returns the shifted value as an integral type
+     */
+    template<typename Flag, typename IntFlag = typename underlying_integral_type<Flag>::type>
+    static constexpr IntFlag lshift(Flag value, int shift) {
+        return shift ? value << shift : value;
+    }
+
+private:
+
+    /**
+     * Determines whether mask can be combined with base-mask for a given left shift.
+     *
+     * \param mask desired mask
+     * \param baseMask mask used by T or 0 if T is not flagged by Flag
+     * \param sharedMask desired shared mask (if this is non-0, this must be mask & baseMask)
+     * \param shift desired left shift to be used for mask
+     * \param baseShift left shift used by T or 0 if T is not flagged by Flag
+     * \param effectiveMask effective mask used by T or 0 if T is not flagged by Flag
+     *
+     * \return bool whether mask can be combined with baseMask using the desired values.
+     */
+    template<typename Flag, typename IntFlag=typename underlying_integral_type<Flag>::type>
+    static constexpr bool canCombine(
+            Flag mask, IntFlag baseMask, Flag sharedMask, int shift,
+            int baseShift, IntFlag effectiveMask) {
+        return
+            // verify that shift is valid and mask can be shifted
+            shift >= 0 && (mask & topBits<Flag>(shift)) == 0 &&
+
+            // verify that base mask is part of effective mask (sanity check on arguments)
+            (baseMask & ~(effectiveMask >> baseShift)) == 0 &&
+
+            // if sharing masks, shift must be the base's shift.
+            // verify that shared mask is the overlap of base mask and mask
+            (sharedMask ?
+                    ((sharedMask ^ (baseMask & mask)) == 0 &&
+                     shift == baseShift) :
+
+
+            // otherwise, verify that there is no overlap between mask and base's effective mask
+                    (mask & (effectiveMask >> shift)) == 0);
+    }
+
+
+    /**
+     * Calculates the minimum (left) shift required to combine a mask with the mask of an
+     * underlying type (T, also flagged by Flag).
+     *
+     * \param mask desired mask
+     * \param baseMask mask used by T or 0 if T is not flagged by Flag
+     * \param sharedMask desired shared mask (if this is non-0, this must be mask & baseMask)
+     * \param baseShift left shift used by T
+     * \param effectiveMask effective mask used by T
+     *
+     * \return a non-negative minimum left shift value if mask can be combined with baseMask,
+     *         or -1 if the masks cannot be combined. -2 if the input is invalid.
+     */
+    template<typename Flag,
+             typename IntFlag = typename underlying_integral_type<Flag>::type>
+    static constexpr int getShift(
+            Flag mask, IntFlag baseMask, Flag sharedMask, int baseShift, IntFlag effectiveMask) {
+        return
+            // baseMask must be part of the effective mask
+            (baseMask & ~(effectiveMask >> baseShift)) ? -2 :
+
+            // if sharing masks, shift must be base's shift. verify that shared mask is part of
+            // base mask and mask, and that desired mask still fits with base's shift value
+            sharedMask ?
+                    (canCombine(mask, baseMask, sharedMask, baseShift /* shift */,
+                                baseShift, effectiveMask) ? baseShift : -1) :
+
+            // otherwise, see if 0-shift works
+            ((mask & effectiveMask) == 0) ? 0 :
+
+            // otherwise, verify that mask can be shifted up
+            ((mask & topBits<Flag>(1)) || (mask < 0)) ? -1 :
+
+            incShift(getShift(Flag(mask << 1), baseMask /* unused */, sharedMask /* 0 */,
+                              baseShift /* unused */, effectiveMask));
+    }
+
+    /**
+     * Helper method that increments a non-negative (shift) value.
+     *
+     * This method is used to make it easier to create a constexpr for getShift.
+     *
+     * \param shift (shift) value to increment
+     *
+     * \return original shift if it was negative; otherwise, the shift incremented by one.
+     */
+    static constexpr int incShift(int shift) {
+        return shift + (shift >= 0);
+    }
+
+#ifdef FRIEND_TEST
+    FRIEND_TEST(FlaggedTest, _Flagged_helper_Test);
+#endif
+
+public:
+    /**
+     * Base class for all Flagged<T, Flag> classes.
+     *
+     * \note flagged types do not have a member variable for the mask used by the type. As such,
+     * they should not be cast to this base class.
+     *
+     * \todo can we replace this base class check with a static member check to remove possibility
+     * of cast?
+     */
+    template<typename Flag>
+    struct base {};
+
+    /**
+     * Type support utility that retrieves the mask of a class (T) if it is a type flagged by
+     * Flag (e.g. Flagged<T, Flag>).
+     *
+     * \note This retrieves 0 if T is a flagged class that is not flagged by Flag or an equivalent
+     * underlying type.
+     *
+     * Generic implementation for a non-flagged class.
+     */
+    template<
+        typename T, typename Flag,
+        bool=std::is_base_of<base<typename underlying_integral_type<Flag>::type>, T>::value>
+    struct mask_of {
+        using IntFlag = typename underlying_integral_type<Flag>::type;
+        static constexpr IntFlag value = Flag(0); ///< mask of a potentially flagged class
+        static constexpr int shift = 0; ///< left shift of flags in a potentially flagged class
+        static constexpr IntFlag effective_value = IntFlag(0); ///< effective mask of flagged class
+    };
+
+    /**
+     * Type support utility that calculates the minimum (left) shift required to combine a mask
+     * with the mask of an underlying type T also flagged by Flag.
+     *
+     * \note if T is not flagged, not flagged by Flag, or the masks cannot be combined due to
+     * incorrect sharing or the flags not having enough bits, the minimum is -1.
+     *
+     * \param MASK desired mask
+     * \param SHARED_MASK desired shared mask (if this is non-0, T must be a type flagged by
+     *        Flag with a mask that has exactly these bits common with MASK)
+     */
+    template<typename T, typename Flag, Flag MASK, Flag SHARED_MASK>
+    struct min_shift {
+        /// minimum (left) shift required, or -1 if masks cannot be combined
+        static constexpr int value =
+            getShift(MASK, mask_of<T, Flag>::value, SHARED_MASK,
+                     mask_of<T, Flag>::shift, mask_of<T, Flag>::effective_value);
+    };
+
+    /**
+     * Type support utility that calculates whether the flags of T can be combined with MASK.
+     *
+     * \param MASK desired mask
+     * \param SHARED_MASK desired shared mask (if this is non-0, T MUST be a type flagged by
+     *        Flag with a mask that has exactly these bits common with MASK)
+     */
+    template<
+            typename T, typename Flag, Flag MASK,
+            Flag SHARED_MASK=Flag(0),
+            int SHIFT=min_shift<T, Flag, MASK, SHARED_MASK>::value>
+    struct can_combine {
+        using IntFlag = typename underlying_integral_type<Flag>::type;
+        /// true if this mask can be combined with T's existing flag. false otherwise.
+        static constexpr bool value =
+                std::is_base_of<base<IntFlag>, T>::value
+                        && canCombine(MASK, mask_of<T, Flag>::value, SHARED_MASK, SHIFT,
+                                      mask_of<T, Flag>::shift, mask_of<T, Flag>::effective_value);
+    };
+};
+
+/**
+ * Template specialization for the case when T is flagged by Flag or a compatible type.
+ */
+template<typename T, typename Flag>
+struct _Flagged_helper::mask_of<T, Flag, true> {
+    using IntType = typename underlying_integral_type<Flag>::type;
+    static constexpr IntType value = T::sFlagMask;
+    static constexpr int shift = T::sFlagShift;
+    static constexpr IntType effective_value = T::sEffectiveMask;
+};
+
+/**
+ * Main Flagged template that adds flags to an object of another type (in essence, creates a pair)
+ *
+ * Flag must be an integral type (enums are allowed).
+ *
+ * \note We could make SHARED_MASK be a boolean as it must be either 0 or MASK & base's mask, but we
+ * want it to be spelled out for safety.
+ *
+ * \param T type of object wrapped
+ * \param Flag type of flag
+ * \param MASK mask for the bits used in flag (before any shift)
+ * \param SHARED_MASK optional mask to be shared with T (if this is not zero, SHIFT must be 0, and
+ *        it must be equal to MASK & T's mask)
+ * \param SHIFT optional left shift for MASK to combine with T's mask (or -1, if masks should not
+ *        be combined.)
+ */
+template<
+        typename T, typename Flag, Flag MASK, Flag SHARED_MASK=(Flag)0,
+        int SHIFT=_Flagged_helper::min_shift<T, Flag, MASK, SHARED_MASK>::value,
+        typename IntFlag=typename underlying_integral_type<Flag>::type,
+        bool=_Flagged_helper::can_combine<T, IntFlag, MASK, SHARED_MASK, SHIFT>::value>
+class Flagged : public _Flagged_helper::base<IntFlag> {
+    static_assert(SHARED_MASK == 0,
+                  "shared mask can only be used with common flag types "
+                  "and must be part of mask and mask of base type");
+    static_assert((_Flagged_helper::topBits<Flag>(SHIFT) & MASK) == 0, "SHIFT overflows MASK");
+
+    static constexpr Flag sFlagMask = MASK;  ///< the mask
+    static constexpr int sFlagShift = SHIFT > 0 ? SHIFT : 0; ///< the left shift applied to flags
+
+    friend struct _Flagged_helper;
+#ifdef FRIEND_TEST
+    static constexpr bool sFlagCombined = false;
+    FRIEND_TEST(FlaggedTest, _Flagged_helper_Test);
+#endif
+
+    T       mValue; ///< wrapped value
+    IntFlag mFlags; ///< flags
+
+protected:
+    /// The effective combined mask used by this class and any wrapped classes if the flags are
+    /// combined.
+    static constexpr IntFlag sEffectiveMask = _Flagged_helper::lshift(MASK, sFlagShift);
+
+    /**
+     * Helper method used by subsequent flagged wrappers to query flags. Returns the
+     * flags for a particular mask and left shift.
+     *
+     * \param mask bitmask to use
+     * \param shift left shifts to use
+     *
+     * \return the requested flags
+     */
+    inline constexpr IntFlag getFlagsHelper(IntFlag mask, int shift) const {
+        return (mFlags >> shift) & mask;
+    }
+
+    /**
+     * Helper method used by subsequent flagged wrappers to apply combined flags. Sets the flags
+     * in the bitmask using a particular left shift.
+     *
+     * \param mask bitmask to use
+     * \param shift left shifts to use
+     * \param flags flags to update (any flags within the bitmask are updated to their value in this
+     *        argument)
+     */
+    inline void setFlagsHelper(IntFlag mask, int shift, IntFlag flags) {
+        mFlags = Flag((mFlags & ~(mask << shift)) | ((flags & mask) << shift));
+    }
+
+public:
+    /**
+     * Wrapper around the wrapped type's constructor. Takes the flags as the first
+     * argument and passes the rest of the arguments to T's constructor.
+     *
+     * \param flags initial flags
+     */
+    template<typename ...Args>
+    constexpr Flagged(Flag flags, Args... args)
+        : mValue(std::forward<Args>(args)...),
+          mFlags(Flag(_Flagged_helper::lshift(flags & sFlagMask, sFlagShift))) { }
+
+    /** Gets the wrapped value as const. */
+    inline constexpr const T &get() const { return mValue; }
+
+    /** Gets the wrapped value. */
+    inline T &get() { return mValue; }
+
+    /** Gets the flags. */
+    constexpr Flag flags() const {
+        return Flag(getFlagsHelper(sFlagMask, sFlagShift));
+    }
+
+    /** Sets the flags. */
+    void setFlags(Flag flags) {
+        setFlagsHelper(sFlagMask, sFlagShift, flags);
+    }
+};
+
+/*
+ * TRICKY: we cannot implement the specialization as:
+ *
+ * class Flagged : base<Flag> {
+ *    T value;
+ * };
+ *
+ * Because T also inherits from base<Flag> and this runs into a compiler bug where
+ * sizeof(Flagged) > sizeof(T).
+ *
+ * Instead, we must inherit directly from the wrapped class
+ *
+ */
+#if 0
+template<
+        typename T, typename Flag, Flag MASK, Flag SHARED_MASK, int SHIFT>
+class Flagged<T, Flag, MASK, SHARED_MASK, SHIFT, true> : public _Flagged_helper::base<Flag> {
+private:
+    T mValue;
+};
+#else
+/**
+ * Specialization for the case when T is derived from Flagged<U, Flag> and flags can be combined.
+ */
+template<
+        typename T, typename Flag, Flag MASK, Flag SHARED_MASK, int SHIFT, typename IntFlag>
+class Flagged<T, Flag, MASK, SHARED_MASK, SHIFT, IntFlag, true> : private T {
+    static_assert(is_integral_or_enum<Flag>::value, "flag must be integer or enum");
+
+    static_assert(SHARED_MASK == 0 || SHIFT == 0, "cannot overlap masks when using SHIFT");
+    static_assert((SHARED_MASK & ~MASK) == 0, "shared mask must be part of the mask");
+    static_assert((SHARED_MASK & ~T::sEffectiveMask) == 0,
+                  "shared mask must be part of the base mask");
+    static_assert(SHARED_MASK == 0 || (~SHARED_MASK & (MASK & T::sEffectiveMask)) == 0,
+                  "mask and base mask can only overlap in shared mask");
+
+    static constexpr Flag sFlagMask = MASK;  ///< the mask
+    static constexpr int sFlagShift = SHIFT;  ///< the left shift applied to the flags
+
+#ifdef FRIEND_TEST
+    const static bool sFlagCombined = true;
+    FRIEND_TEST(FlaggedTest, _Flagged_helper_Test);
+#endif
+
+protected:
+    /// The effective combined mask used by this class and any wrapped classes if the flags are
+    /// combined.
+    static constexpr IntFlag sEffectiveMask = Flag((MASK << SHIFT) | T::sEffectiveMask);
+    friend struct _Flagged_helper;
+
+public:
+    /**
+     * Wrapper around base class constructor. These take the flags as their first
+     * argument and pass the rest of the arguments to the base class constructor.
+     *
+     * \param flags initial flags
+     */
+    template<typename ...Args>
+    constexpr Flagged(Flag flags, Args... args)
+        : T(std::forward<Args>(args)...) {
+        // we construct the base class first and apply the flags afterwards as
+        // base class may not have a constructor that takes flags even if it is derived from
+        // Flagged<U, Flag>
+        setFlags(flags);
+    }
+
+    /** Gets the wrapped value as const. */
+    inline constexpr const T &get() const { return *this; }
+
+    /** Gets the wrapped value. */
+    inline T &get() { return *this; }
+
+    /** Gets the flags. */
+    Flag constexpr flags() const {
+        return Flag(this->getFlagsHelper(sFlagMask, sFlagShift));
+    }
+
+    /** Sets the flags. */
+    void setFlags(Flag flags) {
+        this->setFlagsHelper(sFlagMask, sFlagShift, flags);
+    }
+};
+#endif
+
+}  // namespace android
+
+#endif  // STAGEFRIGHT_FOUNDATION_FLAGGED_H_
+
diff --git a/include/media/stagefright/foundation/TypeTraits.h b/include/media/stagefright/foundation/TypeTraits.h
new file mode 100644
index 0000000..1250e9b
--- /dev/null
+++ b/include/media/stagefright/foundation/TypeTraits.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef STAGEFRIGHT_FOUNDATION_TYPE_TRAITS_H_
+#define STAGEFRIGHT_FOUNDATION_TYPE_TRAITS_H_
+
+#include <type_traits>
+
+namespace android {
+
+/**
+ * std::is_signed, is_unsigned and is_integral do not consider enums even though the standard
+ * considers them integral. Create modified versions of these here. Also create a wrapper around
+ * std::underlying_type that does not require checking if the type is an enum.
+ */
+
+/**
+ * Type support utility class to check if a type is an integral type or an enum.
+ */
+template<typename T>
+struct is_integral_or_enum
+    : std::integral_constant<bool, std::is_integral<T>::value || std::is_enum<T>::value> { };
+
+/**
+ * Type support utility class to get the underlying std::is_integral supported type for a type.
+ * This returns the underlying type for enums, and the same type for types covered by
+ * std::is_integral.
+ *
+ * This is also used as a conditional to return an alternate type if the template param is not
+ * an integral or enum type (as in underlying_integral_type<T, TypeIfNotEnumOrIntegral>::type).
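+ *
+ * For example (illustrative): for an enum declared as "enum E : uint8_t",
+ * underlying_integral_type<E>::type is uint8_t; underlying_integral_type<long>::type is long;
+ * and underlying_integral_type<float, unsigned>::type is unsigned (float is neither integral
+ * nor an enum).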
+ */
+template<typename T,
+        typename U=typename std::enable_if<is_integral_or_enum<T>::value>::type,
+        bool=std::is_enum<T>::value,
+        bool=std::is_integral<T>::value>
+struct underlying_integral_type {
+    static_assert(!std::is_enum<T>::value, "T should not be enum here");
+    static_assert(!std::is_integral<T>::value, "T should not be integral here");
+    typedef U type;
+};
+
+/** Specialization for enums. */
+template<typename T, typename U>
+struct underlying_integral_type<T, U, true, false> {
+    static_assert(std::is_enum<T>::value, "T should be enum here");
+    static_assert(!std::is_integral<T>::value, "T should not be integral here");
+    typedef typename std::underlying_type<T>::type type;
+};
+
+/** Specialization for non-enum std-integral types. */
+template<typename T, typename U>
+struct underlying_integral_type<T, U, false, true> {
+    static_assert(!std::is_enum<T>::value, "T should not be enum here");
+    static_assert(std::is_integral<T>::value, "T should be integral here");
+    typedef T type;
+};
+
+/**
+ * Type support utility class to check if the underlying integral type is signed.
+ */
+template<typename T>
+struct is_signed_integral
+    : std::integral_constant<bool, std::is_signed<
+            typename underlying_integral_type<T, unsigned>::type>::value> { };
+
+/**
+ * Type support utility class to check if the underlying integral type is unsigned.
+ */
+template<typename T>
+struct is_unsigned_integral
+    : std::integral_constant<bool, std::is_unsigned<
+            typename underlying_integral_type<T, signed>::type>::value> {
+};
+
+/**
+ * Type support relationship query template.
+ *
+ * If T occurs as one of the types in Us with the same const-volatile qualifications, provides the
+ * member constant |value| equal to true. Otherwise value is false.
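+ *
+ * For example (illustrative): is_one_of<int, char, int>::value is true, while
+ * is_one_of<const int, char, int>::value is false as the qualifications differ.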
+ */
+template<typename T, typename ...Us>
+struct is_one_of;
+
+/// \if 0
+/**
+ * Template specialization when first type matches the searched type.
+ */
+template<typename T, typename ...Us>
+struct is_one_of<T, T, Us...> : std::true_type {};
+
+/**
+ * Template specialization when first type does not match the searched type.
+ */
+template<typename T, typename U, typename ...Us>
+struct is_one_of<T, U, Us...> : is_one_of<T, Us...> {};
+
+/**
+ * Template specialization when there are no types to search.
+ */
+template<typename T>
+struct is_one_of<T> : std::false_type {};
+/// \endif
+
+/**
+ * Type support relationship query template.
+ *
+ * If all types in Us are unique, provides the member constant |value| equal to true.
+ * Otherwise value is false.
+ */
+template<typename ...Us>
+struct are_unique;
+
+/// \if 0
+/**
+ * Template specialization when there are no types.
+ */
+template<>
+struct are_unique<> : std::true_type {};
+
+/**
+ * Template specialization when there is at least one type to check.
+ */
+template<typename T, typename ...Us>
+struct are_unique<T, Us...>
+    : std::integral_constant<bool, are_unique<Us...>::value && !is_one_of<T, Us...>::value> {};
+/// \endif
+
+/// \if 0
+template<size_t Base, typename T, typename ...Us>
+struct _find_first_impl;
+
+/**
+ * Template specialization when there are no types to search.
+ */
+template<size_t Base, typename T>
+struct _find_first_impl<Base, T> : std::integral_constant<size_t, 0> {};
+
+/**
+ * Template specialization when T is the first type in Us.
+ */
+template<size_t Base, typename T, typename ...Us>
+struct _find_first_impl<Base, T, T, Us...> : std::integral_constant<size_t, Base> {};
+
+/**
+ * Template specialization when T is not the first type in Us.
+ */
+template<size_t Base, typename T, typename U, typename ...Us>
+struct _find_first_impl<Base, T, U, Us...>
+    : std::integral_constant<size_t, _find_first_impl<Base + 1, T, Us...>::value> {};
+
+/// \endif
+
+/**
+ * Type support relationship query template.
+ *
+ * If T occurs in Us, index is the 1-based left-most index of T in Us. Otherwise, index is 0.
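+ *
+ * For example (illustrative): find_first<int, char, int, float>::index is 2, and
+ * find_first<bool, char, int, float>::index is 0.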
+ */
+template<typename T, typename ...Us>
+struct find_first {
+    static constexpr size_t index = _find_first_impl<1, T, Us...>::value;
+};
+
+/// \if 0
+/**
+ * Helper class for find_first_convertible_to template.
+ *
+ * Adds a base index.
+ */
+template<size_t Base, typename T, typename ...Us>
+struct _find_first_convertible_to_helper;
+
+/**
+ * Template specialization for when there are more types to consider
+ */
+template<size_t Base, typename T, typename U, typename ...Us>
+struct _find_first_convertible_to_helper<Base, T, U, Us...> {
+    static constexpr size_t index =
+        std::is_convertible<T, U>::value ? Base :
+                _find_first_convertible_to_helper<Base + 1, T, Us...>::index;
+    typedef typename std::conditional<
+        std::is_convertible<T, U>::value, U,
+        typename _find_first_convertible_to_helper<Base + 1, T, Us...>::type>::type type;
+};
+
+/**
+ * Template specialization for when there are no more types to consider
+ */
+template<size_t Base, typename T>
+struct _find_first_convertible_to_helper<Base, T> {
+    static constexpr size_t index = 0;
+    typedef void type;
+};
+
+/// \endif
+
+/**
+ * Type support template that returns the first type among a list of other types (Us) that T can
+ * be implicitly converted into, and its 1-based index.
+ *
+ * Returns index of 0 and type of void if there are no convertible types.
+ *
+ * \param T type that is converted
+ * \param Us types into which the conversion is considered
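+ *
+ * For example (illustrative): find_first_convertible_to<char, std::string, int>::index is 2 and
+ * its type is int (char converts to int but not to std::string), while
+ * find_first_convertible_to<void*, std::string, int>::index is 0 and its type is void.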
+ */
+template<typename T, typename ...Us>
+struct find_first_convertible_to : public _find_first_convertible_to_helper<1, T, Us...> { };
+
+}  // namespace android
+
+#endif  // STAGEFRIGHT_FOUNDATION_TYPE_TRAITS_H_
+
diff --git a/media/audioserver/Android.mk b/media/audioserver/Android.mk
index c620e7c..7dc4e1d 100644
--- a/media/audioserver/Android.mk
+++ b/media/audioserver/Android.mk
@@ -14,7 +14,8 @@
 	libmedialogservice \
 	libradioservice \
 	libsoundtriggerservice \
-	libutils
+	libutils \
+	libhwbinder
 
 LOCAL_C_INCLUDES := \
 	frameworks/av/services/audioflinger \
diff --git a/media/audioserver/audioserver.rc b/media/audioserver/audioserver.rc
index 80f78b6..4b0f6a2 100644
--- a/media/audioserver/audioserver.rc
+++ b/media/audioserver/audioserver.rc
@@ -5,3 +5,4 @@
     group audio camera drmrpc inet media mediadrm net_bt net_bt_admin net_bw_acct
     ioprio rt 4
     writepid /dev/cpuset/foreground/tasks /dev/stune/foreground/tasks
+    onrestart restart audio-hal-2-0
\ No newline at end of file
diff --git a/media/audioserver/main_audioserver.cpp b/media/audioserver/main_audioserver.cpp
index 4a7a988..bcd0342 100644
--- a/media/audioserver/main_audioserver.cpp
+++ b/media/audioserver/main_audioserver.cpp
@@ -27,6 +27,10 @@
 #include <binder/IServiceManager.h>
 #include <utils/Log.h>
 
+// FIXME: remove when BUG 31748996 is fixed
+#include <hwbinder/IPCThreadState.h>
+#include <hwbinder/ProcessState.h>
+
 // from LOCAL_C_INCLUDES
 #include "AudioFlinger.h"
 #include "AudioPolicyService.h"
@@ -130,6 +134,10 @@
         RadioService::instantiate();
         SoundTriggerHwService::instantiate();
         ProcessState::self()->startThreadPool();
+
+// FIXME: remove when BUG 31748996 is fixed
+        android::hardware::ProcessState::self()->startThreadPool();
+
         IPCThreadState::self()->joinThreadPool();
     }
 }
diff --git a/media/libaudioclient/AudioEffect.cpp b/media/libaudioclient/AudioEffect.cpp
index 590952f..a5f9ab6 100644
--- a/media/libaudioclient/AudioEffect.cpp
+++ b/media/libaudioclient/AudioEffect.cpp
@@ -128,9 +128,11 @@
     mDescriptor.uuid = *(uuid != NULL ? uuid : EFFECT_UUID_NULL);
 
     mIEffectClient = new EffectClient(this);
+    mClientPid = IPCThreadState::self()->getCallingPid();
 
     iEffect = audioFlinger->createEffect((effect_descriptor_t *)&mDescriptor,
-            mIEffectClient, priority, io, mSessionId, mOpPackageName, &mStatus, &mId, &enabled);
+            mIEffectClient, priority, io, mSessionId, mOpPackageName, mClientPid,
+            &mStatus, &mId, &enabled);
 
     if (iEffect == 0 || (mStatus != NO_ERROR && mStatus != ALREADY_EXISTS)) {
         ALOGE("set(): AudioFlinger could not create effect, status: %d", mStatus);
@@ -156,7 +158,6 @@
     mCblk->buffer = (uint8_t *)mCblk + bufOffset;
 
     IInterface::asBinder(iEffect)->linkToDeath(mIEffectClient);
-    mClientPid = IPCThreadState::self()->getCallingPid();
     ALOGV("set() %p OK effect: %s id: %d status %d enabled %d pid %d", this, mDescriptor.name, mId,
             mStatus, mEnabled, mClientPid);
 
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index a80c891..4c1fbd7 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -68,7 +68,7 @@
 AudioRecord::AudioRecord(const String16 &opPackageName)
     : mActive(false), mStatus(NO_INIT), mOpPackageName(opPackageName), mSessionId(AUDIO_SESSION_ALLOCATE),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT),
-      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
+      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE), mPortId(AUDIO_PORT_HANDLE_NONE)
 {
 }
 
@@ -85,7 +85,7 @@
         audio_session_t sessionId,
         transfer_type transferType,
         audio_input_flags_t flags,
-        int uid,
+        uid_t uid,
         pid_t pid,
         const audio_attributes_t* pAttributes)
     : mActive(false),
@@ -95,7 +95,8 @@
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
       mPreviousSchedulingGroup(SP_DEFAULT),
       mProxy(NULL),
-      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
+      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
+      mPortId(AUDIO_PORT_HANDLE_NONE)
 {
     mStatus = set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
             notificationFrames, false /*threadCanCallJava*/, sessionId, transferType, flags,
@@ -143,7 +144,7 @@
         audio_session_t sessionId,
         transfer_type transferType,
         audio_input_flags_t flags,
-        int uid,
+        uid_t uid,
         pid_t pid,
         const audio_attributes_t* pAttributes)
 {
@@ -236,7 +237,7 @@
 
     int callingpid = IPCThreadState::self()->getCallingPid();
     int mypid = getpid();
-    if (uid == -1 || (callingpid != mypid)) {
+    if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
         mClientUid = IPCThreadState::self()->getCallingUid();
     } else {
         mClientUid = uid;
@@ -529,14 +530,18 @@
     // The sp<> references will be dropped when re-entering scope.
     // The lack of indentation is deliberate, to reduce code churn and ease merges.
     for (;;) {
-
+    audio_config_base_t config  = {
+            .sample_rate = mSampleRate,
+            .channel_mask = mChannelMask,
+            .format = mFormat
+        };
     status = AudioSystem::getInputForAttr(&mAttributes, &input,
                                         mSessionId,
                                         // FIXME compare to AudioTrack
                                         mClientPid,
                                         mClientUid,
-                                        mSampleRate, mFormat, mChannelMask,
-                                        mFlags, mSelectedDeviceId);
+                                        &config,
+                                        mFlags, mSelectedDeviceId, &mPortId);
 
     if (status != NO_ERROR || input == AUDIO_IO_HANDLE_NONE) {
         ALOGE("Could not get audio input for session %d, record source %d, sample rate %u, "
@@ -622,7 +627,8 @@
                                                        &notificationFrames,
                                                        iMem,
                                                        bufferMem,
-                                                       &status);
+                                                       &status,
+                                                       mPortId);
     ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
             "session ID changed from %d to %d", originalSessionId, mSessionId);
 
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index bbe6a8f..1908f0e 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -811,18 +811,16 @@
                                         audio_session_t session,
                                         audio_stream_type_t *stream,
                                         uid_t uid,
-                                        uint32_t samplingRate,
-                                        audio_format_t format,
-                                        audio_channel_mask_t channelMask,
+                                        const audio_config_t *config,
                                         audio_output_flags_t flags,
                                         audio_port_handle_t selectedDeviceId,
-                                        const audio_offload_info_t *offloadInfo)
+                                        audio_port_handle_t *portId)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return NO_INIT;
     return aps->getOutputForAttr(attr, output, session, stream, uid,
-                                 samplingRate, format, channelMask,
-                                 flags, selectedDeviceId, offloadInfo);
+                                 config,
+                                 flags, selectedDeviceId, portId);
 }
 
 status_t AudioSystem::startOutput(audio_io_handle_t output,
@@ -857,17 +855,16 @@
                                 audio_session_t session,
                                 pid_t pid,
                                 uid_t uid,
-                                uint32_t samplingRate,
-                                audio_format_t format,
-                                audio_channel_mask_t channelMask,
+                                const audio_config_base_t *config,
                                 audio_input_flags_t flags,
-                                audio_port_handle_t selectedDeviceId)
+                                audio_port_handle_t selectedDeviceId,
+                                audio_port_handle_t *portId)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return NO_INIT;
     return aps->getInputForAttr(
             attr, input, session, pid, uid,
-            samplingRate, format, channelMask, flags, selectedDeviceId);
+            config, flags, selectedDeviceId, portId);
 }
 
 status_t AudioSystem::startInput(audio_io_handle_t input,
@@ -1194,14 +1191,14 @@
 
 status_t AudioSystem::startAudioSource(const struct audio_port_config *source,
                                        const audio_attributes_t *attributes,
-                                       audio_io_handle_t *handle)
+                                       audio_patch_handle_t *handle)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
     return aps->startAudioSource(source, attributes, handle);
 }
 
-status_t AudioSystem::stopAudioSource(audio_io_handle_t handle)
+status_t AudioSystem::stopAudioSource(audio_patch_handle_t handle)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
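Note: startAudioSource()/stopAudioSource() now trade in audio_patch_handle_t rather than audio_io_handle_t, matching what the policy manager actually creates. A hedged usage sketch; source and attributes are assumed to be filled in by the caller.

    audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
    if (AudioSystem::startAudioSource(&source, &attributes, &handle) == NO_ERROR) {
        // ... the external source is now routed ...
        AudioSystem::stopAudioSource(handle);   // release the patch by its handle
    }
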
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 08ec834..f9ab208 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -50,6 +50,8 @@
     return x > y ? x : y;
 }
 
+static const int32_t NANOS_PER_SECOND = 1000000000;
+
 static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
 {
     return ((double)frames * 1000000000) / ((double)sampleRate * speed);
@@ -60,6 +62,11 @@
     return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
 }
 
+static inline nsecs_t convertTimespecToNs(const struct timespec &tv)
+{
+    return tv.tv_sec * (long long)NANOS_PER_SECOND + tv.tv_nsec;
+}
+
 // current monotonic time in microseconds.
 static int64_t getNowUs()
 {
@@ -176,7 +183,8 @@
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
       mPreviousSchedulingGroup(SP_DEFAULT),
       mPausedPosition(0),
-      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
+      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
+      mPortId(AUDIO_PORT_HANDLE_NONE)
 {
     mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
     mAttributes.usage = AUDIO_USAGE_UNKNOWN;
@@ -197,7 +205,7 @@
         audio_session_t sessionId,
         transfer_type transferType,
         const audio_offload_info_t *offloadInfo,
-        int uid,
+        uid_t uid,
         pid_t pid,
         const audio_attributes_t* pAttributes,
         bool doNotReconnect,
@@ -207,7 +215,8 @@
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
       mPreviousSchedulingGroup(SP_DEFAULT),
       mPausedPosition(0),
-      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
+      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
+      mPortId(AUDIO_PORT_HANDLE_NONE)
 {
     mStatus = set(streamType, sampleRate, format, channelMask,
             frameCount, flags, cbf, user, notificationFrames,
@@ -228,7 +237,7 @@
         audio_session_t sessionId,
         transfer_type transferType,
         const audio_offload_info_t *offloadInfo,
-        int uid,
+        uid_t uid,
         pid_t pid,
         const audio_attributes_t* pAttributes,
         bool doNotReconnect,
@@ -238,7 +247,8 @@
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
       mPreviousSchedulingGroup(SP_DEFAULT),
       mPausedPosition(0),
-      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
+      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
+      mPortId(AUDIO_PORT_HANDLE_NONE)
 {
     mStatus = set(streamType, sampleRate, format, channelMask,
             0 /*frameCount*/, flags, cbf, user, notificationFrames,
@@ -289,7 +299,7 @@
         audio_session_t sessionId,
         transfer_type transferType,
         const audio_offload_info_t *offloadInfo,
-        int uid,
+        uid_t uid,
         pid_t pid,
         const audio_attributes_t* pAttributes,
         bool doNotReconnect,
@@ -445,6 +455,7 @@
         mOffloadInfo = &mOffloadInfoCopy;
     } else {
         mOffloadInfo = NULL;
+        memset(&mOffloadInfoCopy, 0, sizeof(audio_offload_info_t));
     }
 
     mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
@@ -483,7 +494,7 @@
     }
     int callingpid = IPCThreadState::self()->getCallingPid();
     int mypid = getpid();
-    if (uid == -1 || (callingpid != mypid)) {
+    if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
         mClientUid = IPCThreadState::self()->getCallingUid();
     } else {
         mClientUid = uid;
@@ -536,9 +547,11 @@
     mTimestampStartupGlitchReported = false;
     mRetrogradeMotionReported = false;
     mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
+    mStartTs.mPosition = 0;
     mUnderrunCountOffset = 0;
     mFramesWritten = 0;
     mFramesWrittenServerOffset = 0;
+    mFramesWrittenAtRestore = -1; // sentinel value; a valid written-frames count is never negative.
 
     return NO_ERROR;
 }
@@ -562,6 +575,17 @@
         mState = STATE_ACTIVE;
     }
     (void) updateAndGetPosition_l();
+
+    // save start timestamp
+    if (isOffloadedOrDirect_l()) {
+        if (getTimestamp_l(mStartTs) != OK) {
+            mStartTs.mPosition = 0;
+        }
+    } else {
+        if (getTimestamp_l(&mStartEts) != OK) {
+            mStartEts.clear();
+        }
+    }
     if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
         // reset current position as seen by client to 0
         mPosition = 0;
@@ -570,19 +594,17 @@
         mRetrogradeMotionReported = false;
         mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
 
-        // read last server side position change via timestamp.
-        ExtendedTimestamp ets;
-        if (mProxy->getTimestamp(&ets) == OK &&
-                ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
+        if (!isOffloadedOrDirect_l()
+                && mStartEts.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
             // Server side has consumed something, but is it finished consuming?
             // It is possible since flush and stop are asynchronous that the server
             // is still active at this point.
             ALOGV("start: server read:%lld  cumulative flushed:%lld  client written:%lld",
                     (long long)(mFramesWrittenServerOffset
-                            + ets.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
-                    (long long)ets.mFlushed,
+                            + mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
+                    (long long)mStartEts.mFlushed,
                     (long long)mFramesWritten);
-            mFramesWrittenServerOffset = -ets.mPosition[ExtendedTimestamp::LOCATION_SERVER];
+            mFramesWrittenServerOffset = -mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER];
         }
         mFramesWritten = 0;
         mProxy->clearTimestamp(); // need new server push for valid timestamp
@@ -599,7 +621,7 @@
         mRefreshRemaining = true;
     }
     mNewPosition = mPosition + mUpdatePeriod;
-    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
+    int32_t flags = android_atomic_and(~(CBLK_STREAM_END_DONE | CBLK_DISABLED), &mCblk->mFlags);
 
     status_t status = NO_ERROR;
     if (!(flags & CBLK_INVALID)) {
@@ -1237,10 +1259,15 @@
     // After fast request is denied, we will request again if IAudioTrack is re-created.
 
     status_t status;
+    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+    config.sample_rate = mSampleRate;
+    config.channel_mask = mChannelMask;
+    config.format = mFormat;
+    config.offload_info = mOffloadInfoCopy;
     status = AudioSystem::getOutputForAttr(attr, &output,
                                            mSessionId, &streamType, mClientUid,
-                                           mSampleRate, mFormat, mChannelMask,
-                                           mFlags, mSelectedDeviceId, mOffloadInfo);
+                                           &config,
+                                           mFlags, mSelectedDeviceId, &mPortId);
 
     if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
         ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u, format %#x,"
@@ -1398,7 +1425,8 @@
                                                       tid,
                                                       &mSessionId,
                                                       mClientUid,
-                                                      &status);
+                                                      &status,
+                                                      mPortId);
     ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
             "session ID changed from %d to %d", originalSessionId, mSessionId);
 
@@ -2183,10 +2211,12 @@
     mUnderrunCountOffset = getUnderrunCount_l();
 
     // save the old static buffer position
+    uint32_t staticPosition = 0;
     size_t bufferPosition = 0;
     int loopCount = 0;
     if (mStaticProxy != 0) {
         mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
+        staticPosition = mStaticProxy->getPosition().unsignedValue();
     }
 
     mFlags = mOrigFlags;
@@ -2218,8 +2248,11 @@
         }
         if (mState == STATE_ACTIVE) {
             result = mAudioTrack->start();
-            mFramesWrittenServerOffset = mFramesWritten; // server resets to zero so we offset
         }
+        // server resets to zero so we offset
+        mFramesWrittenServerOffset =
+                mStaticProxy.get() != nullptr ? staticPosition : mFramesWritten;
+        mFramesWrittenAtRestore = mFramesWrittenServerOffset;
     }
     if (result != NO_ERROR) {
         ALOGW("restoreTrack_l() failed status %d", result);
@@ -2317,7 +2350,11 @@
 status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
 {
     AutoMutex lock(mLock);
+    return getTimestamp_l(timestamp);
+}
 
+status_t AudioTrack::getTimestamp_l(AudioTimestamp& timestamp)
+{
     bool previousTimestampValid = mPreviousTimestampValid;
     // Set false here to cover all the error return cases.
     mPreviousTimestampValid = false;
@@ -2395,6 +2432,26 @@
                     ALOGV_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_SERVER,
                             "getTimestamp() location moved from server to kernel");
                 }
+
+                // We update the timestamp time even when paused.
+                if (mState == STATE_PAUSED /* not needed: STATE_PAUSED_STOPPING */) {
+                    const int64_t now = systemTime();
+                    const int64_t at = convertTimespecToNs(timestamp.mTime);
+                    const int64_t lag =
+                            (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
+                                ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0)
+                            ? int64_t(mAfLatency * 1000000LL)
+                            : (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
+                             - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK])
+                             * NANOS_PER_SECOND / mSampleRate;
+                    const int64_t limit = now - lag; // no earlier than this limit
+                    if (at < limit) {
+                        ALOGV("timestamp pause lag:%lld adjusting from %lld to %lld",
+                                (long long)lag, (long long)at, (long long)limit);
+                        timestamp.mTime.tv_sec = limit / NANOS_PER_SECOND;
+                        timestamp.mTime.tv_nsec = limit % NANOS_PER_SECOND; // compiler opt.
+                    }
+                }
                 mPreviousLocation = location;
             } else {
                 // right after AudioTrack is started, one may not find a timestamp
@@ -2402,7 +2459,17 @@
             }
         }
         if (status == INVALID_OPERATION) {
-            status = WOULD_BLOCK;
+            // INVALID_OPERATION occurs when no timestamp has been issued by the server;
+            // other failures are signaled by a negative time.
+            // If we come out of FLUSHED or STOPPED where the position is known
+            // to be zero we convert this to WOULD_BLOCK (with the implicit meaning of
+            // "zero" for NuPlayer).  We don't convert for track restoration as position
+            // does not reset.
+            ALOGV("timestamp server offset:%lld restore frames:%lld",
+                    (long long)mFramesWrittenServerOffset, (long long)mFramesWrittenAtRestore);
+            if (mFramesWrittenServerOffset != mFramesWrittenAtRestore) {
+                status = WOULD_BLOCK;
+            }
         }
     }
     if (status != NO_ERROR) {
@@ -2414,6 +2481,7 @@
             // use cached paused position in case another offloaded track is running.
             timestamp.mPosition = mPausedPosition;
             clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
+            // TODO: adjust for delay
             return NO_ERROR;
         }
 
@@ -2500,21 +2568,18 @@
     // This is sometimes caused by erratic reports of the available space in the ALSA drivers.
     if (status == NO_ERROR) {
         if (previousTimestampValid) {
-#define TIME_TO_NANOS(time) ((int64_t)(time).tv_sec * 1000000000 + (time).tv_nsec)
-            const int64_t previousTimeNanos = TIME_TO_NANOS(mPreviousTimestamp.mTime);
-            const int64_t currentTimeNanos = TIME_TO_NANOS(timestamp.mTime);
-#undef TIME_TO_NANOS
+            const int64_t previousTimeNanos = convertTimespecToNs(mPreviousTimestamp.mTime);
+            const int64_t currentTimeNanos = convertTimespecToNs(timestamp.mTime);
             if (currentTimeNanos < previousTimeNanos) {
-                ALOGW("retrograde timestamp time");
-                // FIXME Consider blocking this from propagating upwards.
+                ALOGW("retrograde timestamp time corrected, %lld < %lld",
+                        (long long)currentTimeNanos, (long long)previousTimeNanos);
+                timestamp.mTime = mPreviousTimestamp.mTime;
             }
 
             // Looking at signed delta will work even when the timestamps
             // are wrapping around.
             int32_t deltaPosition = (Modulo<uint32_t>(timestamp.mPosition)
                     - mPreviousTimestamp.mPosition).signedValue();
-            // position can bobble slightly as an artifact; this hides the bobble
-            static const int32_t MINIMUM_POSITION_DELTA = 8;
             if (deltaPosition < 0) {
                 // Only report once per position instead of spamming the log.
                 if (!mRetrogradeMotionReported) {
@@ -2527,9 +2592,21 @@
             } else {
                 mRetrogradeMotionReported = false;
             }
-            if (deltaPosition < MINIMUM_POSITION_DELTA) {
-                timestamp = mPreviousTimestamp;  // Use last valid timestamp.
+            if (deltaPosition < 0) {
+                timestamp.mPosition = mPreviousTimestamp.mPosition;
+                deltaPosition = 0;
             }
+#if 0
+            // Uncomment this to verify audio timestamp rate.
+            const int64_t deltaTime =
+                    convertTimespecToNs(timestamp.mTime) - previousTimeNanos;
+            if (deltaTime != 0) {
+                const int64_t computedSampleRate =
+                        deltaPosition * (long long)NANOS_PER_SECOND / deltaTime;
+                ALOGD("computedSampleRate:%u  sampleRate:%u",
+                        (unsigned)computedSampleRate, mSampleRate);
+            }
+#endif
         }
         mPreviousTimestamp = timestamp;
         mPreviousTimestampValid = true;
@@ -2699,6 +2776,75 @@
     return NO_ERROR;
 }
 
+bool AudioTrack::hasStarted()
+{
+    AutoMutex lock(mLock);
+    switch (mState) {
+    case STATE_STOPPED:
+        if (isOffloadedOrDirect_l()) {
+            // check if we have started in the past to return true.
+            return mStartUs > 0;
+        }
+        // A normal audio track may still be draining, so
+        // check if stream has ended.  This covers fasttrack position
+        // instability and start/stop without any data written.
+        if (mProxy->getStreamEndDone()) {
+            return true;
+        }
+        // fall through
+    case STATE_ACTIVE:
+    case STATE_STOPPING:
+        break;
+    case STATE_PAUSED:
+    case STATE_PAUSED_STOPPING:
+    case STATE_FLUSHED:
+        return false;  // we're not active
+    default:
+        LOG_ALWAYS_FATAL("Invalid mState in hasStarted(): %d", mState);
+        break;
+    }
+
+    // wait indicates whether we need to wait for a timestamp.
+    // This is conservatively figured - if we encounter an unexpected error
+    // then we will not wait.
+    bool wait = false;
+    if (isOffloadedOrDirect_l()) {
+        AudioTimestamp ts;
+        status_t status = getTimestamp_l(ts);
+        if (status == WOULD_BLOCK) {
+            wait = true;
+        } else if (status == OK) {
+            wait = (ts.mPosition == 0 || ts.mPosition == mStartTs.mPosition);
+        }
+        ALOGV("hasStarted wait:%d  ts:%u  start position:%lld",
+                (int)wait,
+                ts.mPosition,
+                (long long)mStartTs.mPosition);
+    } else {
+        int location = ExtendedTimestamp::LOCATION_SERVER; // for ALOG
+        ExtendedTimestamp ets;
+        status_t status = getTimestamp_l(&ets);
+        if (status == WOULD_BLOCK) {  // no SERVER or KERNEL frame info in ets
+            wait = true;
+        } else if (status == OK) {
+            for (location = ExtendedTimestamp::LOCATION_KERNEL;
+                    location >= ExtendedTimestamp::LOCATION_SERVER; --location) {
+                if (ets.mTimeNs[location] < 0 || mStartEts.mTimeNs[location] < 0) {
+                    continue;
+                }
+                wait = ets.mPosition[location] == 0
+                        || ets.mPosition[location] == mStartEts.mPosition[location];
+                break;
+            }
+        }
+        ALOGV("hasStarted wait:%d  ets:%lld  start position:%lld",
+                (int)wait,
+                (long long)ets.mPosition[location],
+                (long long)mStartEts.mPosition[location]);
+    }
+    return !wait;
+}
+
 // =========================================================================
 
 void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
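Note: the paused-timestamp hunk in getTimestamp_l() above clamps the reported time so it never lags "now" by more than the audio still queued between server and kernel. A standalone sketch of that arithmetic; the function and parameter names are illustrative and not part of the patch.

    #include <cstdint>

    // Returns the earliest acceptable timestamp time for a paused track.
    static int64_t pausedTimeFloorNs(int64_t nowNs, bool kernelPositionsValid,
                                     int64_t serverFrames, int64_t kernelFrames,
                                     uint32_t sampleRate, uint32_t afLatencyMs) {
        // Lag is the audio still queued between the server and kernel, in ns;
        // if the kernel positions are unknown, fall back to the mixer latency.
        const int64_t lagNs = !kernelPositionsValid
                ? int64_t(afLatencyMs) * 1000000LL
                : (serverFrames - kernelFrames) * 1000000000LL / int64_t(sampleRate);
        return nowNs - lagNs;   // timestamps older than this are pulled forward
    }
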
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 65fdedb..255e350 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -108,7 +108,8 @@
                                 pid_t tid,
                                 audio_session_t *sessionId,
                                 int clientUid,
-                                status_t *status)
+                                status_t *status,
+                                audio_port_handle_t portId)
     {
         Parcel data, reply;
         sp<IAudioTrack> track;
@@ -137,6 +138,7 @@
         }
         data.writeInt32(lSessionId);
         data.writeInt32(clientUid);
+        data.writeInt32(portId);
         status_t lStatus = remote()->transact(CREATE_TRACK, data, &reply);
         if (lStatus != NO_ERROR) {
             ALOGE("createTrack error: %s", strerror(-lStatus));
@@ -188,7 +190,8 @@
                                 size_t *notificationFrames,
                                 sp<IMemory>& cblk,
                                 sp<IMemory>& buffers,
-                                status_t *status)
+                                status_t *status,
+                                audio_port_handle_t portId)
     {
         Parcel data, reply;
         sp<IAudioRecord> record;
@@ -211,6 +214,7 @@
         }
         data.writeInt32(lSessionId);
         data.writeInt64(notificationFrames != NULL ? *notificationFrames : 0);
+        data.writeInt32(portId);
         cblk.clear();
         buffers.clear();
         status_t lStatus = remote()->transact(OPEN_RECORD, data, &reply);
@@ -716,6 +720,7 @@
                                     audio_io_handle_t output,
                                     audio_session_t sessionId,
                                     const String16& opPackageName,
+                                    pid_t pid,
                                     status_t *status,
                                     int *id,
                                     int *enabled)
@@ -737,6 +742,7 @@
         data.writeInt32((int32_t) output);
         data.writeInt32(sessionId);
         data.writeString16(opPackageName);
+        data.writeInt32((int32_t) pid);
 
         status_t lStatus = remote()->transact(CREATE_EFFECT, data, &reply);
         if (lStatus != NO_ERROR) {
@@ -958,6 +964,7 @@
             pid_t tid = (pid_t) data.readInt32();
             audio_session_t sessionId = (audio_session_t) data.readInt32();
             int clientUid = data.readInt32();
+            audio_port_handle_t portId = (audio_port_handle_t) data.readInt32();
             status_t status = NO_ERROR;
             sp<IAudioTrack> track;
             if ((haveSharedBuffer && (buffer == 0)) ||
@@ -968,7 +975,7 @@
                 track = createTrack(
                         (audio_stream_type_t) streamType, sampleRate, format,
                         channelMask, &frameCount, &flags, buffer, output, pid, tid,
-                        &sessionId, clientUid, &status);
+                        &sessionId, clientUid, &status, portId);
                 LOG_ALWAYS_FATAL_IF((track != 0) != (status == NO_ERROR));
             }
             reply->writeInt64(frameCount);
@@ -992,13 +999,14 @@
             int clientUid = data.readInt32();
             audio_session_t sessionId = (audio_session_t) data.readInt32();
             size_t notificationFrames = data.readInt64();
+            audio_port_handle_t portId = (audio_port_handle_t) data.readInt32();
             sp<IMemory> cblk;
             sp<IMemory> buffers;
             status_t status = NO_ERROR;
             sp<IAudioRecord> record = openRecord(input,
                     sampleRate, format, channelMask, opPackageName, &frameCount, &flags,
                     pid, tid, clientUid, &sessionId, &notificationFrames, cblk, buffers,
-                    &status);
+                    &status, portId);
             LOG_ALWAYS_FATAL_IF((record != 0) != (status == NO_ERROR));
             reply->writeInt64(frameCount);
             reply->writeInt32(flags);
@@ -1294,12 +1302,14 @@
             audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
             audio_session_t sessionId = (audio_session_t) data.readInt32();
             const String16 opPackageName = data.readString16();
+            pid_t pid = (pid_t)data.readInt32();
+
             status_t status = NO_ERROR;
             int id = 0;
             int enabled = 0;
 
             sp<IEffect> effect = createEffect(&desc, client, priority, output, sessionId,
-                    opPackageName, &status, &id, &enabled);
+                    opPackageName, pid, &status, &id, &enabled);
             reply->writeInt32(status);
             reply->writeInt32(id);
             reply->writeInt32(enabled);
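Note: createTrack()/openRecord() gain a trailing portId and createEffect() a trailing pid, so the Bp write order and the Bn read order must stay in lockstep. A schematic sketch of that pairing; the helper names are hypothetical, and in the real code these lines live inside the proxy method and onTransact() respectively.

    #include <binder/Parcel.h>
    #include <system/audio.h>

    // Proxy side: the new field is appended after the existing ones.
    void writeCreateTrackTail(android::Parcel& data, audio_port_handle_t portId) {
        data.writeInt32((int32_t) portId);
    }

    // Stub side: consumed at the same position, keeping the parcel cursor aligned.
    audio_port_handle_t readCreateTrackTail(const android::Parcel& data) {
        return (audio_port_handle_t) data.readInt32();
    }
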
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index 0ac5726..f0f413d 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -189,12 +189,10 @@
                                         audio_session_t session,
                                         audio_stream_type_t *stream,
                                         uid_t uid,
-                                        uint32_t samplingRate,
-                                        audio_format_t format,
-                                        audio_channel_mask_t channelMask,
+                                        const audio_config_t *config,
                                         audio_output_flags_t flags,
                                         audio_port_handle_t selectedDeviceId,
-                                        const audio_offload_info_t *offloadInfo)
+                                        audio_port_handle_t *portId)
         {
             Parcel data, reply;
             data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -212,6 +210,10 @@
                 ALOGE("getOutputForAttr NULL output - shouldn't happen");
                 return BAD_VALUE;
             }
+            if (portId == NULL) {
+                ALOGE("getOutputForAttr NULL portId - shouldn't happen");
+                return BAD_VALUE;
+            }
             if (attr == NULL) {
                 data.writeInt32(0);
             } else {
@@ -226,18 +228,10 @@
                 data.writeInt32(*stream);
             }
             data.writeInt32(uid);
-            data.writeInt32(samplingRate);
-            data.writeInt32(static_cast <uint32_t>(format));
-            data.writeInt32(channelMask);
+            data.write(config, sizeof(audio_config_t));
             data.writeInt32(static_cast <uint32_t>(flags));
             data.writeInt32(selectedDeviceId);
-            // hasOffloadInfo
-            if (offloadInfo == NULL) {
-                data.writeInt32(0);
-            } else {
-                data.writeInt32(1);
-                data.write(offloadInfo, sizeof(audio_offload_info_t));
-            }
+            data.writeInt32(*portId);
             status_t status = remote()->transact(GET_OUTPUT_FOR_ATTR, data, &reply);
             if (status != NO_ERROR) {
                 return status;
@@ -247,9 +241,11 @@
                 return status;
             }
             *output = (audio_io_handle_t)reply.readInt32();
+            audio_stream_type_t lStream = (audio_stream_type_t)reply.readInt32();
             if (stream != NULL) {
-                *stream = (audio_stream_type_t)reply.readInt32();
+                *stream = lStream;
             }
+            *portId = (audio_port_handle_t)reply.readInt32();
             return status;
         }
 
@@ -296,11 +292,10 @@
                                      audio_session_t session,
                                      pid_t pid,
                                      uid_t uid,
-                                     uint32_t samplingRate,
-                                     audio_format_t format,
-                                     audio_channel_mask_t channelMask,
+                                     const audio_config_base_t *config,
                                      audio_input_flags_t flags,
-                                     audio_port_handle_t selectedDeviceId)
+                                     audio_port_handle_t selectedDeviceId,
+                                     audio_port_handle_t *portId)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -312,15 +307,18 @@
             ALOGE("getInputForAttr NULL input - shouldn't happen");
             return BAD_VALUE;
         }
+        if (portId == NULL) {
+            ALOGE("getInputForAttr NULL portId - shouldn't happen");
+            return BAD_VALUE;
+        }
         data.write(attr, sizeof(audio_attributes_t));
         data.writeInt32(session);
         data.writeInt32(pid);
         data.writeInt32(uid);
-        data.writeInt32(samplingRate);
-        data.writeInt32(static_cast <uint32_t>(format));
-        data.writeInt32(channelMask);
+        data.write(config, sizeof(audio_config_base_t));
         data.writeInt32(flags);
         data.writeInt32(selectedDeviceId);
+        data.writeInt32(*portId);
         status_t status = remote()->transact(GET_INPUT_FOR_ATTR, data, &reply);
         if (status != NO_ERROR) {
             return status;
@@ -330,6 +328,7 @@
             return status;
         }
         *input = (audio_io_handle_t)reply.readInt32();
+        *portId = (audio_port_handle_t)reply.readInt32();
         return NO_ERROR;
     }
 
@@ -752,7 +751,7 @@
 
     virtual status_t startAudioSource(const struct audio_port_config *source,
                                       const audio_attributes_t *attributes,
-                                      audio_io_handle_t *handle)
+                                      audio_patch_handle_t *handle)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -769,11 +768,11 @@
         if (status != NO_ERROR) {
             return status;
         }
-        *handle = (audio_io_handle_t)reply.readInt32();
+        *handle = (audio_patch_handle_t)reply.readInt32();
         return status;
     }
 
-    virtual status_t stopAudioSource(audio_io_handle_t handle)
+    virtual status_t stopAudioSource(audio_patch_handle_t handle)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -942,25 +941,22 @@
                 stream = (audio_stream_type_t)data.readInt32();
             }
             uid_t uid = (uid_t)data.readInt32();
-            uint32_t samplingRate = data.readInt32();
-            audio_format_t format = (audio_format_t) data.readInt32();
-            audio_channel_mask_t channelMask = data.readInt32();
+            audio_config_t config;
+            memset(&config, 0, sizeof(audio_config_t));
+            data.read(&config, sizeof(audio_config_t));
             audio_output_flags_t flags =
                     static_cast <audio_output_flags_t>(data.readInt32());
             audio_port_handle_t selectedDeviceId = data.readInt32();
-            bool hasOffloadInfo = data.readInt32() != 0;
-            audio_offload_info_t offloadInfo;
-            if (hasOffloadInfo) {
-                data.read(&offloadInfo, sizeof(audio_offload_info_t));
-            }
+            audio_port_handle_t portId = (audio_port_handle_t)data.readInt32();
             audio_io_handle_t output = 0;
             status_t status = getOutputForAttr(hasAttributes ? &attr : NULL,
                     &output, session, &stream, uid,
-                    samplingRate, format, channelMask,
-                    flags, selectedDeviceId, hasOffloadInfo ? &offloadInfo : NULL);
+                    &config,
+                    flags, selectedDeviceId, &portId);
             reply->writeInt32(status);
             reply->writeInt32(output);
             reply->writeInt32(stream);
+            reply->writeInt32(portId);
             return NO_ERROR;
         } break;
 
@@ -1004,18 +1000,20 @@
             audio_session_t session = (audio_session_t)data.readInt32();
             pid_t pid = (pid_t)data.readInt32();
             uid_t uid = (uid_t)data.readInt32();
-            uint32_t samplingRate = data.readInt32();
-            audio_format_t format = (audio_format_t) data.readInt32();
-            audio_channel_mask_t channelMask = data.readInt32();
+            audio_config_base_t config;
+            memset(&config, 0, sizeof(audio_config_base_t));
+            data.read(&config, sizeof(audio_config_base_t));
             audio_input_flags_t flags = (audio_input_flags_t) data.readInt32();
             audio_port_handle_t selectedDeviceId = (audio_port_handle_t) data.readInt32();
+            audio_port_handle_t portId = (audio_port_handle_t)data.readInt32();
             audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
             status_t status = getInputForAttr(&attr, &input, session, pid, uid,
-                                              samplingRate, format, channelMask,
-                                              flags, selectedDeviceId);
+                                              &config,
+                                              flags, selectedDeviceId, &portId);
             reply->writeInt32(status);
             if (status == NO_ERROR) {
                 reply->writeInt32(input);
+                reply->writeInt32(portId);
             }
             return NO_ERROR;
         } break;
@@ -1373,7 +1371,7 @@
             data.read(&source, sizeof(struct audio_port_config));
             audio_attributes_t attributes;
             data.read(&attributes, sizeof(audio_attributes_t));
-            audio_io_handle_t handle = {};
+            audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
             status_t status = startAudioSource(&source, &attributes, &handle);
             reply->writeInt32(status);
             reply->writeInt32(handle);
@@ -1382,7 +1380,7 @@
 
         case STOP_AUDIO_SOURCE: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            audio_io_handle_t handle = (audio_io_handle_t)data.readInt32();
+            audio_patch_handle_t handle = (audio_patch_handle_t) data.readInt32();
             status_t status = stopAudioSource(handle);
             reply->writeInt32(status);
             return NO_ERROR;
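Note: in the new GET_OUTPUT_FOR_ATTR reply the server always writes status, output, stream and portId. The proxy therefore reads the stream slot unconditionally, even when the caller passed stream == NULL, so that portId is taken from the right offset. A hedged sketch of that reply parsing; the helper name is hypothetical and the initial status word is omitted for brevity.

    #include <binder/Parcel.h>
    #include <system/audio.h>
    #include <utils/Errors.h>

    status_t readGetOutputForAttrReply(const android::Parcel& reply,
                                       audio_io_handle_t* output,
                                       audio_stream_type_t* stream,
                                       audio_port_handle_t* portId) {
        *output = (audio_io_handle_t) reply.readInt32();
        const audio_stream_type_t lStream = (audio_stream_type_t) reply.readInt32(); // always consume
        if (stream != NULL) {
            *stream = lStream;
        }
        *portId = (audio_port_handle_t) reply.readInt32();
        return NO_ERROR;
    }
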
diff --git a/media/libaudiohal/Android.mk b/media/libaudiohal/Android.mk
new file mode 100644
index 0000000..5e00b77
--- /dev/null
+++ b/media/libaudiohal/Android.mk
@@ -0,0 +1,57 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SHARED_LIBRARIES := \
+    libcutils   \
+    libeffects  \
+    libhardware \
+    liblog      \
+    libutils
+
+ifeq ($(USE_LEGACY_LOCAL_AUDIO_HAL), true)
+
+# Use audiohal directly w/o hwbinder middleware.
+# This is for performance comparison and debugging only.
+
+LOCAL_SRC_FILES := \
+    DeviceHalLocal.cpp          \
+    DevicesFactoryHalLocal.cpp  \
+    EffectBufferHalLocal.cpp    \
+    EffectHalLocal.cpp          \
+    EffectsFactoryHalLocal.cpp  \
+    StreamHalLocal.cpp
+
+else  # if !USE_LEGACY_LOCAL_AUDIO_HAL
+
+LOCAL_SRC_FILES := \
+    ConversionHelperHidl.cpp   \
+    HalDeathHandlerHidl.cpp   \
+    DeviceHalHidl.cpp          \
+    DevicesFactoryHalHidl.cpp  \
+    EffectBufferHalHidl.cpp    \
+    EffectHalHidl.cpp          \
+    EffectsFactoryHalHidl.cpp  \
+    StreamHalHidl.cpp
+
+LOCAL_SHARED_LIBRARIES += \
+    libbase          \
+    libfmq           \
+    libhwbinder      \
+    libhidlbase      \
+    libhidlmemory    \
+    libhidltransport \
+    android.hardware.audio@2.0             \
+    android.hardware.audio.common@2.0      \
+    android.hardware.audio.common@2.0-util \
+    android.hardware.audio.effect@2.0      \
+    android.hidl.memory@1.0                \
+    libmedia_helper
+
+endif  # USE_LEGACY_LOCAL_AUDIO_HAL
+
+LOCAL_MODULE := libaudiohal
+
+LOCAL_CFLAGS := -Wall -Werror
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libaudiohal/ConversionHelperHidl.cpp b/media/libaudiohal/ConversionHelperHidl.cpp
new file mode 100644
index 0000000..9f9eb75
--- /dev/null
+++ b/media/libaudiohal/ConversionHelperHidl.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string.h>
+
+#define LOG_TAG "HalHidl"
+#include <media/AudioParameter.h>
+#include <utils/Log.h>
+
+#include "ConversionHelperHidl.h"
+
+using ::android::hardware::audio::V2_0::Result;
+
+namespace android {
+
+// static
+status_t ConversionHelperHidl::keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys) {
+    AudioParameter halKeys(keys);
+    if (halKeys.size() == 0) return BAD_VALUE;
+    hidlKeys->resize(halKeys.size());
+    for (size_t i = 0; i < halKeys.size(); ++i) {
+        String8 key;
+        status_t status = halKeys.getAt(i, key);
+        if (status != OK) return status;
+        (*hidlKeys)[i] = key.string();
+    }
+    return OK;
+}
+
+// static
+status_t ConversionHelperHidl::parametersFromHal(
+        const String8& kvPairs, hidl_vec<ParameterValue> *hidlParams) {
+    AudioParameter params(kvPairs);
+    if (params.size() == 0) return BAD_VALUE;
+    hidlParams->resize(params.size());
+    for (size_t i = 0; i < params.size(); ++i) {
+        String8 key, value;
+        status_t status = params.getAt(i, key, value);
+        if (status != OK) return status;
+        (*hidlParams)[i].key = key.string();
+        (*hidlParams)[i].value = value.string();
+    }
+    return OK;
+}
+
+// static
+void ConversionHelperHidl::parametersToHal(
+        const hidl_vec<ParameterValue>& parameters, String8 *values) {
+    AudioParameter params;
+    for (size_t i = 0; i < parameters.size(); ++i) {
+        params.add(String8(parameters[i].key.c_str()), String8(parameters[i].value.c_str()));
+    }
+    values->setTo(params.toString());
+}
+
+ConversionHelperHidl::ConversionHelperHidl(const char* className)
+        : mClassName(className) {
+}
+
+// static
+status_t ConversionHelperHidl::analyzeResult(const Result& result) {
+    switch (result) {
+        case Result::OK: return OK;
+        case Result::INVALID_ARGUMENTS: return BAD_VALUE;
+        case Result::INVALID_STATE: return NOT_ENOUGH_DATA;
+        case Result::NOT_INITIALIZED: return NO_INIT;
+        case Result::NOT_SUPPORTED: return INVALID_OPERATION;
+        default: return NO_INIT;
+    }
+}
+
+void ConversionHelperHidl::emitError(const char* funcName, const char* description) {
+    ALOGE("%s %p %s: %s (from rpc)", mClassName, this, funcName, description);
+}
+
+}  // namespace android
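Note: keysFromHal()/parametersFromHal()/parametersToHal() bridge the legacy AudioParameter key/value strings to HIDL ParameterValue vectors. A hedged sketch of the legacy-side representation they operate on; the keys and values here are examples only.

    AudioParameter params;
    params.add(String8("routing"), String8("2"));
    params.add(String8("volume"), String8("0.5"));
    String8 kvPairs = params.toString();   // ';'-separated "key=value" pairs, e.g. "routing=2;volume=0.5"
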
diff --git a/media/libaudiohal/ConversionHelperHidl.h b/media/libaudiohal/ConversionHelperHidl.h
new file mode 100644
index 0000000..23fb360
--- /dev/null
+++ b/media/libaudiohal/ConversionHelperHidl.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_H
+#define ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_H
+
+#include <android/hardware/audio/2.0/types.h>
+#include <hidl/HidlSupport.h>
+#include <utils/String8.h>
+
+using ::android::hardware::audio::V2_0::ParameterValue;
+using ::android::hardware::Return;
+using ::android::hardware::Status;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+
+namespace android {
+
+class ConversionHelperHidl {
+  protected:
+    static status_t keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys);
+    static status_t parametersFromHal(const String8& kvPairs, hidl_vec<ParameterValue> *hidlParams);
+    static void parametersToHal(const hidl_vec<ParameterValue>& parameters, String8 *values);
+
+    ConversionHelperHidl(const char* className);
+
+    template<typename R, typename T>
+    status_t processReturn(const char* funcName, const Return<R>& ret, T *retval) {
+        if (ret.isOk()) {
+            // This way it also works for enum class to unscoped enum conversion.
+            *retval = static_cast<T>(static_cast<R>(ret));
+            return OK;
+        }
+        return processReturn(funcName, ret);
+    }
+
+    template<typename T>
+    status_t processReturn(const char* funcName, const Return<T>& ret) {
+        if (!ret.isOk()) {
+            emitError(funcName, ret.description().c_str());
+        }
+        return ret.isOk() ? OK : FAILED_TRANSACTION;
+    }
+
+    status_t processReturn(const char* funcName, const Return<hardware::audio::V2_0::Result>& ret) {
+        return processReturn(funcName, ret, ret);
+    }
+
+    template<typename T>
+    status_t processReturn(
+            const char* funcName, const Return<T>& ret, hardware::audio::V2_0::Result retval) {
+        const status_t st = ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
+        if (!ret.isOk()) {
+            emitError(funcName, ret.description().c_str());
+        }
+        return st;
+    }
+
+  private:
+    const char* mClassName;
+
+    static status_t analyzeResult(const hardware::audio::V2_0::Result& result);
+
+    void emitError(const char* funcName, const char* description);
+};
+
+}  // namespace android
+
+#endif // ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_H
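Note: processReturn() is intended to be called from subclasses such as DeviceHalHidl below: unpack a HIDL callback result synchronously, then fold transport failures and HAL status into a single status_t. An illustrative sketch of that pattern; the class, member and method names are hypothetical.

    status_t MyHalWrapper::getSomething(uint32_t* value) {
        Result retval = Result::NOT_INITIALIZED;
        Return<void> ret = mInterface->getSomething(
                [&](Result r, uint32_t v) {
                    retval = r;
                    if (retval == Result::OK) {
                        *value = v;
                    }
                });
        return processReturn("getSomething", ret, retval);
    }
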
diff --git a/media/libaudiohal/DeviceHalHidl.cpp b/media/libaudiohal/DeviceHalHidl.cpp
new file mode 100644
index 0000000..a6ced12
--- /dev/null
+++ b/media/libaudiohal/DeviceHalHidl.cpp
@@ -0,0 +1,351 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+
+#define LOG_TAG "DeviceHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <android/hardware/audio/2.0/IPrimaryDevice.h>
+#include <cutils/native_handle.h>
+#include <utils/Log.h>
+
+#include "DeviceHalHidl.h"
+#include "HidlUtils.h"
+#include "StreamHalHidl.h"
+
+using ::android::hardware::audio::common::V2_0::AudioConfig;
+using ::android::hardware::audio::common::V2_0::AudioDevice;
+using ::android::hardware::audio::common::V2_0::AudioInputFlag;
+using ::android::hardware::audio::common::V2_0::AudioOutputFlag;
+using ::android::hardware::audio::common::V2_0::AudioPatchHandle;
+using ::android::hardware::audio::common::V2_0::AudioPort;
+using ::android::hardware::audio::common::V2_0::AudioPortConfig;
+using ::android::hardware::audio::common::V2_0::AudioMode;
+using ::android::hardware::audio::common::V2_0::AudioSource;
+using ::android::hardware::audio::V2_0::DeviceAddress;
+using ::android::hardware::audio::V2_0::IPrimaryDevice;
+using ::android::hardware::audio::V2_0::ParameterValue;
+using ::android::hardware::audio::V2_0::Result;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+
+namespace android {
+
+namespace {
+
+status_t deviceAddressFromHal(
+        audio_devices_t device, const char* halAddress, DeviceAddress* address) {
+    address->device = AudioDevice(device);
+    const bool isInput = (device & AUDIO_DEVICE_BIT_IN) != 0;
+    if (isInput) device &= ~AUDIO_DEVICE_BIT_IN;
+    if ((!isInput && (device & AUDIO_DEVICE_OUT_ALL_A2DP) != 0)
+            || (isInput && (device & AUDIO_DEVICE_IN_BLUETOOTH_A2DP) != 0)) {
+        int status = sscanf(halAddress,
+                "%hhX:%hhX:%hhX:%hhX:%hhX:%hhX",
+                &address->address.mac[0], &address->address.mac[1], &address->address.mac[2],
+                &address->address.mac[3], &address->address.mac[4], &address->address.mac[5]);
+        return status == 6 ? OK : BAD_VALUE;
+    } else if ((!isInput && (device & AUDIO_DEVICE_OUT_IP) != 0)
+            || (isInput && (device & AUDIO_DEVICE_IN_IP) != 0)) {
+        int status = sscanf(halAddress,
+                "%hhu.%hhu.%hhu.%hhu",
+                &address->address.ipv4[0], &address->address.ipv4[1],
+                &address->address.ipv4[2], &address->address.ipv4[3]);
+        return status == 4 ? OK : BAD_VALUE;
+    } else if ((!isInput && (device & AUDIO_DEVICE_OUT_ALL_USB)) != 0
+            || (isInput && (device & AUDIO_DEVICE_IN_ALL_USB)) != 0) {
+        int status = sscanf(halAddress,
+                "card=%d;device=%d",
+                &address->address.alsa.card, &address->address.alsa.device);
+        return status == 2 ? OK : BAD_VALUE;
+    } else if ((!isInput && (device & AUDIO_DEVICE_OUT_BUS) != 0)
+            || (isInput && (device & AUDIO_DEVICE_IN_BUS) != 0)) {
+        if (halAddress != NULL) {
+            address->busAddress = halAddress;
+            return OK;
+        }
+        return BAD_VALUE;
+    } else if ((!isInput && (device & AUDIO_DEVICE_OUT_REMOTE_SUBMIX)) != 0
+            || (isInput && (device & AUDIO_DEVICE_IN_REMOTE_SUBMIX) != 0)) {
+        if (halAddress != NULL) {
+            address->rSubmixAddress = halAddress;
+            return OK;
+        }
+        return BAD_VALUE;
+    }
+    return OK;
+}
+
+}  // namespace
+
+DeviceHalHidl::DeviceHalHidl(const sp<IDevice>& device)
+        : ConversionHelperHidl("Device"), mDevice(device) {
+}
+
+DeviceHalHidl::~DeviceHalHidl() {
+}
+
+status_t DeviceHalHidl::getSupportedDevices(uint32_t*) {
+    // Obsolete.
+    return INVALID_OPERATION;
+}
+
+status_t DeviceHalHidl::initCheck() {
+    if (mDevice == 0) return NO_INIT;
+    return processReturn("initCheck", mDevice->initCheck());
+}
+
+status_t DeviceHalHidl::setVoiceVolume(float volume) {
+    if (mDevice == 0) return NO_INIT;
+    sp<IPrimaryDevice> primaryDev = IPrimaryDevice::castFrom(mDevice);
+    if (primaryDev == 0) return INVALID_OPERATION;
+    return processReturn("setVoiceVolume", primaryDev->setVoiceVolume(volume));
+}
+
+status_t DeviceHalHidl::setMasterVolume(float volume) {
+    if (mDevice == 0) return NO_INIT;
+    sp<IPrimaryDevice> primaryDev = IPrimaryDevice::castFrom(mDevice);
+    if (primaryDev == 0) return INVALID_OPERATION;
+    return processReturn("setMasterVolume", primaryDev->setMasterVolume(volume));
+}
+
+status_t DeviceHalHidl::getMasterVolume(float *volume) {
+    if (mDevice == 0) return NO_INIT;
+    sp<IPrimaryDevice> primaryDev = IPrimaryDevice::castFrom(mDevice);
+    if (primaryDev == 0) return INVALID_OPERATION;
+    Result retval;
+    Return<void> ret = primaryDev->getMasterVolume(
+            [&](Result r, float v) {
+                retval = r;
+                if (retval == Result::OK) {
+                    *volume = v;
+                }
+            });
+    return processReturn("getMasterVolume", ret, retval);
+}
+
+status_t DeviceHalHidl::setMode(audio_mode_t mode) {
+    if (mDevice == 0) return NO_INIT;
+    sp<IPrimaryDevice> primaryDev = IPrimaryDevice::castFrom(mDevice);
+    if (primaryDev == 0) return INVALID_OPERATION;
+    return processReturn("setMode", primaryDev->setMode(AudioMode(mode)));
+}
+
+status_t DeviceHalHidl::setMicMute(bool state) {
+    if (mDevice == 0) return NO_INIT;
+    return processReturn("setMicMute", mDevice->setMicMute(state));
+}
+
+status_t DeviceHalHidl::getMicMute(bool *state) {
+    if (mDevice == 0) return NO_INIT;
+    Result retval;
+    Return<void> ret = mDevice->getMicMute(
+            [&](Result r, bool mute) {
+                retval = r;
+                if (retval == Result::OK) {
+                    *state = mute;
+                }
+            });
+    return processReturn("getMicMute", ret, retval);
+}
+
+status_t DeviceHalHidl::setMasterMute(bool state) {
+    if (mDevice == 0) return NO_INIT;
+    return processReturn("setMasterMute", mDevice->setMasterMute(state));
+}
+
+status_t DeviceHalHidl::getMasterMute(bool *state) {
+    if (mDevice == 0) return NO_INIT;
+    Result retval;
+    Return<void> ret = mDevice->getMasterMute(
+            [&](Result r, bool mute) {
+                retval = r;
+                if (retval == Result::OK) {
+                    *state = mute;
+                }
+            });
+    return processReturn("getMasterMute", ret, retval);
+}
+
+status_t DeviceHalHidl::setParameters(const String8& kvPairs) {
+    if (mDevice == 0) return NO_INIT;
+    hidl_vec<ParameterValue> hidlParams;
+    status_t status = parametersFromHal(kvPairs, &hidlParams);
+    if (status != OK) return status;
+    return processReturn("setParameters", mDevice->setParameters(hidlParams));
+}
+
+status_t DeviceHalHidl::getParameters(const String8& keys, String8 *values) {
+    values->clear();
+    if (mDevice == 0) return NO_INIT;
+    hidl_vec<hidl_string> hidlKeys;
+    status_t status = keysFromHal(keys, &hidlKeys);
+    if (status != OK) return status;
+    Result retval;
+    Return<void> ret = mDevice->getParameters(
+            hidlKeys,
+            [&](Result r, const hidl_vec<ParameterValue>& parameters) {
+                retval = r;
+                if (retval == Result::OK) {
+                    parametersToHal(parameters, values);
+                }
+            });
+    return processReturn("getParameters", ret, retval);
+}
+
+status_t DeviceHalHidl::getInputBufferSize(
+        const struct audio_config *config, size_t *size) {
+    if (mDevice == 0) return NO_INIT;
+    AudioConfig hidlConfig;
+    HidlUtils::audioConfigFromHal(*config, &hidlConfig);
+    Result retval;
+    Return<void> ret = mDevice->getInputBufferSize(
+            hidlConfig,
+            [&](Result r, uint64_t bufferSize) {
+                retval = r;
+                if (retval == Result::OK) {
+                    *size = static_cast<size_t>(bufferSize);
+                }
+            });
+    return processReturn("getInputBufferSize", ret, retval);
+}
+
+status_t DeviceHalHidl::openOutputStream(
+        audio_io_handle_t handle,
+        audio_devices_t devices,
+        audio_output_flags_t flags,
+        struct audio_config *config,
+        const char *address,
+        sp<StreamOutHalInterface> *outStream) {
+    if (mDevice == 0) return NO_INIT;
+    DeviceAddress hidlDevice;
+    status_t status = deviceAddressFromHal(devices, address, &hidlDevice);
+    if (status != OK) return status;
+    AudioConfig hidlConfig;
+    HidlUtils::audioConfigFromHal(*config, &hidlConfig);
+    Result retval = Result::NOT_INITIALIZED;
+    Return<void> ret = mDevice->openOutputStream(
+            handle,
+            hidlDevice,
+            hidlConfig,
+            AudioOutputFlag(flags),
+            [&](Result r, const sp<IStreamOut>& result, const AudioConfig& suggestedConfig) {
+                retval = r;
+                if (retval == Result::OK) {
+                    *outStream = new StreamOutHalHidl(result);
+                }
+                HidlUtils::audioConfigToHal(suggestedConfig, config);
+            });
+    return processReturn("openOutputStream", ret, retval);
+}
+
+status_t DeviceHalHidl::openInputStream(
+        audio_io_handle_t handle,
+        audio_devices_t devices,
+        struct audio_config *config,
+        audio_input_flags_t flags,
+        const char *address,
+        audio_source_t source,
+        sp<StreamInHalInterface> *inStream) {
+    if (mDevice == 0) return NO_INIT;
+    DeviceAddress hidlDevice;
+    status_t status = deviceAddressFromHal(devices, address, &hidlDevice);
+    if (status != OK) return status;
+    AudioConfig hidlConfig;
+    HidlUtils::audioConfigFromHal(*config, &hidlConfig);
+    Result retval = Result::NOT_INITIALIZED;
+    Return<void> ret = mDevice->openInputStream(
+            handle,
+            hidlDevice,
+            hidlConfig,
+            AudioInputFlag(flags),
+            AudioSource(source),
+            [&](Result r, const sp<IStreamIn>& result, const AudioConfig& suggestedConfig) {
+                retval = r;
+                if (retval == Result::OK) {
+                    *inStream = new StreamInHalHidl(result);
+                }
+                HidlUtils::audioConfigToHal(suggestedConfig, config);
+            });
+    return processReturn("openInputStream", ret, retval);
+}
+
+status_t DeviceHalHidl::supportsAudioPatches(bool *supportsPatches) {
+    if (mDevice == 0) return NO_INIT;
+    return processReturn("supportsAudioPatches", mDevice->supportsAudioPatches(), supportsPatches);
+}
+
+status_t DeviceHalHidl::createAudioPatch(
+        unsigned int num_sources,
+        const struct audio_port_config *sources,
+        unsigned int num_sinks,
+        const struct audio_port_config *sinks,
+        audio_patch_handle_t *patch) {
+    if (mDevice == 0) return NO_INIT;
+    hidl_vec<AudioPortConfig> hidlSources, hidlSinks;
+    HidlUtils::audioPortConfigsFromHal(num_sources, sources, &hidlSources);
+    HidlUtils::audioPortConfigsFromHal(num_sinks, sinks, &hidlSinks);
+    Result retval;
+    Return<void> ret = mDevice->createAudioPatch(
+            hidlSources, hidlSinks,
+            [&](Result r, AudioPatchHandle hidlPatch) {
+                retval = r;
+                if (retval == Result::OK) {
+                    *patch = static_cast<audio_patch_handle_t>(hidlPatch);
+                }
+            });
+    return processReturn("createAudioPatch", ret, retval);
+}
+
+status_t DeviceHalHidl::releaseAudioPatch(audio_patch_handle_t patch) {
+    if (mDevice == 0) return NO_INIT;
+    return processReturn("releaseAudioPatch", mDevice->releaseAudioPatch(patch));
+}
+
+status_t DeviceHalHidl::getAudioPort(struct audio_port *port) {
+    if (mDevice == 0) return NO_INIT;
+    AudioPort hidlPort;
+    HidlUtils::audioPortFromHal(*port, &hidlPort);
+    Result retval;
+    Return<void> ret = mDevice->getAudioPort(
+            hidlPort,
+            [&](Result r, const AudioPort& p) {
+                retval = r;
+                if (retval == Result::OK) {
+                    HidlUtils::audioPortToHal(p, port);
+                }
+            });
+    return processReturn("getAudioPort", ret, retval);
+}
+
+status_t DeviceHalHidl::setAudioPortConfig(const struct audio_port_config *config) {
+    if (mDevice == 0) return NO_INIT;
+    AudioPortConfig hidlConfig;
+    HidlUtils::audioPortConfigFromHal(*config, &hidlConfig);
+    return processReturn("setAudioPortConfig", mDevice->setAudioPortConfig(hidlConfig));
+}
+
+status_t DeviceHalHidl::dump(int fd) {
+    if (mDevice == 0) return NO_INIT;
+    native_handle_t* hidlHandle = native_handle_create(1, 0);
+    hidlHandle->data[0] = fd;
+    Return<void> ret = mDevice->debugDump(hidlHandle);
+    native_handle_delete(hidlHandle);
+    return processReturn("dump", ret);
+}
+
+} // namespace android
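Note: deviceAddressFromHal() above recognizes several textual address formats (Bluetooth MAC, IPv4, ALSA card/device, bus, remote submix). A self-contained illustration of the ALSA/USB form it parses with sscanf ("card=<n>;device=<n>"); the address string is an example only.

    #include <cstdio>

    int main() {
        int card = -1, device = -1;
        const char* halAddress = "card=1;device=0";
        if (std::sscanf(halAddress, "card=%d;device=%d", &card, &device) == 2) {
            std::printf("alsa card %d, device %d\n", card, device);
        }
        return 0;
    }
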
diff --git a/media/libaudiohal/DeviceHalHidl.h b/media/libaudiohal/DeviceHalHidl.h
new file mode 100644
index 0000000..9da02a4
--- /dev/null
+++ b/media/libaudiohal/DeviceHalHidl.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICE_HAL_HIDL_H
+#define ANDROID_HARDWARE_DEVICE_HAL_HIDL_H
+
+#include <android/hardware/audio/2.0/IDevice.h>
+#include <media/audiohal/DeviceHalInterface.h>
+
+#include "ConversionHelperHidl.h"
+
+using ::android::hardware::audio::V2_0::IDevice;
+using ::android::hardware::Return;
+
+namespace android {
+
+class DeviceHalHidl : public DeviceHalInterface, public ConversionHelperHidl
+{
+  public:
+    // Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
+    virtual status_t getSupportedDevices(uint32_t *devices);
+
+    // Check to see if the audio hardware interface has been initialized.
+    virtual status_t initCheck();
+
+    // Set the audio volume of a voice call. Range is between 0.0 and 1.0.
+    virtual status_t setVoiceVolume(float volume);
+
+    // Set the audio volume for all audio activities other than voice call.
+    virtual status_t setMasterVolume(float volume);
+
+    // Get the current master volume value for the HAL.
+    virtual status_t getMasterVolume(float *volume);
+
+    // Called when the audio mode changes.
+    virtual status_t setMode(audio_mode_t mode);
+
+    // Muting control.
+    virtual status_t setMicMute(bool state);
+    virtual status_t getMicMute(bool *state);
+    virtual status_t setMasterMute(bool state);
+    virtual status_t getMasterMute(bool *state);
+
+    // Set global audio parameters.
+    virtual status_t setParameters(const String8& kvPairs);
+
+    // Get global audio parameters.
+    virtual status_t getParameters(const String8& keys, String8 *values);
+
+    // Returns audio input buffer size according to parameters passed.
+    virtual status_t getInputBufferSize(const struct audio_config *config,
+            size_t *size);
+
+    // Creates and opens the audio hardware output stream. The stream is closed
+    // by releasing all references to the returned object.
+    virtual status_t openOutputStream(
+            audio_io_handle_t handle,
+            audio_devices_t devices,
+            audio_output_flags_t flags,
+            struct audio_config *config,
+            const char *address,
+            sp<StreamOutHalInterface> *outStream);
+
+    // Creates and opens the audio hardware input stream. The stream is closed
+    // by releasing all references to the returned object.
+    virtual status_t openInputStream(
+            audio_io_handle_t handle,
+            audio_devices_t devices,
+            struct audio_config *config,
+            audio_input_flags_t flags,
+            const char *address,
+            audio_source_t source,
+            sp<StreamInHalInterface> *inStream);
+
+    // Returns whether createAudioPatch and releaseAudioPatch operations are supported.
+    virtual status_t supportsAudioPatches(bool *supportsPatches);
+
+    // Creates an audio patch between several source and sink ports.
+    virtual status_t createAudioPatch(
+            unsigned int num_sources,
+            const struct audio_port_config *sources,
+            unsigned int num_sinks,
+            const struct audio_port_config *sinks,
+            audio_patch_handle_t *patch);
+
+    // Releases an audio patch.
+    virtual status_t releaseAudioPatch(audio_patch_handle_t patch);
+
+    // Fills the list of supported attributes for a given audio port.
+    virtual status_t getAudioPort(struct audio_port *port);
+
+    // Set audio port configuration.
+    virtual status_t setAudioPortConfig(const struct audio_port_config *config);
+
+    virtual status_t dump(int fd);
+
+  private:
+    friend class DevicesFactoryHalHidl;
+    sp<IDevice> mDevice;
+
+    // Can not be constructed directly by clients.
+    explicit DeviceHalHidl(const sp<IDevice>& device);
+
+    // The destructor automatically closes the device.
+    virtual ~DeviceHalHidl();
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICE_HAL_HIDL_H
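Note (illustrative, not part of this patch): the header above documents that streams are closed by releasing all references to the returned object. A minimal caller-side sketch, assuming a DeviceHalInterface obtained from DevicesFactoryHalInterface::openDevice() and a config the HAL accepts:

// Illustrative sketch only, not part of this patch.
#include <media/audiohal/DeviceHalInterface.h>
#include <media/audiohal/StreamHalInterface.h>
#include <system/audio.h>

namespace android {

status_t openPrimaryOutput(const sp<DeviceHalInterface>& dev,
                           sp<StreamOutHalInterface>* out) {
    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
    config.sample_rate = 48000;
    config.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
    config.format = AUDIO_FORMAT_PCM_16_BIT;
    // The handle value is a placeholder; AudioFlinger allocates unique handles.
    // The stream is closed when the last sp<> reference held in *out is dropped.
    return dev->openOutputStream(1 /*handle*/, AUDIO_DEVICE_OUT_SPEAKER,
            AUDIO_OUTPUT_FLAG_PRIMARY, &config, "" /*address*/, out);
}

} // namespace android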
diff --git a/media/libaudiohal/DeviceHalLocal.cpp b/media/libaudiohal/DeviceHalLocal.cpp
new file mode 100644
index 0000000..fc098f5
--- /dev/null
+++ b/media/libaudiohal/DeviceHalLocal.cpp
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DeviceHalLocal"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+
+#include "DeviceHalLocal.h"
+#include "StreamHalLocal.h"
+
+namespace android {
+
+DeviceHalLocal::DeviceHalLocal(audio_hw_device_t *dev)
+        : mDev(dev) {
+}
+
+DeviceHalLocal::~DeviceHalLocal() {
+    int status = audio_hw_device_close(mDev);
+    ALOGW_IF(status, "Error closing audio hw device %p: %s", mDev, strerror(-status));
+    mDev = 0;
+}
+
+status_t DeviceHalLocal::getSupportedDevices(uint32_t *devices) {
+    if (mDev->get_supported_devices == NULL) return INVALID_OPERATION;
+    *devices = mDev->get_supported_devices(mDev);
+    return OK;
+}
+
+status_t DeviceHalLocal::initCheck() {
+    return mDev->init_check(mDev);
+}
+
+status_t DeviceHalLocal::setVoiceVolume(float volume) {
+    return mDev->set_voice_volume(mDev, volume);
+}
+
+status_t DeviceHalLocal::setMasterVolume(float volume) {
+    if (mDev->set_master_volume == NULL) return INVALID_OPERATION;
+    return mDev->set_master_volume(mDev, volume);
+}
+
+status_t DeviceHalLocal::getMasterVolume(float *volume) {
+    if (mDev->get_master_volume == NULL) return INVALID_OPERATION;
+    return mDev->get_master_volume(mDev, volume);
+}
+
+status_t DeviceHalLocal::setMode(audio_mode_t mode) {
+    return mDev->set_mode(mDev, mode);
+}
+
+status_t DeviceHalLocal::setMicMute(bool state) {
+    return mDev->set_mic_mute(mDev, state);
+}
+
+status_t DeviceHalLocal::getMicMute(bool *state) {
+    return mDev->get_mic_mute(mDev, state);
+}
+
+status_t DeviceHalLocal::setMasterMute(bool state) {
+    if (mDev->set_master_mute == NULL) return INVALID_OPERATION;
+    return mDev->set_master_mute(mDev, state);
+}
+
+status_t DeviceHalLocal::getMasterMute(bool *state) {
+    if (mDev->get_master_mute == NULL) return INVALID_OPERATION;
+    return mDev->get_master_mute(mDev, state);
+}
+
+status_t DeviceHalLocal::setParameters(const String8& kvPairs) {
+    return mDev->set_parameters(mDev, kvPairs.string());
+}
+
+status_t DeviceHalLocal::getParameters(const String8& keys, String8 *values) {
+    char *halValues = mDev->get_parameters(mDev, keys.string());
+    if (halValues != NULL) {
+        values->setTo(halValues);
+        free(halValues);
+    } else {
+        values->clear();
+    }
+    return OK;
+}
+
+status_t DeviceHalLocal::getInputBufferSize(
+        const struct audio_config *config, size_t *size) {
+    *size = mDev->get_input_buffer_size(mDev, config);
+    return OK;
+}
+
+status_t DeviceHalLocal::openOutputStream(
+        audio_io_handle_t handle,
+        audio_devices_t devices,
+        audio_output_flags_t flags,
+        struct audio_config *config,
+        const char *address,
+        sp<StreamOutHalInterface> *outStream) {
+    audio_stream_out_t *halStream;
+    ALOGV("open_output_stream handle: %d devices: %x flags: %#x"
+            "srate: %d format %#x channels %x address %s",
+            handle, devices, flags,
+            config->sample_rate, config->format, config->channel_mask,
+            address);
+    int openResult = mDev->open_output_stream(
+            mDev, handle, devices, flags, config, &halStream, address);
+    if (openResult == OK) {
+        *outStream = new StreamOutHalLocal(halStream, this);
+    }
+    ALOGV("open_output_stream status %d stream %p", openResult, halStream);
+    return openResult;
+}
+
+status_t DeviceHalLocal::openInputStream(
+        audio_io_handle_t handle,
+        audio_devices_t devices,
+        struct audio_config *config,
+        audio_input_flags_t flags,
+        const char *address,
+        audio_source_t source,
+        sp<StreamInHalInterface> *inStream) {
+    audio_stream_in_t *halStream;
+    ALOGV("open_input_stream handle: %d devices: %x flags: %#x "
+            "srate: %d format %#x channels %x address %s source %d",
+            handle, devices, flags,
+            config->sample_rate, config->format, config->channel_mask,
+            address, source);
+    int openResult = mDev->open_input_stream(
+            mDev, handle, devices, config, &halStream, flags, address, source);
+    if (openResult == OK) {
+        *inStream = new StreamInHalLocal(halStream, this);
+    }
+    ALOGV("open_input_stream status %d stream %p", openResult, inStream);
+    return openResult;
+}
+
+status_t DeviceHalLocal::supportsAudioPatches(bool *supportsPatches) {
+    *supportsPatches = version() >= AUDIO_DEVICE_API_VERSION_3_0;
+    return OK;
+}
+
+status_t DeviceHalLocal::createAudioPatch(
+        unsigned int num_sources,
+        const struct audio_port_config *sources,
+        unsigned int num_sinks,
+        const struct audio_port_config *sinks,
+        audio_patch_handle_t *patch) {
+    if (version() >= AUDIO_DEVICE_API_VERSION_3_0) {
+        return mDev->create_audio_patch(
+                mDev, num_sources, sources, num_sinks, sinks, patch);
+    } else {
+        return INVALID_OPERATION;
+    }
+}
+
+status_t DeviceHalLocal::releaseAudioPatch(audio_patch_handle_t patch) {
+    if (version() >= AUDIO_DEVICE_API_VERSION_3_0) {
+        return mDev->release_audio_patch(mDev, patch);
+    } else {
+        return INVALID_OPERATION;
+    }
+}
+
+status_t DeviceHalLocal::getAudioPort(struct audio_port *port) {
+    return mDev->get_audio_port(mDev, port);
+}
+
+status_t DeviceHalLocal::setAudioPortConfig(const struct audio_port_config *config) {
+    if (version() >= AUDIO_DEVICE_API_VERSION_3_0)
+        return mDev->set_audio_port_config(mDev, config);
+    else
+        return INVALID_OPERATION;
+}
+
+status_t DeviceHalLocal::dump(int fd) {
+    return mDev->dump(mDev, fd);
+}
+
+void DeviceHalLocal::closeOutputStream(struct audio_stream_out *stream_out) {
+    mDev->close_output_stream(mDev, stream_out);
+}
+
+void DeviceHalLocal::closeInputStream(struct audio_stream_in *stream_in) {
+    mDev->close_input_stream(mDev, stream_in);
+}
+
+} // namespace android
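Note (illustrative, not part of this patch): as the code above shows, patch operations are gated on the device API version, and pre-3.0 HALs get INVALID_OPERATION. A caller-side sketch that probes supportsAudioPatches() before attempting to create a patch:

// Illustrative sketch only, not part of this patch. 'source' and 'sink' are assumed
// to be already filled-in port configs; on legacy HALs the caller would fall back to
// set_parameters()-based routing instead.
#include <media/audiohal/DeviceHalInterface.h>
#include <system/audio.h>

namespace android {

bool tryCreatePatch(const sp<DeviceHalInterface>& dev,
                    const struct audio_port_config* source,
                    const struct audio_port_config* sink,
                    audio_patch_handle_t* handle) {
    bool supportsPatches = false;
    if (dev->supportsAudioPatches(&supportsPatches) != OK || !supportsPatches) {
        return false;
    }
    return dev->createAudioPatch(1 /*num_sources*/, source, 1 /*num_sinks*/, sink,
                                 handle) == OK;
}

} // namespace android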
diff --git a/media/libaudiohal/DeviceHalLocal.h b/media/libaudiohal/DeviceHalLocal.h
new file mode 100644
index 0000000..865f296
--- /dev/null
+++ b/media/libaudiohal/DeviceHalLocal.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICE_HAL_LOCAL_H
+#define ANDROID_HARDWARE_DEVICE_HAL_LOCAL_H
+
+#include <hardware/audio.h>
+#include <media/audiohal/DeviceHalInterface.h>
+
+namespace android {
+
+class DeviceHalLocal : public DeviceHalInterface
+{
+  public:
+    // Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
+    virtual status_t getSupportedDevices(uint32_t *devices);
+
+    // Check to see if the audio hardware interface has been initialized.
+    virtual status_t initCheck();
+
+    // Set the audio volume of a voice call. Range is between 0.0 and 1.0.
+    virtual status_t setVoiceVolume(float volume);
+
+    // Set the audio volume for all audio activities other than voice call.
+    virtual status_t setMasterVolume(float volume);
+
+    // Get the current master volume value for the HAL.
+    virtual status_t getMasterVolume(float *volume);
+
+    // Called when the audio mode changes.
+    virtual status_t setMode(audio_mode_t mode);
+
+    // Muting control.
+    virtual status_t setMicMute(bool state);
+    virtual status_t getMicMute(bool *state);
+    virtual status_t setMasterMute(bool state);
+    virtual status_t getMasterMute(bool *state);
+
+    // Set global audio parameters.
+    virtual status_t setParameters(const String8& kvPairs);
+
+    // Get global audio parameters.
+    virtual status_t getParameters(const String8& keys, String8 *values);
+
+    // Returns audio input buffer size according to parameters passed.
+    virtual status_t getInputBufferSize(const struct audio_config *config,
+            size_t *size);
+
+    // Creates and opens the audio hardware output stream. The stream is closed
+    // by releasing all references to the returned object.
+    virtual status_t openOutputStream(
+            audio_io_handle_t handle,
+            audio_devices_t devices,
+            audio_output_flags_t flags,
+            struct audio_config *config,
+            const char *address,
+            sp<StreamOutHalInterface> *outStream);
+
+    // Creates and opens the audio hardware input stream. The stream is closed
+    // by releasing all references to the returned object.
+    virtual status_t openInputStream(
+            audio_io_handle_t handle,
+            audio_devices_t devices,
+            struct audio_config *config,
+            audio_input_flags_t flags,
+            const char *address,
+            audio_source_t source,
+            sp<StreamInHalInterface> *inStream);
+
+    // Returns whether createAudioPatch and releaseAudioPatch operations are supported.
+    virtual status_t supportsAudioPatches(bool *supportsPatches);
+
+    // Creates an audio patch between several source and sink ports.
+    virtual status_t createAudioPatch(
+            unsigned int num_sources,
+            const struct audio_port_config *sources,
+            unsigned int num_sinks,
+            const struct audio_port_config *sinks,
+            audio_patch_handle_t *patch);
+
+    // Releases an audio patch.
+    virtual status_t releaseAudioPatch(audio_patch_handle_t patch);
+
+    // Fills the list of supported attributes for a given audio port.
+    virtual status_t getAudioPort(struct audio_port *port);
+
+    // Set audio port configuration.
+    virtual status_t setAudioPortConfig(const struct audio_port_config *config);
+
+    virtual status_t dump(int fd);
+
+    void closeOutputStream(struct audio_stream_out *stream_out);
+    void closeInputStream(struct audio_stream_in *stream_in);
+
+  private:
+    audio_hw_device_t *mDev;
+
+    friend class DevicesFactoryHalLocal;
+
+    // Can not be constructed directly by clients.
+    explicit DeviceHalLocal(audio_hw_device_t *dev);
+
+    // The destructor automatically closes the device.
+    virtual ~DeviceHalLocal();
+
+    uint32_t version() const { return mDev->common.version; }
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICE_HAL_LOCAL_H
diff --git a/media/libaudiohal/DevicesFactoryHalHidl.cpp b/media/libaudiohal/DevicesFactoryHalHidl.cpp
new file mode 100644
index 0000000..a91f145
--- /dev/null
+++ b/media/libaudiohal/DevicesFactoryHalHidl.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string.h>
+
+#define LOG_TAG "DevicesFactoryHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <android/hardware/audio/2.0/IDevice.h>
+#include <media/audiohal/hidl/HalDeathHandler.h>
+#include <utils/Log.h>
+
+#include "ConversionHelperHidl.h"
+#include "DeviceHalHidl.h"
+#include "DevicesFactoryHalHidl.h"
+
+using ::android::hardware::audio::V2_0::IDevice;
+using ::android::hardware::audio::V2_0::Result;
+using ::android::hardware::Return;
+using ::android::hardware::Status;
+
+namespace android {
+
+// static
+sp<DevicesFactoryHalInterface> DevicesFactoryHalInterface::create() {
+    return new DevicesFactoryHalHidl();
+}
+
+DevicesFactoryHalHidl::DevicesFactoryHalHidl() {
+    mDevicesFactory = IDevicesFactory::getService("audio_devices_factory");
+    if (mDevicesFactory != 0) {
+        // It is assumed that the DevicesFactory is owned by AudioFlinger
+        // and thus has the same lifespan.
+        mDevicesFactory->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
+    }
+}
+
+DevicesFactoryHalHidl::~DevicesFactoryHalHidl() {
+}
+
+// static
+status_t DevicesFactoryHalHidl::nameFromHal(const char *name, IDevicesFactory::Device *device) {
+    if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_PRIMARY) == 0) {
+        *device = IDevicesFactory::Device::PRIMARY;
+        return OK;
+    } else if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_A2DP) == 0) {
+        *device = IDevicesFactory::Device::A2DP;
+        return OK;
+    } else if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_USB) == 0) {
+        *device = IDevicesFactory::Device::USB;
+        return OK;
+    } else if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX) == 0) {
+        *device = IDevicesFactory::Device::R_SUBMIX;
+        return OK;
+    }
+    ALOGE("Invalid device name %s", name);
+    return BAD_VALUE;
+}
+
+status_t DevicesFactoryHalHidl::openDevice(const char *name, sp<DeviceHalInterface> *device) {
+    if (mDevicesFactory == 0) return NO_INIT;
+    IDevicesFactory::Device hidlDevice;
+    status_t status = nameFromHal(name, &hidlDevice);
+    if (status != OK) return status;
+    Result retval = Result::NOT_INITIALIZED;
+    Return<void> ret = mDevicesFactory->openDevice(
+            hidlDevice,
+            [&](Result r, const sp<IDevice>& result) {
+                retval = r;
+                if (retval == Result::OK) {
+                    *device = new DeviceHalHidl(result);
+                }
+            });
+    if (ret.isOk()) {
+        if (retval == Result::OK) return OK;
+        else if (retval == Result::INVALID_ARGUMENTS) return BAD_VALUE;
+        else return NO_INIT;
+    }
+    return FAILED_TRANSACTION;
+}
+
+} // namespace android
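Note (illustrative, not part of this patch): DevicesFactoryHalInterface::create() selects the HIDL or local implementation at build time, so callers look the same either way. A minimal sketch of opening the primary module:

// Illustrative sketch only, not part of this patch.
#define LOG_TAG "AudioHalExample"
#include <media/audiohal/DevicesFactoryHalInterface.h>
#include <hardware/audio.h>
#include <utils/Log.h>

namespace android {

sp<DeviceHalInterface> openPrimaryHal() {
    sp<DevicesFactoryHalInterface> factory = DevicesFactoryHalInterface::create();
    sp<DeviceHalInterface> dev;
    status_t rc = factory->openDevice(AUDIO_HARDWARE_MODULE_ID_PRIMARY, &dev);
    if (rc != OK) {
        ALOGE("Failed to open the primary audio HAL module: %d", rc);
        return nullptr;
    }
    return dev;
}

} // namespace android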
diff --git a/media/libaudiohal/DevicesFactoryHalHidl.h b/media/libaudiohal/DevicesFactoryHalHidl.h
new file mode 100644
index 0000000..a26dec1
--- /dev/null
+++ b/media/libaudiohal/DevicesFactoryHalHidl.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_H
+#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_H
+
+#include <android/hardware/audio/2.0/IDevicesFactory.h>
+#include <media/audiohal/DevicesFactoryHalInterface.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+#include "DeviceHalHidl.h"
+
+using ::android::hardware::audio::V2_0::IDevicesFactory;
+
+namespace android {
+
+class DevicesFactoryHalHidl : public DevicesFactoryHalInterface
+{
+  public:
+    // Opens a device with the specified name. To close the device, it is
+    // necessary to release references to the returned object.
+    virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
+
+  private:
+    friend class DevicesFactoryHalInterface;
+
+    sp<IDevicesFactory> mDevicesFactory;
+
+    static status_t nameFromHal(const char *name, IDevicesFactory::Device *device);
+
+    // Can not be constructed directly by clients.
+    DevicesFactoryHalHidl();
+
+    virtual ~DevicesFactoryHalHidl();
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_H
diff --git a/media/libaudiohal/DevicesFactoryHalLocal.cpp b/media/libaudiohal/DevicesFactoryHalLocal.cpp
new file mode 100644
index 0000000..cd9a9e7
--- /dev/null
+++ b/media/libaudiohal/DevicesFactoryHalLocal.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DevicesFactoryHalLocal"
+//#define LOG_NDEBUG 0
+
+#include <string.h>
+
+#include <hardware/audio.h>
+#include <utils/Log.h>
+
+#include "DeviceHalLocal.h"
+#include "DevicesFactoryHalLocal.h"
+
+namespace android {
+
+// static
+sp<DevicesFactoryHalInterface> DevicesFactoryHalInterface::create() {
+    return new DevicesFactoryHalLocal();
+}
+
+static status_t load_audio_interface(const char *if_name, audio_hw_device_t **dev)
+{
+    const hw_module_t *mod;
+    int rc;
+
+    rc = hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, if_name, &mod);
+    if (rc) {
+        ALOGE("%s couldn't load audio hw module %s.%s (%s)", __func__,
+                AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
+        goto out;
+    }
+    rc = audio_hw_device_open(mod, dev);
+    if (rc) {
+        ALOGE("%s couldn't open audio hw device in %s.%s (%s)", __func__,
+                AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
+        goto out;
+    }
+    if ((*dev)->common.version < AUDIO_DEVICE_API_VERSION_MIN) {
+        ALOGE("%s wrong audio hw device version %04x", __func__, (*dev)->common.version);
+        rc = BAD_VALUE;
+        audio_hw_device_close(*dev);
+        goto out;
+    }
+    return OK;
+
+out:
+    *dev = NULL;
+    return rc;
+}
+
+status_t DevicesFactoryHalLocal::openDevice(const char *name, sp<DeviceHalInterface> *device) {
+    audio_hw_device_t *dev;
+    status_t rc = load_audio_interface(name, &dev);
+    if (rc == OK) {
+        *device = new DeviceHalLocal(dev);
+    }
+    return rc;
+}
+
+} // namespace android
diff --git a/media/libaudiohal/DevicesFactoryHalLocal.h b/media/libaudiohal/DevicesFactoryHalLocal.h
new file mode 100644
index 0000000..58ce4ff
--- /dev/null
+++ b/media/libaudiohal/DevicesFactoryHalLocal.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_H
+#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_H
+
+#include <media/audiohal/DevicesFactoryHalInterface.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+#include "DeviceHalLocal.h"
+
+namespace android {
+
+class DevicesFactoryHalLocal : public DevicesFactoryHalInterface
+{
+  public:
+    // Opens a device with the specified name. To close the device, it is
+    // necessary to release references to the returned object.
+    virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
+
+  private:
+    friend class DevicesFactoryHalInterface;
+
+    // Can not be constructed directly by clients.
+    DevicesFactoryHalLocal() {}
+
+    virtual ~DevicesFactoryHalLocal() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_H
diff --git a/media/libaudiohal/EffectBufferHalHidl.cpp b/media/libaudiohal/EffectBufferHalHidl.cpp
new file mode 100644
index 0000000..446d2ef
--- /dev/null
+++ b/media/libaudiohal/EffectBufferHalHidl.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <atomic>
+
+#define LOG_TAG "EffectBufferHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <android/hidl/memory/1.0/IAllocator.h>
+#include <hidlmemory/mapping.h>
+#include <utils/Log.h>
+
+#include "ConversionHelperHidl.h"
+#include "EffectBufferHalHidl.h"
+
+using ::android::hardware::Return;
+using ::android::hardware::Status;
+using ::android::hidl::memory::V1_0::IAllocator;
+
+namespace android {
+
+// static
+uint64_t EffectBufferHalHidl::makeUniqueId() {
+    static std::atomic<uint64_t> counter{1};
+    return counter++;
+}
+
+// static
+status_t EffectBufferHalInterface::allocate(
+        size_t size, sp<EffectBufferHalInterface>* buffer) {
+    return mirror(nullptr, size, buffer);
+}
+
+// static
+status_t EffectBufferHalInterface::mirror(
+        void* external, size_t size, sp<EffectBufferHalInterface>* buffer) {
+    sp<EffectBufferHalInterface> tempBuffer = new EffectBufferHalHidl(size);
+    status_t result = reinterpret_cast<EffectBufferHalHidl*>(tempBuffer.get())->init();
+    if (result == OK) {
+        tempBuffer->setExternalData(external);
+        *buffer = tempBuffer;
+    }
+    return result;
+}
+
+EffectBufferHalHidl::EffectBufferHalHidl(size_t size)
+        : mBufferSize(size), mExternalData(nullptr), mAudioBuffer{0, {nullptr}} {
+    mHidlBuffer.id = makeUniqueId();
+    mHidlBuffer.frameCount = 0;
+}
+
+EffectBufferHalHidl::~EffectBufferHalHidl() {
+}
+
+status_t EffectBufferHalHidl::init() {
+    sp<IAllocator> ashmem = IAllocator::getService("ashmem");
+    if (ashmem == 0) {
+        ALOGE("Failed to retrieve ashmem allocator service");
+        return NO_INIT;
+    }
+    status_t retval = NO_MEMORY;
+    Return<void> result = ashmem->allocate(
+            mBufferSize,
+            [&](bool success, const hidl_memory& memory) {
+                if (success) {
+                    mHidlBuffer.data = memory;
+                    retval = OK;
+                }
+            });
+    if (retval == OK) {
+        mMemory = hardware::mapMemory(mHidlBuffer.data);
+        if (mMemory != 0) {
+            mMemory->update();
+            mAudioBuffer.raw = static_cast<void*>(mMemory->getPointer());
+            memset(mAudioBuffer.raw, 0, mMemory->getSize());
+            mMemory->commit();
+        } else {
+            ALOGE("Failed to map allocated ashmem");
+            retval = NO_MEMORY;
+        }
+    }
+    return retval;
+}
+
+audio_buffer_t* EffectBufferHalHidl::audioBuffer() {
+    return &mAudioBuffer;
+}
+
+void* EffectBufferHalHidl::externalData() const {
+    return mExternalData;
+}
+
+void EffectBufferHalHidl::setFrameCount(size_t frameCount) {
+    mHidlBuffer.frameCount = frameCount;
+    mAudioBuffer.frameCount = frameCount;
+}
+
+void EffectBufferHalHidl::setExternalData(void* external) {
+    mExternalData = external;
+}
+
+void EffectBufferHalHidl::update() {
+    if (mExternalData == nullptr) return;
+    mMemory->update();
+    memcpy(mAudioBuffer.raw, mExternalData, mBufferSize);
+    mMemory->commit();
+}
+
+void EffectBufferHalHidl::commit() {
+    if (mExternalData == nullptr) return;
+    memcpy(mExternalData, mAudioBuffer.raw, mBufferSize);
+}
+
+} // namespace android
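Note (illustrative, not part of this patch): with the HIDL backend, mirror() allocates an ashmem region and keeps it in sync with the caller's memory: update() copies the external data into shared memory before processing, and commit() copies the processed data back. A sketch of one process cycle, assuming 'effect' is an already configured EffectHalInterface:

// Illustrative sketch only, not part of this patch. 'frames' and 'bytes' describe
// the same buffer; the effect is assumed to be enabled and configured for this format.
#include <media/audiohal/EffectBufferHalInterface.h>
#include <media/audiohal/EffectHalInterface.h>

namespace android {

status_t processOnce(const sp<EffectHalInterface>& effect,
                     void* inData, void* outData, size_t frames, size_t bytes) {
    sp<EffectBufferHalInterface> inBuf, outBuf;
    status_t status = EffectBufferHalInterface::mirror(inData, bytes, &inBuf);
    if (status != OK) return status;
    status = EffectBufferHalInterface::mirror(outData, bytes, &outBuf);
    if (status != OK) return status;
    inBuf->setFrameCount(frames);
    outBuf->setFrameCount(frames);
    effect->setInBuffer(inBuf);
    effect->setOutBuffer(outBuf);
    inBuf->update();             // push inData into the shared memory region
    status = effect->process();  // HIDL backend signals the HAL through the FMQ event flag
    outBuf->commit();            // pull the processed audio back into outData
    return status;
}

} // namespace android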
diff --git a/media/libaudiohal/EffectBufferHalHidl.h b/media/libaudiohal/EffectBufferHalHidl.h
new file mode 100644
index 0000000..4c4ec87
--- /dev/null
+++ b/media/libaudiohal/EffectBufferHalHidl.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_H
+#define ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_H
+
+#include <android/hardware/audio/effect/2.0/types.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidl/HidlSupport.h>
+#include <media/audiohal/EffectBufferHalInterface.h>
+#include <system/audio_effect.h>
+
+using android::hardware::audio::effect::V2_0::AudioBuffer;
+using android::hardware::hidl_memory;
+using android::hidl::memory::V1_0::IMemory;
+
+namespace android {
+
+class EffectBufferHalHidl : public EffectBufferHalInterface
+{
+  public:
+    virtual audio_buffer_t* audioBuffer();
+    virtual void* externalData() const;
+
+    virtual void setExternalData(void* external);
+    virtual void setFrameCount(size_t frameCount);
+
+    virtual void update();
+    virtual void commit();
+
+    const AudioBuffer& hidlBuffer() const { return mHidlBuffer; }
+
+  private:
+    friend class EffectBufferHalInterface;
+
+    static uint64_t makeUniqueId();
+
+    const size_t mBufferSize;
+    void* mExternalData;
+    AudioBuffer mHidlBuffer;
+    sp<IMemory> mMemory;
+    audio_buffer_t mAudioBuffer;
+
+    // Can not be constructed directly by clients.
+    explicit EffectBufferHalHidl(size_t size);
+
+    virtual ~EffectBufferHalHidl();
+
+    status_t init();
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_H
diff --git a/media/libaudiohal/EffectBufferHalLocal.cpp b/media/libaudiohal/EffectBufferHalLocal.cpp
new file mode 100644
index 0000000..20b1339
--- /dev/null
+++ b/media/libaudiohal/EffectBufferHalLocal.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectBufferHalLocal"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+
+#include "EffectBufferHalLocal.h"
+
+namespace android {
+
+// static
+status_t EffectBufferHalInterface::allocate(
+        size_t size, sp<EffectBufferHalInterface>* buffer) {
+    *buffer = new EffectBufferHalLocal(size);
+    return OK;
+}
+
+// static
+status_t EffectBufferHalInterface::mirror(
+        void* external, size_t size, sp<EffectBufferHalInterface>* buffer) {
+    *buffer = new EffectBufferHalLocal(external, size);
+    return OK;
+}
+
+EffectBufferHalLocal::EffectBufferHalLocal(size_t size)
+        : mOwnBuffer(new uint8_t[size]),
+          mBufferSize(size),
+          mAudioBuffer{0, {mOwnBuffer.get()}} {
+}
+
+EffectBufferHalLocal::EffectBufferHalLocal(void* external, size_t size)
+        : mOwnBuffer(nullptr),
+          mBufferSize(size),
+          mAudioBuffer{0, {external}} {
+}
+
+EffectBufferHalLocal::~EffectBufferHalLocal() {
+}
+
+audio_buffer_t* EffectBufferHalLocal::audioBuffer() {
+    return &mAudioBuffer;
+}
+
+void* EffectBufferHalLocal::externalData() const {
+    return mAudioBuffer.raw;
+}
+
+void EffectBufferHalLocal::setFrameCount(size_t frameCount) {
+    mAudioBuffer.frameCount = frameCount;
+}
+
+void EffectBufferHalLocal::setExternalData(void* external) {
+    ALOGE_IF(mOwnBuffer != nullptr, "Attempt to set external data for allocated buffer");
+    mAudioBuffer.raw = external;
+}
+
+void EffectBufferHalLocal::update() {
+}
+
+void EffectBufferHalLocal::commit() {
+}
+
+} // namespace android
diff --git a/media/libaudiohal/EffectBufferHalLocal.h b/media/libaudiohal/EffectBufferHalLocal.h
new file mode 100644
index 0000000..df7bd43
--- /dev/null
+++ b/media/libaudiohal/EffectBufferHalLocal.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECT_BUFFER_HAL_LOCAL_H
+#define ANDROID_HARDWARE_EFFECT_BUFFER_HAL_LOCAL_H
+
+#include <memory>
+
+#include <media/audiohal/EffectBufferHalInterface.h>
+#include <system/audio_effect.h>
+
+namespace android {
+
+class EffectBufferHalLocal : public EffectBufferHalInterface
+{
+  public:
+    virtual audio_buffer_t* audioBuffer();
+    virtual void* externalData() const;
+
+    virtual void setExternalData(void* external);
+    virtual void setFrameCount(size_t frameCount);
+
+    virtual void update();
+    virtual void commit();
+
+  private:
+    friend class EffectBufferHalInterface;
+
+    std::unique_ptr<uint8_t[]> mOwnBuffer;
+    const size_t mBufferSize;
+    audio_buffer_t mAudioBuffer;
+
+    // Can not be constructed directly by clients.
+    explicit EffectBufferHalLocal(size_t size);
+    EffectBufferHalLocal(void* external, size_t size);
+
+    virtual ~EffectBufferHalLocal();
+
+    status_t init();
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECT_BUFFER_HAL_LOCAL_H
diff --git a/media/libaudiohal/EffectHalHidl.cpp b/media/libaudiohal/EffectHalHidl.cpp
new file mode 100644
index 0000000..6cf6412
--- /dev/null
+++ b/media/libaudiohal/EffectHalHidl.cpp
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <media/EffectsFactoryApi.h>
+#include <utils/Log.h>
+
+#include "ConversionHelperHidl.h"
+#include "EffectBufferHalHidl.h"
+#include "EffectHalHidl.h"
+#include "HidlUtils.h"
+
+using ::android::hardware::audio::effect::V2_0::AudioBuffer;
+using ::android::hardware::audio::effect::V2_0::MessageQueueFlagBits;
+using ::android::hardware::audio::effect::V2_0::Result;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::MQDescriptorSync;
+using ::android::hardware::Return;
+using ::android::hardware::Status;
+
+namespace android {
+
+EffectHalHidl::EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId)
+        : mEffect(effect), mEffectId(effectId), mBuffersChanged(true) {
+}
+
+EffectHalHidl::~EffectHalHidl() {
+    close();
+}
+
+// static
+void EffectHalHidl::effectDescriptorToHal(
+        const EffectDescriptor& descriptor, effect_descriptor_t* halDescriptor) {
+    HidlUtils::uuidToHal(descriptor.type, &halDescriptor->type);
+    HidlUtils::uuidToHal(descriptor.uuid, &halDescriptor->uuid);
+    halDescriptor->flags = static_cast<uint32_t>(descriptor.flags);
+    halDescriptor->cpuLoad = descriptor.cpuLoad;
+    halDescriptor->memoryUsage = descriptor.memoryUsage;
+    memcpy(halDescriptor->name, descriptor.name.data(), descriptor.name.size());
+    memcpy(halDescriptor->implementor,
+            descriptor.implementor.data(), descriptor.implementor.size());
+}
+
+// static
+status_t EffectHalHidl::analyzeResult(const Result& result) {
+    switch (result) {
+        case Result::OK: return OK;
+        case Result::INVALID_ARGUMENTS: return BAD_VALUE;
+        case Result::INVALID_STATE: return NOT_ENOUGH_DATA;
+        case Result::NOT_INITIALIZED: return NO_INIT;
+        case Result::NOT_SUPPORTED: return INVALID_OPERATION;
+        case Result::RESULT_TOO_BIG: return NO_MEMORY;
+        default: return NO_INIT;
+    }
+}
+
+status_t EffectHalHidl::setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
+    if (mInBuffer == 0 || buffer->audioBuffer() != mInBuffer->audioBuffer()) {
+        mBuffersChanged = true;
+    }
+    mInBuffer = buffer;
+    return OK;
+}
+
+status_t EffectHalHidl::setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
+    if (mOutBuffer == 0 || buffer->audioBuffer() != mOutBuffer->audioBuffer()) {
+        mBuffersChanged = true;
+    }
+    mOutBuffer = buffer;
+    return OK;
+}
+
+status_t EffectHalHidl::process() {
+    return processImpl(static_cast<uint32_t>(MessageQueueFlagBits::REQUEST_PROCESS));
+}
+
+status_t EffectHalHidl::processReverse() {
+    return processImpl(static_cast<uint32_t>(MessageQueueFlagBits::REQUEST_PROCESS_REVERSE));
+}
+
+status_t EffectHalHidl::prepareForProcessing() {
+    std::unique_ptr<StatusMQ> tempStatusMQ;
+    Result retval;
+    Return<void> ret = mEffect->prepareForProcessing(
+            [&](Result r, const MQDescriptorSync<Result>& statusMQ) {
+                retval = r;
+                if (retval == Result::OK) {
+                    tempStatusMQ.reset(new StatusMQ(statusMQ));
+                    if (tempStatusMQ->isValid() && tempStatusMQ->getEventFlagWord()) {
+                        EventFlag::createEventFlag(tempStatusMQ->getEventFlagWord(), &mEfGroup);
+                    }
+                }
+            });
+    if (!ret.isOk() || retval != Result::OK) {
+        return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
+    }
+    if (!tempStatusMQ || !tempStatusMQ->isValid() || !mEfGroup) {
+        ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for effects");
+        ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
+                "Status message queue for effects is invalid");
+        ALOGE_IF(!mEfGroup, "Event flag creation for effects failed");
+        return NO_INIT;
+    }
+    mStatusMQ = std::move(tempStatusMQ);
+    return OK;
+}
+
+status_t EffectHalHidl::processImpl(uint32_t mqFlag) {
+    if (mEffect == 0 || mInBuffer == 0 || mOutBuffer == 0) return NO_INIT;
+    status_t status;
+    if (!mStatusMQ && (status = prepareForProcessing()) != OK) {
+        return status;
+    }
+    if (mBuffersChanged && (status = setProcessBuffers()) != OK) {
+        return status;
+    }
+    // The data is already in the buffers; we just need to flush it and wake up the server side.
+    std::atomic_thread_fence(std::memory_order_release);
+    mEfGroup->wake(mqFlag);
+    uint32_t efState = 0;
+retry:
+    status_t ret = mEfGroup->wait(
+            static_cast<uint32_t>(MessageQueueFlagBits::DONE_PROCESSING), &efState, NS_PER_SEC);
+    if (efState & static_cast<uint32_t>(MessageQueueFlagBits::DONE_PROCESSING)) {
+        Result retval = Result::NOT_INITIALIZED;
+        mStatusMQ->read(&retval);
+        if (retval == Result::OK || retval == Result::INVALID_STATE) {
+            // Sync back the changed contents of the buffer.
+            std::atomic_thread_fence(std::memory_order_acquire);
+        }
+        return analyzeResult(retval);
+    }
+    if (ret == -EAGAIN) {
+        // This normally retries no more than once.
+        goto retry;
+    }
+    return ret;
+}
+
+status_t EffectHalHidl::setProcessBuffers() {
+    Return<Result> ret = mEffect->setProcessBuffers(
+            reinterpret_cast<EffectBufferHalHidl*>(mInBuffer.get())->hidlBuffer(),
+            reinterpret_cast<EffectBufferHalHidl*>(mOutBuffer.get())->hidlBuffer());
+    if (ret.isOk() && ret == Result::OK) {
+        mBuffersChanged = false;
+        return OK;
+    }
+    return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
+}
+
+status_t EffectHalHidl::command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
+        uint32_t *replySize, void *pReplyData) {
+    if (mEffect == 0) return NO_INIT;
+    hidl_vec<uint8_t> hidlData;
+    if (pCmdData != nullptr && cmdSize > 0) {
+        hidlData.setToExternal(reinterpret_cast<uint8_t*>(pCmdData), cmdSize);
+    }
+    status_t status;
+    uint32_t replySizeStub = 0;
+    if (replySize == nullptr) replySize = &replySizeStub;
+    Return<void> ret = mEffect->command(cmdCode, hidlData, *replySize,
+            [&](int32_t s, const hidl_vec<uint8_t>& result) {
+                status = s;
+                if (status == 0) {
+                    if (*replySize > result.size()) *replySize = result.size();
+                    if (pReplyData != nullptr && *replySize > 0) {
+                        memcpy(pReplyData, &result[0], *replySize);
+                    }
+                }
+            });
+    return ret.isOk() ? status : FAILED_TRANSACTION;
+}
+
+status_t EffectHalHidl::getDescriptor(effect_descriptor_t *pDescriptor) {
+    if (mEffect == 0) return NO_INIT;
+    Result retval = Result::NOT_INITIALIZED;
+    Return<void> ret = mEffect->getDescriptor(
+            [&](Result r, const EffectDescriptor& result) {
+                retval = r;
+                if (retval == Result::OK) {
+                    effectDescriptorToHal(result, pDescriptor);
+                }
+            });
+    return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
+}
+
+status_t EffectHalHidl::close() {
+    if (mEffect == 0) return NO_INIT;
+    Return<Result> ret = mEffect->close();
+    return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
+}
+
+} // namespace android
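Note (illustrative, not part of this patch): command() keeps the legacy opaque command protocol working over HIDL by marshalling the command payload into a hidl_vec and copying the reply back into the caller's buffer. For example, enabling an effect still goes through EFFECT_CMD_ENABLE:

// Illustrative sketch only, not part of this patch. EFFECT_CMD_ENABLE carries no
// command payload and replies with a single status word, as in the legacy effect API.
#include <media/audiohal/EffectHalInterface.h>
#include <system/audio_effect.h>

namespace android {

status_t enableEffect(const sp<EffectHalInterface>& effect) {
    int32_t reply = 0;
    uint32_t replySize = sizeof(reply);
    status_t status = effect->command(EFFECT_CMD_ENABLE, 0 /*cmdSize*/, nullptr /*pCmdData*/,
                                      &replySize, &reply);
    return status == OK ? static_cast<status_t>(reply) : status;
}

} // namespace android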
diff --git a/media/libaudiohal/EffectHalHidl.h b/media/libaudiohal/EffectHalHidl.h
new file mode 100644
index 0000000..1ed1153
--- /dev/null
+++ b/media/libaudiohal/EffectHalHidl.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECT_HAL_HIDL_H
+#define ANDROID_HARDWARE_EFFECT_HAL_HIDL_H
+
+#include <android/hardware/audio/effect/2.0/IEffect.h>
+#include <media/audiohal/EffectHalInterface.h>
+#include <fmq/EventFlag.h>
+#include <fmq/MessageQueue.h>
+#include <system/audio_effect.h>
+
+using ::android::hardware::audio::effect::V2_0::EffectDescriptor;
+using ::android::hardware::audio::effect::V2_0::IEffect;
+using ::android::hardware::EventFlag;
+using ::android::hardware::MessageQueue;
+
+namespace android {
+
+class EffectHalHidl : public EffectHalInterface
+{
+  public:
+    // Set the input buffer.
+    virtual status_t setInBuffer(const sp<EffectBufferHalInterface>& buffer);
+
+    // Set the output buffer.
+    virtual status_t setOutBuffer(const sp<EffectBufferHalInterface>& buffer);
+
+    // Effect process function.
+    virtual status_t process();
+
+    // Process reverse stream function. This function is used to pass
+    // a reference stream to the effect engine.
+    virtual status_t processReverse();
+
+    // Send a command and receive a response to/from effect engine.
+    virtual status_t command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
+            uint32_t *replySize, void *pReplyData);
+
+    // Returns the effect descriptor.
+    virtual status_t getDescriptor(effect_descriptor_t *pDescriptor);
+
+    // Free resources on the remote side.
+    virtual status_t close();
+
+    uint64_t effectId() const { return mEffectId; }
+
+    static void effectDescriptorToHal(
+            const EffectDescriptor& descriptor, effect_descriptor_t* halDescriptor);
+
+  private:
+    friend class EffectsFactoryHalHidl;
+    typedef MessageQueue<
+        hardware::audio::effect::V2_0::Result, hardware::kSynchronizedReadWrite> StatusMQ;
+
+    sp<IEffect> mEffect;
+    const uint64_t mEffectId;
+    sp<EffectBufferHalInterface> mInBuffer;
+    sp<EffectBufferHalInterface> mOutBuffer;
+    bool mBuffersChanged;
+    std::unique_ptr<StatusMQ> mStatusMQ;
+    EventFlag* mEfGroup;
+
+    static status_t analyzeResult(const hardware::audio::effect::V2_0::Result& result);
+
+    // Can not be constructed directly by clients.
+    EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId);
+
+    // The destructor automatically releases the effect.
+    virtual ~EffectHalHidl();
+
+    status_t prepareForProcessing();
+    status_t processImpl(uint32_t mqFlag);
+    status_t setProcessBuffers();
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECT_HAL_HIDL_H
diff --git a/media/libaudiohal/EffectHalLocal.cpp b/media/libaudiohal/EffectHalLocal.cpp
new file mode 100644
index 0000000..dd465c3
--- /dev/null
+++ b/media/libaudiohal/EffectHalLocal.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectHalLocal"
+//#define LOG_NDEBUG 0
+
+#include <media/EffectsFactoryApi.h>
+#include <utils/Log.h>
+
+#include "EffectHalLocal.h"
+
+namespace android {
+
+EffectHalLocal::EffectHalLocal(effect_handle_t handle)
+        : mHandle(handle) {
+}
+
+EffectHalLocal::~EffectHalLocal() {
+    int status = EffectRelease(mHandle);
+    ALOGW_IF(status, "Error releasing effect %p: %s", mHandle, strerror(-status));
+    mHandle = 0;
+}
+
+status_t EffectHalLocal::setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
+    mInBuffer = buffer;
+    return OK;
+}
+
+status_t EffectHalLocal::setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
+    mOutBuffer = buffer;
+    return OK;
+}
+
+status_t EffectHalLocal::process() {
+    if (mInBuffer == nullptr || mOutBuffer == nullptr) {
+        ALOGE_IF(mInBuffer == nullptr, "Input buffer not set");
+        ALOGE_IF(mOutBuffer == nullptr, "Output buffer not set");
+        return NO_INIT;
+    }
+    return (*mHandle)->process(mHandle, mInBuffer->audioBuffer(), mOutBuffer->audioBuffer());
+}
+
+status_t EffectHalLocal::processReverse() {
+    if ((*mHandle)->process_reverse != NULL) {
+        if (mInBuffer == nullptr || mOutBuffer == nullptr) {
+            ALOGE_IF(mInBuffer == nullptr, "Input buffer not set");
+            ALOGE_IF(mOutBuffer == nullptr, "Output buffer not set");
+            return NO_INIT;
+        }
+        return (*mHandle)->process_reverse(
+                mHandle, mInBuffer->audioBuffer(), mOutBuffer->audioBuffer());
+    } else {
+        return INVALID_OPERATION;
+    }
+}
+
+status_t EffectHalLocal::command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
+        uint32_t *replySize, void *pReplyData) {
+    return (*mHandle)->command(mHandle, cmdCode, cmdSize, pCmdData, replySize, pReplyData);
+}
+
+status_t EffectHalLocal::getDescriptor(effect_descriptor_t *pDescriptor) {
+    return (*mHandle)->get_descriptor(mHandle, pDescriptor);
+}
+
+status_t EffectHalLocal::close() {
+    return OK;
+}
+
+} // namespace android
diff --git a/media/libaudiohal/EffectHalLocal.h b/media/libaudiohal/EffectHalLocal.h
new file mode 100644
index 0000000..b499462
--- /dev/null
+++ b/media/libaudiohal/EffectHalLocal.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECT_HAL_LOCAL_H
+#define ANDROID_HARDWARE_EFFECT_HAL_LOCAL_H
+
+#include <hardware/audio_effect.h>
+#include <media/audiohal/EffectHalInterface.h>
+
+namespace android {
+
+class EffectHalLocal : public EffectHalInterface
+{
+  public:
+    // Set the input buffer.
+    virtual status_t setInBuffer(const sp<EffectBufferHalInterface>& buffer);
+
+    // Set the output buffer.
+    virtual status_t setOutBuffer(const sp<EffectBufferHalInterface>& buffer);
+
+    // Effect process function.
+    virtual status_t process();
+
+    // Process reverse stream function. This function is used to pass
+    // a reference stream to the effect engine.
+    virtual status_t processReverse();
+
+    // Send a command and receive a response to/from effect engine.
+    virtual status_t command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
+            uint32_t *replySize, void *pReplyData);
+
+    // Returns the effect descriptor.
+    virtual status_t getDescriptor(effect_descriptor_t *pDescriptor);
+
+    // Free resources on the remote side.
+    virtual status_t close();
+
+    effect_handle_t handle() const { return mHandle; }
+
+  private:
+    effect_handle_t mHandle;
+    sp<EffectBufferHalInterface> mInBuffer;
+    sp<EffectBufferHalInterface> mOutBuffer;
+
+    friend class EffectsFactoryHalLocal;
+
+    // Can not be constructed directly by clients.
+    explicit EffectHalLocal(effect_handle_t handle);
+
+    // The destructor automatically releases the effect.
+    virtual ~EffectHalLocal();
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECT_HAL_LOCAL_H
diff --git a/media/libaudiohal/EffectsFactoryHalHidl.cpp b/media/libaudiohal/EffectsFactoryHalHidl.cpp
new file mode 100644
index 0000000..1ab5dad
--- /dev/null
+++ b/media/libaudiohal/EffectsFactoryHalHidl.cpp
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectsFactoryHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <cutils/native_handle.h>
+#include <media/EffectsFactoryApi.h>
+
+#include "ConversionHelperHidl.h"
+#include "EffectHalHidl.h"
+#include "EffectsFactoryHalHidl.h"
+#include "HidlUtils.h"
+
+using ::android::hardware::audio::common::V2_0::Uuid;
+using ::android::hardware::audio::effect::V2_0::IEffect;
+using ::android::hardware::audio::effect::V2_0::Result;
+using ::android::hardware::Return;
+using ::android::hardware::Status;
+
+namespace android {
+
+// static
+sp<EffectsFactoryHalInterface> EffectsFactoryHalInterface::create() {
+    return new EffectsFactoryHalHidl();
+}
+
+// static
+bool EffectsFactoryHalInterface::isNullUuid(const effect_uuid_t *pEffectUuid) {
+    return EffectIsNullUuid(pEffectUuid);
+}
+
+EffectsFactoryHalHidl::EffectsFactoryHalHidl() : ConversionHelperHidl("EffectsFactory") {
+    mEffectsFactory = IEffectsFactory::getService("audio_effects_factory");
+}
+
+EffectsFactoryHalHidl::~EffectsFactoryHalHidl() {
+}
+
+status_t EffectsFactoryHalHidl::queryAllDescriptors() {
+    if (mEffectsFactory == 0) return NO_INIT;
+    Result retval = Result::NOT_INITIALIZED;
+    Return<void> ret = mEffectsFactory->getAllDescriptors(
+            [&](Result r, const hidl_vec<EffectDescriptor>& result) {
+                retval = r;
+                if (retval == Result::OK) {
+                    mLastDescriptors = result;
+                }
+            });
+    if (ret.isOk()) {
+        return retval == Result::OK ? OK : NO_INIT;
+    }
+    mLastDescriptors.resize(0);
+    return processReturn(__FUNCTION__, ret);
+}
+
+status_t EffectsFactoryHalHidl::queryNumberEffects(uint32_t *pNumEffects) {
+    status_t queryResult = queryAllDescriptors();
+    if (queryResult == OK) {
+        *pNumEffects = mLastDescriptors.size();
+    }
+    return queryResult;
+}
+
+status_t EffectsFactoryHalHidl::getDescriptor(
+        uint32_t index, effect_descriptor_t *pDescriptor) {
+    // TODO: We need to somehow track the changes on the server side
+    // or figure out how to convert everybody to query all the descriptors at once.
+    // TODO: check for nullptr
+    if (mLastDescriptors.size() == 0) {
+        status_t queryResult = queryAllDescriptors();
+        if (queryResult != OK) return queryResult;
+    }
+    if (index >= mLastDescriptors.size()) return NAME_NOT_FOUND;
+    EffectHalHidl::effectDescriptorToHal(mLastDescriptors[index], pDescriptor);
+    return OK;
+}
+
+status_t EffectsFactoryHalHidl::getDescriptor(
+        const effect_uuid_t *pEffectUuid, effect_descriptor_t *pDescriptor) {
+    // TODO: check for nullptr
+    if (mEffectsFactory == 0) return NO_INIT;
+    Uuid hidlUuid;
+    HidlUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
+    Result retval = Result::NOT_INITIALIZED;
+    Return<void> ret = mEffectsFactory->getDescriptor(hidlUuid,
+            [&](Result r, const EffectDescriptor& result) {
+                retval = r;
+                if (retval == Result::OK) {
+                    EffectHalHidl::effectDescriptorToHal(result, pDescriptor);
+                }
+            });
+    if (ret.isOk()) {
+        if (retval == Result::OK) return OK;
+        else if (retval == Result::INVALID_ARGUMENTS) return NAME_NOT_FOUND;
+        else return NO_INIT;
+    }
+    return processReturn(__FUNCTION__, ret);
+}
+
+status_t EffectsFactoryHalHidl::createEffect(
+        const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId,
+        sp<EffectHalInterface> *effect) {
+    if (mEffectsFactory == 0) return NO_INIT;
+    Uuid hidlUuid;
+    HidlUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
+    Result retval = Result::NOT_INITIALIZED;
+    Return<void> ret = mEffectsFactory->createEffect(
+            hidlUuid, sessionId, ioId,
+            [&](Result r, const sp<IEffect>& result, uint64_t effectId) {
+                retval = r;
+                if (retval == Result::OK) {
+                    *effect = new EffectHalHidl(result, effectId);
+                }
+            });
+    if (ret.isOk()) {
+        if (retval == Result::OK) return OK;
+        else if (retval == Result::INVALID_ARGUMENTS) return NAME_NOT_FOUND;
+        else return NO_INIT;
+    }
+    return processReturn(__FUNCTION__, ret);
+}
+
+status_t EffectsFactoryHalHidl::dumpEffects(int fd) {
+    if (mEffectsFactory == 0) return NO_INIT;
+    native_handle_t* hidlHandle = native_handle_create(1, 0);
+    hidlHandle->data[0] = fd;
+    Return<void> ret = mEffectsFactory->debugDump(hidlHandle);
+    native_handle_delete(hidlHandle);
+    return processReturn(__FUNCTION__, ret);
+}
+
+} // namespace android
diff --git a/media/libaudiohal/EffectsFactoryHalHidl.h b/media/libaudiohal/EffectsFactoryHalHidl.h
new file mode 100644
index 0000000..e89f042
--- /dev/null
+++ b/media/libaudiohal/EffectsFactoryHalHidl.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
+#define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
+
+#include <android/hardware/audio/effect/2.0/IEffectsFactory.h>
+#include <android/hardware/audio/effect/2.0/types.h>
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+
+namespace android {
+
+using ::android::hardware::audio::effect::V2_0::EffectDescriptor;
+using ::android::hardware::audio::effect::V2_0::IEffectsFactory;
+using ::android::hardware::hidl_vec;
+
+class EffectsFactoryHalHidl : public EffectsFactoryHalInterface, public ConversionHelperHidl
+{
+  public:
+    // Returns the number of different effects in all loaded libraries.
+    virtual status_t queryNumberEffects(uint32_t *pNumEffects);
+
+    // Returns the descriptor of the effect at the given index.
+    virtual status_t getDescriptor(uint32_t index,
+            effect_descriptor_t *pDescriptor);
+
+    virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
+            effect_descriptor_t *pDescriptor);
+
+    // Creates an effect engine of the specified type.
+    // To release the effect engine, it is necessary to release references
+    // to the returned effect object.
+    virtual status_t createEffect(const effect_uuid_t *pEffectUuid,
+            int32_t sessionId, int32_t ioId,
+            sp<EffectHalInterface> *effect);
+
+    virtual status_t dumpEffects(int fd);
+
+  private:
+    friend class EffectsFactoryHalInterface;
+
+    sp<IEffectsFactory> mEffectsFactory;
+    hidl_vec<EffectDescriptor> mLastDescriptors;
+
+    // Can not be constructed directly by clients.
+    EffectsFactoryHalHidl();
+    virtual ~EffectsFactoryHalHidl();
+
+    status_t queryAllDescriptors();
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
diff --git a/media/libaudiohal/EffectsFactoryHalLocal.cpp b/media/libaudiohal/EffectsFactoryHalLocal.cpp
new file mode 100644
index 0000000..bbdef5d
--- /dev/null
+++ b/media/libaudiohal/EffectsFactoryHalLocal.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/EffectsFactoryApi.h>
+
+#include "EffectHalLocal.h"
+#include "EffectsFactoryHalLocal.h"
+
+namespace android {
+
+// static
+sp<EffectsFactoryHalInterface> EffectsFactoryHalInterface::create() {
+    return new EffectsFactoryHalLocal();
+}
+
+// static
+bool EffectsFactoryHalInterface::isNullUuid(const effect_uuid_t *pEffectUuid) {
+    return EffectIsNullUuid(pEffectUuid);
+}
+
+status_t EffectsFactoryHalLocal::queryNumberEffects(uint32_t *pNumEffects) {
+    return EffectQueryNumberEffects(pNumEffects);
+}
+
+status_t EffectsFactoryHalLocal::getDescriptor(
+        uint32_t index, effect_descriptor_t *pDescriptor) {
+    return EffectQueryEffect(index, pDescriptor);
+}
+
+status_t EffectsFactoryHalLocal::getDescriptor(
+        const effect_uuid_t *pEffectUuid, effect_descriptor_t *pDescriptor) {
+    return EffectGetDescriptor(pEffectUuid, pDescriptor);
+}
+
+status_t EffectsFactoryHalLocal::createEffect(
+        const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId,
+        sp<EffectHalInterface> *effect) {
+    effect_handle_t handle;
+    int result = EffectCreate(pEffectUuid, sessionId, ioId, &handle);
+    if (result == 0) {
+        *effect = new EffectHalLocal(handle);
+    }
+    return result;
+}
+
+status_t EffectsFactoryHalLocal::dumpEffects(int fd) {
+    return EffectDumpEffects(fd);
+}
+
+} // namespace android
diff --git a/media/libaudiohal/EffectsFactoryHalLocal.h b/media/libaudiohal/EffectsFactoryHalLocal.h
new file mode 100644
index 0000000..d5b81be
--- /dev/null
+++ b/media/libaudiohal/EffectsFactoryHalLocal.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_LOCAL_H
+#define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_LOCAL_H
+
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+
+namespace android {
+
+class EffectsFactoryHalLocal : public EffectsFactoryHalInterface
+{
+  public:
+    // Returns the number of different effects in all loaded libraries.
+    virtual status_t queryNumberEffects(uint32_t *pNumEffects);
+
+    // Returns the descriptor of the effect at the given index.
+    virtual status_t getDescriptor(uint32_t index,
+            effect_descriptor_t *pDescriptor);
+
+    virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
+            effect_descriptor_t *pDescriptor);
+
+    // Creates an effect engine of the specified type.
+    // To release the effect engine, it is necessary to release references
+    // to the returned effect object.
+    virtual status_t createEffect(const effect_uuid_t *pEffectUuid,
+            int32_t sessionId, int32_t ioId,
+            sp<EffectHalInterface> *effect);
+
+    virtual status_t dumpEffects(int fd);
+
+  private:
+    friend class EffectsFactoryHalInterface;
+
+    // Can not be constructed directly by clients.
+    EffectsFactoryHalLocal() {}
+
+    virtual ~EffectsFactoryHalLocal() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_LOCAL_H
diff --git a/media/libaudiohal/HalDeathHandlerHidl.cpp b/media/libaudiohal/HalDeathHandlerHidl.cpp
new file mode 100644
index 0000000..a742671
--- /dev/null
+++ b/media/libaudiohal/HalDeathHandlerHidl.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "HalDeathHandler"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+
+#include <media/audiohal/hidl/HalDeathHandler.h>
+
+namespace android {
+
+ANDROID_SINGLETON_STATIC_INSTANCE(HalDeathHandler);
+
+// static
+sp<HalDeathHandler> HalDeathHandler::getInstance() {
+    return &Singleton<HalDeathHandler>::getInstance();
+}
+
+HalDeathHandler::HalDeathHandler() : mSelf(this) {
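+    // mSelf (presumably a strong self-reference, see HalDeathHandler.h) keeps the singleton
+    // alive for the entire process lifetime.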
+}
+
+HalDeathHandler::~HalDeathHandler() {
+}
+
+void HalDeathHandler::registerAtExitHandler(void* cookie, AtExitHandler handler) {
+    std::lock_guard<std::mutex> guard(mHandlersLock);
+    mHandlers.insert({cookie, handler});
+}
+
+void HalDeathHandler::unregisterAtExitHandler(void* cookie) {
+    std::lock_guard<std::mutex> guard(mHandlersLock);
+    mHandlers.erase(cookie);
+}
+
+void HalDeathHandler::serviceDied(uint64_t /*cookie*/, const wp<IBase>& /*who*/) {
+    // No matter which of the service objects has died,
+    // we need to run all the registered handlers and crash our process.
+    std::lock_guard<std::mutex> guard(mHandlersLock);
+    for (const auto& handler : mHandlers) {
+        handler.second();
+    }
+    LOG_ALWAYS_FATAL("HAL server crashed, need to restart");
+}
+
+} // namespace android
diff --git a/media/libaudiohal/StreamHalHidl.cpp b/media/libaudiohal/StreamHalHidl.cpp
new file mode 100644
index 0000000..cbc8a08
--- /dev/null
+++ b/media/libaudiohal/StreamHalHidl.cpp
@@ -0,0 +1,618 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <time.h>
+
+#define LOG_TAG "StreamHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <android/hardware/audio/2.0/IStreamOutCallback.h>
+#include <utils/Log.h>
+
+#include "DeviceHalHidl.h"
+#include "EffectHalHidl.h"
+#include "StreamHalHidl.h"
+
+using ::android::hardware::audio::common::V2_0::AudioChannelMask;
+using ::android::hardware::audio::common::V2_0::AudioFormat;
+using ::android::hardware::audio::V2_0::AudioDrain;
+using ::android::hardware::audio::V2_0::IStreamOutCallback;
+using ::android::hardware::audio::V2_0::MessageQueueFlagBits;
+using ::android::hardware::audio::V2_0::MmapBufferInfo;
+using ::android::hardware::audio::V2_0::MmapPosition;
+using ::android::hardware::audio::V2_0::ParameterValue;
+using ::android::hardware::audio::V2_0::Result;
+using ::android::hardware::audio::V2_0::ThreadPriority;
+using ::android::hardware::audio::V2_0::TimeSpec;
+using ::android::hardware::MQDescriptorSync;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+namespace android {
+
+StreamHalHidl::StreamHalHidl(IStream *stream)
+        : ConversionHelperHidl("Stream"),
+          mHalThreadPriority(static_cast<int>(ThreadPriority::NORMAL)),
+          mStream(stream) {
+}
+
+StreamHalHidl::~StreamHalHidl() {
+    mStream = nullptr;
+}
+
+status_t StreamHalHidl::getSampleRate(uint32_t *rate) {
+    if (!mStream) return NO_INIT;
+    return processReturn("getSampleRate", mStream->getSampleRate(), rate);
+}
+
+status_t StreamHalHidl::getBufferSize(size_t *size) {
+    if (!mStream) return NO_INIT;
+    return processReturn("getBufferSize", mStream->getBufferSize(), size);
+}
+
+status_t StreamHalHidl::getChannelMask(audio_channel_mask_t *mask) {
+    if (!mStream) return NO_INIT;
+    return processReturn("getChannelMask", mStream->getChannelMask(), mask);
+}
+
+status_t StreamHalHidl::getFormat(audio_format_t *format) {
+    if (!mStream) return NO_INIT;
+    return processReturn("getFormat", mStream->getFormat(), format);
+}
+
+status_t StreamHalHidl::getAudioProperties(
+        uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
+    if (!mStream) return NO_INIT;
+    Return<void> ret = mStream->getAudioProperties(
+            [&](uint32_t sr, AudioChannelMask m, AudioFormat f) {
+                *sampleRate = sr;
+                *mask = static_cast<audio_channel_mask_t>(m);
+                *format = static_cast<audio_format_t>(f);
+            });
+    return processReturn("getAudioProperties", ret);
+}
+
+status_t StreamHalHidl::setParameters(const String8& kvPairs) {
+    if (!mStream) return NO_INIT;
+    hidl_vec<ParameterValue> hidlParams;
+    status_t status = parametersFromHal(kvPairs, &hidlParams);
+    if (status != OK) return status;
+    return processReturn("setParameters", mStream->setParameters(hidlParams));
+}
+
+status_t StreamHalHidl::getParameters(const String8& keys, String8 *values) {
+    values->clear();
+    if (!mStream) return NO_INIT;
+    hidl_vec<hidl_string> hidlKeys;
+    status_t status = keysFromHal(keys, &hidlKeys);
+    if (status != OK) return status;
+    Result retval;
+    Return<void> ret = mStream->getParameters(
+            hidlKeys,
+            [&](Result r, const hidl_vec<ParameterValue>& parameters) {
+                retval = r;
+                if (retval == Result::OK) {
+                    parametersToHal(parameters, values);
+                }
+            });
+    return processReturn("getParameters", ret, retval);
+}
+
+status_t StreamHalHidl::addEffect(sp<EffectHalInterface> effect) {
+    if (!mStream) return NO_INIT;
+    return processReturn("addEffect", mStream->addEffect(
+                    static_cast<EffectHalHidl*>(effect.get())->effectId()));
+}
+
+status_t StreamHalHidl::removeEffect(sp<EffectHalInterface> effect) {
+    if (!mStream) return NO_INIT;
+    return processReturn("removeEffect", mStream->removeEffect(
+                    static_cast<EffectHalHidl*>(effect.get())->effectId()));
+}
+
+status_t StreamHalHidl::standby() {
+    if (!mStream) return NO_INIT;
+    return processReturn("standby", mStream->standby());
+}
+
+status_t StreamHalHidl::dump(int fd) {
+    if (!mStream) return NO_INIT;
+    native_handle_t* hidlHandle = native_handle_create(1, 0);
+    hidlHandle->data[0] = fd;
+    Return<void> ret = mStream->debugDump(hidlHandle);
+    native_handle_delete(hidlHandle);
+    return processReturn("dump", ret);
+}
+
+status_t StreamHalHidl::start() {
+    if (!mStream) return NO_INIT;
+    return processReturn("start", mStream->start());
+}
+
+status_t StreamHalHidl::stop() {
+    if (!mStream) return NO_INIT;
+    return processReturn("stop", mStream->stop());
+}
+
+status_t StreamHalHidl::createMmapBuffer(int32_t minSizeFrames,
+                                  struct audio_mmap_buffer_info *info) {
+    Result retval;
+    Return<void> ret = mStream->createMmapBuffer(
+            minSizeFrames,
+            [&](Result r, const MmapBufferInfo& hidlInfo) {
+                retval = r;
+                if (retval == Result::OK) {
+                    const native_handle *handle = hidlInfo.sharedMemory.handle();
+                    if (handle->numFds > 0) {
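+                        // dup() the fd: the handle carried in the HIDL callback is only valid
+                        // for the duration of the call.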
+                        info->shared_memory_fd = dup(handle->data[0]);
+                        info->buffer_size_frames = hidlInfo.bufferSizeFrames;
+                        info->burst_size_frames = hidlInfo.burstSizeFrames;
+                        // info->shared_memory_address is not needed in HIDL context
+                        info->shared_memory_address = NULL;
+                    } else {
+                        retval = Result::NOT_INITIALIZED;
+                    }
+                }
+            });
+    return processReturn("createMmapBuffer", ret, retval);
+}
+
+status_t StreamHalHidl::getMmapPosition(struct audio_mmap_position *position) {
+    Result retval;
+    Return<void> ret = mStream->getMmapPosition(
+            [&](Result r, const MmapPosition& hidlPosition) {
+                retval = r;
+                if (retval == Result::OK) {
+                    position->time_nanoseconds = hidlPosition.timeNanoseconds;
+                    position->position_frames = hidlPosition.positionFrames;
+                }
+            });
+    return processReturn("getMmapPosition", ret, retval);
+}
+
+status_t StreamHalHidl::setHalThreadPriority(int priority) {
+    mHalThreadPriority = priority;
+    return OK;
+}
+
+namespace {
+
+/* Notes on callback ownership.
+
+This is how the (Hw)Binder ownership model works. The server implementation
+is owned by the Binder framework (via sp<>). Proxies are owned by clients.
+When the last proxy disappears, the Binder framework releases the server implementation.
+
+Thus, there is no need to keep any references to StreamOutCallback (the
+server implementation) -- it will live as long as the HAL server holds a strong
+reference to the IStreamOutCallback proxy. We clear that reference by calling
+'clearCallback' from the destructor of StreamOutHalHidl.
+
+The callback only keeps a weak reference to the stream. The stream is owned
+by AudioFlinger.
+
+*/
+
+struct StreamOutCallback : public IStreamOutCallback {
+    StreamOutCallback(const wp<StreamOutHalHidl>& stream) : mStream(stream) {}
+
+    // IStreamOutCallback implementation
+    Return<void> onWriteReady()  override {
+        sp<StreamOutHalHidl> stream = mStream.promote();
+        if (stream != 0) {
+            stream->onWriteReady();
+        }
+        return Void();
+    }
+
+    Return<void> onDrainReady()  override {
+        sp<StreamOutHalHidl> stream = mStream.promote();
+        if (stream != 0) {
+            stream->onDrainReady();
+        }
+        return Void();
+    }
+
+    Return<void> onError()  override {
+        sp<StreamOutHalHidl> stream = mStream.promote();
+        if (stream != 0) {
+            stream->onError();
+        }
+        return Void();
+    }
+
+  private:
+    wp<StreamOutHalHidl> mStream;
+};
+
+}  // namespace
+
+StreamOutHalHidl::StreamOutHalHidl(const sp<IStreamOut>& stream)
+        : StreamHalHidl(stream.get()), mStream(stream), mEfGroup(nullptr),
+          mGetPresentationPositionNotSupported(false), mPPosFromWrite{ 0, OK, 0, { 0, 0 } } {
+}
+
+StreamOutHalHidl::~StreamOutHalHidl() {
+    if (mStream != 0) {
+        if (mCallback.unsafe_get()) {
+            processReturn("clearCallback", mStream->clearCallback());
+        }
+        processReturn("close", mStream->close());
+    }
+    mCallback.clear();
+    if (mEfGroup) {
+        EventFlag::deleteEventFlag(&mEfGroup);
+    }
+}
+
+status_t StreamOutHalHidl::getFrameSize(size_t *size) {
+    if (mStream == 0) return NO_INIT;
+    return processReturn("getFrameSize", mStream->getFrameSize(), size);
+}
+
+status_t StreamOutHalHidl::getLatency(uint32_t *latency) {
+    if (mStream == 0) return NO_INIT;
+    return processReturn("getLatency", mStream->getLatency(), latency);
+}
+
+status_t StreamOutHalHidl::setVolume(float left, float right) {
+    if (mStream == 0) return NO_INIT;
+    return processReturn("setVolume", mStream->setVolume(left, right));
+}
+
+status_t StreamOutHalHidl::write(const void *buffer, size_t bytes, size_t *written) {
+    if (mStream == 0) return NO_INIT;
+    *written = 0;
+
+    if (bytes == 0 && !mDataMQ) {
+        // Can't determine the size for the MQ buffer. Wait for a non-empty write request.
+        ALOGW_IF(mCallback.unsafe_get(), "First call to async write with 0 bytes");
+        return OK;
+    }
+
+    status_t status;
+    if (!mDataMQ && (status = prepareForWriting(bytes)) != OK) {
+        return status;
+    }
+
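+    // Submit only as much as currently fits into the data queue; the caller learns the amount
+    // actually consumed via *written.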
+    const size_t availBytes = mDataMQ->availableToWrite();
+    if (bytes > availBytes) { bytes = availBytes; }
+    if (!mDataMQ->write(static_cast<const uint8_t*>(buffer), bytes)) {
+        ALOGW("data message queue write failed");
+    }
+    mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY));
+
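+    // The HAL side reads the data and publishes a WriteStatus; waiting for NOT_FULL below
+    // therefore also waits for that status to become available.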
+    // TODO: Remove manual event flag handling once blocking MQ is implemented. b/33815422
+    uint32_t efState = 0;
+retry:
+    status_t ret = mEfGroup->wait(
+            static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL), &efState, NS_PER_SEC);
+    if (efState & static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL)) {
+        WriteStatus writeStatus =
+                { Result::NOT_INITIALIZED, 0, Result::NOT_INITIALIZED, 0, { 0, 0 } };
+        mStatusMQ->read(&writeStatus);
+        if (writeStatus.writeRetval == Result::OK) {
+            status = OK;
+            *written = writeStatus.written;
+            mPPosFromWrite.status = processReturn(
+                    "get_presentation_position", writeStatus.presentationPositionRetval);
+            if (mPPosFromWrite.status == OK) {
+                mPPosFromWrite.frames = writeStatus.frames;
+                mPPosFromWrite.ts.tv_sec = writeStatus.timeStamp.tvSec;
+                mPPosFromWrite.ts.tv_nsec = writeStatus.timeStamp.tvNSec;
+            }
+            mPPosFromWrite.obtained = getCurrentTimeMs();
+        } else {
+            status = processReturn("write", writeStatus.writeRetval);
+        }
+        return status;
+    }
+    if (ret == -EAGAIN) {
+        // This normally retries no more than once.
+        goto retry;
+    }
+    return ret;
+}
+
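+// Note: despite its name, this returns the monotonic clock value in microseconds; the 1000-unit
+// threshold in getPresentationPosition() thus corresponds to the "1 ms" mentioned there.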
+uint64_t StreamOutHalHidl::getCurrentTimeMs() {
+    struct timespec timeNow;
+    clock_gettime(CLOCK_MONOTONIC, &timeNow);
+    return timeNow.tv_sec * 1000000 + timeNow.tv_nsec / 1000;
+}
+
+status_t StreamOutHalHidl::prepareForWriting(size_t bufferSize) {
+    std::unique_ptr<DataMQ> tempDataMQ;
+    std::unique_ptr<StatusMQ> tempStatusMQ;
+    Result retval;
+    Return<void> ret = mStream->prepareForWriting(
+            1, bufferSize, ThreadPriority(mHalThreadPriority),
+            [&](Result r,
+                    const MQDescriptorSync<uint8_t>& dataMQ,
+                    const MQDescriptorSync<WriteStatus>& statusMQ) {
+                retval = r;
+                if (retval == Result::OK) {
+                    tempDataMQ.reset(new DataMQ(dataMQ));
+                    tempStatusMQ.reset(new StatusMQ(statusMQ));
+                    if (tempDataMQ->isValid() && tempDataMQ->getEventFlagWord()) {
+                        EventFlag::createEventFlag(tempDataMQ->getEventFlagWord(), &mEfGroup);
+                    }
+                }
+            });
+    if (!ret.isOk() || retval != Result::OK) {
+        return processReturn("prepareForWriting", ret, retval);
+    }
+    if (!tempDataMQ || !tempDataMQ->isValid() || !tempStatusMQ || !tempStatusMQ->isValid()
+        || !mEfGroup) {
+        ALOGE_IF(!tempDataMQ, "Failed to obtain data message queue for writing");
+        ALOGE_IF(tempDataMQ && !tempDataMQ->isValid(), "Data message queue for writing is invalid");
+        ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for writing");
+        ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
+                "Status message queue for writing is invalid");
+        ALOGE_IF(!mEfGroup, "Event flag creation for writing failed");
+        return NO_INIT;
+    }
+    mDataMQ = std::move(tempDataMQ);
+    mStatusMQ = std::move(tempStatusMQ);
+    return OK;
+}
+
+status_t StreamOutHalHidl::getRenderPosition(uint32_t *dspFrames) {
+    if (mStream == 0) return NO_INIT;
+    Result retval;
+    Return<void> ret = mStream->getRenderPosition(
+            [&](Result r, uint32_t d) {
+                retval = r;
+                if (retval == Result::OK) {
+                    *dspFrames = d;
+                }
+            });
+    return processReturn("getRenderPosition", ret, retval);
+}
+
+status_t StreamOutHalHidl::getNextWriteTimestamp(int64_t *timestamp) {
+    if (mStream == 0) return NO_INIT;
+    Result retval;
+    Return<void> ret = mStream->getNextWriteTimestamp(
+            [&](Result r, int64_t t) {
+                retval = r;
+                if (retval == Result::OK) {
+                    *timestamp = t;
+                }
+            });
+    return processReturn("getRenderPosition", ret, retval);
+}
+
+status_t StreamOutHalHidl::setCallback(wp<StreamOutHalInterfaceCallback> callback) {
+    if (mStream == 0) return NO_INIT;
+    status_t status = processReturn(
+            "setCallback", mStream->setCallback(new StreamOutCallback(this)));
+    if (status == OK) {
+        mCallback = callback;
+    }
+    return status;
+}
+
+status_t StreamOutHalHidl::supportsPauseAndResume(bool *supportsPause, bool *supportsResume) {
+    if (mStream == 0) return NO_INIT;
+    Return<void> ret = mStream->supportsPauseAndResume(
+            [&](bool p, bool r) {
+                *supportsPause = p;
+                *supportsResume = r;
+            });
+    return processReturn("supportsPauseAndResume", ret);
+}
+
+status_t StreamOutHalHidl::pause() {
+    if (mStream == 0) return NO_INIT;
+    return processReturn("pause", mStream->pause());
+}
+
+status_t StreamOutHalHidl::resume() {
+    if (mStream == 0) return NO_INIT;
+    return processReturn("pause", mStream->resume());
+}
+
+status_t StreamOutHalHidl::supportsDrain(bool *supportsDrain) {
+    if (mStream == 0) return NO_INIT;
+    return processReturn("supportsDrain", mStream->supportsDrain(), supportsDrain);
+}
+
+status_t StreamOutHalHidl::drain(bool earlyNotify) {
+    if (mStream == 0) return NO_INIT;
+    return processReturn(
+            "drain", mStream->drain(earlyNotify ? AudioDrain::EARLY_NOTIFY : AudioDrain::ALL));
+}
+
+status_t StreamOutHalHidl::flush() {
+    if (mStream == 0) return NO_INIT;
+    return processReturn("pause", mStream->flush());
+}
+
+status_t StreamOutHalHidl::getPresentationPosition(uint64_t *frames, struct timespec *timestamp) {
+    if (mStream == 0) return NO_INIT;
+    if (mGetPresentationPositionNotSupported) return INVALID_OPERATION;
+    if (getCurrentTimeMs() - mPPosFromWrite.obtained <= 1000) {
+        // No more than 1 ms passed since the last write, use cached result to avoid binder calls.
+        if (mPPosFromWrite.status == OK) {
+            *frames = mPPosFromWrite.frames;
+            timestamp->tv_sec = mPPosFromWrite.ts.tv_sec;
+            timestamp->tv_nsec = mPPosFromWrite.ts.tv_nsec;
+        }
+        return mPPosFromWrite.status;
+    }
+
+    Result retval;
+    Return<void> ret = mStream->getPresentationPosition(
+            [&](Result r, uint64_t hidlFrames, const TimeSpec& hidlTimeStamp) {
+                retval = r;
+                if (retval == Result::OK) {
+                    *frames = hidlFrames;
+                    timestamp->tv_sec = hidlTimeStamp.tvSec;
+                    timestamp->tv_nsec = hidlTimeStamp.tvNSec;
+                }
+            });
+    if (ret.isOk() && retval == Result::NOT_SUPPORTED) {
+        mGetPresentationPositionNotSupported = true;
+    }
+    return processReturn("getPresentationPosition", ret, retval);
+}
+
+void StreamOutHalHidl::onWriteReady() {
+    sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
+    if (callback == 0) return;
+    ALOGV("asyncCallback onWriteReady");
+    callback->onWriteReady();
+}
+
+void StreamOutHalHidl::onDrainReady() {
+    sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
+    if (callback == 0) return;
+    ALOGV("asyncCallback onDrainReady");
+    callback->onDrainReady();
+}
+
+void StreamOutHalHidl::onError() {
+    sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
+    if (callback == 0) return;
+    ALOGV("asyncCallback onError");
+    callback->onError();
+}
+
+
+StreamInHalHidl::StreamInHalHidl(const sp<IStreamIn>& stream)
+        : StreamHalHidl(stream.get()), mStream(stream), mEfGroup(nullptr) {
+}
+
+StreamInHalHidl::~StreamInHalHidl() {
+    if (mStream != 0) {
+        processReturn("close", mStream->close());
+    }
+    if (mEfGroup) {
+        EventFlag::deleteEventFlag(&mEfGroup);
+    }
+}
+
+status_t StreamInHalHidl::getFrameSize(size_t *size) {
+    if (mStream == 0) return NO_INIT;
+    return processReturn("getFrameSize", mStream->getFrameSize(), size);
+}
+
+status_t StreamInHalHidl::setGain(float gain) {
+    if (mStream == 0) return NO_INIT;
+    return processReturn("setGain", mStream->setGain(gain));
+}
+
+status_t StreamInHalHidl::read(void *buffer, size_t bytes, size_t *read) {
+    if (mStream == 0) return NO_INIT;
+    *read = 0;
+
+    if (bytes == 0 && !mDataMQ) {
+        // Can't determine the size for the MQ buffer. Wait for a non-empty read request.
+        return OK;
+    }
+
+    status_t status;
+    if (!mDataMQ) {
+        if ((status = prepareForReading(bytes)) != OK) return status;
+        // Trigger the first read.
+        mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL));
+    }
+
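+    // The HAL side fills the data queue and publishes a ReadStatus; waiting for NOT_EMPTY below
+    // signals that both the data and the status are available.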
+    // TODO: Remove manual event flag handling once blocking MQ is implemented. b/33815422
+    uint32_t efState = 0;
+retry:
+    status_t ret = mEfGroup->wait(
+            static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY), &efState, NS_PER_SEC);
+    if (efState & static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY)) {
+        ReadStatus readStatus = { Result::NOT_INITIALIZED, 0 };
+        const size_t availToRead = mDataMQ->availableToRead();
+        if (bytes > availToRead) { bytes = availToRead; }
+        mDataMQ->read(static_cast<uint8_t*>(buffer), bytes);
+        mStatusMQ->read(&readStatus);
+        mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL));
+        if (readStatus.retval == Result::OK) {
+            ALOGW_IF(availToRead != readStatus.read,
+                    "HAL read report inconsistent: mq = %d, status = %d",
+                    (int32_t)availToRead, (int32_t)readStatus.read);
+            status = OK;
+            *read = readStatus.read;
+        } else {
+            status = processReturn("read", readStatus.retval);
+        }
+        return status;
+    }
+    if (ret == -EAGAIN) {
+        // This normally retries no more than once.
+        goto retry;
+    }
+    return ret;
+}
+
+status_t StreamInHalHidl::prepareForReading(size_t bufferSize) {
+    std::unique_ptr<DataMQ> tempDataMQ;
+    std::unique_ptr<StatusMQ> tempStatusMQ;
+    Result retval;
+    Return<void> ret = mStream->prepareForReading(
+            1, bufferSize, ThreadPriority(mHalThreadPriority),
+            [&](Result r,
+                    const MQDescriptorSync<uint8_t>& dataMQ,
+                    const MQDescriptorSync<ReadStatus>& statusMQ) {
+                retval = r;
+                if (retval == Result::OK) {
+                    tempDataMQ.reset(new DataMQ(dataMQ));
+                    tempStatusMQ.reset(new StatusMQ(statusMQ));
+                    if (tempDataMQ->isValid() && tempDataMQ->getEventFlagWord()) {
+                        EventFlag::createEventFlag(tempDataMQ->getEventFlagWord(), &mEfGroup);
+                    }
+                }
+            });
+    if (!ret.isOk() || retval != Result::OK) {
+        return processReturn("prepareForReading", ret, retval);
+    }
+    if (!tempDataMQ || !tempDataMQ->isValid() || !tempStatusMQ || !tempStatusMQ->isValid()
+        || !mEfGroup) {
+        ALOGE_IF(!tempDataMQ, "Failed to obtain data message queue for reading");
+        ALOGE_IF(tempDataMQ && !tempDataMQ->isValid(), "Data message queue for reading is invalid");
+        ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for reading");
+        ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
+                "Status message queue for reading is invalid");
+        ALOGE_IF(!mEfGroup, "Event flag creation for reading failed");
+        return NO_INIT;
+    }
+    mDataMQ = std::move(tempDataMQ);
+    mStatusMQ = std::move(tempStatusMQ);
+    return OK;
+}
+
+status_t StreamInHalHidl::getInputFramesLost(uint32_t *framesLost) {
+    if (mStream == 0) return NO_INIT;
+    return processReturn("getInputFramesLost", mStream->getInputFramesLost(), framesLost);
+}
+
+status_t StreamInHalHidl::getCapturePosition(int64_t *frames, int64_t *time) {
+    if (mStream == 0) return NO_INIT;
+    Result retval;
+    Return<void> ret = mStream->getCapturePosition(
+            [&](Result r, uint64_t hidlFrames, uint64_t hidlTime) {
+                retval = r;
+                if (retval == Result::OK) {
+                    *frames = hidlFrames;
+                    *time = hidlTime;
+                }
+            });
+    return processReturn("getCapturePosition", ret, retval);
+}
+
+} // namespace android
diff --git a/media/libaudiohal/StreamHalHidl.h b/media/libaudiohal/StreamHalHidl.h
new file mode 100644
index 0000000..8b5867e
--- /dev/null
+++ b/media/libaudiohal/StreamHalHidl.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_STREAM_HAL_HIDL_H
+#define ANDROID_HARDWARE_STREAM_HAL_HIDL_H
+
+#include <android/hardware/audio/2.0/IStream.h>
+#include <android/hardware/audio/2.0/IStreamIn.h>
+#include <android/hardware/audio/2.0/IStreamOut.h>
+#include <fmq/EventFlag.h>
+#include <fmq/MessageQueue.h>
+#include <media/audiohal/StreamHalInterface.h>
+
+#include "ConversionHelperHidl.h"
+
+using ::android::hardware::audio::V2_0::IStream;
+using ::android::hardware::audio::V2_0::IStreamIn;
+using ::android::hardware::audio::V2_0::IStreamOut;
+using ::android::hardware::EventFlag;
+using ::android::hardware::MessageQueue;
+using ::android::hardware::Return;
+using ReadStatus = ::android::hardware::audio::V2_0::IStreamIn::ReadStatus;
+using WriteStatus = ::android::hardware::audio::V2_0::IStreamOut::WriteStatus;
+
+namespace android {
+
+class DeviceHalHidl;
+
+class StreamHalHidl : public virtual StreamHalInterface, public ConversionHelperHidl
+{
+  public:
+    // Return the sampling rate in Hz, e.g. 44100.
+    virtual status_t getSampleRate(uint32_t *rate);
+
+    // Return the size of the input/output buffer in bytes for this stream, e.g. 4800.
+    virtual status_t getBufferSize(size_t *size);
+
+    // Return the channel mask.
+    virtual status_t getChannelMask(audio_channel_mask_t *mask);
+
+    // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
+    virtual status_t getFormat(audio_format_t *format);
+
+    // Convenience method.
+    virtual status_t getAudioProperties(
+            uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format);
+
+    // Set audio stream parameters.
+    virtual status_t setParameters(const String8& kvPairs);
+
+    // Get audio stream parameters.
+    virtual status_t getParameters(const String8& keys, String8 *values);
+
+    // Add or remove the effect on the stream.
+    virtual status_t addEffect(sp<EffectHalInterface> effect);
+    virtual status_t removeEffect(sp<EffectHalInterface> effect);
+
+    // Put the audio hardware input/output into standby mode.
+    virtual status_t standby();
+
+    virtual status_t dump(int fd);
+
+    // Start a stream operating in mmap mode.
+    virtual status_t start();
+
+    // Stop a stream operating in mmap mode.
+    virtual status_t stop();
+
+    // Retrieve information on the data buffer in mmap mode.
+    virtual status_t createMmapBuffer(int32_t minSizeFrames,
+                                      struct audio_mmap_buffer_info *info);
+
+    // Get current read/write position in the mmap buffer
+    virtual status_t getMmapPosition(struct audio_mmap_position *position);
+
+    // Set the priority of the thread that interacts with the HAL
+    // (must match the priority of the audioflinger's thread that calls 'read' / 'write')
+    virtual status_t setHalThreadPriority(int priority);
+
+  protected:
+    // Subclasses can not be constructed directly by clients.
+    explicit StreamHalHidl(IStream *stream);
+
+    // The destructor automatically closes the stream.
+    virtual ~StreamHalHidl();
+
+    int mHalThreadPriority;
+
+  private:
+    IStream *mStream;
+};
+
+class StreamOutHalHidl : public StreamOutHalInterface, public StreamHalHidl {
+  public:
+    // Return the frame size (number of bytes per audio frame) of a stream.
+    virtual status_t getFrameSize(size_t *size);
+
+    // Return the audio hardware driver estimated latency in milliseconds.
+    virtual status_t getLatency(uint32_t *latency);
+
+    // Use this method in situations where audio mixing is done in the hardware.
+    virtual status_t setVolume(float left, float right);
+
+    // Write audio buffer to driver.
+    virtual status_t write(const void *buffer, size_t bytes, size_t *written);
+
+    // Return the number of audio frames written by the audio dsp to DAC since
+    // the output has exited standby.
+    virtual status_t getRenderPosition(uint32_t *dspFrames);
+
+    // Get the local time at which the next write to the audio driver will be presented.
+    virtual status_t getNextWriteTimestamp(int64_t *timestamp);
+
+    // Set the callback for notifying completion of non-blocking write and drain.
+    virtual status_t setCallback(wp<StreamOutHalInterfaceCallback> callback);
+
+    // Returns whether pause and resume operations are supported.
+    virtual status_t supportsPauseAndResume(bool *supportsPause, bool *supportsResume);
+
+    // Notifies the audio driver to pause playback.
+    virtual status_t pause();
+
+    // Notifies the audio driver to resume playback following a pause.
+    virtual status_t resume();
+
+    // Returns whether drain operation is supported.
+    virtual status_t supportsDrain(bool *supportsDrain);
+
+    // Requests notification when data buffered by the driver/hardware has been played.
+    virtual status_t drain(bool earlyNotify);
+
+    // Notifies the audio driver to flush the queued data.
+    virtual status_t flush();
+
+    // Return a recent count of the number of audio frames presented to an external observer.
+    virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
+
+    // Methods used by StreamOutCallback (HIDL).
+    void onWriteReady();
+    void onDrainReady();
+    void onError();
+
+  private:
+    friend class DeviceHalHidl;
+    typedef MessageQueue<uint8_t, hardware::kSynchronizedReadWrite> DataMQ;
+    typedef MessageQueue<WriteStatus, hardware::kSynchronizedReadWrite> StatusMQ;
+
+    wp<StreamOutHalInterfaceCallback> mCallback;
+    sp<IStreamOut> mStream;
+    std::unique_ptr<DataMQ> mDataMQ;
+    std::unique_ptr<StatusMQ> mStatusMQ;
+    EventFlag* mEfGroup;
+    bool mGetPresentationPositionNotSupported;
+    struct {
+        uint64_t obtained;
+        status_t status;
+        uint64_t frames;
+        struct timespec ts;
+    } mPPosFromWrite;
+
+    // Can not be constructed directly by clients.
+    StreamOutHalHidl(const sp<IStreamOut>& stream);
+
+    virtual ~StreamOutHalHidl();
+
+    uint64_t getCurrentTimeMs();
+    status_t prepareForWriting(size_t bufferSize);
+};
+
+class StreamInHalHidl : public StreamInHalInterface, public StreamHalHidl {
+  public:
+    // Return the frame size (number of bytes per audio frame) of a stream.
+    virtual status_t getFrameSize(size_t *size);
+
+    // Set the input gain for the audio driver.
+    virtual status_t setGain(float gain);
+
+    // Read audio buffer in from driver.
+    virtual status_t read(void *buffer, size_t bytes, size_t *read);
+
+    // Return the number of input frames lost in the audio driver.
+    virtual status_t getInputFramesLost(uint32_t *framesLost);
+
+    // Return a recent count of the number of audio frames received and
+    // the clock time associated with that frame count.
+    virtual status_t getCapturePosition(int64_t *frames, int64_t *time);
+
+  private:
+    friend class DeviceHalHidl;
+    typedef MessageQueue<uint8_t, hardware::kSynchronizedReadWrite> DataMQ;
+    typedef MessageQueue<ReadStatus, hardware::kSynchronizedReadWrite> StatusMQ;
+
+    sp<IStreamIn> mStream;
+    std::unique_ptr<DataMQ> mDataMQ;
+    std::unique_ptr<StatusMQ> mStatusMQ;
+    EventFlag* mEfGroup;
+
+    // Can not be constructed directly by clients.
+    StreamInHalHidl(const sp<IStreamIn>& stream);
+
+    virtual ~StreamInHalHidl();
+
+    status_t prepareForReading(size_t bufferSize);
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_STREAM_HAL_HIDL_H
diff --git a/media/libaudiohal/StreamHalLocal.cpp b/media/libaudiohal/StreamHalLocal.cpp
new file mode 100644
index 0000000..b25e518
--- /dev/null
+++ b/media/libaudiohal/StreamHalLocal.cpp
@@ -0,0 +1,305 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "StreamHalLocal"
+//#define LOG_NDEBUG 0
+
+#include <hardware/audio.h>
+#include <utils/Log.h>
+
+#include "DeviceHalLocal.h"
+#include "EffectHalLocal.h"
+#include "StreamHalLocal.h"
+
+namespace android {
+
+StreamHalLocal::StreamHalLocal(audio_stream_t *stream, sp<DeviceHalLocal> device)
+        : mDevice(device), mStream(stream) {
+}
+
+StreamHalLocal::~StreamHalLocal() {
+    mStream = 0;
+    mDevice.clear();
+}
+
+status_t StreamHalLocal::getSampleRate(uint32_t *rate) {
+    *rate = mStream->get_sample_rate(mStream);
+    return OK;
+}
+
+status_t StreamHalLocal::getBufferSize(size_t *size) {
+    *size = mStream->get_buffer_size(mStream);
+    return OK;
+}
+
+status_t StreamHalLocal::getChannelMask(audio_channel_mask_t *mask) {
+    *mask = mStream->get_channels(mStream);
+    return OK;
+}
+
+status_t StreamHalLocal::getFormat(audio_format_t *format) {
+    *format = mStream->get_format(mStream);
+    return OK;
+}
+
+status_t StreamHalLocal::getAudioProperties(
+        uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
+    *sampleRate = mStream->get_sample_rate(mStream);
+    *mask = mStream->get_channels(mStream);
+    *format = mStream->get_format(mStream);
+    return OK;
+}
+
+status_t StreamHalLocal::setParameters(const String8& kvPairs) {
+    return mStream->set_parameters(mStream, kvPairs.string());
+}
+
+status_t StreamHalLocal::getParameters(const String8& keys, String8 *values) {
+    char *halValues = mStream->get_parameters(mStream, keys.string());
+    if (halValues != NULL) {
+        values->setTo(halValues);
+        free(halValues);
+    } else {
+        values->clear();
+    }
+    return OK;
+}
+
+status_t StreamHalLocal::addEffect(sp<EffectHalInterface> effect) {
+    return mStream->add_audio_effect(mStream,
+            static_cast<EffectHalLocal*>(effect.get())->handle());
+}
+
+status_t StreamHalLocal::removeEffect(sp<EffectHalInterface> effect) {
+    return mStream->remove_audio_effect(mStream,
+            static_cast<EffectHalLocal*>(effect.get())->handle());
+}
+
+status_t StreamHalLocal::standby() {
+    return mStream->standby(mStream);
+}
+
+status_t StreamHalLocal::dump(int fd) {
+    return mStream->dump(mStream, fd);
+}
+
+status_t StreamHalLocal::setHalThreadPriority(int) {
+    // Nothing to do: the local HAL is executed by audioflinger directly
+    // on the same thread.
+    return OK;
+}
+
+StreamOutHalLocal::StreamOutHalLocal(audio_stream_out_t *stream, sp<DeviceHalLocal> device)
+        : StreamHalLocal(&stream->common, device), mStream(stream) {
+}
+
+StreamOutHalLocal::~StreamOutHalLocal() {
+    mCallback.clear();
+    mDevice->closeOutputStream(mStream);
+    mStream = 0;
+}
+
+status_t StreamOutHalLocal::getFrameSize(size_t *size) {
+    *size = audio_stream_out_frame_size(mStream);
+    return OK;
+}
+
+status_t StreamOutHalLocal::getLatency(uint32_t *latency) {
+    *latency = mStream->get_latency(mStream);
+    return OK;
+}
+
+status_t StreamOutHalLocal::setVolume(float left, float right) {
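+    // set_volume is an optional entry point in the legacy HAL; report INVALID_OPERATION when absent.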
+    if (mStream->set_volume == NULL) return INVALID_OPERATION;
+    return mStream->set_volume(mStream, left, right);
+}
+
+status_t StreamOutHalLocal::write(const void *buffer, size_t bytes, size_t *written) {
+    ssize_t writeResult = mStream->write(mStream, buffer, bytes);
+    if (writeResult > 0) {
+        *written = writeResult;
+        return OK;
+    } else {
+        *written = 0;
+        return writeResult;
+    }
+}
+
+status_t StreamOutHalLocal::getRenderPosition(uint32_t *dspFrames) {
+    return mStream->get_render_position(mStream, dspFrames);
+}
+
+status_t StreamOutHalLocal::getNextWriteTimestamp(int64_t *timestamp) {
+    if (mStream->get_next_write_timestamp == NULL) return INVALID_OPERATION;
+    return mStream->get_next_write_timestamp(mStream, timestamp);
+}
+
+status_t StreamOutHalLocal::setCallback(wp<StreamOutHalInterfaceCallback> callback) {
+    if (mStream->set_callback == NULL) return INVALID_OPERATION;
+    status_t result = mStream->set_callback(mStream, StreamOutHalLocal::asyncCallback, this);
+    if (result == OK) {
+        mCallback = callback;
+    }
+    return result;
+}
+
+// static
+int StreamOutHalLocal::asyncCallback(stream_callback_event_t event, void*, void *cookie) {
+    // We act as if we had given a wp<StreamOutHalLocal> to the HAL. This way we correctly handle
+    // the case when the callback is invoked while StreamOutHalLocal's destructor is already
+    // running, because the destructor is only invoked after the refcount has been atomically
+    // decremented.
+    wp<StreamOutHalLocal> weakSelf(reinterpret_cast<StreamOutHalLocal*>(cookie));
+    sp<StreamOutHalLocal> self = weakSelf.promote();
+    if (self == 0) return 0;
+    sp<StreamOutHalInterfaceCallback> callback = self->mCallback.promote();
+    if (callback == 0) return 0;
+    ALOGV("asyncCallback() event %d", event);
+    switch (event) {
+        case STREAM_CBK_EVENT_WRITE_READY:
+            callback->onWriteReady();
+            break;
+        case STREAM_CBK_EVENT_DRAIN_READY:
+            callback->onDrainReady();
+            break;
+        case STREAM_CBK_EVENT_ERROR:
+            callback->onError();
+            break;
+        default:
+            ALOGW("asyncCallback() unknown event %d", event);
+            break;
+    }
+    return 0;
+}
+
+status_t StreamOutHalLocal::supportsPauseAndResume(bool *supportsPause, bool *supportsResume) {
+    *supportsPause = mStream->pause != NULL;
+    *supportsResume = mStream->resume != NULL;
+    return OK;
+}
+
+status_t StreamOutHalLocal::pause() {
+    if (mStream->pause == NULL) return INVALID_OPERATION;
+    return mStream->pause(mStream);
+}
+
+status_t StreamOutHalLocal::resume() {
+    if (mStream->resume == NULL) return INVALID_OPERATION;
+    return mStream->resume(mStream);
+}
+
+status_t StreamOutHalLocal::supportsDrain(bool *supportsDrain) {
+    *supportsDrain = mStream->drain != NULL;
+    return OK;
+}
+
+status_t StreamOutHalLocal::drain(bool earlyNotify) {
+    if (mStream->drain == NULL) return INVALID_OPERATION;
+    return mStream->drain(mStream, earlyNotify ? AUDIO_DRAIN_EARLY_NOTIFY : AUDIO_DRAIN_ALL);
+}
+
+status_t StreamOutHalLocal::flush() {
+    if (mStream->flush == NULL) return INVALID_OPERATION;
+    return mStream->flush(mStream);
+}
+
+status_t StreamOutHalLocal::getPresentationPosition(uint64_t *frames, struct timespec *timestamp) {
+    if (mStream->get_presentation_position == NULL) return INVALID_OPERATION;
+    return mStream->get_presentation_position(mStream, frames, timestamp);
+}
+
+status_t StreamOutHalLocal::start() {
+    if (mStream->start == NULL) return INVALID_OPERATION;
+    return mStream->start(mStream);
+}
+
+status_t StreamOutHalLocal::stop() {
+    if (mStream->stop == NULL) return INVALID_OPERATION;
+    return mStream->stop(mStream);
+}
+
+status_t StreamOutHalLocal::createMmapBuffer(int32_t minSizeFrames,
+                                  struct audio_mmap_buffer_info *info) {
+    if (mStream->create_mmap_buffer == NULL) return INVALID_OPERATION;
+    return mStream->create_mmap_buffer(mStream, minSizeFrames, info);
+}
+
+status_t StreamOutHalLocal::getMmapPosition(struct audio_mmap_position *position) {
+    if (mStream->get_mmap_position == NULL) return INVALID_OPERATION;
+    return mStream->get_mmap_position(mStream, position);
+}
+
+StreamInHalLocal::StreamInHalLocal(audio_stream_in_t *stream, sp<DeviceHalLocal> device)
+        : StreamHalLocal(&stream->common, device), mStream(stream) {
+}
+
+StreamInHalLocal::~StreamInHalLocal() {
+    mDevice->closeInputStream(mStream);
+    mStream = 0;
+}
+
+status_t StreamInHalLocal::getFrameSize(size_t *size) {
+    *size = audio_stream_in_frame_size(mStream);
+    return OK;
+}
+
+status_t StreamInHalLocal::setGain(float gain) {
+    return mStream->set_gain(mStream, gain);
+}
+
+status_t StreamInHalLocal::read(void *buffer, size_t bytes, size_t *read) {
+    ssize_t readResult = mStream->read(mStream, buffer, bytes);
+    if (readResult > 0) {
+        *read = readResult;
+        return OK;
+    } else {
+        *read = 0;
+        return readResult;
+    }
+}
+
+status_t StreamInHalLocal::getInputFramesLost(uint32_t *framesLost) {
+    *framesLost = mStream->get_input_frames_lost(mStream);
+    return OK;
+}
+
+status_t StreamInHalLocal::getCapturePosition(int64_t *frames, int64_t *time) {
+    if (mStream->get_capture_position == NULL) return INVALID_OPERATION;
+    return mStream->get_capture_position(mStream, frames, time);
+}
+
+status_t StreamInHalLocal::start() {
+    if (mStream->start == NULL) return INVALID_OPERATION;
+    return mStream->start(mStream);
+}
+
+status_t StreamInHalLocal::stop() {
+    if (mStream->stop == NULL) return INVALID_OPERATION;
+    return mStream->stop(mStream);
+}
+
+status_t StreamInHalLocal::createMmapBuffer(int32_t minSizeFrames,
+                                  struct audio_mmap_buffer_info *info) {
+    if (mStream->create_mmap_buffer == NULL) return INVALID_OPERATION;
+    return mStream->create_mmap_buffer(mStream, minSizeFrames, info);
+}
+
+status_t StreamInHalLocal::getMmapPosition(struct audio_mmap_position *position) {
+    if (mStream->get_mmap_position == NULL) return INVALID_OPERATION;
+    return mStream->get_mmap_position(mStream, position);
+}
+
+} // namespace android
diff --git a/media/libaudiohal/StreamHalLocal.h b/media/libaudiohal/StreamHalLocal.h
new file mode 100644
index 0000000..8c96c1f
--- /dev/null
+++ b/media/libaudiohal/StreamHalLocal.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_STREAM_HAL_LOCAL_H
+#define ANDROID_HARDWARE_STREAM_HAL_LOCAL_H
+
+#include <media/audiohal/StreamHalInterface.h>
+
+namespace android {
+
+class DeviceHalLocal;
+
+class StreamHalLocal : public virtual StreamHalInterface
+{
+  public:
+    // Return the sampling rate in Hz, e.g. 44100.
+    virtual status_t getSampleRate(uint32_t *rate);
+
+    // Return the size of the input/output buffer in bytes for this stream, e.g. 4800.
+    virtual status_t getBufferSize(size_t *size);
+
+    // Return the channel mask.
+    virtual status_t getChannelMask(audio_channel_mask_t *mask);
+
+    // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
+    virtual status_t getFormat(audio_format_t *format);
+
+    // Convenience method.
+    virtual status_t getAudioProperties(
+            uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format);
+
+    // Set audio stream parameters.
+    virtual status_t setParameters(const String8& kvPairs);
+
+    // Get audio stream parameters.
+    virtual status_t getParameters(const String8& keys, String8 *values);
+
+    // Add or remove the effect on the stream.
+    virtual status_t addEffect(sp<EffectHalInterface> effect);
+    virtual status_t removeEffect(sp<EffectHalInterface> effect);
+
+    // Put the audio hardware input/output into standby mode.
+    virtual status_t standby();
+
+    virtual status_t dump(int fd);
+
+    // Start a stream operating in mmap mode.
+    virtual status_t start() = 0;
+
+    // Stop a stream operating in mmap mode.
+    virtual status_t stop() = 0;
+
+    // Retrieve information on the data buffer in mmap mode.
+    virtual status_t createMmapBuffer(int32_t minSizeFrames,
+                                      struct audio_mmap_buffer_info *info) = 0;
+
+    // Get current read/write position in the mmap buffer
+    virtual status_t getMmapPosition(struct audio_mmap_position *position) = 0;
+
+    // Set the priority of the thread that interacts with the HAL
+    // (must match the priority of the audioflinger's thread that calls 'read' / 'write')
+    virtual status_t setHalThreadPriority(int priority);
+
+  protected:
+    // Subclasses can not be constructed directly by clients.
+    StreamHalLocal(audio_stream_t *stream, sp<DeviceHalLocal> device);
+
+    // The destructor automatically closes the stream.
+    virtual ~StreamHalLocal();
+
+    sp<DeviceHalLocal> mDevice;
+
+  private:
+    audio_stream_t *mStream;
+};
+
+class StreamOutHalLocal : public StreamOutHalInterface, public StreamHalLocal {
+  public:
+    // Return the frame size (number of bytes per audio frame) of a stream.
+    virtual status_t getFrameSize(size_t *size);
+
+    // Return the audio hardware driver estimated latency in milliseconds.
+    virtual status_t getLatency(uint32_t *latency);
+
+    // Use this method in situations where audio mixing is done in the hardware.
+    virtual status_t setVolume(float left, float right);
+
+    // Write audio buffer to driver.
+    virtual status_t write(const void *buffer, size_t bytes, size_t *written);
+
+    // Return the number of audio frames written by the audio dsp to DAC since
+    // the output has exited standby.
+    virtual status_t getRenderPosition(uint32_t *dspFrames);
+
+    // Get the local time at which the next write to the audio driver will be presented.
+    virtual status_t getNextWriteTimestamp(int64_t *timestamp);
+
+    // Set the callback for notifying completion of non-blocking write and drain.
+    virtual status_t setCallback(wp<StreamOutHalInterfaceCallback> callback);
+
+    // Returns whether pause and resume operations are supported.
+    virtual status_t supportsPauseAndResume(bool *supportsPause, bool *supportsResume);
+
+    // Notifies the audio driver to pause playback.
+    virtual status_t pause();
+
+    // Notifies the audio driver to resume playback following a pause.
+    virtual status_t resume();
+
+    // Returns whether drain operation is supported.
+    virtual status_t supportsDrain(bool *supportsDrain);
+
+    // Requests notification when data buffered by the driver/hardware has been played.
+    virtual status_t drain(bool earlyNotify);
+
+    // Notifies the audio driver to flush the queued data.
+    virtual status_t flush();
+
+    // Return a recent count of the number of audio frames presented to an external observer.
+    virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
+
+    // Start a stream operating in mmap mode.
+    virtual status_t start();
+
+    // Stop a stream operating in mmap mode.
+    virtual status_t stop();
+
+    // Retrieve information on the data buffer in mmap mode.
+    virtual status_t createMmapBuffer(int32_t minSizeFrames,
+                                      struct audio_mmap_buffer_info *info);
+
+    // Get current read/write position in the mmap buffer
+    virtual status_t getMmapPosition(struct audio_mmap_position *position);
+
+  private:
+    audio_stream_out_t *mStream;
+    wp<StreamOutHalInterfaceCallback> mCallback;
+
+    friend class DeviceHalLocal;
+
+    // Can not be constructed directly by clients.
+    StreamOutHalLocal(audio_stream_out_t *stream, sp<DeviceHalLocal> device);
+
+    virtual ~StreamOutHalLocal();
+
+    static int asyncCallback(stream_callback_event_t event, void *param, void *cookie);
+};
+
+class StreamInHalLocal : public StreamInHalInterface, public StreamHalLocal {
+  public:
+    // Return the frame size (number of bytes per audio frame) of a stream.
+    virtual status_t getFrameSize(size_t *size);
+
+    // Set the input gain for the audio driver.
+    virtual status_t setGain(float gain);
+
+    // Read audio buffer in from driver.
+    virtual status_t read(void *buffer, size_t bytes, size_t *read);
+
+    // Return the amount of input frames lost in the audio driver.
+    virtual status_t getInputFramesLost(uint32_t *framesLost);
+
+    // Return a recent count of the number of audio frames received and
+    // the clock time associated with that frame count.
+    virtual status_t getCapturePosition(int64_t *frames, int64_t *time);
+
+    // Start a stream operating in mmap mode.
+    virtual status_t start();
+
+    // Stop a stream operating in mmap mode.
+    virtual status_t stop();
+
+    // Retrieve information on the data buffer in mmap mode.
+    virtual status_t createMmapBuffer(int32_t minSizeFrames,
+                                      struct audio_mmap_buffer_info *info);
+
+    // Get current read/write position in the mmap buffer
+    virtual status_t getMmapPosition(struct audio_mmap_position *position);
+
+  private:
+    audio_stream_in_t *mStream;
+
+    friend class DeviceHalLocal;
+
+    // Can not be constructed directly by clients.
+    StreamInHalLocal(audio_stream_in_t *stream, sp<DeviceHalLocal> device);
+
+    virtual ~StreamInHalLocal();
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_STREAM_HAL_LOCAL_H
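The mmap entry points declared above (createMmapBuffer, start, getMmapPosition, stop) are driven in that order by the framework. Below is a minimal sketch of that sequence, assuming an already-open sp<StreamOutHalInterface> named `stream`; the helper function and its include set are illustrative, not taken from this change.

// Minimal sketch (not from this patch) of the mmap call sequence; include
// paths follow the audiohal convention used elsewhere in this change and
// may differ per branch.
#include <media/audiohal/StreamHalInterface.h>
#include <system/audio.h>
#include <utils/Errors.h>

using namespace android;

static status_t runMmapCycle(const sp<StreamOutHalInterface>& stream) {
    struct audio_mmap_buffer_info info;
    // Ask the HAL to map a shared buffer of at least 256 frames.
    status_t res = stream->createMmapBuffer(256 /* minSizeFrames */, &info);
    if (res != OK) return res;           // stream does not support mmap mode

    stream->start();                     // hardware starts consuming the buffer

    struct audio_mmap_position position;
    if (stream->getMmapPosition(&position) == OK) {
        // position.position_frames reports how far the hardware has advanced.
    }
    return stream->stop();               // stop before tearing the buffer down
}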
diff --git a/media/libaudioprocessing/Android.mk b/media/libaudioprocessing/Android.mk
new file mode 100644
index 0000000..b7ea99e
--- /dev/null
+++ b/media/libaudioprocessing/Android.mk
@@ -0,0 +1,36 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+    AudioMixer.cpp.arm \
+    AudioResampler.cpp.arm \
+    AudioResamplerCubic.cpp.arm \
+    AudioResamplerSinc.cpp.arm \
+    AudioResamplerDyn.cpp.arm \
+    BufferProviders.cpp \
+    RecordBufferConverter.cpp \
+
+LOCAL_C_INCLUDES := \
+    $(TOP) \
+    $(call include-path-for, audio-utils) \
+
+LOCAL_SHARED_LIBRARIES := \
+    libaudiohal \
+    libaudioutils \
+    libcutils \
+    liblog \
+    libnbaio \
+    libsonic \
+    libutils \
+
+LOCAL_MODULE := libaudioprocessing
+
+LOCAL_CFLAGS := -Werror -Wall
+
+# uncomment to disable NEON on architectures that actually do support NEON, for benchmarking
+#LOCAL_CFLAGS += -DUSE_NEON=false
+
+include $(BUILD_SHARED_LIBRARY)
+
+include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/services/audioflinger/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
similarity index 99%
rename from services/audioflinger/AudioMixer.cpp
rename to media/libaudioprocessing/AudioMixer.cpp
index 945f4b3..a7d9f0f 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -18,7 +18,6 @@
 #define LOG_TAG "AudioMixer"
 //#define LOG_NDEBUG 0
 
-#include "Configuration.h"
 #include <stdint.h>
 #include <string.h>
 #include <stdlib.h>
@@ -36,9 +35,9 @@
 
 #include <audio_utils/primitives.h>
 #include <audio_utils/format.h>
+#include <media/AudioMixer.h>
 
 #include "AudioMixerOps.h"
-#include "AudioMixer.h"
 
 // The FCC_2 macro refers to the Fixed Channel Count of 2 for the legacy integer mixer.
 #ifndef FCC_2
diff --git a/services/audioflinger/AudioMixerOps.h b/media/libaudioprocessing/AudioMixerOps.h
similarity index 100%
rename from services/audioflinger/AudioMixerOps.h
rename to media/libaudioprocessing/AudioMixerOps.h
diff --git a/services/audioflinger/AudioResampler.cpp b/media/libaudioprocessing/AudioResampler.cpp
similarity index 99%
rename from services/audioflinger/AudioResampler.cpp
rename to media/libaudioprocessing/AudioResampler.cpp
index 8b7259d..c761b38 100644
--- a/services/audioflinger/AudioResampler.cpp
+++ b/media/libaudioprocessing/AudioResampler.cpp
@@ -26,7 +26,7 @@
 #include <log/log.h>
 
 #include <audio_utils/primitives.h>
-#include "AudioResampler.h"
+#include <media/AudioResampler.h>
 #include "AudioResamplerSinc.h"
 #include "AudioResamplerCubic.h"
 #include "AudioResamplerDyn.h"
diff --git a/services/audioflinger/AudioResamplerCubic.cpp b/media/libaudioprocessing/AudioResamplerCubic.cpp
similarity index 99%
rename from services/audioflinger/AudioResamplerCubic.cpp
rename to media/libaudioprocessing/AudioResamplerCubic.cpp
index 9fb6699..9bcd8e1 100644
--- a/services/audioflinger/AudioResamplerCubic.cpp
+++ b/media/libaudioprocessing/AudioResamplerCubic.cpp
@@ -22,7 +22,6 @@
 
 #include <log/log.h>
 
-#include "AudioResampler.h"
 #include "AudioResamplerCubic.h"
 
 namespace android {
diff --git a/services/audioflinger/AudioResamplerCubic.h b/media/libaudioprocessing/AudioResamplerCubic.h
similarity index 98%
rename from services/audioflinger/AudioResamplerCubic.h
rename to media/libaudioprocessing/AudioResamplerCubic.h
index f218fd9..defaf33 100644
--- a/services/audioflinger/AudioResamplerCubic.h
+++ b/media/libaudioprocessing/AudioResamplerCubic.h
@@ -21,7 +21,7 @@
 #include <sys/types.h>
 #include <android/log.h>
 
-#include "AudioResampler.h"
+#include <media/AudioResampler.h>
 
 namespace android {
 // ----------------------------------------------------------------------------
diff --git a/services/audioflinger/AudioResamplerDyn.cpp b/media/libaudioprocessing/AudioResamplerDyn.cpp
similarity index 97%
rename from services/audioflinger/AudioResamplerDyn.cpp
rename to media/libaudioprocessing/AudioResamplerDyn.cpp
index 213cd1a..8f7b982 100644
--- a/services/audioflinger/AudioResamplerDyn.cpp
+++ b/media/libaudioprocessing/AudioResamplerDyn.cpp
@@ -150,6 +150,15 @@
 }
 
 template<typename TC, typename TI, typename TO>
+void AudioResamplerDyn<TC, TI, TO>::InBuffer::reset()
+{
+    // clear resampler state
+    if (mState != nullptr) {
+        memset(mState, 0, mStateCount * sizeof(TI));
+    }
+}
+
+template<typename TC, typename TI, typename TO>
 void AudioResamplerDyn<TC, TI, TO>::Constants::set(
         int L, int halfNumCoefs, int inSampleRate, int outSampleRate)
 {
@@ -529,6 +538,9 @@
             mBuffer.frameCount = inFrameCount;
             provider->getNextBuffer(&mBuffer);
             if (mBuffer.raw == NULL) {
+                // We are either at the end of playback or in an underrun situation.
+                // Reset buffer to prevent pop noise at the next buffer.
+                mInBuffer.reset();
                 goto resample_exit;
             }
             inFrameCount -= mBuffer.frameCount;
diff --git a/services/audioflinger/AudioResamplerDyn.h b/media/libaudioprocessing/AudioResamplerDyn.h
similarity index 98%
rename from services/audioflinger/AudioResamplerDyn.h
rename to media/libaudioprocessing/AudioResamplerDyn.h
index f8b8fa1..1840fc7 100644
--- a/services/audioflinger/AudioResamplerDyn.h
+++ b/media/libaudioprocessing/AudioResamplerDyn.h
@@ -21,7 +21,7 @@
 #include <sys/types.h>
 #include <android/log.h>
 
-#include "AudioResampler.h"
+#include <media/AudioResampler.h>
 
 namespace android {
 
@@ -96,6 +96,8 @@
         inline void readAdvance(TI*& impulse, const int halfNumCoefs,
                 const TI* const in, const size_t inputIndex);
 
+        void reset();
+
     private:
         // tuning parameter guidelines: 2 <= multiple <= 8
         static const int kStateSizeMultipleOfFilterLength = 4;
diff --git a/services/audioflinger/AudioResamplerFirGen.h b/media/libaudioprocessing/AudioResamplerFirGen.h
similarity index 100%
rename from services/audioflinger/AudioResamplerFirGen.h
rename to media/libaudioprocessing/AudioResamplerFirGen.h
diff --git a/services/audioflinger/AudioResamplerFirOps.h b/media/libaudioprocessing/AudioResamplerFirOps.h
similarity index 96%
rename from services/audioflinger/AudioResamplerFirOps.h
rename to media/libaudioprocessing/AudioResamplerFirOps.h
index 776903c..2e4cee3 100644
--- a/services/audioflinger/AudioResamplerFirOps.h
+++ b/media/libaudioprocessing/AudioResamplerFirOps.h
@@ -126,7 +126,7 @@
 static inline
 int32_t mulAddRL(int left, uint32_t inRL, int16_t v, int32_t a)
 {
-#if USE_INLINE_ASSEMBLY
+#if 0 // USE_INLINE_ASSEMBLY Seems to fail with Clang b/34110890
     int32_t out;
     if (left) {
         asm( "smlabb %[out], %[v], %[inRL], %[a] \n"
@@ -149,7 +149,7 @@
 static inline
 int32_t mulAddRL(int left, uint32_t inRL, int32_t v, int32_t a)
 {
-#if USE_INLINE_ASSEMBLY
+#if 0 // USE_INLINE_ASSEMBLY Seems to fail with Clang b/34110890
     int32_t out;
     if (left) {
         asm( "smlawb %[out], %[v], %[inRL], %[a] \n"
diff --git a/services/audioflinger/AudioResamplerFirProcess.h b/media/libaudioprocessing/AudioResamplerFirProcess.h
similarity index 100%
rename from services/audioflinger/AudioResamplerFirProcess.h
rename to media/libaudioprocessing/AudioResamplerFirProcess.h
diff --git a/services/audioflinger/AudioResamplerFirProcessNeon.h b/media/libaudioprocessing/AudioResamplerFirProcessNeon.h
similarity index 99%
rename from services/audioflinger/AudioResamplerFirProcessNeon.h
rename to media/libaudioprocessing/AudioResamplerFirProcessNeon.h
index 3de9edd..1ce76a8 100644
--- a/services/audioflinger/AudioResamplerFirProcessNeon.h
+++ b/media/libaudioprocessing/AudioResamplerFirProcessNeon.h
@@ -155,8 +155,8 @@
             accum2 = vmlal_s16(accum2, vget_low_s16(negSamp.val[1]), vget_low_s16(negCoef));
             accum2 = vmlal_s16(accum2, vget_high_s16(negSamp.val[1]), vget_high_s16(negCoef));
             sP -= 16;
-        }
         } break;
+        }
     } while (count -= 8);
 
     // multiply by volume and save
diff --git a/services/audioflinger/AudioResamplerFirProcessSSE.h b/media/libaudioprocessing/AudioResamplerFirProcessSSE.h
similarity index 100%
rename from services/audioflinger/AudioResamplerFirProcessSSE.h
rename to media/libaudioprocessing/AudioResamplerFirProcessSSE.h
diff --git a/services/audioflinger/AudioResamplerSinc.cpp b/media/libaudioprocessing/AudioResamplerSinc.cpp
similarity index 100%
rename from services/audioflinger/AudioResamplerSinc.cpp
rename to media/libaudioprocessing/AudioResamplerSinc.cpp
diff --git a/services/audioflinger/AudioResamplerSinc.h b/media/libaudioprocessing/AudioResamplerSinc.h
similarity index 98%
rename from services/audioflinger/AudioResamplerSinc.h
rename to media/libaudioprocessing/AudioResamplerSinc.h
index df8b45a..f6dcf91 100644
--- a/services/audioflinger/AudioResamplerSinc.h
+++ b/media/libaudioprocessing/AudioResamplerSinc.h
@@ -21,7 +21,7 @@
 #include <sys/types.h>
 #include <android/log.h>
 
-#include "AudioResampler.h"
+#include <media/AudioResampler.h>
 
 namespace android {
 
diff --git a/services/audioflinger/AudioResamplerSincDown.h b/media/libaudioprocessing/AudioResamplerSincDown.h
similarity index 100%
rename from services/audioflinger/AudioResamplerSincDown.h
rename to media/libaudioprocessing/AudioResamplerSincDown.h
diff --git a/services/audioflinger/AudioResamplerSincUp.h b/media/libaudioprocessing/AudioResamplerSincUp.h
similarity index 100%
rename from services/audioflinger/AudioResamplerSincUp.h
rename to media/libaudioprocessing/AudioResamplerSincUp.h
diff --git a/services/audioflinger/BufferProviders.cpp b/media/libaudioprocessing/BufferProviders.cpp
similarity index 85%
rename from services/audioflinger/BufferProviders.cpp
rename to media/libaudioprocessing/BufferProviders.cpp
index 7b6dfcb..8341a1e 100644
--- a/services/audioflinger/BufferProviders.cpp
+++ b/media/libaudioprocessing/BufferProviders.cpp
@@ -17,17 +17,17 @@
 #define LOG_TAG "BufferProvider"
 //#define LOG_NDEBUG 0
 
-#include <audio_effects/effect_downmix.h>
 #include <audio_utils/primitives.h>
 #include <audio_utils/format.h>
+#include <external/sonic/sonic.h>
+#include <media/audiohal/EffectBufferHalInterface.h>
+#include <media/audiohal/EffectHalInterface.h>
+#include <media/audiohal/EffectsFactoryHalInterface.h>
 #include <media/AudioResamplerPublic.h>
-#include <media/EffectsFactoryApi.h>
-
+#include <media/BufferProviders.h>
+#include <system/audio_effects/effect_downmix.h>
 #include <utils/Log.h>
 
-#include "Configuration.h"
-#include "BufferProviders.h"
-
 #ifndef ARRAY_SIZE
 #define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
 #endif
@@ -145,13 +145,22 @@
     ALOGV("DownmixerBufferProvider(%p)(%#x, %#x, %#x %u %d)",
             this, inputChannelMask, outputChannelMask, format,
             sampleRate, sessionId);
-    if (!sIsMultichannelCapable
-            || EffectCreate(&sDwnmFxDesc.uuid,
-                    sessionId,
-                    SESSION_ID_INVALID_AND_IGNORED,
-                    &mDownmixHandle) != 0) {
+    if (!sIsMultichannelCapable) {
+        ALOGE("DownmixerBufferProvider() error: not multichannel capable");
+        return;
+    }
+    mEffectsFactory = EffectsFactoryHalInterface::create();
+    if (mEffectsFactory == 0) {
+        ALOGE("DownmixerBufferProvider() error: could not obtain the effects factory");
+        return;
+    }
+    if (mEffectsFactory->createEffect(&sDwnmFxDesc.uuid,
+                                      sessionId,
+                                      SESSION_ID_INVALID_AND_IGNORED,
+                                      &mDownmixInterface) != 0) {
          ALOGE("DownmixerBufferProvider() error creating downmixer effect");
-         mDownmixHandle = NULL;
+         mDownmixInterface.clear();
+         mEffectsFactory.clear();
          return;
      }
      // channel input configuration will be overridden per-track
@@ -169,32 +178,61 @@
              EFFECT_CONFIG_FORMAT | EFFECT_CONFIG_ACC_MODE;
      mDownmixConfig.outputCfg.mask = mDownmixConfig.inputCfg.mask;
 
+     status_t status;
+     status = EffectBufferHalInterface::mirror(
+             nullptr,
+             audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(inputChannelMask),
+             &mInBuffer);
+     if (status != 0) {
+         ALOGE("DownmixerBufferProvider() error %d while creating input buffer", status);
+         mDownmixInterface.clear();
+         mEffectsFactory.clear();
+         return;
+     }
+     status = EffectBufferHalInterface::mirror(
+             nullptr,
+             audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(outputChannelMask),
+             &mOutBuffer);
+     if (status != 0) {
+         ALOGE("DownmixerBufferProvider() error %d while creating output buffer", status);
+         mInBuffer.clear();
+         mDownmixInterface.clear();
+         mEffectsFactory.clear();
+         return;
+     }
+     mDownmixInterface->setInBuffer(mInBuffer);
+     mDownmixInterface->setOutBuffer(mOutBuffer);
+
      int cmdStatus;
      uint32_t replySize = sizeof(int);
 
      // Configure downmixer
-     status_t status = (*mDownmixHandle)->command(mDownmixHandle,
+     status = mDownmixInterface->command(
              EFFECT_CMD_SET_CONFIG /*cmdCode*/, sizeof(effect_config_t) /*cmdSize*/,
              &mDownmixConfig /*pCmdData*/,
              &replySize, &cmdStatus /*pReplyData*/);
      if (status != 0 || cmdStatus != 0) {
          ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while configuring downmixer",
                  status, cmdStatus);
-         EffectRelease(mDownmixHandle);
-         mDownmixHandle = NULL;
+         mOutBuffer.clear();
+         mInBuffer.clear();
+         mDownmixInterface.clear();
+         mEffectsFactory.clear();
          return;
      }
 
      // Enable downmixer
      replySize = sizeof(int);
-     status = (*mDownmixHandle)->command(mDownmixHandle,
+     status = mDownmixInterface->command(
              EFFECT_CMD_ENABLE /*cmdCode*/, 0 /*cmdSize*/, NULL /*pCmdData*/,
              &replySize, &cmdStatus /*pReplyData*/);
      if (status != 0 || cmdStatus != 0) {
          ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while enabling downmixer",
                  status, cmdStatus);
-         EffectRelease(mDownmixHandle);
-         mDownmixHandle = NULL;
+         mOutBuffer.clear();
+         mInBuffer.clear();
+         mDownmixInterface.clear();
+         mEffectsFactory.clear();
          return;
      }
 
@@ -211,15 +249,17 @@
      param->vsize = sizeof(downmix_type_t);
      memcpy(param->data + psizePadded, &downmixType, param->vsize);
      replySize = sizeof(int);
-     status = (*mDownmixHandle)->command(mDownmixHandle,
+     status = mDownmixInterface->command(
              EFFECT_CMD_SET_PARAM /* cmdCode */, downmixParamSize /* cmdSize */,
              param /*pCmdData*/, &replySize, &cmdStatus /*pReplyData*/);
      free(param);
      if (status != 0 || cmdStatus != 0) {
          ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while setting downmix type",
                  status, cmdStatus);
-         EffectRelease(mDownmixHandle);
-         mDownmixHandle = NULL;
+         mOutBuffer.clear();
+         mInBuffer.clear();
+         mDownmixInterface.clear();
+         mEffectsFactory.clear();
          return;
      }
      ALOGV("DownmixerBufferProvider() downmix type set to %d", (int) downmixType);
@@ -228,28 +268,39 @@
 DownmixerBufferProvider::~DownmixerBufferProvider()
 {
     ALOGV("~DownmixerBufferProvider (%p)", this);
-    EffectRelease(mDownmixHandle);
-    mDownmixHandle = NULL;
+    if (mDownmixInterface != 0) {
+        mDownmixInterface->close();
+    }
 }
 
 void DownmixerBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
 {
-    mDownmixConfig.inputCfg.buffer.frameCount = frames;
-    mDownmixConfig.inputCfg.buffer.raw = const_cast<void *>(src);
-    mDownmixConfig.outputCfg.buffer.frameCount = frames;
-    mDownmixConfig.outputCfg.buffer.raw = dst;
+    mInBuffer->setExternalData(const_cast<void*>(src));
+    mInBuffer->setFrameCount(frames);
+    mInBuffer->update();
+    mOutBuffer->setExternalData(dst);
+    mOutBuffer->setFrameCount(frames);
+    mOutBuffer->update();
     // may be in-place if src == dst.
-    status_t res = (*mDownmixHandle)->process(mDownmixHandle,
-            &mDownmixConfig.inputCfg.buffer, &mDownmixConfig.outputCfg.buffer);
-    ALOGE_IF(res != OK, "DownmixBufferProvider error %d", res);
+    status_t res = mDownmixInterface->process();
+    if (res == OK) {
+        mOutBuffer->commit();
+    } else {
+        ALOGE("DownmixBufferProvider error %d", res);
+    }
 }
 
 /* call once in a pthread_once handler. */
 /*static*/ status_t DownmixerBufferProvider::init()
 {
     // find multichannel downmix effect if we have to play multichannel content
+    sp<EffectsFactoryHalInterface> effectsFactory = EffectsFactoryHalInterface::create();
+    if (effectsFactory == 0) {
+        ALOGE("AudioMixer() error: could not obtain the effects factory");
+        return NO_INIT;
+    }
     uint32_t numEffects = 0;
-    int ret = EffectQueryNumberEffects(&numEffects);
+    int ret = effectsFactory->queryNumberEffects(&numEffects);
     if (ret != 0) {
         ALOGE("AudioMixer() error %d querying number of effects", ret);
         return NO_INIT;
@@ -257,7 +308,7 @@
     ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects);
 
     for (uint32_t i = 0 ; i < numEffects ; i++) {
-        if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) {
+        if (effectsFactory->getDescriptor(i, &sDwnmFxDesc) == 0) {
             ALOGV("effect %d is called %s", i, sDwnmFxDesc.name);
             if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
                 ALOGI("found effect \"%s\" from %s",
diff --git a/media/libaudioprocessing/RecordBufferConverter.cpp b/media/libaudioprocessing/RecordBufferConverter.cpp
new file mode 100644
index 0000000..54151f5
--- /dev/null
+++ b/media/libaudioprocessing/RecordBufferConverter.cpp
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "RecordBufferConverter"
+//#define LOG_NDEBUG 0
+
+#include <audio_utils/primitives.h>
+#include <audio_utils/format.h>
+#include <media/AudioMixer.h>  // for UNITY_GAIN_FLOAT
+#include <media/AudioResampler.h>
+#include <media/BufferProviders.h>
+#include <media/RecordBufferConverter.h>
+#include <utils/Log.h>
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
+#endif
+
+template <typename T>
+static inline T max(const T& a, const T& b)
+{
+    return a > b ? a : b;
+}
+
+namespace android {
+
+RecordBufferConverter::RecordBufferConverter(
+        audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
+        uint32_t srcSampleRate,
+        audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
+        uint32_t dstSampleRate) :
+            mSrcChannelMask(AUDIO_CHANNEL_INVALID), // updateParameters will set following vars
+            // mSrcFormat
+            // mSrcSampleRate
+            // mDstChannelMask
+            // mDstFormat
+            // mDstSampleRate
+            // mSrcChannelCount
+            // mDstChannelCount
+            // mDstFrameSize
+            mBuf(NULL), mBufFrames(0), mBufFrameSize(0),
+            mResampler(NULL),
+            mIsLegacyDownmix(false),
+            mIsLegacyUpmix(false),
+            mRequiresFloat(false),
+            mInputConverterProvider(NULL)
+{
+    (void)updateParameters(srcChannelMask, srcFormat, srcSampleRate,
+            dstChannelMask, dstFormat, dstSampleRate);
+}
+
+RecordBufferConverter::~RecordBufferConverter() {
+    free(mBuf);
+    delete mResampler;
+    delete mInputConverterProvider;
+}
+
+void RecordBufferConverter::reset() {
+    if (mResampler != NULL) {
+        mResampler->reset();
+    }
+}
+
+size_t RecordBufferConverter::convert(void *dst,
+        AudioBufferProvider *provider, size_t frames)
+{
+    if (mInputConverterProvider != NULL) {
+        mInputConverterProvider->setBufferProvider(provider);
+        provider = mInputConverterProvider;
+    }
+
+    if (mResampler == NULL) {
+        ALOGV("NO RESAMPLING sampleRate:%u mSrcFormat:%#x mDstFormat:%#x",
+                mSrcSampleRate, mSrcFormat, mDstFormat);
+
+        AudioBufferProvider::Buffer buffer;
+        for (size_t i = frames; i > 0; ) {
+            buffer.frameCount = i;
+            status_t status = provider->getNextBuffer(&buffer);
+            if (status != OK || buffer.frameCount == 0) {
+                frames -= i; // cannot fill request.
+                break;
+            }
+            // format convert to destination buffer
+            convertNoResampler(dst, buffer.raw, buffer.frameCount);
+
+            dst = (int8_t*)dst + buffer.frameCount * mDstFrameSize;
+            i -= buffer.frameCount;
+            provider->releaseBuffer(&buffer);
+        }
+    } else {
+         ALOGV("RESAMPLING mSrcSampleRate:%u mDstSampleRate:%u mSrcFormat:%#x mDstFormat:%#x",
+                 mSrcSampleRate, mDstSampleRate, mSrcFormat, mDstFormat);
+
+         // reallocate buffer if needed
+         if (mBufFrameSize != 0 && mBufFrames < frames) {
+             free(mBuf);
+             mBufFrames = frames;
+             (void)posix_memalign(&mBuf, 32, mBufFrames * mBufFrameSize);
+         }
+        // resampler accumulates, but we only have one source track
+        memset(mBuf, 0, frames * mBufFrameSize);
+        frames = mResampler->resample((int32_t*)mBuf, frames, provider);
+        // format convert to destination buffer
+        convertResampler(dst, mBuf, frames);
+    }
+    return frames;
+}
+
+status_t RecordBufferConverter::updateParameters(
+        audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
+        uint32_t srcSampleRate,
+        audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
+        uint32_t dstSampleRate)
+{
+    // quick evaluation if there is any change.
+    if (mSrcFormat == srcFormat
+            && mSrcChannelMask == srcChannelMask
+            && mSrcSampleRate == srcSampleRate
+            && mDstFormat == dstFormat
+            && mDstChannelMask == dstChannelMask
+            && mDstSampleRate == dstSampleRate) {
+        return NO_ERROR;
+    }
+
+    ALOGV("RecordBufferConverter updateParameters srcMask:%#x dstMask:%#x"
+            "  srcFormat:%#x dstFormat:%#x  srcRate:%u dstRate:%u",
+            srcChannelMask, dstChannelMask, srcFormat, dstFormat, srcSampleRate, dstSampleRate);
+    const bool valid =
+            audio_is_input_channel(srcChannelMask)
+            && audio_is_input_channel(dstChannelMask)
+            && audio_is_valid_format(srcFormat) && audio_is_linear_pcm(srcFormat)
+            && audio_is_valid_format(dstFormat) && audio_is_linear_pcm(dstFormat)
+            && (srcSampleRate <= dstSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX)
+            ; // no upsampling checks for now
+    if (!valid) {
+        return BAD_VALUE;
+    }
+
+    mSrcFormat = srcFormat;
+    mSrcChannelMask = srcChannelMask;
+    mSrcSampleRate = srcSampleRate;
+    mDstFormat = dstFormat;
+    mDstChannelMask = dstChannelMask;
+    mDstSampleRate = dstSampleRate;
+
+    // compute derived parameters
+    mSrcChannelCount = audio_channel_count_from_in_mask(srcChannelMask);
+    mDstChannelCount = audio_channel_count_from_in_mask(dstChannelMask);
+    mDstFrameSize = mDstChannelCount * audio_bytes_per_sample(mDstFormat);
+
+    // do we need to resample?
+    delete mResampler;
+    mResampler = NULL;
+    if (mSrcSampleRate != mDstSampleRate) {
+        mResampler = AudioResampler::create(AUDIO_FORMAT_PCM_FLOAT,
+                mSrcChannelCount, mDstSampleRate);
+        mResampler->setSampleRate(mSrcSampleRate);
+        mResampler->setVolume(AudioMixer::UNITY_GAIN_FLOAT, AudioMixer::UNITY_GAIN_FLOAT);
+    }
+
+    // are we running legacy channel conversion modes?
+    mIsLegacyDownmix = (mSrcChannelMask == AUDIO_CHANNEL_IN_STEREO
+                            || mSrcChannelMask == AUDIO_CHANNEL_IN_FRONT_BACK)
+                   && mDstChannelMask == AUDIO_CHANNEL_IN_MONO;
+    mIsLegacyUpmix = mSrcChannelMask == AUDIO_CHANNEL_IN_MONO
+                   && (mDstChannelMask == AUDIO_CHANNEL_IN_STEREO
+                            || mDstChannelMask == AUDIO_CHANNEL_IN_FRONT_BACK);
+
+    // do we need to process in float?
+    mRequiresFloat = mResampler != NULL || mIsLegacyDownmix || mIsLegacyUpmix;
+
+    // do we need a staging buffer to convert for destination (we can still optimize this)?
+    // we use mBufFrameSize > 0 to indicate both frame size as well as buffer necessity
+    if (mResampler != NULL) {
+        mBufFrameSize = max(mSrcChannelCount, (uint32_t)FCC_2)
+                * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT);
+    } else if (mIsLegacyUpmix || mIsLegacyDownmix) { // legacy modes always float
+        mBufFrameSize = mDstChannelCount * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT);
+    } else if (mSrcChannelMask != mDstChannelMask && mDstFormat != mSrcFormat) {
+        mBufFrameSize = mDstChannelCount * audio_bytes_per_sample(mSrcFormat);
+    } else {
+        mBufFrameSize = 0;
+    }
+    mBufFrames = 0; // force the buffer to be resized.
+
+    // do we need an input converter buffer provider to give us float?
+    delete mInputConverterProvider;
+    mInputConverterProvider = NULL;
+    if (mRequiresFloat && mSrcFormat != AUDIO_FORMAT_PCM_FLOAT) {
+        mInputConverterProvider = new ReformatBufferProvider(
+                audio_channel_count_from_in_mask(mSrcChannelMask),
+                mSrcFormat,
+                AUDIO_FORMAT_PCM_FLOAT,
+                256 /* provider buffer frame count */);
+    }
+
+    // do we need a remixer to do channel mask conversion
+    if (!mIsLegacyDownmix && !mIsLegacyUpmix && mSrcChannelMask != mDstChannelMask) {
+        (void) memcpy_by_index_array_initialization_from_channel_mask(
+                mIdxAry, ARRAY_SIZE(mIdxAry), mDstChannelMask, mSrcChannelMask);
+    }
+    return NO_ERROR;
+}
+
+void RecordBufferConverter::convertNoResampler(
+        void *dst, const void *src, size_t frames)
+{
+    // src is native type unless there is legacy upmix or downmix, whereupon it is float.
+    if (mBufFrameSize != 0 && mBufFrames < frames) {
+        free(mBuf);
+        mBufFrames = frames;
+        (void)posix_memalign(&mBuf, 32, mBufFrames * mBufFrameSize);
+    }
+    // do we need to do legacy upmix and downmix?
+    if (mIsLegacyUpmix || mIsLegacyDownmix) {
+        void *dstBuf = mBuf != NULL ? mBuf : dst;
+        if (mIsLegacyUpmix) {
+            upmix_to_stereo_float_from_mono_float((float *)dstBuf,
+                    (const float *)src, frames);
+        } else /*mIsLegacyDownmix */ {
+            downmix_to_mono_float_from_stereo_float((float *)dstBuf,
+                    (const float *)src, frames);
+        }
+        if (mBuf != NULL) {
+            memcpy_by_audio_format(dst, mDstFormat, mBuf, AUDIO_FORMAT_PCM_FLOAT,
+                    frames * mDstChannelCount);
+        }
+        return;
+    }
+    // do we need to do channel mask conversion?
+    if (mSrcChannelMask != mDstChannelMask) {
+        void *dstBuf = mBuf != NULL ? mBuf : dst;
+        memcpy_by_index_array(dstBuf, mDstChannelCount,
+                src, mSrcChannelCount, mIdxAry, audio_bytes_per_sample(mSrcFormat), frames);
+        if (dstBuf == dst) {
+            return; // format is the same
+        }
+    }
+    // convert to destination buffer
+    const void *convertBuf = mBuf != NULL ? mBuf : src;
+    memcpy_by_audio_format(dst, mDstFormat, convertBuf, mSrcFormat,
+            frames * mDstChannelCount);
+}
+
+void RecordBufferConverter::convertResampler(
+        void *dst, /*not-a-const*/ void *src, size_t frames)
+{
+    // src buffer format is ALWAYS float when entering this routine
+    if (mIsLegacyUpmix) {
+        ; // mono to stereo already handled by resampler
+    } else if (mIsLegacyDownmix
+            || (mSrcChannelMask == mDstChannelMask && mSrcChannelCount == 1)) {
+        // the resampler outputs stereo for mono input channel (a feature?)
+        // must convert to mono
+        downmix_to_mono_float_from_stereo_float((float *)src,
+                (const float *)src, frames);
+    } else if (mSrcChannelMask != mDstChannelMask) {
+        // convert to mono channel again for channel mask conversion (could be skipped
+        // with further optimization).
+        if (mSrcChannelCount == 1) {
+            downmix_to_mono_float_from_stereo_float((float *)src,
+                (const float *)src, frames);
+        }
+        // convert to destination format (in place, OK as float is larger than other types)
+        if (mDstFormat != AUDIO_FORMAT_PCM_FLOAT) {
+            memcpy_by_audio_format(src, mDstFormat, src, AUDIO_FORMAT_PCM_FLOAT,
+                    frames * mSrcChannelCount);
+        }
+        // channel convert and save to dst
+        memcpy_by_index_array(dst, mDstChannelCount,
+                src, mSrcChannelCount, mIdxAry, audio_bytes_per_sample(mDstFormat), frames);
+        return;
+    }
+    // convert to destination format and save to dst
+    memcpy_by_audio_format(dst, mDstFormat, src, AUDIO_FORMAT_PCM_FLOAT,
+            frames * mDstChannelCount);
+}
+
+// ----------------------------------------------------------------------------
+} // namespace android
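For orientation, a hedged usage sketch of the new RecordBufferConverter: the constructor fixes the source and destination formats, and convert() pulls source frames from an AudioBufferProvider while resampling and channel-converting into the destination buffer. Everything outside the constructor and convert() calls below is assumed caller code, not part of this file.

// Illustrative caller-side sketch (not part of the patch): convert capture
// data from 48 kHz stereo 16-bit PCM to 44.1 kHz mono float.
#include <vector>
#include <media/AudioBufferProvider.h>
#include <media/RecordBufferConverter.h>
#include <system/audio.h>

using namespace android;

static size_t captureChunk(AudioBufferProvider *provider,  // supplies source frames
                           std::vector<uint8_t> *out, size_t frames) {
    // In real use the converter would be kept across calls; constructing it
    // here keeps the sketch self-contained.
    RecordBufferConverter converter(
            AUDIO_CHANNEL_IN_STEREO, AUDIO_FORMAT_PCM_16_BIT, 48000,   // source
            AUDIO_CHANNEL_IN_MONO,   AUDIO_FORMAT_PCM_FLOAT,  44100);  // destination
    // Destination frame size here is 1 channel * sizeof(float).
    out->resize(frames * sizeof(float));
    const size_t converted = converter.convert(out->data(), provider, frames);
    out->resize(converted * sizeof(float));
    return converted;  // may be less than requested if the provider ran dry
}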
diff --git a/services/audioflinger/audio-resampler/Android.mk b/media/libaudioprocessing/audio-resampler/Android.mk
similarity index 100%
rename from services/audioflinger/audio-resampler/Android.mk
rename to media/libaudioprocessing/audio-resampler/Android.mk
diff --git a/services/audioflinger/audio-resampler/AudioResamplerCoefficients.cpp b/media/libaudioprocessing/audio-resampler/AudioResamplerCoefficients.cpp
similarity index 100%
rename from services/audioflinger/audio-resampler/AudioResamplerCoefficients.cpp
rename to media/libaudioprocessing/audio-resampler/AudioResamplerCoefficients.cpp
diff --git a/services/audioflinger/audio-resampler/filter_coefficients.h b/media/libaudioprocessing/audio-resampler/filter_coefficients.h
similarity index 100%
rename from services/audioflinger/audio-resampler/filter_coefficients.h
rename to media/libaudioprocessing/audio-resampler/filter_coefficients.h
diff --git a/media/libaudioprocessing/tests/Android.mk b/media/libaudioprocessing/tests/Android.mk
new file mode 100644
index 0000000..23e1c3a
--- /dev/null
+++ b/media/libaudioprocessing/tests/Android.mk
@@ -0,0 +1,87 @@
+# Build the unit tests for libaudioprocessing
+
+LOCAL_PATH := $(call my-dir)
+
+#
+# resampler unit test
+#
+include $(CLEAR_VARS)
+
+LOCAL_SHARED_LIBRARIES := \
+    libaudioutils \
+    libaudioprocessing \
+    libcutils \
+    liblog \
+    libutils \
+
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+
+LOCAL_SRC_FILES := \
+    resampler_tests.cpp
+
+LOCAL_MODULE := resampler_tests
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_CFLAGS := -Werror -Wall
+
+include $(BUILD_NATIVE_TEST)
+
+#
+# audio mixer test tool
+#
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+    test-mixer.cpp \
+
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+
+LOCAL_STATIC_LIBRARIES := \
+    libsndfile \
+
+LOCAL_SHARED_LIBRARIES := \
+    libaudioprocessing \
+    libaudioutils \
+    libcutils \
+    liblog \
+    libutils \
+
+LOCAL_MODULE := test-mixer
+
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_CFLAGS := -Werror -Wall
+
+include $(BUILD_EXECUTABLE)
+
+#
+# build audio resampler test tool
+#
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+    test-resampler.cpp \
+
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+
+LOCAL_STATIC_LIBRARIES := \
+    libsndfile \
+
+LOCAL_SHARED_LIBRARIES := \
+    libaudioprocessing \
+    libaudioutils \
+    libcutils \
+    liblog \
+    libutils \
+
+LOCAL_MODULE := test-resampler
+
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_CFLAGS := -Werror -Wall
+
+include $(BUILD_EXECUTABLE)
diff --git a/services/audioflinger/tests/README b/media/libaudioprocessing/tests/README
similarity index 61%
rename from services/audioflinger/tests/README
rename to media/libaudioprocessing/tests/README
index 508e960..ed7e2ed 100644
--- a/services/audioflinger/tests/README
+++ b/media/libaudioprocessing/tests/README
@@ -1,9 +1,9 @@
 For libsonic dependency:
-pushd external/sonic
+pushd $ANDROID_BUILD_TOP/external/sonic
 mm
 popd
 
-To build resampler library:
+To build the audio processing library:
 pushd ..
 Optionally uncomment USE_NEON=false in Android.mk
 mm
diff --git a/media/libaudioprocessing/tests/build_and_run_all_unit_tests.sh b/media/libaudioprocessing/tests/build_and_run_all_unit_tests.sh
new file mode 100755
index 0000000..704d095
--- /dev/null
+++ b/media/libaudioprocessing/tests/build_and_run_all_unit_tests.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+if [ -z "$ANDROID_BUILD_TOP" ]; then
+    echo "Android build environment not set"
+    exit -1
+fi
+
+# ensure we have mm
+. $ANDROID_BUILD_TOP/build/envsetup.sh
+
+pushd $ANDROID_BUILD_TOP/frameworks/av/media/libaudioprocessing
+pwd
+mm
+
+echo "waiting for device"
+adb root && adb wait-for-device remount
+adb push $OUT/system/lib/libaudioprocessing.so /system/lib
+adb push $OUT/system/lib64/libaudioprocessing.so /system/lib64
+adb push $OUT/data/nativetest/resampler_tests/resampler_tests /data/nativetest/resampler_tests/resampler_tests
+adb push $OUT/data/nativetest64/resampler_tests/resampler_tests /data/nativetest64/resampler_tests/resampler_tests
+
+sh $ANDROID_BUILD_TOP/frameworks/av/media/libaudioprocessing/tests/run_all_unit_tests.sh
+
+popd
diff --git a/services/audioflinger/tests/mixer_to_wav_tests.sh b/media/libaudioprocessing/tests/mixer_to_wav_tests.sh
similarity index 95%
rename from services/audioflinger/tests/mixer_to_wav_tests.sh
rename to media/libaudioprocessing/tests/mixer_to_wav_tests.sh
index d0482a1..72b02fc 100755
--- a/services/audioflinger/tests/mixer_to_wav_tests.sh
+++ b/media/libaudioprocessing/tests/mixer_to_wav_tests.sh
@@ -37,7 +37,7 @@
 # ensure we have mm
 . $ANDROID_BUILD_TOP/build/envsetup.sh
 
-pushd $ANDROID_BUILD_TOP/frameworks/av/services/audioflinger/
+pushd $ANDROID_BUILD_TOP/frameworks/av/media/libaudioprocessing
 
 # build
 pwd
@@ -46,7 +46,8 @@
 # send to device
 echo "waiting for device"
 adb root && adb wait-for-device remount
-adb push $OUT/system/lib/libaudioresampler.so /system/lib
+adb push $OUT/system/lib/libaudioprocessing.so /system/lib
+adb push $OUT/system/lib64/libaudioprocessing.so /system/lib64
 adb push $OUT/system/bin/test-mixer /system/bin
 
 # createwav creates a series of WAV files testing various
diff --git a/services/audioflinger/tests/resampler_tests.cpp b/media/libaudioprocessing/tests/resampler_tests.cpp
similarity index 81%
rename from services/audioflinger/tests/resampler_tests.cpp
rename to media/libaudioprocessing/tests/resampler_tests.cpp
index b0d384d..a23c000 100644
--- a/services/audioflinger/tests/resampler_tests.cpp
+++ b/media/libaudioprocessing/tests/resampler_tests.cpp
@@ -36,9 +36,20 @@
 #include <log/log.h>
 #include <media/AudioBufferProvider.h>
 
-#include "AudioResampler.h"
+#include <media/AudioResampler.h>
 #include "test_utils.h"
 
+template <typename T>
+static void printData(T *data, size_t size) {
+    const size_t stride = 8;
+    for (size_t i = 0; i < size; ) {
+        for (size_t j = 0; j < stride && i < size; ++j) {
+            std::cout << data[i++] << ' ';  // extra space before newline
+        }
+        std::cout << '\n'; // or endl
+    }
+}
+
 void resample(int channels, void *output,
         size_t outputFrames, const std::vector<size_t> &outputIncr,
         android::AudioBufferProvider *provider, android::AudioResampler *resampler)
@@ -91,7 +102,7 @@
 
     // calculate the output size
     size_t outputFrames = ((int64_t) provider.getNumFrames() * outputFreq) / inputFreq;
-    size_t outputFrameSize = channels * (useFloat ? sizeof(float) : sizeof(int32_t));
+    size_t outputFrameSize = (channels == 1 ? 2 : channels) * (useFloat ? sizeof(float) : sizeof(int32_t));
     size_t outputSize = outputFrameSize * outputFrames;
     outputSize &= ~7;
 
@@ -106,7 +117,7 @@
     // set up the reference run
     std::vector<size_t> refIncr;
     refIncr.push_back(outputFrames);
-    void* reference = malloc(outputSize);
+    void* reference = calloc(outputFrames, outputFrameSize);
     resample(channels, reference, outputFrames, refIncr, &provider, resampler);
 
     provider.reset();
@@ -127,7 +138,7 @@
     outIncr.push_back(1);
     outIncr.push_back(2);
     outIncr.push_back(3);
-    void* test = malloc(outputSize);
+    void* test = calloc(outputFrames, outputFrameSize);
     inputIncr.push_back(1);
     inputIncr.push_back(3);
     provider.setIncr(inputIncr);
@@ -177,7 +188,7 @@
 
     // calculate the output size
     size_t outputFrames = ((int64_t) provider.getNumFrames() * outputFreq) / inputFreq;
-    size_t outputFrameSize = channels * sizeof(TO);
+    size_t outputFrameSize = (channels == 1 ? 2 : channels) * sizeof(TO);
     size_t outputSize = outputFrameSize * outputFrames;
     outputSize &= ~7;
 
@@ -194,7 +205,7 @@
     // set up the reference run
     std::vector<size_t> refIncr;
     refIncr.push_back(outputFrames);
-    void* reference = malloc(outputSize);
+    void* reference = calloc(outputFrames, outputFrameSize);
     resample(channels, reference, outputFrames, refIncr, &provider, resampler);
 
     TO *out = reinterpret_cast<TO *>(reference);
@@ -204,6 +215,8 @@
     const unsigned stopbandFrame = stopband * outputFreq / 1000.;
 
     // check each channel separately
+    if (channels == 1) channels = 2; // workaround (mono duplicates output channel)
+
     for (size_t i = 0; i < channels; ++i) {
         double passbandEnergy = signalEnergy(out, out + passbandFrame * channels, channels);
         double stopbandEnergy = signalEnergy(out + stopbandFrame * channels,
@@ -331,6 +344,34 @@
     }
 }
 
+TEST(audioflinger_resampler, stopbandresponse_integer_mono) {
+    // not all of these may work (old resamplers fail on downsampling)
+    static const enum android::AudioResampler::src_quality kQualityArray[] = {
+            //android::AudioResampler::LOW_QUALITY,
+            //android::AudioResampler::MED_QUALITY,
+            //android::AudioResampler::HIGH_QUALITY,
+            //android::AudioResampler::VERY_HIGH_QUALITY,
+            android::AudioResampler::DYN_LOW_QUALITY,
+            android::AudioResampler::DYN_MED_QUALITY,
+            android::AudioResampler::DYN_HIGH_QUALITY,
+    };
+
+    // in this test we assume a maximum transition band between 12kHz and 20kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<int16_t, int32_t>(
+                1, 48000, 32000, 12000, 20000, kQualityArray[i]);
+    }
+
+    // in this test we assume a maximum transition band between 7kHz and 15kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    // (the weird ratio triggers interpolative resampling)
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<int16_t, int32_t>(
+                1, 48000, 22101, 7000, 15000, kQualityArray[i]);
+    }
+}
+
 TEST(audioflinger_resampler, stopbandresponse_integer_multichannel) {
     // not all of these may work (old resamplers fail on downsampling)
     static const enum android::AudioResampler::src_quality kQualityArray[] = {
@@ -387,6 +428,34 @@
     }
 }
 
+TEST(audioflinger_resampler, stopbandresponse_float_mono) {
+    // not all of these may work (old resamplers fail on downsampling)
+    static const enum android::AudioResampler::src_quality kQualityArray[] = {
+            //android::AudioResampler::LOW_QUALITY,
+            //android::AudioResampler::MED_QUALITY,
+            //android::AudioResampler::HIGH_QUALITY,
+            //android::AudioResampler::VERY_HIGH_QUALITY,
+            android::AudioResampler::DYN_LOW_QUALITY,
+            android::AudioResampler::DYN_MED_QUALITY,
+            android::AudioResampler::DYN_HIGH_QUALITY,
+    };
+
+    // in this test we assume a maximum transition band between 12kHz and 20kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<float, float>(
+                1, 48000, 32000, 12000, 20000, kQualityArray[i]);
+    }
+
+    // in this test we assume a maximum transition band between 7kHz and 15kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    // (the weird ratio triggers interpolative resampling)
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<float, float>(
+                1, 48000, 22101, 7000, 15000, kQualityArray[i]);
+    }
+}
+
 TEST(audioflinger_resampler, stopbandresponse_float_multichannel) {
     // not all of these may work (old resamplers fail on downsampling)
     static const enum android::AudioResampler::src_quality kQualityArray[] = {
diff --git a/services/audioflinger/tests/run_all_unit_tests.sh b/media/libaudioprocessing/tests/run_all_unit_tests.sh
similarity index 79%
rename from services/audioflinger/tests/run_all_unit_tests.sh
rename to media/libaudioprocessing/tests/run_all_unit_tests.sh
index 113f39e..15a94c2 100755
--- a/services/audioflinger/tests/run_all_unit_tests.sh
+++ b/media/libaudioprocessing/tests/run_all_unit_tests.sh
@@ -8,5 +8,5 @@
 echo "waiting for device"
 adb root && adb wait-for-device remount
 
-#adb shell /system/bin/resampler_tests
 adb shell /data/nativetest/resampler_tests/resampler_tests
+adb shell /data/nativetest64/resampler_tests/resampler_tests
diff --git a/services/audioflinger/tests/test-mixer.cpp b/media/libaudioprocessing/tests/test-mixer.cpp
similarity index 99%
rename from services/audioflinger/tests/test-mixer.cpp
rename to media/libaudioprocessing/tests/test-mixer.cpp
index 65e22da..75dbf91 100644
--- a/services/audioflinger/tests/test-mixer.cpp
+++ b/media/libaudioprocessing/tests/test-mixer.cpp
@@ -21,7 +21,7 @@
 #include <audio_utils/primitives.h>
 #include <audio_utils/sndfile.h>
 #include <media/AudioBufferProvider.h>
-#include "AudioMixer.h"
+#include <media/AudioMixer.h>
 #include "test_utils.h"
 
 /* Testing is typically through creation of an output WAV file from several
diff --git a/services/audioflinger/test-resample.cpp b/media/libaudioprocessing/tests/test-resampler.cpp
similarity index 98%
rename from services/audioflinger/test-resample.cpp
rename to media/libaudioprocessing/tests/test-resampler.cpp
index bae3c5b..fbc9326 100644
--- a/services/audioflinger/test-resample.cpp
+++ b/media/libaudioprocessing/tests/test-resampler.cpp
@@ -29,7 +29,7 @@
 #include <audio_utils/sndfile.h>
 #include <utils/Vector.h>
 #include <media/AudioBufferProvider.h>
-#include "AudioResampler.h"
+#include <media/AudioResampler.h>
 
 using namespace android;
 
diff --git a/services/audioflinger/tests/test_utils.h b/media/libaudioprocessing/tests/test_utils.h
similarity index 100%
rename from services/audioflinger/tests/test_utils.h
rename to media/libaudioprocessing/tests/test_utils.h
diff --git a/media/libeffects/downmix/Android.mk b/media/libeffects/downmix/Android.mk
index e0ca8af..78601d5 100644
--- a/media/libeffects/downmix/Android.mk
+++ b/media/libeffects/downmix/Android.mk
@@ -20,5 +20,6 @@
 	$(call include-path-for, audio-utils)
 
 LOCAL_CFLAGS += -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libeffects/downmix/EffectDownmix.c b/media/libeffects/downmix/EffectDownmix.c
index 5b74845..f27d5ca 100644
--- a/media/libeffects/downmix/EffectDownmix.c
+++ b/media/libeffects/downmix/EffectDownmix.c
@@ -392,7 +392,6 @@
 
     downmix_module_t *pDwmModule = (downmix_module_t *) self;
     downmix_object_t *pDownmixer;
-    int retsize;
 
     if (pDwmModule == NULL || pDwmModule->context.state == DOWNMIX_STATE_UNINITIALIZED) {
         return -EINVAL;
diff --git a/media/libeffects/factory/EffectsFactory.c b/media/libeffects/factory/EffectsFactory.c
index ba20ac2..554c14d 100644
--- a/media/libeffects/factory/EffectsFactory.c
+++ b/media/libeffects/factory/EffectsFactory.c
@@ -29,7 +29,7 @@
 #include <cutils/properties.h>
 #include <log/log.h>
 
-#include <audio_effects/audio_effects_conf.h>
+#include <system/audio_effects/audio_effects_conf.h>
 
 static list_elem_t *gEffectList; // list of effect_entry_t: all currently created effects
 static list_elem_t *gLibraryList; // list of lib_entry_t: all currently loaded libraries
diff --git a/media/libeffects/loudness/Android.mk b/media/libeffects/loudness/Android.mk
index 55d0611..3db4a79 100644
--- a/media/libeffects/loudness/Android.mk
+++ b/media/libeffects/loudness/Android.mk
@@ -8,6 +8,7 @@
 	dsp/core/dynamic_range_compression.cpp
 
 LOCAL_CFLAGS+= -O2 -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
 
 LOCAL_SHARED_LIBRARIES := \
 	libcutils \
diff --git a/media/libeffects/loudness/EffectLoudnessEnhancer.cpp b/media/libeffects/loudness/EffectLoudnessEnhancer.cpp
index cf00e60..9d29cf1 100644
--- a/media/libeffects/loudness/EffectLoudnessEnhancer.cpp
+++ b/media/libeffects/loudness/EffectLoudnessEnhancer.cpp
@@ -198,7 +198,6 @@
                          effect_handle_t *pHandle) {
     ALOGV("LELib_Create()");
     int ret;
-    int i;
 
     if (pHandle == NULL || uuid == NULL) {
         return -EINVAL;
@@ -315,7 +314,6 @@
         void *pCmdData, uint32_t *replySize, void *pReplyData) {
 
     LoudnessEnhancerContext * pContext = (LoudnessEnhancerContext *)self;
-    int retsize;
 
     if (pContext == NULL || pContext->mState == LOUDNESS_ENHANCER_STATE_UNINITIALIZED) {
         return -EINVAL;
diff --git a/media/libeffects/lvm/lib/Android.mk b/media/libeffects/lvm/lib/Android.mk
index bb56c75..afc87bb 100644
--- a/media/libeffects/lvm/lib/Android.mk
+++ b/media/libeffects/lvm/lib/Android.mk
@@ -120,6 +120,7 @@
     $(LOCAL_PATH)/StereoWidening/lib
 
 LOCAL_CFLAGS += -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
 
 include $(BUILD_STATIC_LIBRARY)
 
@@ -177,4 +178,5 @@
     $(LOCAL_PATH)/Common/src
 
 LOCAL_CFLAGS += -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
 include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libeffects/lvm/wrapper/Android.mk b/media/libeffects/lvm/wrapper/Android.mk
index 4e38e3d..9051587 100644
--- a/media/libeffects/lvm/wrapper/Android.mk
+++ b/media/libeffects/lvm/wrapper/Android.mk
@@ -10,6 +10,7 @@
 	Bundle/EffectBundle.cpp
 
 LOCAL_CFLAGS += -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
 
 LOCAL_MODULE:= libbundlewrapper
 
@@ -40,6 +41,7 @@
     Reverb/EffectReverb.cpp
 
 LOCAL_CFLAGS += -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
 
 LOCAL_MODULE:= libreverbwrapper
 
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index d5fb6e7..616fb9c 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -340,8 +340,10 @@
             }
             delete pContext;
         }
-        *pHandle = (effect_handle_t)NULL;
+        if (pHandle != NULL)
+          *pHandle = (effect_handle_t)NULL;
     } else {
+      if (pHandle != NULL)
         *pHandle = (effect_handle_t)pContext;
     }
     ALOGV("\tEffectCreate end..\n\n");
@@ -501,8 +503,6 @@
 //----------------------------------------------------------------------------
 
 int LvmBundle_init(EffectContext *pContext){
-    int status;
-
     ALOGV("\tLvmBundle_init start");
 
     pContext->config.inputCfg.accessMode                    = EFFECT_BUFFER_ACCESS_READ;
@@ -716,7 +716,6 @@
                       int              frameCount,
                       EffectContext    *pContext){
 
-    LVM_ControlParams_t     ActiveParams;                           /* Current control Parameters */
     LVM_ReturnStatus_en     LvmStatus = LVM_SUCCESS;                /* Function call status */
     LVM_INT16               *pOutTmp;
 
@@ -1040,7 +1039,6 @@
 
 void LvmEffect_free(EffectContext *pContext){
     LVM_ReturnStatus_en     LvmStatus=LVM_SUCCESS;         /* Function call status */
-    LVM_ControlParams_t     params;                        /* Control Parameters */
     LVM_MemTab_t            MemTab;
 
     /* Free the algorithm memory */
@@ -2007,8 +2005,6 @@
     int status = 0;
     int32_t *pParamTemp = (int32_t *)pParam;
     int32_t param = *pParamTemp++;
-    int32_t param2;
-    char *name;
 
     //ALOGV("\tBassBoost_getParameter start");
 
@@ -2125,7 +2121,6 @@
     int status = 0;
     int32_t *pParamTemp = (int32_t *)pParam;
     int32_t param = *pParamTemp++;
-    char *name;
 
     //ALOGV("\tVirtualizer_getParameter start");
 
@@ -2282,7 +2277,6 @@
                            uint32_t          *pValueSize,
                            void              *pValue){
     int status = 0;
-    int bMute = 0;
     int32_t *pParamTemp = (int32_t *)pParam;
     int32_t param = *pParamTemp++;
     int32_t param2;
@@ -2560,10 +2554,8 @@
                         uint32_t          *pValueSize,
                         void              *pValue){
     int status = 0;
-    int bMute = 0;
     int32_t *pParamTemp = (int32_t *)pParam;
     int32_t param = *pParamTemp++;;
-    char *name;
 
     //ALOGV("\tVolume_getParameter start");
 
@@ -2679,8 +2671,8 @@
 
         case VOLUME_PARAM_ENABLESTEREOPOSITION:
             positionEnabled = *(uint32_t *)pValue;
-            status = VolumeEnableStereoPosition(pContext, positionEnabled);
-            status = VolumeSetStereoPosition(pContext, pContext->pBundledContext->positionSaved);
+            (void) VolumeEnableStereoPosition(pContext, positionEnabled);
+            (void) VolumeSetStereoPosition(pContext, pContext->pBundledContext->positionSaved);
             //ALOGV("\tVolume_setParameter() VOLUME_PARAM_ENABLESTEREOPOSITION called");
             break;
 
@@ -2893,11 +2885,8 @@
                               audio_buffer_t         *inBuffer,
                               audio_buffer_t         *outBuffer){
     EffectContext * pContext = (EffectContext *) self;
-    LVM_ReturnStatus_en     LvmStatus = LVM_SUCCESS;                /* Function call status */
     int    status = 0;
     int    processStatus = 0;
-    LVM_INT16   *in  = (LVM_INT16 *)inBuffer->raw;
-    LVM_INT16   *out = (LVM_INT16 *)outBuffer->raw;
 
 //ALOGV("\tEffect_process Start : Enabled = %d     Called = %d (%8d %8d %8d)",
 //pContext->pBundledContext->NumberEffectsEnabled,pContext->pBundledContext->NumberEffectsCalled,
@@ -3026,7 +3015,6 @@
                               uint32_t            *replySize,
                               void                *pReplyData){
     EffectContext * pContext = (EffectContext *) self;
-    int retsize;
 
     //ALOGV("\t\nEffect_command start");
 
@@ -3425,7 +3413,6 @@
             int16_t  leftdB, rightdB;
             int16_t  maxdB, pandB;
             int32_t  vol_ret[2] = {1<<24,1<<24}; // Apply no volume
-            int      status = 0;
             LVM_ControlParams_t     ActiveParams;           /* Current control Parameters */
             LVM_ReturnStatus_en     LvmStatus=LVM_SUCCESS;  /* Function call status */
 
diff --git a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
index fc82dd1..12a038f 100644
--- a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
+++ b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
@@ -327,6 +327,7 @@
     }                                         \
 }
 
+#if 0
 //----------------------------------------------------------------------------
 // MonoTo2I_32()
 //----------------------------------------------------------------------------
@@ -385,6 +386,7 @@
 
    return;
 }
+#endif
 
 static inline int16_t clamp16(int32_t sample)
 {
@@ -560,7 +562,6 @@
 void Reverb_free(ReverbContext *pContext){
 
     LVREV_ReturnStatus_en     LvmStatus=LVREV_SUCCESS;         /* Function call status */
-    LVREV_ControlParams_st    params;                        /* Control Parameters */
     LVREV_MemoryTable_st      MemTab;
 
     /* Free the algorithm memory */
@@ -709,8 +710,6 @@
 //----------------------------------------------------------------------------
 
 int Reverb_init(ReverbContext *pContext){
-    int status;
-
     ALOGV("\tReverb_init start");
 
     CHECK_ARG(pContext != NULL);
@@ -1543,7 +1542,6 @@
     int status = 0;
     int32_t *pParamTemp = (int32_t *)pParam;
     int32_t param = *pParamTemp++;
-    char *name;
     t_reverb_settings *pProperties;
 
     //ALOGV("\tReverb_getParameter start");
@@ -1899,7 +1897,6 @@
                               uint32_t            *replySize,
                               void                *pReplyData){
     android::ReverbContext * pContext = (android::ReverbContext *) self;
-    int retsize;
     LVREV_ControlParams_st    ActiveParams;              /* Current control Parameters */
     LVREV_ReturnStatus_en     LvmStatus=LVREV_SUCCESS;     /* Function call status */
 
diff --git a/media/libeffects/preprocessing/Android.mk b/media/libeffects/preprocessing/Android.mk
index bd67aa1..60030ac 100644
--- a/media/libeffects/preprocessing/Android.mk
+++ b/media/libeffects/preprocessing/Android.mk
@@ -28,5 +28,6 @@
     -DWEBRTC_POSIX
 
 LOCAL_CFLAGS += -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp
index f48bac1..ccfd29c 100644
--- a/media/libeffects/preprocessing/PreProcessing.cpp
+++ b/media/libeffects/preprocessing/PreProcessing.cpp
@@ -933,7 +933,6 @@
 
 int Session_SetConfig(preproc_session_t *session, effect_config_t *config)
 {
-    uint32_t sr;
     uint32_t inCnl = audio_channel_count_from_in_mask(config->inputCfg.channels);
     uint32_t outCnl = audio_channel_count_from_in_mask(config->outputCfg.channels);
 
@@ -1153,7 +1152,6 @@
 preproc_session_t *PreProc_GetSession(int32_t procId, int32_t  sessionId, int32_t  ioId)
 {
     size_t i;
-    int free = -1;
     for (i = 0; i < PREPROC_NUM_SESSIONS; i++) {
         if (sSessions[i].io == ioId) {
             if (sSessions[i].createdMsk & (1 << procId)) {
@@ -1210,7 +1208,6 @@
                             audio_buffer_t    *outBuffer)
 {
     preproc_effect_t * effect = (preproc_effect_t *)self;
-    int    status = 0;
 
     if (effect == NULL){
         ALOGV("PreProcessingFx_Process() ERROR effect == NULL");
@@ -1402,8 +1399,6 @@
                             void                *pReplyData)
 {
     preproc_effect_t * effect = (preproc_effect_t *) self;
-    int retsize;
-    int status;
 
     if (effect == NULL){
         return -EINVAL;
@@ -1777,7 +1772,6 @@
                                    audio_buffer_t    *outBuffer __unused)
 {
     preproc_effect_t * effect = (preproc_effect_t *)self;
-    int    status = 0;
 
     if (effect == NULL){
         ALOGW("PreProcessingFx_ProcessReverse() ERROR effect == NULL");
@@ -1926,7 +1920,6 @@
 
 int PreProcessingLib_Release(effect_handle_t interface)
 {
-    int status;
     ALOGV("EffectRelease start %p", interface);
     if (PreProc_Init() != 0) {
         return sInitStatus;
diff --git a/media/libeffects/proxy/Android.mk b/media/libeffects/proxy/Android.mk
index 2ba452e..0acf1c0 100644
--- a/media/libeffects/proxy/Android.mk
+++ b/media/libeffects/proxy/Android.mk
@@ -23,6 +23,7 @@
         EffectProxy.cpp
 
 LOCAL_CFLAGS+= -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
 
 LOCAL_SHARED_LIBRARIES := liblog libcutils libutils libdl libeffects
 
diff --git a/media/libeffects/proxy/EffectProxy.cpp b/media/libeffects/proxy/EffectProxy.cpp
index 0eddc15..42e44f0 100644
--- a/media/libeffects/proxy/EffectProxy.cpp
+++ b/media/libeffects/proxy/EffectProxy.cpp
@@ -47,12 +47,6 @@
 };
 
 
-static const effect_descriptor_t *const gDescriptors[] =
-{
-    &gProxyDescriptor,
-};
-
-
 int EffectProxyCreate(const effect_uuid_t *uuid,
                             int32_t             sessionId,
                             int32_t             ioId,
@@ -245,6 +239,11 @@
     // pCmdData points to a memory holding effect_offload_param_t structure
     if (cmdCode == EFFECT_CMD_OFFLOAD) {
         ALOGV("Effect_command() cmdCode = EFFECT_CMD_OFFLOAD");
+        if (replySize == NULL || *replySize < sizeof(int)) {
+            ALOGV("effectsOffload: Effect_command: CMD_OFFLOAD has no reply");
+            android_errorWriteLog(0x534e4554, "32448121");
+            return FAILED_TRANSACTION;
+        }
         if (cmdSize == 0 || pCmdData == NULL) {
             ALOGV("effectsOffload: Effect_command: CMD_OFFLOAD has no data");
              *(int*)pReplyData = FAILED_TRANSACTION;
diff --git a/media/libeffects/visualizer/Android.mk b/media/libeffects/visualizer/Android.mk
index c92c543..ddcc565 100644
--- a/media/libeffects/visualizer/Android.mk
+++ b/media/libeffects/visualizer/Android.mk
@@ -7,6 +7,7 @@
 	EffectVisualizer.cpp
 
 LOCAL_CFLAGS+= -O2 -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
 
 LOCAL_SHARED_LIBRARIES := \
 	libcutils \
diff --git a/media/libeffects/visualizer/EffectVisualizer.cpp b/media/libeffects/visualizer/EffectVisualizer.cpp
index 6a126ef..0e82339 100644
--- a/media/libeffects/visualizer/EffectVisualizer.cpp
+++ b/media/libeffects/visualizer/EffectVisualizer.cpp
@@ -237,7 +237,6 @@
                          int32_t /*ioId*/,
                          effect_handle_t *pHandle) {
     int ret;
-    int i;
 
     if (pHandle == NULL || uuid == NULL) {
         return -EINVAL;
@@ -419,7 +418,6 @@
         void *pCmdData, uint32_t *replySize, void *pReplyData) {
 
     VisualizerContext * pContext = (VisualizerContext *)self;
-    int retsize;
 
     if (pContext == NULL || pContext->mState == VISUALIZER_STATE_UNINITIALIZED) {
         return -EINVAL;
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 7fde4b2..4b14543 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -1,10 +1,13 @@
-cc_library_static {
+cc_library {
     name: "libmedia_helper",
-    srcs: ["AudioParameter.cpp"],
+    srcs: ["AudioParameter.cpp", "TypeConverter.cpp"],
     cflags: [
         "-Werror",
         "-Wno-error=deprecated-declarations",
         "-Wall",
     ],
+    shared: {
+      shared_libs: ["libutils", "liblog"],
+    },
     clang: true,
 }
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 3ba7ec1..8fff414 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -2,16 +2,20 @@
 
 include $(CLEAR_VARS)
 
+LOCAL_AIDL_INCLUDES := \
+    frameworks/av/media/libmedia/aidl
+
 LOCAL_SRC_FILES:= \
-    ICrypto.cpp \
+    aidl/android/IGraphicBufferSource.aidl \
+    aidl/android/IOMXBufferSource.aidl
+
+LOCAL_SRC_FILES += \
     IDataSource.cpp \
-    IDrm.cpp \
-    IDrmClient.cpp \
     IHDCP.cpp \
+    BufferingSettings.cpp \
     mediaplayer.cpp \
     IMediaCodecList.cpp \
     IMediaCodecService.cpp \
-    IMediaDrmService.cpp \
     IMediaHTTPConnection.cpp \
     IMediaHTTPService.cpp \
     IMediaExtractor.cpp           \
@@ -22,17 +26,22 @@
     IMediaPlayer.cpp \
     IMediaRecorder.cpp \
     IMediaSource.cpp \
+    IMediaAnalyticsService.cpp \
     IRemoteDisplay.cpp \
     IRemoteDisplayClient.cpp \
     IResourceManagerClient.cpp \
     IResourceManagerService.cpp \
     IStreamSource.cpp \
+    MediaCodecBuffer.cpp \
     MediaCodecInfo.cpp \
+    MediaDefs.cpp \
+    MediaAnalyticsItem.cpp \
     MediaUtils.cpp \
     Metadata.cpp \
     mediarecorder.cpp \
     IMediaMetadataRetriever.cpp \
     mediametadataretriever.cpp \
+    MidiDeviceInfo.cpp \
     MidiIoWrapper.cpp \
     JetPlayer.cpp \
     IOMX.cpp \
@@ -43,17 +52,17 @@
     MediaProfiles.cpp \
     MediaResource.cpp \
     MediaResourcePolicy.cpp \
+    OMXBuffer.cpp \
     Visualizer.cpp \
     StringArray.cpp \
 
 LOCAL_SHARED_LIBRARIES := \
 	libui liblog libcutils libutils libbinder libsonivox libicuuc libicui18n libexpat \
         libcamera_client libstagefright_foundation \
-        libgui libdl libaudioutils libaudioclient
+        libgui libdl libaudioutils libaudioclient \
+        libmedia_helper
 
-LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libbinder
-
-LOCAL_WHOLE_STATIC_LIBRARIES := libmedia_helper
+LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libbinder libsonivox
 
 # for memory heap analysis
 LOCAL_STATIC_LIBRARIES := libc_malloc_debug_backtrace libc_logging
@@ -66,11 +75,14 @@
     $(TOP)/frameworks/native/include/media/openmax \
     $(TOP)/frameworks/av/include/media/ \
     $(TOP)/frameworks/av/media/libstagefright \
-    $(call include-path-for, audio-effects) \
+    $(TOP)/frameworks/av/media/libmedia/aidl \
     $(call include-path-for, audio-utils)
 
+LOCAL_EXPORT_C_INCLUDE_DIRS := \
+    frameworks/av/include/media \
+    frameworks/av/media/libmedia/aidl
+
 LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
-LOCAL_CLANG := true
 LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libmedia/AudioParameter.cpp b/media/libmedia/AudioParameter.cpp
index 8c8cf45..65fc70b 100644
--- a/media/libmedia/AudioParameter.cpp
+++ b/media/libmedia/AudioParameter.cpp
@@ -19,8 +19,8 @@
 
 #include <utils/Log.h>
 
-#include <hardware/audio.h>
 #include <media/AudioParameter.h>
+#include <system/audio.h>
 
 namespace android {
 
@@ -32,6 +32,19 @@
 const char * const AudioParameter::keyFrameCount = AUDIO_PARAMETER_STREAM_FRAME_COUNT;
 const char * const AudioParameter::keyInputSource = AUDIO_PARAMETER_STREAM_INPUT_SOURCE;
 const char * const AudioParameter::keyScreenState = AUDIO_PARAMETER_KEY_SCREEN_STATE;
+const char * const AudioParameter::keyBtNrec = AUDIO_PARAMETER_KEY_BT_NREC;
+const char * const AudioParameter::keyHwAvSync = AUDIO_PARAMETER_HW_AV_SYNC;
+const char * const AudioParameter::keyMonoOutput = AUDIO_PARAMETER_MONO_OUTPUT;
+const char * const AudioParameter::keyStreamHwAvSync = AUDIO_PARAMETER_STREAM_HW_AV_SYNC;
+const char * const AudioParameter::keyStreamConnect = AUDIO_PARAMETER_DEVICE_CONNECT;
+const char * const AudioParameter::keyStreamDisconnect = AUDIO_PARAMETER_DEVICE_DISCONNECT;
+const char * const AudioParameter::keyStreamSupportedFormats = AUDIO_PARAMETER_STREAM_SUP_FORMATS;
+const char * const AudioParameter::keyStreamSupportedChannels = AUDIO_PARAMETER_STREAM_SUP_CHANNELS;
+const char * const AudioParameter::keyStreamSupportedSamplingRates =
+        AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES;
+const char * const AudioParameter::valueOn = AUDIO_PARAMETER_VALUE_ON;
+const char * const AudioParameter::valueOff = AUDIO_PARAMETER_VALUE_OFF;
+const char * const AudioParameter::valueListSeparator = AUDIO_PARAMETER_VALUE_LIST_SEPARATOR;
 
 AudioParameter::AudioParameter(const String8& keyValuePairs)
 {
@@ -70,15 +83,17 @@
     mParameters.clear();
 }
 
-String8 AudioParameter::toString()
+String8 AudioParameter::toStringImpl(bool useValues) const
 {
     String8 str = String8("");
 
     size_t size = mParameters.size();
     for (size_t i = 0; i < size; i++) {
         str += mParameters.keyAt(i);
-        str += "=";
-        str += mParameters.valueAt(i);
+        if (useValues) {
+            str += "=";
+            str += mParameters.valueAt(i);
+        }
         if (i < (size - 1)) str += ";";
     }
     return str;
@@ -95,6 +110,11 @@
     }
 }
 
+status_t AudioParameter::addKey(const String8& key)
+{
+    return add(key, String8());
+}
+
 status_t AudioParameter::addInt(const String8& key, const int value)
 {
     char str[12];
@@ -127,7 +147,7 @@
     }
 }
 
-status_t AudioParameter::get(const String8& key, String8& value)
+status_t AudioParameter::get(const String8& key, String8& value) const
 {
     if (mParameters.indexOfKey(key) >= 0) {
         value = mParameters.valueFor(key);
@@ -137,7 +157,7 @@
     }
 }
 
-status_t AudioParameter::getInt(const String8& key, int& value)
+status_t AudioParameter::getInt(const String8& key, int& value) const
 {
     String8 str8;
     status_t result = get(key, str8);
@@ -153,7 +173,7 @@
     return result;
 }
 
-status_t AudioParameter::getFloat(const String8& key, float& value)
+status_t AudioParameter::getFloat(const String8& key, float& value) const
 {
     String8 str8;
     status_t result = get(key, str8);
@@ -169,7 +189,17 @@
     return result;
 }
 
-status_t AudioParameter::getAt(size_t index, String8& key, String8& value)
+status_t AudioParameter::getAt(size_t index, String8& key) const
+{
+    if (mParameters.size() > index) {
+        key = mParameters.keyAt(index);
+        return NO_ERROR;
+    } else {
+        return BAD_VALUE;
+    }
+}
+
+status_t AudioParameter::getAt(size_t index, String8& key, String8& value) const
 {
     if (mParameters.size() > index) {
         key = mParameters.keyAt(index);
diff --git a/media/libmedia/BufferingSettings.cpp b/media/libmedia/BufferingSettings.cpp
new file mode 100644
index 0000000..a69497e
--- /dev/null
+++ b/media/libmedia/BufferingSettings.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "BufferingSettings"
+//#define LOG_NDEBUG 0
+
+#include <binder/Parcel.h>
+
+#include <media/BufferingSettings.h>
+
+namespace android {
+
+// static
+bool BufferingSettings::IsValidBufferingMode(int mode) {
+    return (mode >= BUFFERING_MODE_NONE && mode < BUFFERING_MODE_COUNT);
+}
+
+// static
+bool BufferingSettings::IsTimeBasedBufferingMode(int mode) {
+    return (mode == BUFFERING_MODE_TIME_ONLY || mode == BUFFERING_MODE_TIME_THEN_SIZE);
+}
+
+// static
+bool BufferingSettings::IsSizeBasedBufferingMode(int mode) {
+    return (mode == BUFFERING_MODE_SIZE_ONLY || mode == BUFFERING_MODE_TIME_THEN_SIZE);
+}
+
+BufferingSettings::BufferingSettings()
+        : mInitialBufferingMode(BUFFERING_MODE_NONE),
+          mRebufferingMode(BUFFERING_MODE_NONE),
+          mInitialWatermarkMs(kNoWatermark),
+          mInitialWatermarkKB(kNoWatermark),
+          mRebufferingWatermarkLowMs(kNoWatermark),
+          mRebufferingWatermarkHighMs(kNoWatermark),
+          mRebufferingWatermarkLowKB(kNoWatermark),
+          mRebufferingWatermarkHighKB(kNoWatermark) { }
+
+status_t BufferingSettings::readFromParcel(const Parcel* parcel) {
+    if (parcel == nullptr) {
+        return BAD_VALUE;
+    }
+    mInitialBufferingMode = (BufferingMode)parcel->readInt32();
+    mRebufferingMode = (BufferingMode)parcel->readInt32();
+    mInitialWatermarkMs = parcel->readInt32();
+    mInitialWatermarkKB = parcel->readInt32();
+    mRebufferingWatermarkLowMs = parcel->readInt32();
+    mRebufferingWatermarkHighMs = parcel->readInt32();
+    mRebufferingWatermarkLowKB = parcel->readInt32();
+    mRebufferingWatermarkHighKB = parcel->readInt32();
+
+    return OK;
+}
+
+status_t BufferingSettings::writeToParcel(Parcel* parcel) const {
+    if (parcel == nullptr) {
+        return BAD_VALUE;
+    }
+    parcel->writeInt32(mInitialBufferingMode);
+    parcel->writeInt32(mRebufferingMode);
+    parcel->writeInt32(mInitialWatermarkMs);
+    parcel->writeInt32(mInitialWatermarkKB);
+    parcel->writeInt32(mRebufferingWatermarkLowMs);
+    parcel->writeInt32(mRebufferingWatermarkHighMs);
+    parcel->writeInt32(mRebufferingWatermarkLowKB);
+    parcel->writeInt32(mRebufferingWatermarkHighKB);
+
+    return OK;
+}
+
+String8 BufferingSettings::toString() const {
+    String8 s;
+    s.appendFormat("initialMode(%d), rebufferingMode(%d), "
+            "initialMarks(%d ms, %d KB), rebufferingMarks(%d, %d)ms, (%d, %d)KB",
+            mInitialBufferingMode, mRebufferingMode,
+            mInitialWatermarkMs, mInitialWatermarkKB,
+            mRebufferingWatermarkLowMs, mRebufferingWatermarkHighMs,
+            mRebufferingWatermarkLowKB, mRebufferingWatermarkHighKB);
+    return s;
+}
+
+} // namespace android
diff --git a/media/libmedia/IMediaAnalyticsService.cpp b/media/libmedia/IMediaAnalyticsService.cpp
new file mode 100644
index 0000000..cc4aa35
--- /dev/null
+++ b/media/libmedia/IMediaAnalyticsService.cpp
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MediaAnalytics"
+
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+
+#include <binder/Parcel.h>
+#include <binder/IMemory.h>
+#include <binder/IPCThreadState.h>
+#include <media/IHDCP.h>
+#include <media/IMediaCodecList.h>
+#include <media/IMediaHTTPService.h>
+#include <media/IMediaPlayerService.h>
+#include <media/IMediaRecorder.h>
+#include <media/IOMX.h>
+#include <media/IRemoteDisplay.h>
+#include <media/IRemoteDisplayClient.h>
+#include <media/IStreamSource.h>
+
+#include <utils/Errors.h>  // for status_t
+#include <utils/List.h>
+#include <utils/Log.h>
+#include <utils/String8.h>
+
+#include <media/MediaAnalyticsItem.h>
+#include <media/IMediaAnalyticsService.h>
+
+#define DEBUGGING               0
+#define DEBUGGING_FLOW          0
+#define DEBUGGING_RETURNS       0
+
+namespace android {
+
+enum {
+    GENERATE_UNIQUE_SESSIONID = IBinder::FIRST_CALL_TRANSACTION,
+    SUBMIT_ITEM,
+    GET_ITEM_LIST,
+};
+
+class BpMediaAnalyticsService: public BpInterface<IMediaAnalyticsService>
+{
+public:
+    explicit BpMediaAnalyticsService(const sp<IBinder>& impl)
+        : BpInterface<IMediaAnalyticsService>(impl)
+    {
+    }
+
+    virtual MediaAnalyticsItem::SessionID_t generateUniqueSessionID() {
+        Parcel data, reply;
+        status_t err;
+        MediaAnalyticsItem::SessionID_t sessionid =
+                        MediaAnalyticsItem::SessionIDInvalid;
+
+        data.writeInterfaceToken(IMediaAnalyticsService::getInterfaceDescriptor());
+        err = remote()->transact(GENERATE_UNIQUE_SESSIONID, data, &reply);
+        if (err != NO_ERROR) {
+            ALOGW("bad response from service");
+            return MediaAnalyticsItem::SessionIDInvalid;
+        }
+        sessionid = reply.readInt64();
+        if (DEBUGGING_RETURNS) {
+            ALOGD("the caller gets a sessionid of %" PRId64 " back", sessionid);
+        }
+        return sessionid;
+    }
+
+    virtual MediaAnalyticsItem::SessionID_t submit(MediaAnalyticsItem *item, bool forcenew)
+    {
+        // have this record submit itself
+        // this will be a binder call with appropriate timing
+        // return value is the uuid that the system generated for it.
+        // the return value 0 and -1 are reserved.
+        // -1 to indicate that there was a problem recording...
+
+        Parcel data, reply;
+        status_t err;
+
+        if (item == NULL) {
+            return MediaAnalyticsItem::SessionIDInvalid;
+        }
+
+        data.writeInterfaceToken(IMediaAnalyticsService::getInterfaceDescriptor());
+        if(DEBUGGING_FLOW) {
+            ALOGD("client offers record: %s", item->toString().c_str());
+        }
+        data.writeBool(forcenew);
+        item->writeToParcel(&data);
+
+        err = remote()->transact(SUBMIT_ITEM, data, &reply);
+        if (err != NO_ERROR) {
+            return MediaAnalyticsItem::SessionIDInvalid;
+        }
+
+        // get an answer out of 'reply'
+        int64_t sessionid = reply.readInt64();
+        if (DEBUGGING_RETURNS) {
+            ALOGD("the caller gets sessionid=%" PRId64 "", sessionid);
+        }
+        return sessionid;
+    }
+
+    virtual List<MediaAnalyticsItem*> *getMediaAnalyticsItemList(bool finished, nsecs_t ts)
+    {
+        return getMediaAnalyticsItemList(finished, ts, MediaAnalyticsItem::kKeyAny);
+    }
+
+    virtual List<MediaAnalyticsItem*> *getMediaAnalyticsItemList(bool finished, nsecs_t ts, MediaAnalyticsItem::Key key)
+    {
+        Parcel data, reply;
+        status_t err;
+
+        data.writeInterfaceToken(IMediaAnalyticsService::getInterfaceDescriptor());
+        data.writeInt32(finished);
+        data.writeInt64(ts);
+        const char *str = key.c_str();
+        if (key.empty()) {
+            str = MediaAnalyticsItem::kKeyNone.c_str();
+        }
+        data.writeCString(str);
+        err = remote()->transact(GET_ITEM_LIST, data, &reply);
+        if (err != NO_ERROR) {
+            return NULL;
+        }
+
+        // read a count
+        int32_t count = reply.readInt32();
+        List<MediaAnalyticsItem*> *list = NULL;
+
+        if (count > 0) {
+            list = new List<MediaAnalyticsItem*>();
+            for (int i=0;i<count;i++) {
+                MediaAnalyticsItem *item = new MediaAnalyticsItem();
+                // XXX: watch for failures here
+                item->readFromParcel(reply);
+                list->push_back(item);
+            }
+        }
+
+        return list;
+    }
+};
+
+IMPLEMENT_META_INTERFACE(MediaAnalyticsService, "android.media.IMediaAnalyticsService");
+
+// ----------------------------------------------------------------------
+
+status_t BnMediaAnalyticsService::onTransact(
+    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+
+
+    // get calling pid/tid
+    IPCThreadState *ipc = IPCThreadState::self();
+    int clientPid = ipc->getCallingPid();
+    // permission checking
+
+    if(DEBUGGING_FLOW) {
+        ALOGD("running in service, code %d, pid %d; called from pid %d",
+            code, getpid(), clientPid);
+    }
+
+    switch (code) {
+
+        case GENERATE_UNIQUE_SESSIONID: {
+            CHECK_INTERFACE(IMediaAnalyticsService, data, reply);
+
+            MediaAnalyticsItem::SessionID_t sessionid = generateUniqueSessionID();
+            reply->writeInt64(sessionid);
+
+            return NO_ERROR;
+        } break;
+
+        case SUBMIT_ITEM: {
+            CHECK_INTERFACE(IMediaAnalyticsService, data, reply);
+
+            bool forcenew;
+            MediaAnalyticsItem *item = new MediaAnalyticsItem;
+
+            data.readBool(&forcenew);
+            item->readFromParcel(data);
+
+            item->setPid(clientPid);
+
+            // submit() takes over ownership of 'item'
+            MediaAnalyticsItem::SessionID_t sessionid = submit(item, forcenew);
+            reply->writeInt64(sessionid);
+
+            return NO_ERROR;
+        } break;
+
+        case GET_ITEM_LIST: {
+            CHECK_INTERFACE(IMediaAnalyticsService, data, reply);
+            // get the parameters
+            bool finished = data.readInt32();
+            nsecs_t ts = data.readInt64();
+            MediaAnalyticsItem::Key key = data.readCString();
+
+            // find the (0 or more) items
+            List<MediaAnalyticsItem*> *list = getMediaAnalyticsItemList(finished, ts, key);
+            // encapsulate/serialize them; the list may be NULL when nothing matched
+            if (list == NULL) {
+                reply->writeInt32(0);
+                return NO_ERROR;
+            }
+            reply->writeInt32(list->size());
+            for (List<MediaAnalyticsItem*>::iterator it = list->begin();
+                 it != list->end(); it++) {
+                (*it)->writeToParcel(reply);
+            }
+
+            // avoid leakiness; organized discarding of list and its contents
+            list->clear();
+            delete list;
+
+            return NO_ERROR;
+        } break;
+
+        default:
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+// ----------------------------------------------------------------------------
+
+} // namespace android
diff --git a/media/libmedia/IMediaCodecService.cpp b/media/libmedia/IMediaCodecService.cpp
index dcf2b27..2d62419 100644
--- a/media/libmedia/IMediaCodecService.cpp
+++ b/media/libmedia/IMediaCodecService.cpp
@@ -33,7 +33,7 @@
 class BpMediaCodecService : public BpInterface<IMediaCodecService>
 {
 public:
-    BpMediaCodecService(const sp<IBinder>& impl)
+    explicit BpMediaCodecService(const sp<IBinder>& impl)
         : BpInterface<IMediaCodecService>(impl)
     {
     }
diff --git a/media/libmedia/IMediaExtractor.cpp b/media/libmedia/IMediaExtractor.cpp
index 4be1118..0f4f092 100644
--- a/media/libmedia/IMediaExtractor.cpp
+++ b/media/libmedia/IMediaExtractor.cpp
@@ -34,8 +34,6 @@
     GETTRACKMETADATA,
     GETMETADATA,
     FLAGS,
-    SETDRMFLAG,
-    GETDRMFLAG,
     GETDRMTRACKINFO,
     SETUID,
     NAME
@@ -43,7 +41,7 @@
 
 class BpMediaExtractor : public BpInterface<IMediaExtractor> {
 public:
-    BpMediaExtractor(const sp<IBinder>& impl)
+    explicit BpMediaExtractor(const sp<IBinder>& impl)
         : BpInterface<IMediaExtractor>(impl)
     {
     }
@@ -101,13 +99,6 @@
         return 0;
     }
 
-    virtual void setDrmFlag(bool flag __unused) {
-        ALOGV("setDrmFlag NOT IMPLEMENTED");
-    }
-    virtual bool getDrmFlag() {
-        ALOGV("getDrmFlag NOT IMPLEMENTED");
-       return false;
-    }
     virtual char* getDrmTrackInfo(size_t trackID __unused, int *len __unused) {
         ALOGV("getDrmTrackInfo NOT IMPLEMENTED");
         return NULL;
diff --git a/media/libmedia/IMediaExtractorService.cpp b/media/libmedia/IMediaExtractorService.cpp
index d170c22..8b00d85 100644
--- a/media/libmedia/IMediaExtractorService.cpp
+++ b/media/libmedia/IMediaExtractorService.cpp
@@ -33,7 +33,7 @@
 class BpMediaExtractorService : public BpInterface<IMediaExtractorService>
 {
 public:
-    BpMediaExtractorService(const sp<IBinder>& impl)
+    explicit BpMediaExtractorService(const sp<IBinder>& impl)
         : BpInterface<IMediaExtractorService>(impl)
     {
     }
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index f8345e4..9ffde4e 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -23,6 +23,7 @@
 
 #include <media/AudioResamplerPublic.h>
 #include <media/AVSyncSettings.h>
+#include <media/BufferingSettings.h>
 
 #include <media/IDataSource.h>
 #include <media/IMediaHTTPService.h>
@@ -40,6 +41,8 @@
     SET_DATA_SOURCE_FD,
     SET_DATA_SOURCE_STREAM,
     SET_DATA_SOURCE_CALLBACK,
+    SET_BUFFERING_SETTINGS,
+    GET_DEFAULT_BUFFERING_SETTINGS,
     PREPARE_ASYNC,
     START,
     STOP,
@@ -148,6 +151,30 @@
         return reply.readInt32();
     }
 
+    status_t setBufferingSettings(const BufferingSettings& buffering)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+        buffering.writeToParcel(&data);
+        remote()->transact(SET_BUFFERING_SETTINGS, data, &reply);
+        return reply.readInt32();
+    }
+
+    status_t getDefaultBufferingSettings(BufferingSettings* buffering /* nonnull */)
+    {
+        if (buffering == nullptr) {
+            return BAD_VALUE;
+        }
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+        remote()->transact(GET_DEFAULT_BUFFERING_SETTINGS, data, &reply);
+        status_t err = reply.readInt32();
+        if (err == OK) {
+            err = buffering->readFromParcel(&reply);
+        }
+        return err;
+    }
+
     status_t prepareAsync()
     {
         Parcel data, reply;
@@ -246,11 +273,12 @@
         return reply.readInt32();
     }
 
-    status_t seekTo(int msec)
+    status_t seekTo(int msec, MediaPlayerSeekMode mode)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
         data.writeInt32(msec);
+        data.writeInt32(mode);
         remote()->transact(SEEK_TO, data, &reply);
         return reply.readInt32();
     }
@@ -496,6 +524,23 @@
             reply->writeInt32(setVideoSurfaceTexture(bufferProducer));
             return NO_ERROR;
         } break;
+        case SET_BUFFERING_SETTINGS: {
+            CHECK_INTERFACE(IMediaPlayer, data, reply);
+            BufferingSettings buffering;
+            buffering.readFromParcel(&data);
+            reply->writeInt32(setBufferingSettings(buffering));
+            return NO_ERROR;
+        } break;
+        case GET_DEFAULT_BUFFERING_SETTINGS: {
+            CHECK_INTERFACE(IMediaPlayer, data, reply);
+            BufferingSettings buffering;
+            status_t err = getDefaultBufferingSettings(&buffering);
+            reply->writeInt32(err);
+            if (err == OK) {
+                buffering.writeToParcel(reply);
+            }
+            return NO_ERROR;
+        } break;
         case PREPARE_ASYNC: {
             CHECK_INTERFACE(IMediaPlayer, data, reply);
             reply->writeInt32(prepareAsync());
@@ -573,7 +618,9 @@
         } break;
         case SEEK_TO: {
             CHECK_INTERFACE(IMediaPlayer, data, reply);
-            reply->writeInt32(seekTo(data.readInt32()));
+            int msec = data.readInt32();
+            MediaPlayerSeekMode mode = (MediaPlayerSeekMode)data.readInt32();
+            reply->writeInt32(seekTo(msec, mode));
             return NO_ERROR;
         } break;
         case GET_CURRENT_POSITION: {
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index a6860e2..5599830 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -29,6 +29,7 @@
 #include <media/IMediaRecorder.h>
 #include <gui/Surface.h>
 #include <gui/IGraphicBufferProducer.h>
+#include <media/stagefright/PersistentSurface.h>
 
 namespace android {
 
@@ -79,12 +80,12 @@
         return reply.readInt32();
     }
 
-    status_t setInputSurface(const sp<IGraphicBufferConsumer>& surface)
+    status_t setInputSurface(const sp<PersistentSurface>& surface)
     {
         ALOGV("setInputSurface(%p)", surface.get());
         Parcel data, reply;
         data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
-        data.writeStrongBinder(IInterface::asBinder(surface));
+        surface->writeToParcel(&data);
         remote()->transact(SET_INPUT_SURFACE, data, &reply);
         return reply.readInt32();
     }
@@ -490,8 +491,8 @@
         case SET_INPUT_SURFACE: {
             ALOGV("SET_INPUT_SURFACE");
             CHECK_INTERFACE(IMediaRecorder, data, reply);
-            sp<IGraphicBufferConsumer> surface = interface_cast<IGraphicBufferConsumer>(
-                    data.readStrongBinder());
+            sp<PersistentSurface> surface = new PersistentSurface();
+            surface->readFromParcel(&data);
             reply->writeInt32(setInputSurface(surface));
             return NO_ERROR;
         } break;
diff --git a/media/libmedia/IMediaSource.cpp b/media/libmedia/IMediaSource.cpp
index 595bad9..fdbc869 100644
--- a/media/libmedia/IMediaSource.cpp
+++ b/media/libmedia/IMediaSource.cpp
@@ -67,7 +67,7 @@
 
 class BpMediaSource : public BpInterface<IMediaSource> {
 public:
-    BpMediaSource(const sp<IBinder>& impl)
+    explicit BpMediaSource(const sp<IBinder>& impl)
         : BpInterface<IMediaSource>(impl), mBuffersSinceStop(0)
     {
     }
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index beca464..3d466b1 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -26,42 +26,38 @@
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/openmax/OMX_IndexExt.h>
 #include <utils/NativeHandle.h>
+#include <media/OMXBuffer.h>
+
+#include <android/IGraphicBufferSource.h>
+#include <android/IOMXBufferSource.h>
 
 namespace android {
 
 enum {
     CONNECT = IBinder::FIRST_CALL_TRANSACTION,
-    LIVES_LOCALLY,
     LIST_NODES,
     ALLOCATE_NODE,
+    CREATE_INPUT_SURFACE,
     FREE_NODE,
     SEND_COMMAND,
     GET_PARAMETER,
     SET_PARAMETER,
     GET_CONFIG,
     SET_CONFIG,
-    GET_STATE,
-    ENABLE_NATIVE_BUFFERS,
-    USE_BUFFER,
-    USE_GRAPHIC_BUFFER,
-    CREATE_INPUT_SURFACE,
-    CREATE_PERSISTENT_INPUT_SURFACE,
+    SET_PORT_MODE,
     SET_INPUT_SURFACE,
-    SIGNAL_END_OF_INPUT_STREAM,
-    STORE_META_DATA_IN_BUFFERS,
     PREPARE_FOR_ADAPTIVE_PLAYBACK,
     ALLOC_SECURE_BUFFER,
-    ALLOC_BUFFER_WITH_BACKUP,
+    USE_BUFFER,
     FREE_BUFFER,
     FILL_BUFFER,
     EMPTY_BUFFER,
     GET_EXTENSION_INDEX,
     OBSERVER_ON_MSG,
     GET_GRAPHIC_BUFFER_USAGE,
-    SET_INTERNAL_OPTION,
-    UPDATE_GRAPHIC_BUFFER_IN_META,
     CONFIGURE_VIDEO_TUNNEL_MODE,
-    UPDATE_NATIVE_HANDLE_IN_META,
+    DISPATCH_MESSAGE,
+    SET_QUIRKS,
 };
 
 class BpOMX : public BpInterface<IOMX> {
@@ -70,16 +66,6 @@
         : BpInterface<IOMX>(impl) {
     }
 
-    virtual bool livesLocally(node_id node, pid_t pid) {
-        Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
-        data.writeInt32(pid);
-        remote()->transact(LIVES_LOCALLY, data, &reply);
-
-        return reply.readInt32() != 0;
-    }
-
     virtual status_t listNodes(List<ComponentInfo> *list) {
         list->clear();
 
@@ -104,8 +90,7 @@
 
     virtual status_t allocateNode(
             const char *name, const sp<IOMXObserver> &observer,
-            sp<IBinder> *nodeBinder,
-            node_id *node) {
+            sp<IOMXNode> *omxNode) {
         Parcel data, reply;
         data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
         data.writeCString(name);
@@ -114,31 +99,58 @@
 
         status_t err = reply.readInt32();
         if (err == OK) {
-            *node = (node_id)reply.readInt32();
-            if (nodeBinder != NULL) {
-                *nodeBinder = remote();
-            }
+            *omxNode = IOMXNode::asInterface(reply.readStrongBinder());
         } else {
-            *node = 0;
+            omxNode->clear();
         }
 
         return err;
     }
 
-    virtual status_t freeNode(node_id node) {
+    virtual status_t createInputSurface(
+            sp<IGraphicBufferProducer> *bufferProducer,
+            sp<IGraphicBufferSource> *bufferSource) {
         Parcel data, reply;
+        status_t err;
         data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
+        err = remote()->transact(CREATE_INPUT_SURFACE, data, &reply);
+        if (err != OK) {
+            ALOGW("binder transaction failed: %d", err);
+            return err;
+        }
+
+        err = reply.readInt32();
+        if (err != OK) {
+            return err;
+        }
+
+        *bufferProducer = IGraphicBufferProducer::asInterface(
+                reply.readStrongBinder());
+        *bufferSource = IGraphicBufferSource::asInterface(
+                reply.readStrongBinder());
+
+        return err;
+    }
+};
+
+class BpOMXNode : public BpInterface<IOMXNode> {
+public:
+    explicit BpOMXNode(const sp<IBinder> &impl)
+        : BpInterface<IOMXNode>(impl) {
+    }
+
+    virtual status_t freeNode() {
+        Parcel data, reply;
+        data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
         remote()->transact(FREE_NODE, data, &reply);
 
         return reply.readInt32();
     }
 
     virtual status_t sendCommand(
-            node_id node, OMX_COMMANDTYPE cmd, OMX_S32 param) {
+            OMX_COMMANDTYPE cmd, OMX_S32 param) {
         Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
+        data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
         data.writeInt32(cmd);
         data.writeInt32(param);
         remote()->transact(SEND_COMMAND, data, &reply);
@@ -147,11 +159,10 @@
     }
 
     virtual status_t getParameter(
-            node_id node, OMX_INDEXTYPE index,
+            OMX_INDEXTYPE index,
             void *params, size_t size) {
         Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
+        data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
         data.writeInt32(index);
         data.writeInt64(size);
         data.write(params, size);
@@ -168,11 +179,10 @@
     }
 
     virtual status_t setParameter(
-            node_id node, OMX_INDEXTYPE index,
+            OMX_INDEXTYPE index,
             const void *params, size_t size) {
         Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
+        data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
         data.writeInt32(index);
         data.writeInt64(size);
         data.write(params, size);
@@ -182,11 +192,10 @@
     }
 
     virtual status_t getConfig(
-            node_id node, OMX_INDEXTYPE index,
+            OMX_INDEXTYPE index,
             void *params, size_t size) {
         Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
+        data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
         data.writeInt32(index);
         data.writeInt64(size);
         data.write(params, size);
@@ -203,11 +212,10 @@
     }
 
     virtual status_t setConfig(
-            node_id node, OMX_INDEXTYPE index,
+            OMX_INDEXTYPE index,
             const void *params, size_t size) {
         Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
+        data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
         data.writeInt32(index);
         data.writeInt64(size);
         data.write(params, size);
@@ -216,36 +224,21 @@
         return reply.readInt32();
     }
 
-    virtual status_t getState(
-            node_id node, OMX_STATETYPE* state) {
+    virtual status_t setPortMode(
+            OMX_U32 port_index, IOMX::PortMode mode) {
         Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
-        remote()->transact(GET_STATE, data, &reply);
+        data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
+        data.writeInt32(port_index);
+        data.writeInt32(mode);
+        remote()->transact(SET_PORT_MODE, data, &reply);
 
-        *state = static_cast<OMX_STATETYPE>(reply.readInt32());
         return reply.readInt32();
     }
 
-    virtual status_t enableNativeBuffers(
-            node_id node, OMX_U32 port_index, OMX_BOOL graphic, OMX_BOOL enable) {
-        Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
-        data.writeInt32(port_index);
-        data.writeInt32((uint32_t)graphic);
-        data.writeInt32((uint32_t)enable);
-        remote()->transact(ENABLE_NATIVE_BUFFERS, data, &reply);
-
-        status_t err = reply.readInt32();
-        return err;
-    }
-
     virtual status_t getGraphicBufferUsage(
-            node_id node, OMX_U32 port_index, OMX_U32* usage) {
+            OMX_U32 port_index, OMX_U32* usage) {
         Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
+        data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
         data.writeInt32(port_index);
         remote()->transact(GET_GRAPHIC_BUFFER_USAGE, data, &reply);
 
@@ -255,17 +248,19 @@
     }
 
     virtual status_t useBuffer(
-            node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-            buffer_id *buffer, OMX_U32 allottedSize) {
+            OMX_U32 port_index, const OMXBuffer &omxBuf, buffer_id *buffer) {
         Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
+        data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
         data.writeInt32(port_index);
-        data.writeStrongBinder(IInterface::asBinder(params));
-        data.writeInt32(allottedSize);
+
+        status_t err = omxBuf.writeToParcel(&data);
+        if (err != OK) {
+            return err;
+        }
+
         remote()->transact(USE_BUFFER, data, &reply);
 
-        status_t err = reply.readInt32();
+        err = reply.readInt32();
         if (err != OK) {
             *buffer = 0;
 
@@ -277,185 +272,30 @@
         return err;
     }
 
-
-    virtual status_t useGraphicBuffer(
-            node_id node, OMX_U32 port_index,
-            const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer) {
-        Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
-        data.writeInt32(port_index);
-        data.write(*graphicBuffer);
-        remote()->transact(USE_GRAPHIC_BUFFER, data, &reply);
-
-        status_t err = reply.readInt32();
-        if (err != OK) {
-            *buffer = 0;
-
-            return err;
-        }
-
-        *buffer = (buffer_id)reply.readInt32();
-
-        return err;
-    }
-
-    virtual status_t updateGraphicBufferInMeta(
-            node_id node, OMX_U32 port_index,
-            const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer) {
-        Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
-        data.writeInt32(port_index);
-        data.write(*graphicBuffer);
-        data.writeInt32((int32_t)buffer);
-        remote()->transact(UPDATE_GRAPHIC_BUFFER_IN_META, data, &reply);
-
-        status_t err = reply.readInt32();
-        return err;
-    }
-
-    virtual status_t updateNativeHandleInMeta(
-            node_id node, OMX_U32 port_index,
-            const sp<NativeHandle> &nativeHandle, buffer_id buffer) {
-        Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
-        data.writeInt32(port_index);
-        data.writeInt32(nativeHandle != NULL);
-        if (nativeHandle != NULL) {
-            data.writeNativeHandle(nativeHandle->handle());
-        }
-        data.writeInt32((int32_t)buffer);
-        remote()->transact(UPDATE_NATIVE_HANDLE_IN_META, data, &reply);
-
-        status_t err = reply.readInt32();
-        return err;
-    }
-
-    virtual status_t createInputSurface(
-            node_id node, OMX_U32 port_index, android_dataspace dataSpace,
-            sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
-        Parcel data, reply;
-        status_t err;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
-        data.writeInt32(port_index);
-        data.writeInt32(dataSpace);
-        err = remote()->transact(CREATE_INPUT_SURFACE, data, &reply);
-        if (err != OK) {
-            ALOGW("binder transaction failed: %d", err);
-            return err;
-        }
-
-        // read type even if createInputSurface failed
-        int negotiatedType = reply.readInt32();
-        if (type != NULL) {
-            *type = (MetadataBufferType)negotiatedType;
-        }
-
-        err = reply.readInt32();
-        if (err != OK) {
-            return err;
-        }
-
-        *bufferProducer = IGraphicBufferProducer::asInterface(
-                reply.readStrongBinder());
-
-        return err;
-    }
-
-    virtual status_t createPersistentInputSurface(
-            sp<IGraphicBufferProducer> *bufferProducer,
-            sp<IGraphicBufferConsumer> *bufferConsumer) {
-        Parcel data, reply;
-        status_t err;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        err = remote()->transact(CREATE_PERSISTENT_INPUT_SURFACE, data, &reply);
-        if (err != OK) {
-            ALOGW("binder transaction failed: %d", err);
-            return err;
-        }
-
-        err = reply.readInt32();
-        if (err != OK) {
-            return err;
-        }
-
-        *bufferProducer = IGraphicBufferProducer::asInterface(
-                reply.readStrongBinder());
-        *bufferConsumer = IGraphicBufferConsumer::asInterface(
-                reply.readStrongBinder());
-
-        return err;
-    }
-
     virtual status_t setInputSurface(
-            node_id node, OMX_U32 port_index,
-            const sp<IGraphicBufferConsumer> &bufferConsumer, MetadataBufferType *type) {
+            const sp<IOMXBufferSource> &bufferSource) {
         Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        status_t err;
-        data.writeInt32((int32_t)node);
-        data.writeInt32(port_index);
-        data.writeStrongBinder(IInterface::asBinder(bufferConsumer));
+        data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
 
-        err = remote()->transact(SET_INPUT_SURFACE, data, &reply);
+        data.writeStrongBinder(IInterface::asBinder(bufferSource));
+
+        status_t err = remote()->transact(SET_INPUT_SURFACE, data, &reply);
 
         if (err != OK) {
             ALOGW("binder transaction failed: %d", err);
             return err;
         }
 
-        // read type even if setInputSurface failed
-        int negotiatedType = reply.readInt32();
-        if (type != NULL) {
-            *type = (MetadataBufferType)negotiatedType;
-        }
+        err = reply.readInt32();
 
-        return reply.readInt32();
-    }
-
-    virtual status_t signalEndOfInputStream(node_id node) {
-        Parcel data, reply;
-        status_t err;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
-        err = remote()->transact(SIGNAL_END_OF_INPUT_STREAM, data, &reply);
-        if (err != OK) {
-            ALOGW("binder transaction failed: %d", err);
-            return err;
-        }
-
-        return reply.readInt32();
-    }
-
-    virtual status_t storeMetaDataInBuffers(
-            node_id node, OMX_U32 port_index, OMX_BOOL enable, MetadataBufferType *type) {
-        Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
-        data.writeInt32(port_index);
-        data.writeInt32((int32_t)enable);
-        data.writeInt32(type == NULL ? kMetadataBufferTypeANWBuffer : *type);
-
-        remote()->transact(STORE_META_DATA_IN_BUFFERS, data, &reply);
-
-        // read type even storeMetaDataInBuffers failed
-        int negotiatedType = reply.readInt32();
-        if (type != NULL) {
-            *type = (MetadataBufferType)negotiatedType;
-        }
-
-        return reply.readInt32();
+        return err;
     }
 
     virtual status_t prepareForAdaptivePlayback(
-            node_id node, OMX_U32 port_index, OMX_BOOL enable,
+            OMX_U32 port_index, OMX_BOOL enable,
             OMX_U32 max_width, OMX_U32 max_height) {
         Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
+        data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
         data.writeInt32(port_index);
         data.writeInt32((int32_t)enable);
         data.writeInt32(max_width);
@@ -467,11 +307,10 @@
     }
 
     virtual status_t configureVideoTunnelMode(
-            node_id node, OMX_U32 portIndex, OMX_BOOL tunneled,
+            OMX_U32 portIndex, OMX_BOOL tunneled,
             OMX_U32 audioHwSync, native_handle_t **sidebandHandle ) {
         Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
+        data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
         data.writeInt32(portIndex);
         data.writeInt32((int32_t)tunneled);
         data.writeInt32(audioHwSync);
@@ -486,11 +325,10 @@
 
 
     virtual status_t allocateSecureBuffer(
-            node_id node, OMX_U32 port_index, size_t size,
+            OMX_U32 port_index, size_t size,
             buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle) {
         Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
+        data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
         data.writeInt32(port_index);
         data.writeInt64(size);
         remote()->transact(ALLOC_SECURE_BUFFER, data, &reply);
@@ -514,34 +352,10 @@
         return err;
     }
 
-    virtual status_t allocateBufferWithBackup(
-            node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-            buffer_id *buffer, OMX_U32 allottedSize) {
-        Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
-        data.writeInt32(port_index);
-        data.writeStrongBinder(IInterface::asBinder(params));
-        data.writeInt32(allottedSize);
-        remote()->transact(ALLOC_BUFFER_WITH_BACKUP, data, &reply);
-
-        status_t err = reply.readInt32();
-        if (err != OK) {
-            *buffer = 0;
-
-            return err;
-        }
-
-        *buffer = (buffer_id)reply.readInt32();
-
-        return err;
-    }
-
     virtual status_t freeBuffer(
-            node_id node, OMX_U32 port_index, buffer_id buffer) {
+            OMX_U32 port_index, buffer_id buffer) {
         Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
+        data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
         data.writeInt32(port_index);
         data.writeInt32((int32_t)buffer);
         remote()->transact(FREE_BUFFER, data, &reply);
@@ -549,11 +363,15 @@
         return reply.readInt32();
     }
 
-    virtual status_t fillBuffer(node_id node, buffer_id buffer, int fenceFd) {
+    virtual status_t fillBuffer(
+            buffer_id buffer, const OMXBuffer &omxBuf, int fenceFd) {
         Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
+        data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
         data.writeInt32((int32_t)buffer);
+        status_t err = omxBuf.writeToParcel(&data);
+        if (err != OK) {
+            return err;
+        }
         data.writeInt32(fenceFd >= 0);
         if (fenceFd >= 0) {
             data.writeFileDescriptor(fenceFd, true /* takeOwnership */);
@@ -564,16 +382,15 @@
     }
 
     virtual status_t emptyBuffer(
-            node_id node,
-            buffer_id buffer,
-            OMX_U32 range_offset, OMX_U32 range_length,
+            buffer_id buffer, const OMXBuffer &omxBuf,
             OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
         Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
+        data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
         data.writeInt32((int32_t)buffer);
-        data.writeInt32(range_offset);
-        data.writeInt32(range_length);
+        status_t err = omxBuf.writeToParcel(&data);
+        if (err != OK) {
+            return err;
+        }
         data.writeInt32(flags);
         data.writeInt64(timestamp);
         data.writeInt32(fenceFd >= 0);
@@ -586,12 +403,10 @@
     }
 
     virtual status_t getExtensionIndex(
-            node_id node,
             const char *parameter_name,
             OMX_INDEXTYPE *index) {
         Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
+        data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
         data.writeCString(parameter_name);
 
         remote()->transact(GET_EXTENSION_INDEX, data, &reply);
@@ -606,26 +421,34 @@
         return err;
     }
 
-    virtual status_t setInternalOption(
-            node_id node,
-            OMX_U32 port_index,
-            InternalOptionType type,
-            const void *optionData,
-            size_t size) {
+    virtual status_t dispatchMessage(const omx_message &msg) {
         Parcel data, reply;
-        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
-        data.writeInt32((int32_t)node);
-        data.writeInt32(port_index);
-        data.writeInt64(size);
-        data.write(optionData, size);
-        data.writeInt32(type);
-        remote()->transact(SET_INTERNAL_OPTION, data, &reply);
+        data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
+        data.writeInt32(msg.fenceFd >= 0);
+        if (msg.fenceFd >= 0) {
+            data.writeFileDescriptor(msg.fenceFd, true /* takeOwnership */);
+        }
+        data.writeInt32(msg.type);
+        data.write(&msg.u, sizeof(msg.u));
+
+        remote()->transact(DISPATCH_MESSAGE, data, &reply);
+
+        return reply.readInt32();
+    }
+
+    virtual status_t setQuirks(OMX_U32 quirks) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
+        data.writeInt32(quirks);
+
+        remote()->transact(SET_QUIRKS, data, &reply);
 
         return reply.readInt32();
     }
 };
 
 IMPLEMENT_META_INTERFACE(OMX, "android.hardware.IOMX");
+IMPLEMENT_META_INTERFACE(OMXNode, "android.hardware.IOMXNode");
 
 ////////////////////////////////////////////////////////////////////////////////
 
@@ -638,16 +461,6 @@
 status_t BnOMX::onTransact(
     uint32_t code, const Parcel &data, Parcel *reply, uint32_t flags) {
     switch (code) {
-        case LIVES_LOCALLY:
-        {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
-            node_id node = (node_id)data.readInt32();
-            pid_t pid = (pid_t)data.readInt32();
-            reply->writeInt32(livesLocally(node, pid));
-
-            return OK;
-        }
-
         case LIST_NODES:
         {
             CHECK_OMX_INTERFACE(IOMX, data, reply);
@@ -686,40 +499,62 @@
                 return NO_ERROR;
             }
 
-            node_id node;
+            sp<IOMXNode> omxNode;
 
-            status_t err = allocateNode(name, observer,
-                    NULL /* nodeBinder */, &node);
+            status_t err = allocateNode(name, observer, &omxNode);
+
             reply->writeInt32(err);
             if (err == OK) {
-                reply->writeInt32((int32_t)node);
+                reply->writeStrongBinder(IInterface::asBinder(omxNode));
             }
 
             return NO_ERROR;
         }
 
-        case FREE_NODE:
+        case CREATE_INPUT_SURFACE:
         {
             CHECK_OMX_INTERFACE(IOMX, data, reply);
 
-            node_id node = (node_id)data.readInt32();
+            sp<IGraphicBufferProducer> bufferProducer;
+            sp<IGraphicBufferSource> bufferSource;
+            status_t err = createInputSurface(&bufferProducer, &bufferSource);
 
-            reply->writeInt32(freeNode(node));
+            reply->writeInt32(err);
+
+            if (err == OK) {
+                reply->writeStrongBinder(IInterface::asBinder(bufferProducer));
+                reply->writeStrongBinder(IInterface::asBinder(bufferSource));
+            }
+
+            return NO_ERROR;
+        }
+
+        default:
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+status_t BnOMXNode::onTransact(
+    uint32_t code, const Parcel &data, Parcel *reply, uint32_t flags) {
+    switch (code) {
+        case FREE_NODE:
+        {
+            CHECK_OMX_INTERFACE(IOMXNode, data, reply);
+
+            reply->writeInt32(freeNode());
 
             return NO_ERROR;
         }
 
         case SEND_COMMAND:
         {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
-
-            node_id node = (node_id)data.readInt32();
+            CHECK_OMX_INTERFACE(IOMXNode, data, reply);
 
             OMX_COMMANDTYPE cmd =
                 static_cast<OMX_COMMANDTYPE>(data.readInt32());
 
             OMX_S32 param = data.readInt32();
-            reply->writeInt32(sendCommand(node, cmd, param));
+            reply->writeInt32(sendCommand(cmd, param));
 
             return NO_ERROR;
         }
@@ -728,11 +563,9 @@
         case SET_PARAMETER:
         case GET_CONFIG:
         case SET_CONFIG:
-        case SET_INTERNAL_OPTION:
         {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
+            CHECK_OMX_INTERFACE(IOMXNode, data, reply);
 
-            node_id node = (node_id)data.readInt32();
             OMX_INDEXTYPE index = static_cast<OMX_INDEXTYPE>(data.readInt32());
 
             size_t size = data.readInt64();
@@ -742,8 +575,7 @@
             size_t pageSize = 0;
             size_t allocSize = 0;
             bool isUsageBits = (index == (OMX_INDEXTYPE) OMX_IndexParamConsumerUsageBits);
-            if ((isUsageBits && size < 4) ||
-                    (!isUsageBits && code != SET_INTERNAL_OPTION && size < 8)) {
+            if ((isUsageBits && size < 4) || (!isUsageBits && size < 8)) {
                 // we expect the structure to contain at least the size and
                 // version, 8 bytes total
                 ALOGE("b/27207275 (%zu) (%d/%d)", size, int(index), int(code));
@@ -765,8 +597,7 @@
                     } else {
                         err = NOT_ENOUGH_DATA;
                         OMX_U32 declaredSize = *(OMX_U32*)params;
-                        if (code != SET_INTERNAL_OPTION &&
-                                index != (OMX_INDEXTYPE) OMX_IndexParamConsumerUsageBits &&
+                        if (index != (OMX_INDEXTYPE) OMX_IndexParamConsumerUsageBits &&
                                 declaredSize > size) {
                             // the buffer says it's bigger than it actually is
                             ALOGE("b/27207275 (%u/%zu)", declaredSize, size);
@@ -781,26 +612,17 @@
                             } else {
                                 switch (code) {
                                     case GET_PARAMETER:
-                                        err = getParameter(node, index, params, size);
+                                        err = getParameter(index, params, size);
                                         break;
                                     case SET_PARAMETER:
-                                        err = setParameter(node, index, params, size);
+                                        err = setParameter(index, params, size);
                                         break;
                                     case GET_CONFIG:
-                                        err = getConfig(node, index, params, size);
+                                        err = getConfig(index, params, size);
                                         break;
                                     case SET_CONFIG:
-                                        err = setConfig(node, index, params, size);
+                                        err = setConfig(index, params, size);
                                         break;
-                                    case SET_INTERNAL_OPTION:
-                                    {
-                                        InternalOptionType type =
-                                            (InternalOptionType)data.readInt32();
-
-                                        err = setInternalOption(node, index, type, params, size);
-                                        break;
-                                    }
-
                                     default:
                                         TRESPASS();
                                 }
@@ -826,44 +648,24 @@
             return NO_ERROR;
         }
 
-        case GET_STATE:
+        case SET_PORT_MODE:
         {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
-
-            node_id node = (node_id)data.readInt32();
-            OMX_STATETYPE state = OMX_StateInvalid;
-
-            status_t err = getState(node, &state);
-            reply->writeInt32(state);
-            reply->writeInt32(err);
-
-            return NO_ERROR;
-        }
-
-        case ENABLE_NATIVE_BUFFERS:
-        {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
-
-            node_id node = (node_id)data.readInt32();
+            CHECK_OMX_INTERFACE(IOMXNode, data, reply);
             OMX_U32 port_index = data.readInt32();
-            OMX_BOOL graphic = (OMX_BOOL)data.readInt32();
-            OMX_BOOL enable = (OMX_BOOL)data.readInt32();
-
-            status_t err = enableNativeBuffers(node, port_index, graphic, enable);
-            reply->writeInt32(err);
+            IOMX::PortMode mode = (IOMX::PortMode) data.readInt32();
+            reply->writeInt32(setPortMode(port_index, mode));
 
             return NO_ERROR;
         }
 
         case GET_GRAPHIC_BUFFER_USAGE:
         {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
+            CHECK_OMX_INTERFACE(IOMXNode, data, reply);
 
-            node_id node = (node_id)data.readInt32();
             OMX_U32 port_index = data.readInt32();
 
             OMX_U32 usage = 0;
-            status_t err = getGraphicBufferUsage(node, port_index, &usage);
+            status_t err = getGraphicBufferUsage(port_index, &usage);
             reply->writeInt32(err);
             reply->writeInt32(usage);
 
@@ -872,22 +674,18 @@
 
         case USE_BUFFER:
         {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
+            CHECK_OMX_INTERFACE(IOMXNode, data, reply);
 
-            node_id node = (node_id)data.readInt32();
             OMX_U32 port_index = data.readInt32();
-            sp<IMemory> params =
-                interface_cast<IMemory>(data.readStrongBinder());
-            OMX_U32 allottedSize = data.readInt32();
 
-            if (params == NULL) {
-                ALOGE("b/26392700");
-                reply->writeInt32(INVALID_OPERATION);
-                return NO_ERROR;
+            OMXBuffer omxBuf;
+            status_t err = omxBuf.readFromParcel(&data);
+            if (err != OK) {
+                return err;
             }
 
             buffer_id buffer;
-            status_t err = useBuffer(node, port_index, params, &buffer, allottedSize);
+            err = useBuffer(port_index, omxBuf, &buffer);
             reply->writeInt32(err);
 
             if (err == OK) {
@@ -897,160 +695,14 @@
             return NO_ERROR;
         }
 
-        case USE_GRAPHIC_BUFFER:
-        {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
-
-            node_id node = (node_id)data.readInt32();
-            OMX_U32 port_index = data.readInt32();
-            sp<GraphicBuffer> graphicBuffer = new GraphicBuffer();
-            data.read(*graphicBuffer);
-
-            buffer_id buffer;
-            status_t err = useGraphicBuffer(
-                    node, port_index, graphicBuffer, &buffer);
-            reply->writeInt32(err);
-
-            if (err == OK) {
-                reply->writeInt32((int32_t)buffer);
-            }
-
-            return NO_ERROR;
-        }
-
-        case UPDATE_GRAPHIC_BUFFER_IN_META:
-        {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
-
-            node_id node = (node_id)data.readInt32();
-            OMX_U32 port_index = data.readInt32();
-            sp<GraphicBuffer> graphicBuffer = new GraphicBuffer();
-            data.read(*graphicBuffer);
-            buffer_id buffer = (buffer_id)data.readInt32();
-
-            status_t err = updateGraphicBufferInMeta(
-                    node, port_index, graphicBuffer, buffer);
-            reply->writeInt32(err);
-
-            return NO_ERROR;
-        }
-
-        case UPDATE_NATIVE_HANDLE_IN_META:
-        {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
-
-            node_id node = (node_id)data.readInt32();
-            OMX_U32 port_index = data.readInt32();
-            native_handle *handle = NULL;
-            if (data.readInt32()) {
-                handle = data.readNativeHandle();
-            }
-            buffer_id buffer = (buffer_id)data.readInt32();
-
-            status_t err = updateNativeHandleInMeta(
-                    node, port_index, NativeHandle::create(handle, true /* ownshandle */), buffer);
-            reply->writeInt32(err);
-
-            return NO_ERROR;
-        }
-
-        case CREATE_INPUT_SURFACE:
-        {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
-
-            node_id node = (node_id)data.readInt32();
-            OMX_U32 port_index = data.readInt32();
-            android_dataspace dataSpace = (android_dataspace)data.readInt32();
-
-            sp<IGraphicBufferProducer> bufferProducer;
-            MetadataBufferType type = kMetadataBufferTypeInvalid;
-            status_t err = createInputSurface(node, port_index, dataSpace, &bufferProducer, &type);
-
-            if ((err != OK) && (type == kMetadataBufferTypeInvalid)) {
-                android_errorWriteLog(0x534e4554, "26324358");
-            }
-
-            reply->writeInt32(type);
-            reply->writeInt32(err);
-
-            if (err == OK) {
-                reply->writeStrongBinder(IInterface::asBinder(bufferProducer));
-            }
-
-            return NO_ERROR;
-        }
-
-        case CREATE_PERSISTENT_INPUT_SURFACE:
-        {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
-
-            sp<IGraphicBufferProducer> bufferProducer;
-            sp<IGraphicBufferConsumer> bufferConsumer;
-            status_t err = createPersistentInputSurface(
-                    &bufferProducer, &bufferConsumer);
-
-            reply->writeInt32(err);
-
-            if (err == OK) {
-                reply->writeStrongBinder(IInterface::asBinder(bufferProducer));
-                reply->writeStrongBinder(IInterface::asBinder(bufferConsumer));
-            }
-
-            return NO_ERROR;
-        }
-
         case SET_INPUT_SURFACE:
         {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
+            CHECK_OMX_INTERFACE(IOMXNode, data, reply);
 
-            node_id node = (node_id)data.readInt32();
-            OMX_U32 port_index = data.readInt32();
+            sp<IOMXBufferSource> bufferSource =
+                    interface_cast<IOMXBufferSource>(data.readStrongBinder());
 
-            sp<IGraphicBufferConsumer> bufferConsumer =
-                    interface_cast<IGraphicBufferConsumer>(data.readStrongBinder());
-
-            MetadataBufferType type = kMetadataBufferTypeInvalid;
-
-            status_t err = INVALID_OPERATION;
-            if (bufferConsumer == NULL) {
-                ALOGE("b/26392700");
-            } else {
-                err = setInputSurface(node, port_index, bufferConsumer, &type);
-
-                if ((err != OK) && (type == kMetadataBufferTypeInvalid)) {
-                   android_errorWriteLog(0x534e4554, "26324358");
-                }
-            }
-
-            reply->writeInt32(type);
-            reply->writeInt32(err);
-            return NO_ERROR;
-        }
-
-        case SIGNAL_END_OF_INPUT_STREAM:
-        {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
-
-            node_id node = (node_id)data.readInt32();
-
-            status_t err = signalEndOfInputStream(node);
-            reply->writeInt32(err);
-
-            return NO_ERROR;
-        }
-
-        case STORE_META_DATA_IN_BUFFERS:
-        {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
-
-            node_id node = (node_id)data.readInt32();
-            OMX_U32 port_index = data.readInt32();
-            OMX_BOOL enable = (OMX_BOOL)data.readInt32();
-
-            MetadataBufferType type = (MetadataBufferType)data.readInt32();
-            status_t err = storeMetaDataInBuffers(node, port_index, enable, &type);
-
-            reply->writeInt32(type);
+            status_t err = setInputSurface(bufferSource);
             reply->writeInt32(err);
 
             return NO_ERROR;
@@ -1058,16 +710,15 @@
 
         case PREPARE_FOR_ADAPTIVE_PLAYBACK:
         {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
+            CHECK_OMX_INTERFACE(IOMXNode, data, reply);
 
-            node_id node = (node_id)data.readInt32();
             OMX_U32 port_index = data.readInt32();
             OMX_BOOL enable = (OMX_BOOL)data.readInt32();
             OMX_U32 max_width = data.readInt32();
             OMX_U32 max_height = data.readInt32();
 
             status_t err = prepareForAdaptivePlayback(
-                    node, port_index, enable, max_width, max_height);
+                    port_index, enable, max_width, max_height);
             reply->writeInt32(err);
 
             return NO_ERROR;
@@ -1075,16 +726,15 @@
 
         case CONFIGURE_VIDEO_TUNNEL_MODE:
         {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
+            CHECK_OMX_INTERFACE(IOMXNode, data, reply);
 
-            node_id node = (node_id)data.readInt32();
             OMX_U32 port_index = data.readInt32();
             OMX_BOOL tunneled = (OMX_BOOL)data.readInt32();
             OMX_U32 audio_hw_sync = data.readInt32();
 
             native_handle_t *sideband_handle = NULL;
             status_t err = configureVideoTunnelMode(
-                    node, port_index, tunneled, audio_hw_sync, &sideband_handle);
+                    port_index, tunneled, audio_hw_sync, &sideband_handle);
             reply->writeInt32(err);
             if(err == OK){
                 reply->writeNativeHandle(sideband_handle);
@@ -1095,11 +745,10 @@
 
         case ALLOC_SECURE_BUFFER:
         {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
+            CHECK_OMX_INTERFACE(IOMXNode, data, reply);
 
-            node_id node = (node_id)data.readInt32();
             OMX_U32 port_index = data.readInt32();
-            if (!isSecure(node) || port_index != 0 /* kPortIndexInput */) {
+            if (!isSecure() || port_index != 0 /* kPortIndexInput */) {
                 ALOGE("b/24310423");
                 reply->writeInt32(INVALID_OPERATION);
                 return NO_ERROR;
@@ -1111,7 +760,7 @@
             void *buffer_data = NULL;
             sp<NativeHandle> native_handle;
             status_t err = allocateSecureBuffer(
-                    node, port_index, size, &buffer, &buffer_data, &native_handle);
+                    port_index, size, &buffer, &buffer_data, &native_handle);
             reply->writeInt32(err);
 
             if (err == OK) {
@@ -1125,83 +774,61 @@
             return NO_ERROR;
         }
 
-        case ALLOC_BUFFER_WITH_BACKUP:
-        {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
-
-            node_id node = (node_id)data.readInt32();
-            OMX_U32 port_index = data.readInt32();
-            sp<IMemory> params =
-                interface_cast<IMemory>(data.readStrongBinder());
-            OMX_U32 allottedSize = data.readInt32();
-
-            if (params == NULL) {
-                ALOGE("b/26392700");
-                reply->writeInt32(INVALID_OPERATION);
-                return NO_ERROR;
-            }
-
-            buffer_id buffer;
-            status_t err = allocateBufferWithBackup(
-                    node, port_index, params, &buffer, allottedSize);
-
-            reply->writeInt32(err);
-
-            if (err == OK) {
-                reply->writeInt32((int32_t)buffer);
-            }
-
-            return NO_ERROR;
-        }
-
         case FREE_BUFFER:
         {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
+            CHECK_OMX_INTERFACE(IOMXNode, data, reply);
 
-            node_id node = (node_id)data.readInt32();
             OMX_U32 port_index = data.readInt32();
             buffer_id buffer = (buffer_id)data.readInt32();
-            reply->writeInt32(freeBuffer(node, port_index, buffer));
+            reply->writeInt32(freeBuffer(port_index, buffer));
 
             return NO_ERROR;
         }
 
         case FILL_BUFFER:
         {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
+            CHECK_OMX_INTERFACE(IOMXNode, data, reply);
 
-            node_id node = (node_id)data.readInt32();
             buffer_id buffer = (buffer_id)data.readInt32();
+
+            OMXBuffer omxBuf;
+            status_t err = omxBuf.readFromParcel(&data);
+            if (err != OK) {
+                return err;
+            }
+
             bool haveFence = data.readInt32();
             int fenceFd = haveFence ? ::dup(data.readFileDescriptor()) : -1;
-            reply->writeInt32(fillBuffer(node, buffer, fenceFd));
+
+            reply->writeInt32(fillBuffer(buffer, omxBuf, fenceFd));
 
             return NO_ERROR;
         }
 
         case EMPTY_BUFFER:
         {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
+            CHECK_OMX_INTERFACE(IOMXNode, data, reply);
 
-            node_id node = (node_id)data.readInt32();
             buffer_id buffer = (buffer_id)data.readInt32();
-            OMX_U32 range_offset = data.readInt32();
-            OMX_U32 range_length = data.readInt32();
+            OMXBuffer omxBuf;
+            status_t err = omxBuf.readFromParcel(&data);
+            if (err != OK) {
+                return err;
+            }
             OMX_U32 flags = data.readInt32();
             OMX_TICKS timestamp = data.readInt64();
             bool haveFence = data.readInt32();
             int fenceFd = haveFence ? ::dup(data.readFileDescriptor()) : -1;
             reply->writeInt32(emptyBuffer(
-                    node, buffer, range_offset, range_length, flags, timestamp, fenceFd));
+                    buffer, omxBuf, flags, timestamp, fenceFd));
 
             return NO_ERROR;
         }
 
         case GET_EXTENSION_INDEX:
         {
-            CHECK_OMX_INTERFACE(IOMX, data, reply);
+            CHECK_OMX_INTERFACE(IOMXNode, data, reply);
 
-            node_id node = (node_id)data.readInt32();
             const char *parameter_name = data.readCString();
 
             if (parameter_name == NULL) {
@@ -1211,7 +838,7 @@
             }
 
             OMX_INDEXTYPE index;
-            status_t err = getExtensionIndex(node, parameter_name, &index);
+            status_t err = getExtensionIndex(parameter_name, &index);
 
             reply->writeInt32(err);
 
@@ -1222,6 +849,34 @@
             return OK;
         }
 
+        case DISPATCH_MESSAGE:
+        {
+            CHECK_OMX_INTERFACE(IOMXNode, data, reply);
+            omx_message msg;
+            int haveFence = data.readInt32();
+            msg.fenceFd = haveFence ? ::dup(data.readFileDescriptor()) : -1;
+            msg.type = (typeof(msg.type))data.readInt32();
+            status_t err = data.read(&msg.u, sizeof(msg.u));
+
+            if (err == OK) {
+                err = dispatchMessage(msg);
+            }
+            reply->writeInt32(err);
+
+            return NO_ERROR;
+        }
+
+        case SET_QUIRKS:
+        {
+            CHECK_OMX_INTERFACE(IOMXNode, data, reply);
+
+            OMX_U32 quirks = data.readInt32();
+
+            reply->writeInt32(setQuirks(quirks));
+
+            return NO_ERROR;
+        }
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
@@ -1238,14 +893,12 @@
     virtual void onMessages(const std::list<omx_message> &messages) {
         Parcel data, reply;
         std::list<omx_message>::const_iterator it = messages.cbegin();
-        bool first = true;
+        if (messages.empty()) {
+            return;
+        }
+        data.writeInterfaceToken(IOMXObserver::getInterfaceDescriptor());
         while (it != messages.cend()) {
             const omx_message &msg = *it++;
-            if (first) {
-                data.writeInterfaceToken(IOMXObserver::getInterfaceDescriptor());
-                data.writeInt32(msg.node);
-                first = false;
-            }
             data.writeInt32(msg.fenceFd >= 0);
             if (msg.fenceFd >= 0) {
                 data.writeFileDescriptor(msg.fenceFd, true /* takeOwnership */);
@@ -1254,10 +907,8 @@
             data.write(&msg.u, sizeof(msg.u));
             ALOGV("onMessage writing message %d, size %zu", msg.type, sizeof(msg));
         }
-        if (!first) {
-            data.writeInt32(-1); // mark end
-            remote()->transact(OBSERVER_ON_MSG, data, &reply, IBinder::FLAG_ONEWAY);
-        }
+        data.writeInt32(-1); // mark end
+        remote()->transact(OBSERVER_ON_MSG, data, &reply, IBinder::FLAG_ONEWAY);
     }
 };
 
@@ -1269,7 +920,6 @@
         case OBSERVER_ON_MSG:
         {
             CHECK_OMX_INTERFACE(IOMXObserver, data, reply);
-            IOMX::node_id node = data.readInt32();
             std::list<omx_message> messages;
             status_t err = FAILED_TRANSACTION; // must receive at least one message
             do {
@@ -1278,7 +928,6 @@
                     break;
                 }
                 omx_message msg;
-                msg.node = node;
                 msg.fenceFd = haveFence ? ::dup(data.readFileDescriptor()) : -1;
                 msg.type = (typeof(msg.type))data.readInt32();
                 err = data.read(&msg.u, sizeof(msg.u));
diff --git a/media/libmedia/MediaAnalyticsItem.cpp b/media/libmedia/MediaAnalyticsItem.cpp
new file mode 100644
index 0000000..76397c7
--- /dev/null
+++ b/media/libmedia/MediaAnalyticsItem.cpp
@@ -0,0 +1,865 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#undef LOG_TAG
+#define LOG_TAG "MediaAnalyticsItem"
+
+#include <inttypes.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+
+#include <binder/Parcel.h>
+#include <utils/Errors.h>
+#include <utils/Log.h>
+#include <utils/Mutex.h>
+#include <utils/SortedVector.h>
+#include <utils/threads.h>
+
+#include <media/stagefright/foundation/AString.h>
+
+#include <binder/IServiceManager.h>
+#include <media/IMediaAnalyticsService.h>
+#include <media/MediaAnalyticsItem.h>
+
+namespace android {
+
+#define DEBUG_SERVICEACCESS     0
+#define DEBUG_API               0
+#define DEBUG_ALLOCATIONS       0
+
+// after this many failed attempts, we stop trying [from this process] and just say that
+// the service is off.
+#define SVC_TRIES               2
+
+// the few universal keys we have
+const MediaAnalyticsItem::Key MediaAnalyticsItem::kKeyAny  = "any";
+const MediaAnalyticsItem::Key MediaAnalyticsItem::kKeyNone  = "none";
+
+const char * const MediaAnalyticsItem::EnabledProperty  = "media.analytics.enabled";
+const char * const MediaAnalyticsItem::EnabledPropertyPersist  = "persist.media.analytics.enabled";
+const int MediaAnalyticsItem::EnabledProperty_default  = 0;
+
+
+// access functions for the class
+MediaAnalyticsItem::MediaAnalyticsItem()
+    : mPid(0),
+      mUid(0),
+      mSessionID(MediaAnalyticsItem::SessionIDNone),
+      mTimestamp(0),
+      mFinalized(0),
+      mPropCount(0), mPropSize(0), mProps(NULL)
+{
+    mKey = MediaAnalyticsItem::kKeyNone;
+}
+
+MediaAnalyticsItem::MediaAnalyticsItem(MediaAnalyticsItem::Key key)
+    : mPid(0),
+      mUid(0),
+      mSessionID(MediaAnalyticsItem::SessionIDNone),
+      mTimestamp(0),
+      mFinalized(0),
+      mPropCount(0), mPropSize(0), mProps(NULL)
+{
+    if (DEBUG_ALLOCATIONS) {
+        ALOGD("Allocate MediaAnalyticsItem @ %p", this);
+    }
+    mKey = key;
+}
+
+MediaAnalyticsItem::~MediaAnalyticsItem() {
+    if (DEBUG_ALLOCATIONS) {
+        ALOGD("Destroy  MediaAnalyticsItem @ %p", this);
+    }
+    clear();
+}
+
+void MediaAnalyticsItem::clear() {
+
+    // clean allocated storage from key
+    mKey.clear();
+
+    // clean attributes
+    // contents of the attributes
+    for (size_t i = 0 ; i < mPropSize; i++ ) {
+        clearProp(&mProps[i]);
+    }
+    // the attribute records themselves
+    if (mProps != NULL) {
+        free(mProps);
+        mProps = NULL;
+    }
+    mPropSize = 0;
+    mPropCount = 0;
+
+    return;
+}
+
+// make a deep copy of myself
+MediaAnalyticsItem *MediaAnalyticsItem::dup() {
+    MediaAnalyticsItem *dst = new MediaAnalyticsItem(this->mKey);
+
+    if (dst != NULL) {
+        // key as part of constructor
+        dst->mPid = this->mPid;
+        dst->mUid = this->mUid;
+        dst->mSessionID = this->mSessionID;
+        dst->mTimestamp = this->mTimestamp;
+        dst->mFinalized = this->mFinalized;
+
+        // properties aka attributes
+        dst->growProps(this->mPropCount);
+        for (size_t i = 0; i < mPropCount; i++) {
+            copyProp(&dst->mProps[i], &this->mProps[i]);
+        }
+        dst->mPropCount = this->mPropCount;
+    }
+
+    return dst;
+}
+
+// so clients can send intermediate values to be overlaid later
+MediaAnalyticsItem &MediaAnalyticsItem::setFinalized(bool value) {
+    mFinalized = value;
+    return *this;
+}
+
+bool MediaAnalyticsItem::getFinalized() const {
+    return mFinalized;
+}
+
+MediaAnalyticsItem &MediaAnalyticsItem::setSessionID(MediaAnalyticsItem::SessionID_t id) {
+    mSessionID = id;
+    return *this;
+}
+
+MediaAnalyticsItem::SessionID_t MediaAnalyticsItem::getSessionID() const {
+    return mSessionID;
+}
+
+MediaAnalyticsItem::SessionID_t MediaAnalyticsItem::generateSessionID() {
+
+    if (mSessionID == SessionIDNone) {
+        // get one from the server
+        MediaAnalyticsItem::SessionID_t newid = SessionIDNone;
+        sp<IMediaAnalyticsService> svc = getInstance();
+        if (svc != NULL) {
+            newid = svc->generateUniqueSessionID();
+        }
+        mSessionID = newid;
+    }
+
+    return mSessionID;
+}
+
+MediaAnalyticsItem &MediaAnalyticsItem::clearSessionID() {
+    mSessionID = MediaAnalyticsItem::SessionIDNone;
+    return *this;
+}
+
+MediaAnalyticsItem &MediaAnalyticsItem::setTimestamp(nsecs_t ts) {
+    mTimestamp = ts;
+    return *this;
+}
+
+nsecs_t MediaAnalyticsItem::getTimestamp() const {
+    return mTimestamp;
+}
+
+MediaAnalyticsItem &MediaAnalyticsItem::setPid(pid_t pid) {
+    mPid = pid;
+    return *this;
+}
+
+pid_t MediaAnalyticsItem::getPid() const {
+    return mPid;
+}
+
+MediaAnalyticsItem &MediaAnalyticsItem::setUid(uid_t uid) {
+    mUid = uid;
+    return *this;
+}
+
+uid_t MediaAnalyticsItem::getUid() const {
+    return mUid;
+}
+
+// this key is for the overall record -- "codec", "player", "drm", etc
+MediaAnalyticsItem &MediaAnalyticsItem::setKey(MediaAnalyticsItem::Key key) {
+    mKey = key;
+    return *this;
+}
+
+MediaAnalyticsItem::Key MediaAnalyticsItem::getKey() {
+    return mKey;
+}
+
+// number of attributes we have in this record
+int32_t MediaAnalyticsItem::count() const {
+    return mPropCount;
+}
+
+// find the proper entry in the list
+size_t MediaAnalyticsItem::findPropIndex(const char *name, size_t len)
+{
+    size_t i = 0;
+    for (; i < mPropCount; i++) {
+        Prop *prop = &mProps[i];
+        if (prop->mNameLen != len) {
+            continue;
+        }
+        if (memcmp(name, prop->mName, len) == 0) {
+            break;
+        }
+    }
+    return i;
+}
+
+MediaAnalyticsItem::Prop *MediaAnalyticsItem::findProp(const char *name) {
+    size_t len = strlen(name);
+    size_t i = findPropIndex(name, len);
+    if (i < mPropCount) {
+        return &mProps[i];
+    }
+    return NULL;
+}
+
+void MediaAnalyticsItem::Prop::setName(const char *name, size_t len) {
+    mNameLen = len;
+    mName = (const char *) malloc(len+1);
+    memcpy ((void *)mName, name, len+1);
+}
+
+// used only as part of a storing operation
+MediaAnalyticsItem::Prop *MediaAnalyticsItem::allocateProp(const char *name) {
+    size_t len = strlen(name);
+    size_t i = findPropIndex(name, len);
+    Prop *prop;
+
+    if (i < mPropCount) {
+        prop = &mProps[i];
+    } else {
+        if (i == mPropSize) {
+            growProps();
+            // XXX: verify success
+        }
+        i = mPropCount++;
+        prop = &mProps[i];
+        prop->setName(name, len);
+    }
+
+    return prop;
+}
+
+// set the values
+void MediaAnalyticsItem::setInt32(MediaAnalyticsItem::Attr name, int32_t value) {
+    Prop *prop = allocateProp(name);
+    prop->mType = kTypeInt32;
+    prop->u.int32Value = value;
+}
+
+void MediaAnalyticsItem::setInt64(MediaAnalyticsItem::Attr name, int64_t value) {
+    Prop *prop = allocateProp(name);
+    prop->mType = kTypeInt64;
+    prop->u.int64Value = value;
+}
+
+void MediaAnalyticsItem::setDouble(MediaAnalyticsItem::Attr name, double value) {
+    Prop *prop = allocateProp(name);
+    prop->mType = kTypeDouble;
+    prop->u.doubleValue = value;
+}
+
+void MediaAnalyticsItem::setCString(MediaAnalyticsItem::Attr name, const char *value) {
+
+    Prop *prop = allocateProp(name);
+    // any old value will be gone
+    prop->mType = kTypeCString;
+    prop->u.CStringValue = strdup(value);
+}
+
+void MediaAnalyticsItem::setRate(MediaAnalyticsItem::Attr name, int64_t count, int64_t duration) {
+    Prop *prop = allocateProp(name);
+    prop->mType = kTypeRate;
+    prop->u.rate.count = count;
+    prop->u.rate.duration = duration;
+}
+
+
+// find/add/set fused into a single operation
+void MediaAnalyticsItem::addInt32(MediaAnalyticsItem::Attr name, int32_t value) {
+    Prop *prop = allocateProp(name);
+    switch (prop->mType) {
+        case kTypeInt32:
+            prop->u.int32Value += value;
+            break;
+        default:
+            clearPropValue(prop);
+            prop->mType = kTypeInt32;
+            prop->u.int32Value = value;
+            break;
+    }
+}
+
+void MediaAnalyticsItem::addInt64(MediaAnalyticsItem::Attr name, int64_t value) {
+    Prop *prop = allocateProp(name);
+    switch (prop->mType) {
+        case kTypeInt64:
+            prop->u.int64Value += value;
+            break;
+        default:
+            clearPropValue(prop);
+            prop->mType = kTypeInt64;
+            prop->u.int64Value = value;
+            break;
+    }
+}
+
+void MediaAnalyticsItem::addRate(MediaAnalyticsItem::Attr name, int64_t count, int64_t duration) {
+    Prop *prop = allocateProp(name);
+    switch (prop->mType) {
+        case kTypeRate:
+            prop->u.rate.count += count;
+            prop->u.rate.duration += duration;
+            break;
+        default:
+            clearPropValue(prop);
+            prop->mType = kTypeRate;
+            prop->u.rate.count = count;
+            prop->u.rate.duration = duration;
+            break;
+    }
+}
+
+void MediaAnalyticsItem::addDouble(MediaAnalyticsItem::Attr name, double value) {
+    Prop *prop = allocateProp(name);
+    switch (prop->mType) {
+        case kTypeDouble:
+            prop->u.doubleValue += value;
+            break;
+        default:
+            clearPropValue(prop);
+            prop->mType = kTypeDouble;
+            prop->u.doubleValue = value;
+            break;
+    }
+}
+
+// find & extract values
+bool MediaAnalyticsItem::getInt32(MediaAnalyticsItem::Attr name, int32_t *value) {
+    Prop *prop = findProp(name);
+    if (prop == NULL || prop->mType != kTypeInt32) {
+        return false;
+    }
+    if (value != NULL) {
+        *value = prop->u.int32Value;
+    }
+    return true;
+}
+
+bool MediaAnalyticsItem::getInt64(MediaAnalyticsItem::Attr name, int64_t *value) {
+    Prop *prop = findProp(name);
+    if (prop == NULL || prop->mType != kTypeInt64) {
+        return false;
+    }
+    if (value != NULL) {
+        *value = prop->u.int64Value;
+    }
+    return true;
+}
+
+bool MediaAnalyticsItem::getRate(MediaAnalyticsItem::Attr name, int64_t *count, int64_t *duration, double *rate) {
+    Prop *prop = findProp(name);
+    if (prop == NULL || prop->mType != kTypeRate) {
+        return false;
+    }
+    if (count != NULL) {
+        *count = prop->u.rate.count;
+    }
+    if (duration != NULL) {
+        *duration = prop->u.rate.duration;
+    }
+    if (rate != NULL) {
+        double r = 0.0;
+        if (prop->u.rate.duration != 0) {
+            r = prop->u.rate.count / (double) prop->u.rate.duration;
+        }
+        *rate = r;
+    }
+    return true;
+}
+
+bool MediaAnalyticsItem::getDouble(MediaAnalyticsItem::Attr name, double *value) {
+    Prop *prop = findProp(name);
+    if (prop == NULL || prop->mType != kTypeDouble) {
+        return false;
+    }
+    if (value != NULL) {
+        *value = prop->u.doubleValue;
+    }
+    return true;
+}
+
+// caller is responsible for freeing the returned string
+bool MediaAnalyticsItem::getCString(MediaAnalyticsItem::Attr name, char **value) {
+    Prop *prop = findProp(name);
+    if (prop == NULL || prop->mType != kTypeCString) {
+        return false;
+    }
+    if (value != NULL) {
+        *value = strdup(prop->u.CStringValue);
+    }
+    return true;
+}
+
+// remove indicated keys and their values
+// return value is # keys removed
+int32_t MediaAnalyticsItem::filter(int n, MediaAnalyticsItem::Attr attrs[]) {
+    int zapped = 0;
+    if (attrs == NULL || n <= 0) {
+        return -1;
+    }
+    for (ssize_t i = 0 ; i < n ;  i++) {
+        const char *name = attrs[i];
+        size_t len = strlen(name);
+        size_t j = findPropIndex(name, len);
+        if (j >= mPropCount) {
+            // not there
+            continue;
+        } else if (j+1 == mPropCount) {
+            // last one, shorten
+            zapped++;
+            clearProp(&mProps[j]);
+            mPropCount--;
+        } else {
+            // in the middle, bring last one down and shorten
+            zapped++;
+            clearProp(&mProps[j]);
+            mProps[j] = mProps[mPropCount-1];
+            mPropCount--;
+        }
+    }
+    return zapped;
+}
+
+// remove any keys NOT in the provided list
+// return value is # keys removed
+int32_t MediaAnalyticsItem::filterNot(int n, MediaAnalyticsItem::Attr attrs[]) {
+    int zapped = 0;
+    if (attrs == NULL || n <= 0) {
+        return -1;
+    }
+    for (ssize_t i = mPropCount-1 ; i >=0 ;  i--) {
+        Prop *prop = &mProps[i];
+        for (ssize_t j = 0; j < n ; j++) {
+            if (strcmp(prop->mName, attrs[j]) == 0) {
+                clearProp(prop);
+                zapped++;
+                if (i != (ssize_t)(mPropCount-1)) {
+                    *prop = mProps[mPropCount-1];
+                }
+                initProp(&mProps[mPropCount-1]);
+                mPropCount--;
+                break;
+            }
+        }
+    }
+    return zapped;
+}
+
+// remove a single key
+// return value is 0 (not found) or 1 (found and removed)
+int32_t MediaAnalyticsItem::filter(MediaAnalyticsItem::Attr name) {
+    return filter(1, &name);
+}
+
+// handle individual items/properties stored within the class
+//
+
+void MediaAnalyticsItem::initProp(Prop *prop) {
+    if (prop != NULL) {
+        prop->mName = NULL;
+        prop->mNameLen = 0;
+
+        prop->mType = kTypeNone;
+    }
+}
+
+void MediaAnalyticsItem::clearProp(Prop *prop)
+{
+    if (prop != NULL) {
+        if (prop->mName != NULL) {
+            free((void *)prop->mName);
+            prop->mName = NULL;
+            prop->mNameLen = 0;
+        }
+
+        clearPropValue(prop);
+    }
+}
+
+void MediaAnalyticsItem::clearPropValue(Prop *prop)
+{
+    if (prop != NULL) {
+        if (prop->mType == kTypeCString && prop->u.CStringValue != NULL) {
+            free(prop->u.CStringValue);
+            prop->u.CStringValue = NULL;
+        }
+        prop->mType = kTypeNone;
+    }
+}
+
+void MediaAnalyticsItem::copyProp(Prop *dst, const Prop *src)
+{
+    // get rid of any pointers in the dst
+    clearProp(dst);
+
+    *dst = *src;
+
+    // fix any pointers that we blindly copied, so we have our own copies
+    if (dst->mName) {
+        void *p =  malloc(dst->mNameLen + 1);
+        memcpy (p, src->mName, dst->mNameLen + 1);
+        dst->mName = (const char *) p;
+    }
+    if (dst->mType == kTypeCString) {
+        dst->u.CStringValue = strdup(src->u.CStringValue);
+    }
+}
+
+void MediaAnalyticsItem::growProps(int increment)
+{
+    if (increment <= 0) {
+        increment = kGrowProps;
+    }
+    int nsize = mPropSize + increment;
+    Prop *ni = (Prop *)realloc(mProps, sizeof(Prop) * nsize);
+
+    if (ni != NULL) {
+        for (int i = mPropSize; i < nsize; i++) {
+            initProp(&ni[i]);
+        }
+        mProps = ni;
+        mPropSize = nsize;
+    }
+}
+
+// Parcel / serialize things for binder calls
+//
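+// Layout (mirrored by writeToParcel below): key (cstring), session id (int64),
+// finalized flag (int32), timestamp (int64), property count (int32), then for
+// each property its name (cstring), type (int32) and a type-specific value.
+// Note that pid/uid are kept locally and are not serialized.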
+
+int32_t MediaAnalyticsItem::readFromParcel(const Parcel& data) {
+    // into 'this' object
+    // .. we make a copy of the string to put away.
+    mKey = data.readCString();
+    mSessionID = data.readInt64();
+    mFinalized = data.readInt32();
+    mTimestamp = data.readInt64();
+
+    int count = data.readInt32();
+    for (int i = 0; i < count; i++) {
+        MediaAnalyticsItem::Attr attr = data.readCString();
+        int32_t ztype = data.readInt32();
+        switch (ztype) {
+            case MediaAnalyticsItem::kTypeInt32:
+                setInt32(attr, data.readInt32());
+                break;
+            case MediaAnalyticsItem::kTypeInt64:
+                setInt64(attr, data.readInt64());
+                break;
+            case MediaAnalyticsItem::kTypeDouble:
+                setDouble(attr, data.readDouble());
+                break;
+            case MediaAnalyticsItem::kTypeCString:
+                setCString(attr, data.readCString());
+                break;
+            case MediaAnalyticsItem::kTypeRate:
+            {
+                int64_t count = data.readInt64();
+                int64_t duration = data.readInt64();
+                setRate(attr, count, duration);
+                break;
+            }
+            default:
+                ALOGE("reading bad item type: %d, idx %d", ztype, i);
+                return -1;
+        }
+    }
+
+    return 0;
+}
+
+int32_t MediaAnalyticsItem::writeToParcel(Parcel *data) {
+    if (data == NULL) return -1;
+
+
+    data->writeCString(mKey.c_str());
+    data->writeInt64(mSessionID);
+    data->writeInt32(mFinalized);
+    data->writeInt64(mTimestamp);
+
+    // set of items
+    int count = mPropCount;
+    data->writeInt32(count);
+    for (int i = 0 ; i < count; i++ ) {
+            Prop *prop = &mProps[i];
+            data->writeCString(prop->mName);
+            data->writeInt32(prop->mType);
+            switch (prop->mType) {
+                case MediaAnalyticsItem::kTypeInt32:
+                        data->writeInt32(prop->u.int32Value);
+                        break;
+                case MediaAnalyticsItem::kTypeInt64:
+                        data->writeInt64(prop->u.int64Value);
+                        break;
+                case MediaAnalyticsItem::kTypeDouble:
+                        data->writeDouble(prop->u.doubleValue);
+                        break;
+                case MediaAnalyticsItem::kTypeRate:
+                        data->writeInt64(prop->u.rate.count);
+                        data->writeInt64(prop->u.rate.duration);
+                        break;
+                case MediaAnalyticsItem::kTypeCString:
+                        data->writeCString(prop->u.CStringValue);
+                        break;
+                default:
+                        ALOGE("found bad Prop type: %d, idx %d, name %s",
+                              prop->mType, i, prop->mName);
+                        break;
+            }
+    }
+
+    return 0;
+}
+
+
+
+AString MediaAnalyticsItem::toString() {
+
+    AString result = "(";
+    char buffer[512];
+
+    // same order as we spill into the parcel, although not required
+    // key+session are our primary matching criteria
+    //RBE ALOGD("mKey.c_str");
+    result.append(mKey.c_str());
+    //RBE ALOGD("post-mKey.c_str");
+    result.append(":");
+    snprintf(buffer, sizeof(buffer), "%" PRId64 ":", mSessionID);
+    result.append(buffer);
+
+    // we need these internally, but don't want to upload them
+    snprintf(buffer, sizeof(buffer), "%d:%d", mUid, mPid);
+    result.append(buffer);
+
+    snprintf(buffer, sizeof(buffer), "%d:", mFinalized);
+    result.append(buffer);
+    snprintf(buffer, sizeof(buffer), "%" PRId64 ":", mTimestamp);
+    result.append(buffer);
+
+    // set of items
+    int count = mPropCount;
+    snprintf(buffer, sizeof(buffer), "%d:", count);
+    result.append(buffer);
+    for (int i = 0 ; i < count; i++ ) {
+            Prop *prop = &mProps[i];
+            switch (prop->mType) {
+                case MediaAnalyticsItem::kTypeInt32:
+                        snprintf(buffer,sizeof(buffer),
+                        "%s=%d:", prop->mName, prop->u.int32Value);
+                        break;
+                case MediaAnalyticsItem::kTypeInt64:
+                        snprintf(buffer,sizeof(buffer),
+                        "%s=%" PRId64 ":", prop->mName, prop->u.int64Value);
+                        break;
+                case MediaAnalyticsItem::kTypeDouble:
+                        snprintf(buffer,sizeof(buffer),
+                        "%s=%e:", prop->mName, prop->u.doubleValue);
+                        break;
+                case MediaAnalyticsItem::kTypeRate:
+                        snprintf(buffer,sizeof(buffer),
+                        "%s=%" PRId64 "/%" PRId64 ":", prop->mName,
+                        prop->u.rate.count, prop->u.rate.duration);
+                        break;
+                case MediaAnalyticsItem::kTypeCString:
+                        snprintf(buffer,sizeof(buffer), "%s=", prop->mName);
+                        result.append(buffer);
+                        // XXX: sanitize string for ':' '='
+                        result.append(prop->u.CStringValue);
+                        buffer[0] = ':';
+                        buffer[1] = '\0';
+                        break;
+                default:
+                        ALOGE("to_String bad item type: %d for %s",
+                              prop->mType, prop->mName);
+                        break;
+            }
+            result.append(buffer);
+    }
+
+    result.append(")");
+
+    return result;
+}
+
+// for the lazy, we offer methods that finds the service and
+// calls the appropriate daemon
+bool MediaAnalyticsItem::selfrecord() {
+    return selfrecord(false);
+}
+
+bool MediaAnalyticsItem::selfrecord(bool forcenew) {
+
+    if (DEBUG_API) {
+        AString p = this->toString();
+        ALOGD("selfrecord of: %s [forcenew=%d]", p.c_str(), forcenew);
+    }
+
+    sp<IMediaAnalyticsService> svc = getInstance();
+
+    if (svc != NULL) {
+        svc->submit(this, forcenew);
+        return true;
+    } else {
+        AString p = this->toString();
+        ALOGD("Unable to record: %s [forcenew=%d]", p.c_str(), forcenew);
+        return false;
+    }
+}
+
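+// A rough usage sketch of the API above (illustrative only; "vid.width" and
+// "codec.name" are placeholder attribute names, not keys defined by this file):
+//
+//     MediaAnalyticsItem item("codec");
+//     item.setInt32("vid.width", 1920);
+//     item.setCString("codec.name", "avc");
+//     if (!item.selfrecord()) {
+//         // the media.analytics service was unavailable or analytics disabled
+//     }
+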
+// get a connection we can reuse for most of our lifetime
+// static
+sp<IMediaAnalyticsService> MediaAnalyticsItem::sAnalyticsService;
+static Mutex sInitMutex;
+
+//static
+bool MediaAnalyticsItem::isEnabled() {
+    int enabled = property_get_int32(MediaAnalyticsItem::EnabledProperty, -1);
+
+    if (enabled == -1) {
+        enabled = property_get_int32(MediaAnalyticsItem::EnabledPropertyPersist, -1);
+    }
+    if (enabled == -1) {
+        enabled = MediaAnalyticsItem::EnabledProperty_default;
+    }
+    if (enabled <= 0) {
+        return false;
+    }
+    return true;
+}
+
+//static
+sp<IMediaAnalyticsService> MediaAnalyticsItem::getInstance() {
+    static const char *servicename = "media.analytics";
+    static int tries_remaining = SVC_TRIES;
+    int enabled = isEnabled();
+
+    if (enabled == false) {
+        if (DEBUG_SERVICEACCESS) {
+                ALOGD("disabled");
+        }
+        return NULL;
+    }
+
+    {
+        Mutex::Autolock _l(sInitMutex);
+        const char *badness = "";
+
+        // think of tries_remaining as telling us whether service==NULL because
+        // (1) we haven't tried to initialize it yet
+        // (2) we've tried to initialize it, but failed.
+        if (sAnalyticsService == NULL && tries_remaining > 0) {
+            sp<IServiceManager> sm = defaultServiceManager();
+            if (sm != NULL) {
+                sp<IBinder> binder = sm->getService(String16(servicename));
+                if (binder != NULL) {
+                    sAnalyticsService = interface_cast<IMediaAnalyticsService>(binder);
+                } else {
+                    badness = "did not find service";
+                }
+            } else {
+                badness = "No Service Manager access";
+            }
+
+            if (sAnalyticsService == NULL) {
+                if (tries_remaining > 0) {
+                    tries_remaining--;
+                }
+                if (DEBUG_SERVICEACCESS) {
+                    ALOGD("Unable to bind to service %s: %s", servicename, badness);
+                }
+            }
+        }
+
+        return sAnalyticsService;
+    }
+}
+
+
+// merge the info from 'incoming' into this record.
+// we finish with a union of this+incoming and special handling for collisions
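+//
+// on a collision, the last character of the attribute name selects the policy:
+// '<' keeps the existing value, '>' (or any other ending) takes the incoming
+// value, and '+' is reserved for summing but is currently a no-op (the
+// existing value is kept).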
+bool MediaAnalyticsItem::merge(MediaAnalyticsItem *incoming) {
+
+    // if I don't have key or session id, take them from incoming
+    // 'this' should never be missing both of them...
+    if (mKey.empty()) {
+        mKey = incoming->mKey;
+    } else if (mSessionID == 0) {
+        mSessionID = incoming->mSessionID;
+    }
+
+    // we always take the more recent 'finalized' value
+    setFinalized(incoming->getFinalized());
+
+    // for each attribute from 'incoming', resolve appropriately
+    int nattr = incoming->mPropCount;
+    for (int i = 0 ; i < nattr; i++ ) {
+        Prop *iprop = &incoming->mProps[i];
+        Prop *oprop = findProp(iprop->mName);
+        const char *p = iprop->mName;
+        size_t len = strlen(p);
+        char semantic = p[len-1];
+
+        if (oprop == NULL) {
+            // no oprop, so we insert the new one
+            oprop = allocateProp(p);
+            copyProp(oprop, iprop);
+        } else {
+            // merge iprop into oprop
+            switch (semantic) {
+                case '<':       // first (aka keep old)
+                    /* nop */
+                    break;
+
+                default:        // default is 'last'
+                case '>':       // last (aka keep new)
+                    copyProp(oprop, iprop);
+                    break;
+
+                case '+':       /* sum */
+                    // XXX validate numeric types, sum in place
+                    break;
+
+            }
+        }
+    }
+
+    // not sure when we'd return false...
+    return true;
+}
+
+} // namespace android
+
diff --git a/media/libmedia/MediaCodecBuffer.cpp b/media/libmedia/MediaCodecBuffer.cpp
new file mode 100644
index 0000000..59d6164
--- /dev/null
+++ b/media/libmedia/MediaCodecBuffer.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaCodecBuffer"
+#include <utils/Log.h>
+
+#include <media/MediaCodecBuffer.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/MediaBufferBase.h>
+
+namespace android {
+
+MediaCodecBuffer::MediaCodecBuffer(const sp<AMessage> &format, const sp<ABuffer> &buffer)
+    : mMeta(new AMessage),
+      mFormat(format),
+      mBuffer(buffer),
+      mMediaBufferBase(nullptr) {
+}
+
+// ABuffer-like interface
+uint8_t *MediaCodecBuffer::base() {
+    return mBuffer->base();
+}
+
+uint8_t *MediaCodecBuffer::data() {
+    return mBuffer->data();
+}
+
+size_t MediaCodecBuffer::capacity() const {
+    return mBuffer->capacity();
+}
+
+size_t MediaCodecBuffer::size() const {
+    return mBuffer->size();
+}
+
+size_t MediaCodecBuffer::offset() const {
+    return mBuffer->offset();
+}
+
+status_t MediaCodecBuffer::setRange(size_t offset, size_t size) {
+    mBuffer->setRange(offset, size);
+    return OK;
+}
+
+MediaBufferBase *MediaCodecBuffer::getMediaBufferBase() {
+    if (mMediaBufferBase != NULL) {
+        mMediaBufferBase->add_ref();
+    }
+    return mMediaBufferBase;
+}
+
+void MediaCodecBuffer::setMediaBufferBase(MediaBufferBase *mediaBuffer) {
+    if (mMediaBufferBase != NULL) {
+        mMediaBufferBase->release();
+    }
+    mMediaBufferBase = mediaBuffer;
+}
+
+sp<AMessage> MediaCodecBuffer::meta() {
+    return mMeta;
+}
+
+sp<AMessage> MediaCodecBuffer::format() {
+    return mFormat;
+}
+
+void MediaCodecBuffer::setFormat(const sp<AMessage> &format) {
+    mMeta->clear();
+    mFormat = format;
+}
+
+}  // namespace android
diff --git a/media/libmedia/MediaCodecInfo.cpp b/media/libmedia/MediaCodecInfo.cpp
index 1b3b3eb..62a7bdf 100644
--- a/media/libmedia/MediaCodecInfo.cpp
+++ b/media/libmedia/MediaCodecInfo.cpp
@@ -121,9 +121,11 @@
 }
 
 bool MediaCodecInfo::hasQuirk(const char *name) const {
-    for (size_t ix = 0; ix < mQuirks.size(); ix++) {
-        if (mQuirks.itemAt(ix).equalsIgnoreCase(name)) {
-            return true;
+    if (name) {
+        for (size_t ix = 0; ix < mQuirks.size(); ix++) {
+            if (mQuirks.itemAt(ix).equalsIgnoreCase(name)) {
+                return true;
+            }
         }
     }
     return false;
@@ -190,9 +192,11 @@
 }
 
 ssize_t MediaCodecInfo::getCapabilityIndex(const char *mime) const {
-    for (size_t ix = 0; ix < mCaps.size(); ix++) {
-        if (mCaps.keyAt(ix).equalsIgnoreCase(mime)) {
-            return ix;
+    if (mime) {
+        for (size_t ix = 0; ix < mCaps.size(); ix++) {
+            if (mCaps.keyAt(ix).equalsIgnoreCase(mime)) {
+                return ix;
+            }
         }
     }
     return -1;
diff --git a/media/libstagefright/MediaDefs.cpp b/media/libmedia/MediaDefs.cpp
similarity index 96%
rename from media/libstagefright/MediaDefs.cpp
rename to media/libmedia/MediaDefs.cpp
index 845462b..2ae71f7 100644
--- a/media/libstagefright/MediaDefs.cpp
+++ b/media/libmedia/MediaDefs.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#include <media/stagefright/MediaDefs.h>
+#include <media/MediaDefs.h>
 
 namespace android {
 
@@ -57,8 +57,6 @@
 const char *MEDIA_MIMETYPE_CONTAINER_AVI = "video/avi";
 const char *MEDIA_MIMETYPE_CONTAINER_MPEG2PS = "video/mp2p";
 
-const char *MEDIA_MIMETYPE_CONTAINER_WVM = "video/wvm";
-
 const char *MEDIA_MIMETYPE_TEXT_3GPP = "text/3gpp-tt";
 const char *MEDIA_MIMETYPE_TEXT_SUBRIP = "application/x-subrip";
 const char *MEDIA_MIMETYPE_TEXT_VTT = "text/vtt";
diff --git a/media/libmedia/MidiDeviceInfo.cpp b/media/libmedia/MidiDeviceInfo.cpp
new file mode 100644
index 0000000..02efc5f
--- /dev/null
+++ b/media/libmedia/MidiDeviceInfo.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MidiDeviceInfo"
+
+#include "MidiDeviceInfo.h"
+
+#include <binder/Parcel.h>
+#include <log/log.h>
+#include <utils/Errors.h>
+#include <utils/String16.h>
+
+namespace android {
+namespace media {
+namespace midi {
+
+// The constant values need to be kept in sync with MidiDeviceInfo.java.
+// static
+const char* const MidiDeviceInfo::PROPERTY_NAME = "name";
+const char* const MidiDeviceInfo::PROPERTY_MANUFACTURER = "manufacturer";
+const char* const MidiDeviceInfo::PROPERTY_PRODUCT = "product";
+const char* const MidiDeviceInfo::PROPERTY_VERSION = "version";
+const char* const MidiDeviceInfo::PROPERTY_SERIAL_NUMBER = "serial_number";
+const char* const MidiDeviceInfo::PROPERTY_ALSA_CARD = "alsa_card";
+const char* const MidiDeviceInfo::PROPERTY_ALSA_DEVICE = "alsa_device";
+
+String16 MidiDeviceInfo::getProperty(const char* propertyName) {
+    String16 value;
+    if (mProperties.getString(String16(propertyName), &value)) {
+        return value;
+    } else {
+        return String16();
+    }
+}
+
+#define RETURN_IF_FAILED(calledOnce)                                     \
+    {                                                                    \
+        status_t returnStatus = calledOnce;                              \
+        if (returnStatus) {                                              \
+            ALOGE("Failed at %s:%d (%s)", __FILE__, __LINE__, __func__); \
+            return returnStatus;                                         \
+         }                                                               \
+    }
+
+status_t MidiDeviceInfo::writeToParcel(Parcel* parcel) const {
+    // Needs to be kept in sync with code in MidiDeviceInfo.java
+    RETURN_IF_FAILED(parcel->writeInt32(mType));
+    RETURN_IF_FAILED(parcel->writeInt32(mId));
+    RETURN_IF_FAILED(parcel->writeInt32((int32_t)mInputPortNames.size()));
+    RETURN_IF_FAILED(parcel->writeInt32((int32_t)mOutputPortNames.size()));
+    RETURN_IF_FAILED(writeStringVector(parcel, mInputPortNames));
+    RETURN_IF_FAILED(writeStringVector(parcel, mOutputPortNames));
+    RETURN_IF_FAILED(parcel->writeInt32(mIsPrivate ? 1 : 0));
+    RETURN_IF_FAILED(mProperties.writeToParcel(parcel));
+    // This corresponds to "extra" properties written by Java code
+    RETURN_IF_FAILED(mProperties.writeToParcel(parcel));
+    return OK;
+}
+
+status_t MidiDeviceInfo::readFromParcel(const Parcel* parcel) {
+    // Needs to be kept in sync with code in MidiDeviceInfo.java
+    RETURN_IF_FAILED(parcel->readInt32(&mType));
+    RETURN_IF_FAILED(parcel->readInt32(&mId));
+    int32_t inputPortCount;
+    RETURN_IF_FAILED(parcel->readInt32(&inputPortCount));
+    int32_t outputPortCount;
+    RETURN_IF_FAILED(parcel->readInt32(&outputPortCount));
+    RETURN_IF_FAILED(readStringVector(parcel, &mInputPortNames, inputPortCount));
+    RETURN_IF_FAILED(readStringVector(parcel, &mOutputPortNames, outputPortCount));
+    int32_t isPrivate;
+    RETURN_IF_FAILED(parcel->readInt32(&isPrivate));
+    mIsPrivate = isPrivate == 1;
+    RETURN_IF_FAILED(mProperties.readFromParcel(parcel));
+    // Ignore "extra" properties as they may contain Java Parcelables
+    return OK;
+}
+
+status_t MidiDeviceInfo::readStringVector(
+        const Parcel* parcel, Vector<String16> *vectorPtr, size_t defaultLength) {
+    std::unique_ptr<std::vector<std::unique_ptr<String16>>> v;
+    status_t result = parcel->readString16Vector(&v);
+    if (result != OK) return result;
+    vectorPtr->clear();
+    if (v.get() != nullptr) {
+        for (const auto& iter : *v) {
+            if (iter.get() != nullptr) {
+                vectorPtr->push_back(*iter);
+            } else {
+                vectorPtr->push_back(String16());
+            }
+        }
+    } else {
+        vectorPtr->resize(defaultLength);
+    }
+    return OK;
+}
+
+status_t MidiDeviceInfo::writeStringVector(Parcel* parcel, const Vector<String16>& vector) const {
+    std::vector<String16> v;
+    for (size_t i = 0; i < vector.size(); ++i) {
+        v.push_back(vector[i]);
+    }
+    return parcel->writeString16Vector(v);
+}
+
+// Vector does not define operator==
+static inline bool areVectorsEqual(const Vector<String16>& lhs, const Vector<String16>& rhs) {
+    if (lhs.size() != rhs.size()) return false;
+    for (size_t i = 0; i < lhs.size(); ++i) {
+        if (lhs[i] != rhs[i]) return false;
+    }
+    return true;
+}
+
+bool operator==(const MidiDeviceInfo& lhs, const MidiDeviceInfo& rhs) {
+    return (lhs.mType == rhs.mType && lhs.mId == rhs.mId &&
+            areVectorsEqual(lhs.mInputPortNames, rhs.mInputPortNames) &&
+            areVectorsEqual(lhs.mOutputPortNames, rhs.mOutputPortNames) &&
+            lhs.mProperties == rhs.mProperties &&
+            lhs.mIsPrivate == rhs.mIsPrivate);
+}
+
+}  // namespace midi
+}  // namespace media
+}  // namespace android
diff --git a/media/libmedia/OMXBuffer.cpp b/media/libmedia/OMXBuffer.cpp
new file mode 100644
index 0000000..8ea70e4
--- /dev/null
+++ b/media/libmedia/OMXBuffer.cpp
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "OMXBuffer"
+
+#include <media/MediaCodecBuffer.h>
+#include <media/OMXBuffer.h>
+#include <binder/IMemory.h>
+#include <binder/Parcel.h>
+#include <ui/GraphicBuffer.h>
+#include <utils/NativeHandle.h>
+
+namespace android {
+
+//static
+OMXBuffer OMXBuffer::sPreset(static_cast<sp<MediaCodecBuffer> >(NULL));
+
+OMXBuffer::OMXBuffer()
+    : mBufferType(kBufferTypeInvalid) {
+}
+
+OMXBuffer::OMXBuffer(const sp<MediaCodecBuffer>& codecBuffer)
+    : mBufferType(kBufferTypePreset),
+      mRangeOffset(codecBuffer != NULL ? codecBuffer->offset() : 0),
+      mRangeLength(codecBuffer != NULL ? codecBuffer->size() : 0) {
+}
+
+OMXBuffer::OMXBuffer(const sp<IMemory> &mem)
+    : mBufferType(kBufferTypeSharedMem),
+      mMem(mem) {
+}
+
+OMXBuffer::OMXBuffer(const sp<GraphicBuffer> &gbuf)
+    : mBufferType(kBufferTypeANWBuffer),
+      mGraphicBuffer(gbuf) {
+}
+
+OMXBuffer::OMXBuffer(const sp<NativeHandle> &handle)
+    : mBufferType(kBufferTypeNativeHandle),
+      mNativeHandle(handle) {
+}
+
+OMXBuffer::~OMXBuffer() {
+}
+
+status_t OMXBuffer::writeToParcel(Parcel *parcel) const {
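+    // Write the buffer type tag first; the payload that follows depends on the type.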
+    parcel->writeInt32(mBufferType);
+
+    switch(mBufferType) {
+        case kBufferTypePreset:
+        {
+            status_t err = parcel->writeUint32(mRangeOffset);
+            if (err != OK) {
+                return err;
+            }
+            return parcel->writeUint32(mRangeLength);
+        }
+
+        case kBufferTypeSharedMem:
+        {
+            return parcel->writeStrongBinder(IInterface::asBinder(mMem));
+        }
+
+        case kBufferTypeANWBuffer:
+        {
+            return parcel->write(*mGraphicBuffer);
+        }
+
+        case kBufferTypeNativeHandle:
+        {
+            return parcel->writeNativeHandle(mNativeHandle->handle());
+        }
+
+        default:
+            return BAD_VALUE;
+    }
+    return BAD_VALUE;
+}
+
+status_t OMXBuffer::readFromParcel(const Parcel *parcel) {
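+    // Read the type tag and then the matching payload; mBufferType is only updated
+    // once the payload has been read successfully.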
+    BufferType bufferType = (BufferType) parcel->readInt32();
+
+    switch(bufferType) {
+        case kBufferTypePreset:
+        {
+            status_t err = parcel->readUint32(&mRangeOffset);
+            if (err != OK) {
+                return err;
+            }
+            err = parcel->readUint32(&mRangeLength);
+            if (err != OK) {
+                return err;
+            }
+            break;
+        }
+
+        case kBufferTypeSharedMem:
+        {
+            mMem = interface_cast<IMemory>(parcel->readStrongBinder());
+            break;
+        }
+
+        case kBufferTypeANWBuffer:
+        {
+            sp<GraphicBuffer> buffer = new GraphicBuffer();
+
+            status_t err = parcel->read(*buffer);
+
+            if (err != OK) {
+                return err;
+            }
+
+            mGraphicBuffer = buffer;
+            break;
+        }
+
+        case kBufferTypeNativeHandle:
+        {
+            sp<NativeHandle> handle = NativeHandle::create(
+                    parcel->readNativeHandle(), true /* ownsHandle */);
+
+            mNativeHandle = handle;
+            break;
+        }
+
+        default:
+            return BAD_VALUE;
+    }
+
+    mBufferType = bufferType;
+    return OK;
+}
+
+OMXBuffer& OMXBuffer::operator=(OMXBuffer&& source) {
+    mBufferType = std::move(source.mBufferType);
+    mRangeOffset = std::move(source.mRangeOffset);
+    mRangeLength = std::move(source.mRangeLength);
+    mMem = std::move(source.mMem);
+    mGraphicBuffer = std::move(source.mGraphicBuffer);
+    mNativeHandle = std::move(source.mNativeHandle);
+    return *this;
+}
+
+} // namespace android
+
+
+
+
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
new file mode 100644
index 0000000..25c29f2
--- /dev/null
+++ b/media/libmedia/TypeConverter.cpp
@@ -0,0 +1,405 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/TypeConverter.h>
+
+namespace android {
+
+#define MAKE_STRING_FROM_ENUM(string) { #string, string }
+#define TERMINATOR { .literal = nullptr }
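+// Each table entry pairs an enum literal (as a string) with its value; every
+// table ends with a null-literal sentinel entry.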
+
+template <>
+const OutputDeviceConverter::Table OutputDeviceConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_NONE),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_EARPIECE),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_SPEAKER),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_SPEAKER_SAFE),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADSET),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADPHONE),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_SCO),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_A2DP),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_AUX_DIGITAL),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_HDMI),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_USB_ACCESSORY),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_USB_DEVICE),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_USB),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_REMOTE_SUBMIX),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_TELEPHONY_TX),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_LINE),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_HDMI_ARC),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_SPDIF),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_FM),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_AUX_LINE),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_IP),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BUS),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_PROXY),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_DEFAULT),
+    // STUB must be after DEFAULT, so the latter is picked up by toString first.
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_STUB),
+    TERMINATOR
+};
+
+template <>
+const InputDeviceConverter::Table InputDeviceConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_NONE),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_COMMUNICATION),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_AMBIENT),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BUILTIN_MIC),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_ALL_SCO),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_WIRED_HEADSET),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_AUX_DIGITAL),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_HDMI),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_TELEPHONY_RX),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_VOICE_CALL),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BACK_MIC),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_REMOTE_SUBMIX),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_USB_ACCESSORY),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_USB_DEVICE),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_ALL_USB),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_FM_TUNER),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_TV_TUNER),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_LINE),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_SPDIF),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_A2DP),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_LOOPBACK),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_IP),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BUS),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_PROXY),
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_DEFAULT),
+    // STUB must be after DEFAULT, so the latter is picked up by toString first.
+    MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_STUB),
+    TERMINATOR
+};
+
+
+template <>
+const OutputFlagConverter::Table OutputFlagConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_NONE),
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_DIRECT),
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_PRIMARY),
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_FAST),
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_DEEP_BUFFER),
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD),
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_NON_BLOCKING),
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_HW_AV_SYNC),
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_TTS),
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_RAW),
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_SYNC),
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO),
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_DIRECT_PCM),
+    TERMINATOR
+};
+
+
+template <>
+const InputFlagConverter::Table InputFlagConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_NONE),
+    MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_FAST),
+    MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_HW_HOTWORD),
+    MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_RAW),
+    MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_SYNC),
+    TERMINATOR
+};
+
+
+template <>
+const FormatConverter::Table FormatConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_16_BIT),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_8_BIT),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_32_BIT),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_8_24_BIT),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_FLOAT),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_24_BIT_PACKED),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_MP3),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AMR_NB),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AMR_WB),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_MAIN),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LC),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_SSR),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LTP),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_HE_V1),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_SCALABLE),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ERLC),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LD),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_HE_V2),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ELD),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_MAIN),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_LC),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_SSR),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_LTP),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_HE_V1),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_SCALABLE),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_ERLC),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_LD),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_HE_V2),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_ELD),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_VORBIS),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_HE_AAC_V1),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_HE_AAC_V2),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_OPUS),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AC3),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_E_AC3),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DTS),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DTS_HD),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_IEC61937),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DOLBY_TRUEHD),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_EVRC),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_EVRCB),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_EVRCWB),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_EVRCNW),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADIF),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_WMA),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_WMA_PRO),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AMR_WB_PLUS),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_MP2),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_QCELP),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DSD),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_FLAC),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_ALAC),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_APE),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_SBC),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_APTX),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_APTX_HD),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_LDAC),
+    TERMINATOR
+};
+
+
+template <>
+const OutputChannelConverter::Table OutputChannelConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_MONO),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_STEREO),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_2POINT1),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_QUAD),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_SURROUND),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_PENTA),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_5POINT1),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_6POINT1),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_7POINT1),
+    TERMINATOR
+};
+
+
+template <>
+const InputChannelConverter::Table InputChannelConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_MONO),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_STEREO),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_FRONT_BACK),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_VOICE_CALL_MONO),
+    TERMINATOR
+};
+
+template <>
+const ChannelIndexConverter::Table ChannelIndexConverter::mTable[] = {
+    {"AUDIO_CHANNEL_INDEX_MASK_1", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_1)},
+    {"AUDIO_CHANNEL_INDEX_MASK_2", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_2)},
+    {"AUDIO_CHANNEL_INDEX_MASK_3", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_3)},
+    {"AUDIO_CHANNEL_INDEX_MASK_4", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_4)},
+    {"AUDIO_CHANNEL_INDEX_MASK_5", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_5)},
+    {"AUDIO_CHANNEL_INDEX_MASK_6", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_6)},
+    {"AUDIO_CHANNEL_INDEX_MASK_7", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_7)},
+    {"AUDIO_CHANNEL_INDEX_MASK_8", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_8)},
+    TERMINATOR
+};
+
+
+template <>
+const GainModeConverter::Table GainModeConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_JOINT),
+    MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_CHANNELS),
+    MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_RAMP),
+    TERMINATOR
+};
+
+
+template <>
+const StreamTypeConverter::Table StreamTypeConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_VOICE_CALL),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_SYSTEM),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_RING),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_MUSIC),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ALARM),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_NOTIFICATION),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_BLUETOOTH_SCO),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ENFORCED_AUDIBLE),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_DTMF),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_TTS),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ACCESSIBILITY),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_REROUTING),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_PATCH),
+    TERMINATOR
+};
+
+template<>
+const AudioModeConverter::Table AudioModeConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(AUDIO_MODE_INVALID),
+    MAKE_STRING_FROM_ENUM(AUDIO_MODE_CURRENT),
+    MAKE_STRING_FROM_ENUM(AUDIO_MODE_NORMAL),
+    MAKE_STRING_FROM_ENUM(AUDIO_MODE_RINGTONE),
+    MAKE_STRING_FROM_ENUM(AUDIO_MODE_IN_CALL),
+    MAKE_STRING_FROM_ENUM(AUDIO_MODE_IN_COMMUNICATION),
+    TERMINATOR
+};
+
+template <>
+const UsageTypeConverter::Table UsageTypeConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_UNKNOWN),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_MEDIA),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_VOICE_COMMUNICATION),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_ALARM),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_NOTIFICATION),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_NOTIFICATION_EVENT),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_ASSISTANCE_SONIFICATION),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_GAME),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_VIRTUAL_SOURCE),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_CNT),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_MAX),
+    TERMINATOR
+};
+
+template <>
+const SourceTypeConverter::Table SourceTypeConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_DEFAULT),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_MIC),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_VOICE_UPLINK),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_VOICE_DOWNLINK),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_VOICE_CALL),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_CAMCORDER),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_VOICE_RECOGNITION),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_VOICE_COMMUNICATION),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_REMOTE_SUBMIX),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_UNPROCESSED),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_CNT),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_MAX),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_FM_TUNER),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_HOTWORD),
+    TERMINATOR
+};
+
+template class TypeConverter<OutputDeviceTraits>;
+template class TypeConverter<InputDeviceTraits>;
+template class TypeConverter<OutputFlagTraits>;
+template class TypeConverter<InputFlagTraits>;
+template class TypeConverter<FormatTraits>;
+template class TypeConverter<OutputChannelTraits>;
+template class TypeConverter<InputChannelTraits>;
+template class TypeConverter<ChannelIndexTraits>;
+template class TypeConverter<GainModeTraits>;
+template class TypeConverter<StreamTraits>;
+template class TypeConverter<AudioModeTraits>;
+template class TypeConverter<UsageTraits>;
+template class TypeConverter<SourceTraits>;
+
+bool deviceFromString(const std::string& literalDevice, audio_devices_t& device) {
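+    // A device name may belong to either the input or the output namespace; try both.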
+    return InputDeviceConverter::fromString(literalDevice, device) ||
+            OutputDeviceConverter::fromString(literalDevice, device);
+}
+
+bool deviceToString(audio_devices_t device, std::string& literalDevice) {
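+    // Input device masks carry AUDIO_DEVICE_BIT_IN, so dispatch on that bit.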
+    if (device & AUDIO_DEVICE_BIT_IN) {
+        return InputDeviceConverter::toString(device, literalDevice);
+    } else {
+        return OutputDeviceConverter::toString(device, literalDevice);
+    }
+}
+
+SampleRateTraits::Collection samplingRatesFromString(
+        const std::string &samplingRates, const char *del)
+{
+    SampleRateTraits::Collection samplingRateCollection;
+    collectionFromString<SampleRateTraits>(samplingRates, samplingRateCollection, del);
+    return samplingRateCollection;
+}
+
+FormatTraits::Collection formatsFromString(
+        const std::string &formats, const char *del)
+{
+    FormatTraits::Collection formatCollection;
+    FormatConverter::collectionFromString(formats, formatCollection, del);
+    return formatCollection;
+}
+
+audio_format_t formatFromString(const std::string &literalFormat, audio_format_t defaultFormat)
+{
+    audio_format_t format;
+    if (literalFormat.empty()) {
+        return defaultFormat;
+    }
+    if (!FormatConverter::fromString(literalFormat, format)) {
+        // Unknown format name: fall back to the caller-supplied default rather than
+        // returning an uninitialized value.
+        return defaultFormat;
+    }
+    return format;
+}
+
+audio_channel_mask_t channelMaskFromString(const std::string &literalChannels)
+{
+    audio_channel_mask_t channels;
+    // The name is valid if either the output or the input converter accepts it;
+    // only fail when both reject the string.
+    if (!OutputChannelConverter::fromString(literalChannels, channels) &&
+            !InputChannelConverter::fromString(literalChannels, channels)) {
+        return AUDIO_CHANNEL_INVALID;
+    }
+    return channels;
+}
+
+ChannelTraits::Collection channelMasksFromString(
+        const std::string &channels, const char *del)
+{
+    ChannelTraits::Collection channelMaskCollection;
+    OutputChannelConverter::collectionFromString(channels, channelMaskCollection, del);
+    InputChannelConverter::collectionFromString(channels, channelMaskCollection, del);
+    ChannelIndexConverter::collectionFromString(channels, channelMaskCollection, del);
+    return channelMaskCollection;
+}
+
+InputChannelTraits::Collection inputChannelMasksFromString(
+        const std::string &inChannels, const char *del)
+{
+    InputChannelTraits::Collection inputChannelMaskCollection;
+    InputChannelConverter::collectionFromString(inChannels, inputChannelMaskCollection, del);
+    ChannelIndexConverter::collectionFromString(inChannels, inputChannelMaskCollection, del);
+    return inputChannelMaskCollection;
+}
+
+OutputChannelTraits::Collection outputChannelMasksFromString(
+        const std::string &outChannels, const char *del)
+{
+    OutputChannelTraits::Collection outputChannelMaskCollection;
+    OutputChannelConverter::collectionFromString(outChannels, outputChannelMaskCollection, del);
+    ChannelIndexConverter::collectionFromString(outChannels, outputChannelMaskCollection, del);
+    return outputChannelMaskCollection;
+}
+
+}; // namespace android
diff --git a/media/libmedia/aidl/android/IGraphicBufferSource.aidl b/media/libmedia/aidl/android/IGraphicBufferSource.aidl
new file mode 100644
index 0000000..a8dd309
--- /dev/null
+++ b/media/libmedia/aidl/android/IGraphicBufferSource.aidl
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android;
+
+import android.IOMXNode;
+
+/**
+ * Binder interface for controlling a graphic buffer source.
+ *
+ * @hide
+ */
+interface IGraphicBufferSource {
+    void configure(IOMXNode omxNode, int dataSpace);
+    void setSuspend(boolean suspend);
+    void setRepeatPreviousFrameDelayUs(long repeatAfterUs);
+    void setMaxFps(float maxFps);
+    void setTimeLapseConfig(long timePerFrameUs, long timePerCaptureUs);
+    void setStartTimeUs(long startTimeUs);
+    void setColorAspects(int aspects);
+    void setTimeOffsetUs(long timeOffsetsUs);
+    void signalEndOfInputStream();
+}
\ No newline at end of file
diff --git a/media/libmedia/aidl/android/IOMXBufferSource.aidl b/media/libmedia/aidl/android/IOMXBufferSource.aidl
new file mode 100644
index 0000000..a5bf448
--- /dev/null
+++ b/media/libmedia/aidl/android/IOMXBufferSource.aidl
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android;
+
+import android.OMXFenceParcelable;
+
+/**
+ * Binder interface for a buffer source to be used together with an OMX encoder
+ *
+ * @hide
+ */
+interface IOMXBufferSource {
+    /**
+     * This is called when OMX transitions to OMX_StateExecuting, which means
+     * we can start handing it buffers.  If we already have buffers of data
+     * sitting in the BufferQueue, this will send them to the codec.
+     */
+    void onOmxExecuting();
+
+    /**
+     * This is called when OMX transitions to OMX_StateIdle, indicating that
+     * the codec is meant to return all buffers back to the client for them
+     * to be freed. Do NOT submit any more buffers to the component.
+     */
+    void onOmxIdle();
+
+    /**
+     * This is called when OMX transitions to OMX_StateLoaded, indicating that
+     * we are shutting down.
+     */
+    void onOmxLoaded();
+
+    /**
+     * A "codec buffer", i.e. a buffer that can be used to pass data into
+     * the encoder, has been allocated.
+     */
+    void onInputBufferAdded(int bufferID);
+
+    /**
+     * Called from OnEmptyBufferDone. If we have a BQ buffer available,
+     * fill it with a new frame of data; otherwise, just mark it as available.
+     *
+     * fenceParcel contains the fence's fd that the callee should wait on before
+     * using the buffer (or pass on to the user of the buffer, if the user supports
+     * fences). Callee takes ownership of the fence fd even if it fails.
+     */
+    void onInputBufferEmptied(int bufferID, in OMXFenceParcelable fenceParcel);
+}
\ No newline at end of file
diff --git a/media/libmedia/aidl/android/IOMXNode.aidl b/media/libmedia/aidl/android/IOMXNode.aidl
new file mode 100644
index 0000000..ec87fd2
--- /dev/null
+++ b/media/libmedia/aidl/android/IOMXNode.aidl
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android;
+
+/** @hide */
+interface IOMXNode {
+    // Stub for manual implementation
+}
diff --git a/media/libmedia/aidl/android/IOMXNode.h b/media/libmedia/aidl/android/IOMXNode.h
new file mode 100644
index 0000000..7b17614
--- /dev/null
+++ b/media/libmedia/aidl/android/IOMXNode.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/IOMX.h>
diff --git a/media/libmedia/aidl/android/OMXFenceParcelable.aidl b/media/libmedia/aidl/android/OMXFenceParcelable.aidl
new file mode 100644
index 0000000..6d517e8
--- /dev/null
+++ b/media/libmedia/aidl/android/OMXFenceParcelable.aidl
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android;
+
+/** @hide */
+parcelable OMXFenceParcelable cpp_header "media/OMXFenceParcelable.h";
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index fbe749c..6bba1f1 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -55,7 +55,9 @@
     mStreamType = AUDIO_STREAM_MUSIC;
     mAudioAttributesParcel = NULL;
     mCurrentPosition = -1;
+    mCurrentSeekMode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC;
     mSeekPosition = -1;
+    mSeekMode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC;
     mCurrentState = MEDIA_PLAYER_IDLE;
     mPrepareSync = false;
     mPrepareStatus = NO_ERROR;
@@ -100,7 +102,9 @@
 void MediaPlayer::clear_l()
 {
     mCurrentPosition = -1;
+    mCurrentSeekMode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC;
     mSeekPosition = -1;
+    mSeekMode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC;
     mVideoWidth = mVideoHeight = 0;
     mRetransmitEndpointValid = false;
 }
@@ -240,6 +244,28 @@
     return mPlayer->setVideoSurfaceTexture(bufferProducer);
 }
 
+status_t MediaPlayer::getDefaultBufferingSettings(BufferingSettings* buffering /* nonnull */)
+{
+    ALOGV("getDefaultBufferingSettings");
+
+    Mutex::Autolock _l(mLock);
+    if (mPlayer == 0) {
+        return NO_INIT;
+    }
+    return mPlayer->getDefaultBufferingSettings(buffering);
+}
+
+status_t MediaPlayer::setBufferingSettings(const BufferingSettings& buffering)
+{
+    ALOGV("setBufferingSettings");
+
+    Mutex::Autolock _l(mLock);
+    if (mPlayer == 0) {
+        return NO_INIT;
+    }
+    return mPlayer->setBufferingSettings(buffering);
+}
+
 // must call with lock held
 status_t MediaPlayer::prepareAsync_l()
 {
@@ -508,9 +534,9 @@
     return getDuration_l(msec);
 }
 
-status_t MediaPlayer::seekTo_l(int msec)
+status_t MediaPlayer::seekTo_l(int msec, MediaPlayerSeekMode mode)
 {
-    ALOGV("seekTo %d", msec);
+    ALOGV("seekTo (%d, %d)", msec, mode);
     if ((mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_STARTED | MEDIA_PLAYER_PREPARED |
             MEDIA_PLAYER_PAUSED |  MEDIA_PLAYER_PLAYBACK_COMPLETE) ) ) {
         if ( msec < 0 ) {
@@ -537,12 +563,14 @@
 
         // cache duration
         mCurrentPosition = msec;
+        mCurrentSeekMode = mode;
         if (mSeekPosition < 0) {
             mSeekPosition = msec;
-            return mPlayer->seekTo(msec);
+            mSeekMode = mode;
+            return mPlayer->seekTo(msec, mode);
         }
         else {
-            ALOGV("Seek in progress - queue up seekTo[%d]", msec);
+            ALOGV("Seek in progress - queue up seekTo[%d, %d]", msec, mode);
             return NO_ERROR;
         }
     }
@@ -551,11 +579,11 @@
     return INVALID_OPERATION;
 }
 
-status_t MediaPlayer::seekTo(int msec)
+status_t MediaPlayer::seekTo(int msec, MediaPlayerSeekMode mode)
 {
     mLockThreadId = getThreadId();
     Mutex::Autolock _l(mLock);
-    status_t result = seekTo_l(msec);
+    status_t result = seekTo_l(msec, mode);
     mLockThreadId = 0;
 
     return result;
@@ -869,14 +897,16 @@
         break;
     case MEDIA_SEEK_COMPLETE:
         ALOGV("Received seek complete");
-        if (mSeekPosition != mCurrentPosition) {
-            ALOGV("Executing queued seekTo(%d)", mSeekPosition);
+        if (mSeekPosition != mCurrentPosition || (mSeekMode != mCurrentSeekMode)) {
+            ALOGV("Executing queued seekTo(%d, %d)", mCurrentPosition, mCurrentSeekMode);
             mSeekPosition = -1;
-            seekTo_l(mCurrentPosition);
+            mSeekMode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC;
+            seekTo_l(mCurrentPosition, mCurrentSeekMode);
         }
         else {
             ALOGV("All seeks complete - return to regularly scheduled program");
             mCurrentPosition = mSeekPosition = -1;
+            mCurrentSeekMode = mSeekMode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC;
         }
         break;
     case MEDIA_BUFFERING_UPDATE:
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index 59c077a..6eb208c 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -361,7 +361,7 @@
         return INVALID_OPERATION;
     }
 
-    return mMediaRecorder->setInputSurface(surface->getBufferConsumer());
+    return mMediaRecorder->setInputSurface(surface);
 }
 
 status_t MediaRecorder::setVideoFrameRate(int frames_per_second)
diff --git a/media/libmediaanalyticsservice/Android.mk b/media/libmediaanalyticsservice/Android.mk
new file mode 100644
index 0000000..dd59651
--- /dev/null
+++ b/media/libmediaanalyticsservice/Android.mk
@@ -0,0 +1,44 @@
+LOCAL_PATH:= $(call my-dir)
+
+#
+# libmediaanalyticsservice
+#
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:=               \
+    MediaAnalyticsService.cpp      \
+
+LOCAL_SHARED_LIBRARIES :=       \
+    libbinder                   \
+    libcutils                   \
+    liblog                      \
+    libdl                       \
+    libgui                      \
+    libmedia                    \
+    libmediautils               \
+    libstagefright_foundation   \
+    libutils
+
+LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libmedia
+
+LOCAL_C_INCLUDES :=                                                 \
+    $(TOP)/frameworks/av/media/libstagefright/include               \
+    $(TOP)/frameworks/av/media/libstagefright/rtsp                  \
+    $(TOP)/frameworks/av/media/libstagefright/wifi-display          \
+    $(TOP)/frameworks/av/media/libstagefright/webm                  \
+    $(TOP)/frameworks/av/include/media                              \
+    $(TOP)/frameworks/av/include/camera                             \
+    $(TOP)/frameworks/native/include/media/openmax                  \
+    $(TOP)/frameworks/native/include/media/hardware                 \
+    $(TOP)/external/tremolo/Tremolo                                 \
+    libcore/include                                                 \
+
+LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
+LOCAL_CLANG := true
+
+LOCAL_MODULE:= libmediaanalyticsservice
+
+include $(BUILD_SHARED_LIBRARY)
+
+include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/media/libmediaanalyticsservice/MediaAnalyticsService.cpp b/media/libmediaanalyticsservice/MediaAnalyticsService.cpp
new file mode 100644
index 0000000..1d0246d
--- /dev/null
+++ b/media/libmediaanalyticsservice/MediaAnalyticsService.cpp
@@ -0,0 +1,529 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Proxy for media player implementations
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaAnalyticsService"
+#include <utils/Log.h>
+
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <dirent.h>
+#include <unistd.h>
+
+#include <string.h>
+
+#include <cutils/atomic.h>
+#include <cutils/properties.h> // for property_get
+
+#include <utils/misc.h>
+
+#include <binder/IPCThreadState.h>
+#include <binder/IServiceManager.h>
+#include <binder/MemoryHeapBase.h>
+#include <binder/MemoryBase.h>
+#include <gui/Surface.h>
+#include <utils/Errors.h>  // for status_t
+#include <utils/List.h>
+#include <utils/String8.h>
+#include <utils/SystemClock.h>
+#include <utils/Timers.h>
+#include <utils/Vector.h>
+
+#include <media/AudioPolicyHelper.h>
+#include <media/IMediaHTTPService.h>
+#include <media/IRemoteDisplay.h>
+#include <media/IRemoteDisplayClient.h>
+#include <media/MediaPlayerInterface.h>
+#include <media/mediarecorder.h>
+#include <media/MediaMetadataRetrieverInterface.h>
+#include <media/Metadata.h>
+#include <media/AudioTrack.h>
+#include <media/MemoryLeakTrackUtil.h>
+#include <media/stagefright/MediaCodecList.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/Utils.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooperRoster.h>
+#include <mediautils/BatteryNotifier.h>
+
+//#include <memunreachable/memunreachable.h>
+#include <system/audio.h>
+
+#include <private/android_filesystem_config.h>
+
+#include "MediaAnalyticsService.h"
+
+
+namespace android {
+
+
+#define DEBUG_QUEUE     0
+
+//using android::status_t;
+//using android::OK;
+//using android::BAD_VALUE;
+//using android::NOT_ENOUGH_DATA;
+//using android::Parcel;
+
+
+void MediaAnalyticsService::instantiate() {
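+    // Publish this service with the service manager under the name "media.analytics".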
+    defaultServiceManager()->addService(
+            String16("media.analytics"), new MediaAnalyticsService());
+}
+
+// XXX: add dynamic controls for mMaxRecords
+MediaAnalyticsService::MediaAnalyticsService()
+        : mMaxRecords(100) {
+
+    ALOGD("MediaAnalyticsService created");
+    // clear our queues
+    mOpen = new List<MediaAnalyticsItem *>();
+    mFinalized = new List<MediaAnalyticsItem *>();
+
+    mItemsSubmitted = 0;
+    mItemsFinalized = 0;
+    mItemsDiscarded = 0;
+
+    mLastSessionID = 0;
+    // recover any persistency we set up
+    // etc
+}
+
+MediaAnalyticsService::~MediaAnalyticsService() {
+        ALOGD("MediaAnalyticsService destroyed");
+
+    // XXX: clean out mOpen and mFinalized
+}
+
+
+MediaAnalyticsItem::SessionID_t MediaAnalyticsService::generateUniqueSessionID() {
+    // generate a new sessionid
+
+    Mutex::Autolock _l(mLock_ids);
+    return (++mLastSessionID);
+}
+
+// caller surrenders ownership of 'item'
+MediaAnalyticsItem::SessionID_t MediaAnalyticsService::submit(MediaAnalyticsItem *item, bool forcenew) {
+
+    MediaAnalyticsItem::SessionID_t id = MediaAnalyticsItem::SessionIDInvalid;
+
+    // we control these, not using whatever the user might have sent
+    nsecs_t now = systemTime(SYSTEM_TIME_REALTIME);
+    item->setTimestamp(now);
+    int pid = IPCThreadState::self()->getCallingPid();
+    item->setPid(pid);
+    int uid = IPCThreadState::self()->getCallingUid();
+    item->setUid(uid);
+
+    mItemsSubmitted++;
+
+    // validate the record; we discard if we don't like it
+    if (contentValid(item) == false) {
+        delete item;
+        return MediaAnalyticsItem::SessionIDInvalid;
+    }
+
+
+    // if we have a sessionid in the new record, look to make
+    // sure it doesn't appear in the finalized list.
+    // XXX: this is for security / DOS prevention.
+    // may also require that we persist the unique sessionIDs
+    // across boots [instead of within a single boot]
+
+
+    // match this new record up against records in the open
+    // list...
+    // if there's a match, merge them together
+    // deal with moving the old / merged record into the finalized queue
+
+    bool finalizing = item->getFinalized();
+
+    // if finalizing, we'll remove it
+    MediaAnalyticsItem *oitem = findItem(mOpen, item, finalizing | forcenew);
+    if (oitem != NULL) {
+        if (forcenew) {
+            // old one gets finalized, then we insert the new one
+            // so we'll have 2 records at the end of this.
+            // but don't finalize an empty record
+            if (oitem->count() == 0) {
+                // we're responsible for disposing of the dead record
+                delete oitem;
+                oitem = NULL;
+            } else {
+                oitem->setFinalized(true);
+                saveItem(mFinalized, oitem, 0);
+            }
+            // new record could itself be marked finalized...
+            if (finalizing) {
+                saveItem(mFinalized, item, 0);
+                mItemsFinalized++;
+            } else {
+                saveItem(mOpen, item, 1);
+            }
+            id = item->getSessionID();
+        } else {
+            // combine the records, send it to finalized if appropriate
+            oitem->merge(item);
+            if (finalizing) {
+                saveItem(mFinalized, oitem, 0);
+                mItemsFinalized++;
+            }
+            id = oitem->getSessionID();
+
+            // we're responsible for disposing of the dead record
+            delete item;
+            item = NULL;
+        }
+    } else {
+        // nothing to merge, save the new record
+        id = item->getSessionID();
+        if (finalizing) {
+            if (item->count() == 0) {
+                // drop empty records
+                delete item;
+                item = NULL;
+            } else {
+                saveItem(mFinalized, item, 0);
+                mItemsFinalized++;
+            }
+        } else {
+            saveItem(mOpen, item, 1);
+        }
+    }
+    return id;
+}
+
+List<MediaAnalyticsItem *> *MediaAnalyticsService::getMediaAnalyticsItemList(bool finished, nsecs_t ts) {
+    // this might never get called; the binder interface maps to the full parameter list
+    // on the client side before making the binder call.
+    // but this lets us be sure...
+    List<MediaAnalyticsItem*> *list;
+    list = getMediaAnalyticsItemList(finished, ts, MediaAnalyticsItem::kKeyAny);
+    return list;
+}
+
+List<MediaAnalyticsItem *> *MediaAnalyticsService::getMediaAnalyticsItemList(bool , nsecs_t , MediaAnalyticsItem::Key ) {
+
+    // XXX: implement the get-item-list semantics
+
+    List<MediaAnalyticsItem *> *list = NULL;
+    // set up our query on the persistent data
+    // slurp in all of the pieces
+    // return that
+    return list;
+}
+
+status_t MediaAnalyticsService::dump(int fd, const Vector<String16>& args)
+{
+    const size_t SIZE = 512;
+    char buffer[SIZE];
+    String8 result;
+
+    if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
+        snprintf(buffer, SIZE, "Permission Denial: "
+                "can't dump MediaAnalyticsService from pid=%d, uid=%d\n",
+                IPCThreadState::self()->getCallingPid(),
+                IPCThreadState::self()->getCallingUid());
+        result.append(buffer);
+        write(fd, result.string(), result.size());
+        return NO_ERROR;
+    }
+
+    // crack any parameters
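+    // Supported options: "-clear" drops all finalized records after dumping,
+    // and "-since <ns>" restricts the dump to records newer than the given timestamp.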
+    bool clear = false;
+    nsecs_t ts_since = 0;
+    String16 clearOption("-clear");
+    String16 sinceOption("-since");
+    int n = args.size();
+    for (int i = 0; i < n; i++) {
+        String8 myarg(args[i]);
+        if (args[i] == clearOption) {
+            clear = true;
+        } else if (args[i] == sinceOption) {
+            i++;
+            if (i < n) {
+                String8 value(args[i]);
+                char *endp;
+                const char *p = value.string();
+                ts_since = strtoll(p, &endp, 10);
+                if (endp == p || *endp != '\0') {
+                    ts_since = 0;
+                }
+            } else {
+                ts_since = 0;
+            }
+        }
+    }
+
+    Mutex::Autolock _l(mLock);
+
+    snprintf(buffer, SIZE, "Dump of the mediaanalytics process:\n");
+    result.append(buffer);
+
+    int enabled = MediaAnalyticsItem::isEnabled();
+    if (enabled) {
+        snprintf(buffer, SIZE, "Analytics gathering: enabled\n");
+    } else {
+        snprintf(buffer, SIZE, "Analytics gathering: DISABLED via property\n");
+    }
+    result.append(buffer);
+
+    snprintf(buffer, SIZE,
+        "Since Boot: Submissions: %" PRId64
+            " Finalizations: %" PRId64
+        " Discarded: %" PRId64 "\n",
+        mItemsSubmitted, mItemsFinalized, mItemsDiscarded);
+    result.append(buffer);
+    if (ts_since != 0) {
+        snprintf(buffer, SIZE,
+            "Dumping Queue entries more recent than: %" PRId64 "\n",
+            (int64_t) ts_since);
+        result.append(buffer);
+    }
+
+    // show the recently recorded records
+    snprintf(buffer, sizeof(buffer), "\nFinalized Analytics (oldest first):\n");
+    result.append(buffer);
+    result.append(this->dumpQueue(mFinalized, ts_since));
+
+    snprintf(buffer, sizeof(buffer), "\nIn-Progress Analytics (newest first):\n");
+    result.append(buffer);
+    result.append(this->dumpQueue(mOpen, ts_since));
+
+    // show who is connected and injecting records?
+    // talk about # records fed to the 'readers'
+    // talk about # records we discarded, perhaps "discarded w/o reading" too
+
+    if (clear) {
+        // remove everything from the finalized queue
+        while (mFinalized->size() > 0) {
+            MediaAnalyticsItem * oitem = *(mFinalized->begin());
+            if (DEBUG_QUEUE) {
+                ALOGD("zap old record: key %s sessionID %" PRId64 " ts %" PRId64 "",
+                    oitem->getKey().c_str(), oitem->getSessionID(),
+                    oitem->getTimestamp());
+            }
+            mFinalized->erase(mFinalized->begin());
+            mItemsDiscarded++;
+        }
+    }
+
+    write(fd, result.string(), result.size());
+    return NO_ERROR;
+}
+
+// caller has locked mLock...
+String8 MediaAnalyticsService::dumpQueue(List<MediaAnalyticsItem *> *theList) {
+    return dumpQueue(theList, (nsecs_t) 0);
+}
+
+String8 MediaAnalyticsService::dumpQueue(List<MediaAnalyticsItem *> *theList, nsecs_t ts_since) {
+    const size_t SIZE = 512;
+    char buffer[SIZE];
+    String8 result;
+    int slot = 0;
+
+    if (theList->empty()) {
+            result.append("empty\n");
+    } else {
+        List<MediaAnalyticsItem *>::iterator it = theList->begin();
+        for (; it != theList->end(); it++) {
+            nsecs_t when = (*it)->getTimestamp();
+            if (when < ts_since) {
+                continue;
+            }
+            AString entry = (*it)->toString();
+            snprintf(buffer, sizeof(buffer), "%4d: %s",
+                        slot, entry.c_str());
+            result.append(buffer);
+            buffer[0] = '\n';
+            buffer[1] = '\0';
+            result.append(buffer);
+            slot++;
+        }
+    }
+
+    return result;
+}
+
+//
+// Our cheap, in-core, non-persistent records management.
+// XXX: rewrite this to manage persistence, etc.
+
+// insert appropriately into queue
+void MediaAnalyticsService::saveItem(List<MediaAnalyticsItem *> *l, MediaAnalyticsItem * item, int front) {
+
+    Mutex::Autolock _l(mLock);
+
+    if (DEBUG_QUEUE) {
+        ALOGD("Inject a record: session %" PRId64 " ts %" PRId64 "",
+            item->getSessionID(), item->getTimestamp());
+        String8 before = dumpQueue(l);
+        ALOGD("Q before insert: %s", before.string());
+    }
+
+    // add at the front or the back of the queue, as requested by the caller
+    if (front)  {
+        l->push_front(item);
+    } else {
+        l->push_back(item);
+    }
+
+    if (DEBUG_QUEUE) {
+        String8 after = dumpQueue(l);
+        ALOGD("Q after insert: %s", after.string());
+    }
+
+    // keep removing old records from the front until we're in-bounds
+    if (mMaxRecords > 0) {
+        while (l->size() > (size_t) mMaxRecords) {
+            MediaAnalyticsItem * oitem = *(l->begin());
+            if (DEBUG_QUEUE) {
+                ALOGD("zap old record: key %s sessionID %" PRId64 " ts %" PRId64 "",
+                    oitem->getKey().c_str(), oitem->getSessionID(),
+                    oitem->getTimestamp());
+            }
+            l->erase(l->begin());
+        ALOGD("drop record at %s:%d", __FILE__, __LINE__);
+            delete oitem;
+        ALOGD("[done] drop record at %s:%d", __FILE__, __LINE__);
+            mItemsDiscarded++;
+        }
+    }
+
+    if (DEBUG_QUEUE) {
+        String8 after = dumpQueue(l);
+        ALOGD("Q after cleanup: %s", after.string());
+    }
+}
+
+// are they alike enough that nitem can be folded into oitem?
+static bool compatibleItems(MediaAnalyticsItem * oitem, MediaAnalyticsItem * nitem) {
+
+    if (0) {
+        ALOGD("Compare: o %s n %s",
+              oitem->toString().c_str(), nitem->toString().c_str());
+    }
+
+    // general safety
+    if (nitem->getUid() != oitem->getUid()) {
+        return false;
+    }
+    if (nitem->getPid() != oitem->getPid()) {
+        return false;
+    }
+
+    // key -- needs to match
+    if (nitem->getKey() == oitem->getKey()) {
+        // still in the game.
+    } else {
+        return false;
+    }
+
+    // session id -- empty field in new is allowed
+    MediaAnalyticsItem::SessionID_t osession = oitem->getSessionID();
+    MediaAnalyticsItem::SessionID_t nsession = nitem->getSessionID();
+    if (nsession != osession) {
+        // incoming '0' matches value in osession
+        if (nsession != 0) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+// find the incomplete record that this will overlay
+MediaAnalyticsItem *MediaAnalyticsService::findItem(List<MediaAnalyticsItem*> *theList, MediaAnalyticsItem *nitem, bool removeit) {
+    if (nitem == NULL) {
+        return NULL;
+    }
+
+    MediaAnalyticsItem *item = NULL;
+
+    Mutex::Autolock _l(mLock);
+
+    for (List<MediaAnalyticsItem *>::iterator it = theList->begin();
+        it != theList->end(); it++) {
+        MediaAnalyticsItem *tmp = (*it);
+
+        if (!compatibleItems(tmp, nitem)) {
+            continue;
+        }
+
+        // we match! this is the one I want.
+        if (removeit) {
+            theList->erase(it);
+        }
+        item = tmp;
+        break;
+    }
+    return item;
+}
+
+
+// delete the indicated record
+void MediaAnalyticsService::deleteItem(List<MediaAnalyticsItem *> *l, MediaAnalyticsItem *item) {
+
+    Mutex::Autolock _l(mLock);
+
+    if (DEBUG_QUEUE) {
+        String8 before = dumpQueue(l);
+        ALOGD("Q before delete: %s", before.string());
+    }
+
+    for (List<MediaAnalyticsItem *>::iterator it = l->begin();
+        it != l->end(); it++) {
+        if ((*it)->getSessionID() != item->getSessionID())
+            continue;
+
+        if (DEBUG_QUEUE) {
+            ALOGD(" --- removing record for SessionID %" PRId64 "", item->getSessionID());
+            ALOGD("drop record at %s:%d", __FILE__, __LINE__);
+        }
+        delete *it;
+        l->erase(it);
+        break;
+    }
+
+    if (DEBUG_QUEUE) {
+        String8 after = dumpQueue(l);
+        ALOGD("Q after delete: %s", after.string());
+    }
+}
+
+// are the contents good
+bool MediaAnalyticsService::contentValid(MediaAnalyticsItem *) {
+
+    // certain keys require certain uids
+    // internal consistency
+
+    return true;
+}
+
+// are we rate limited, normally false
+bool MediaAnalyticsService::rateLimited(MediaAnalyticsItem *) {
+
+    return false;
+}
+
+
+} // namespace android
diff --git a/media/libmediaanalyticsservice/MediaAnalyticsService.h b/media/libmediaanalyticsservice/MediaAnalyticsService.h
new file mode 100644
index 0000000..3e2298f
--- /dev/null
+++ b/media/libmediaanalyticsservice/MediaAnalyticsService.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_MEDIAANALYTICSSERVICE_H
+#define ANDROID_MEDIAANALYTICSSERVICE_H
+
+#include <arpa/inet.h>
+
+#include <utils/threads.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+
+#include <media/IMediaAnalyticsService.h>
+
+
+namespace android {
+
+class MediaAnalyticsService : public BnMediaAnalyticsService
+{
+
+ public:
+
+    // on this side, caller surrenders ownership
+    virtual int64_t submit(MediaAnalyticsItem *item, bool forcenew);
+
+    virtual List<MediaAnalyticsItem *>
+            *getMediaAnalyticsItemList(bool finished, int64_t ts);
+    virtual List<MediaAnalyticsItem *>
+            *getMediaAnalyticsItemList(bool finished, int64_t ts, MediaAnalyticsItem::Key key);
+
+
+    static  void            instantiate();
+    virtual status_t        dump(int fd, const Vector<String16>& args);
+
+                            MediaAnalyticsService();
+    virtual                 ~MediaAnalyticsService();
+
+ private:
+    MediaAnalyticsItem::SessionID_t generateUniqueSessionID();
+
+    // statistics about our analytics
+    int64_t mItemsSubmitted;
+    int64_t mItemsFinalized;
+    int64_t mItemsDiscarded;
+    MediaAnalyticsItem::SessionID_t mLastSessionID;
+
+    // partitioned a bit so we don't over serialize
+    mutable Mutex           mLock;
+    mutable Mutex           mLock_ids;
+
+    // the most we hold in memory
+    // up to this many in each queue (open, finalized)
+    int32_t mMaxRecords;
+
+    // input validation after arrival from client
+    bool contentValid(MediaAnalyticsItem *);
+    bool rateLimited(MediaAnalyticsItem *);
+
+    // the ones that are still open
+    // (newest at front) since we keep looking for them
+    List<MediaAnalyticsItem *> *mOpen;
+    // the ones we've finalized
+    // (oldest at front) so it prints nicely for dumpsys
+    List<MediaAnalyticsItem *> *mFinalized;
+    // searching within these queues: queue, key
+    MediaAnalyticsItem *findItem(List<MediaAnalyticsItem *> *,
+                                     MediaAnalyticsItem *, bool removeit);
+
+    void saveItem(MediaAnalyticsItem);
+    void saveItem(List<MediaAnalyticsItem *> *, MediaAnalyticsItem *, int);
+    void deleteItem(List<MediaAnalyticsItem *> *, MediaAnalyticsItem *);
+
+    String8 dumpQueue(List<MediaAnalyticsItem*> *);
+    String8 dumpQueue(List<MediaAnalyticsItem*> *, nsecs_t);
+
+};
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
+
+#endif // ANDROID_MEDIAANALYTICSSERVICE_H
diff --git a/media/libmediaplayerservice/ActivityManager.cpp b/media/libmediaplayerservice/ActivityManager.cpp
index 60a209f..438d422 100644
--- a/media/libmediaplayerservice/ActivityManager.cpp
+++ b/media/libmediaplayerservice/ActivityManager.cpp
@@ -14,18 +14,14 @@
  * limitations under the License.
  */
 
-#include <unistd.h>
+#include <binder/IActivityManager.h>
 #include <binder/IBinder.h>
 #include <binder/IServiceManager.h>
-#include <binder/Parcel.h>
-#include <utils/String8.h>
 
 #include "ActivityManager.h"
 
 namespace android {
 
-const uint32_t OPEN_CONTENT_URI_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION + 4;
-
 // Perform ContentProvider.openFile() on the given URI, returning
 // the resulting native file descriptor.  Returns < 0 on error.
 int openContentProviderFile(const String16& uri)
@@ -33,26 +29,10 @@
     int fd = -1;
 
     sp<IServiceManager> sm = defaultServiceManager();
-    sp<IBinder> am = sm->getService(String16("activity"));
+    sp<IBinder> binder = sm->getService(String16("activity"));
+    sp<IActivityManager> am = interface_cast<IActivityManager>(binder);
     if (am != NULL) {
-        Parcel data, reply;
-        data.writeInterfaceToken(String16("android.app.IActivityManager"));
-        data.writeString16(uri);
-        status_t ret = am->transact(OPEN_CONTENT_URI_TRANSACTION, data, &reply);
-        if (ret == NO_ERROR) {
-            int32_t exceptionCode = reply.readExceptionCode();
-            if (!exceptionCode) {
-                // Success is indicated here by a nonzero int followed by the fd;
-                // failure by a zero int with no data following.
-                if (reply.readInt32() != 0) {
-                    fd = dup(reply.readFileDescriptor());
-                }
-            } else {
-                // An exception was thrown back; fall through to return failure
-                ALOGD("openContentUri(%s) caught exception %d\n",
-                        String8(uri).string(), exceptionCode);
-            }
-        }
+        fd = am->openContentUri(uri);
     }
 
     return fd;
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 93064c3..1786e6b 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -55,7 +55,6 @@
     $(TOP)/external/tremolo/Tremolo                                 \
 
 LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
-LOCAL_CLANG := true
 
 LOCAL_MODULE:= libmediaplayerservice
 
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.cpp b/media/libmediaplayerservice/MediaPlayerFactory.cpp
index 605c710..0a9f791 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.cpp
+++ b/media/libmediaplayerservice/MediaPlayerFactory.cpp
@@ -26,7 +26,6 @@
 #include <media/stagefright/foundation/ADebug.h>
 #include <utils/Errors.h>
 #include <utils/misc.h>
-#include <../libstagefright/include/WVMExtractor.h>
 
 #include "MediaPlayerFactory.h"
 
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 95c91d1..3199495 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -577,7 +577,7 @@
     mLoop = false;
     mStatus = NO_INIT;
     mAudioSessionId = audioSessionId;
-    mUID = uid;
+    mUid = uid;
     mRetransmitEndpointValid = false;
     mAudioAttributes = NULL;
 
@@ -643,7 +643,7 @@
     }
 
     if (p != NULL) {
-        p->setUID(mUID);
+        p->setUID(mUid);
     }
 
     return p;
@@ -684,10 +684,18 @@
 
     sp<IServiceManager> sm = defaultServiceManager();
     sp<IBinder> binder = sm->getService(String16("media.extractor"));
+    if (binder == NULL) {
+        ALOGE("extractor service not available");
+        return NULL;
+    }
     mExtractorDeathListener = new ServiceDeathNotifier(binder, p, MEDIAEXTRACTOR_PROCESS_DEATH);
     binder->linkToDeath(mExtractorDeathListener);
 
     binder = sm->getService(String16("media.codec"));
+    if (binder == NULL) {
+        ALOGE("codec service not available");
+        return NULL;
+    }
     mCodecDeathListener = new ServiceDeathNotifier(binder, p, MEDIACODEC_PROCESS_DEATH);
     binder->linkToDeath(mCodecDeathListener);
 
@@ -964,6 +972,32 @@
     return OK;
 }
 
+status_t MediaPlayerService::Client::setBufferingSettings(
+        const BufferingSettings& buffering)
+{
+    ALOGV("[%d] setBufferingSettings{%s}",
+            mConnId, buffering.toString().string());
+    sp<MediaPlayerBase> p = getPlayer();
+    if (p == 0) return UNKNOWN_ERROR;
+    return p->setBufferingSettings(buffering);
+}
+
+status_t MediaPlayerService::Client::getDefaultBufferingSettings(
+        BufferingSettings* buffering /* nonnull */)
+{
+    sp<MediaPlayerBase> p = getPlayer();
+    // TODO: create mPlayer on demand.
+    if (p == 0) return UNKNOWN_ERROR;
+    status_t ret = p->getDefaultBufferingSettings(buffering);
+    if (ret == NO_ERROR) {
+        ALOGV("[%d] getDefaultBufferingSettings{%s}",
+                mConnId, buffering->toString().string());
+    } else {
+        ALOGV("[%d] getDefaultBufferingSettings returned %d", mConnId, ret);
+    }
+    return ret;
+}
+
 status_t MediaPlayerService::Client::prepareAsync()
 {
     ALOGV("[%d] prepareAsync", mConnId);
@@ -1113,12 +1147,12 @@
     return OK;
 }
 
-status_t MediaPlayerService::Client::seekTo(int msec)
+status_t MediaPlayerService::Client::seekTo(int msec, MediaPlayerSeekMode mode)
 {
-    ALOGV("[%d] seekTo(%d)", mConnId, msec);
+    ALOGV("[%d] seekTo(%d, %d)", mConnId, msec, mode);
     sp<MediaPlayerBase> p = getPlayer();
     if (p == 0) return UNKNOWN_ERROR;
-    return p->seekTo(msec);
+    return p->seekTo(msec, mode);
 }
 
 status_t MediaPlayerService::Client::reset()
@@ -1378,7 +1412,7 @@
 
 #undef LOG_TAG
 #define LOG_TAG "AudioSink"
-MediaPlayerService::AudioOutput::AudioOutput(audio_session_t sessionId, int uid, int pid,
+MediaPlayerService::AudioOutput::AudioOutput(audio_session_t sessionId, uid_t uid, int pid,
         const audio_attributes_t* attr)
     : mCallback(NULL),
       mCallbackCookie(NULL),
@@ -1513,57 +1547,45 @@
     }
 
     uint32_t numFramesPlayed;
-    int64_t numFramesPlayedAt;
+    int64_t numFramesPlayedAtUs;
     AudioTimestamp ts;
-    static const int64_t kStaleTimestamp100ms = 100000;
 
     status_t res = mTrack->getTimestamp(ts);
     if (res == OK) {                 // case 1: mixing audio tracks and offloaded tracks.
         numFramesPlayed = ts.mPosition;
-        numFramesPlayedAt = ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
-        const int64_t timestampAge = nowUs - numFramesPlayedAt;
-        if (timestampAge > kStaleTimestamp100ms) {
-            // This is an audio FIXME.
-            // getTimestamp returns a timestamp which may come from audio mixing threads.
-            // After pausing, the MixerThread may go idle, thus the mTime estimate may
-            // become stale. Assuming that the MixerThread runs 20ms, with FastMixer at 5ms,
-            // the max latency should be about 25ms with an average around 12ms (to be verified).
-            // For safety we use 100ms.
-            ALOGV("getTimestamp: returned stale timestamp nowUs(%lld) numFramesPlayedAt(%lld)",
-                    (long long)nowUs, (long long)numFramesPlayedAt);
-            numFramesPlayedAt = nowUs - kStaleTimestamp100ms;
-        }
-        //ALOGD("getTimestamp: OK %d %lld", numFramesPlayed, (long long)numFramesPlayedAt);
+        numFramesPlayedAtUs = ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
+        //ALOGD("getTimestamp: OK %d %lld", numFramesPlayed, (long long)numFramesPlayedAtUs);
     } else if (res == WOULD_BLOCK) { // case 2: transitory state on start of a new track
         numFramesPlayed = 0;
-        numFramesPlayedAt = nowUs;
+        numFramesPlayedAtUs = nowUs;
         //ALOGD("getTimestamp: WOULD_BLOCK %d %lld",
-        //        numFramesPlayed, (long long)numFramesPlayedAt);
+        //        numFramesPlayed, (long long)numFramesPlayedAtUs);
     } else {                         // case 3: transitory at new track or audio fast tracks.
         res = mTrack->getPosition(&numFramesPlayed);
         CHECK_EQ(res, (status_t)OK);
-        numFramesPlayedAt = nowUs;
-        numFramesPlayedAt += 1000LL * mTrack->latency() / 2; /* XXX */
-        //ALOGD("getPosition: %u %lld", numFramesPlayed, (long long)numFramesPlayedAt);
+        numFramesPlayedAtUs = nowUs;
+        numFramesPlayedAtUs += 1000LL * mTrack->latency() / 2; /* XXX */
+        //ALOGD("getPosition: %u %lld", numFramesPlayed, (long long)numFramesPlayedAtUs);
     }
 
     // CHECK_EQ(numFramesPlayed & (1 << 31), 0);  // can't be negative until 12.4 hrs, test
     // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
     int64_t durationUs = (int64_t)((int32_t)numFramesPlayed * 1000000LL / mSampleRateHz)
-            + nowUs - numFramesPlayedAt;
+            + nowUs - numFramesPlayedAtUs;
     if (durationUs < 0) {
         // Occurs when numFramesPlayed position is very small and the following:
         // (1) In case 1, the time nowUs is computed before getTimestamp() is called and
-        //     numFramesPlayedAt is greater than nowUs by time more than numFramesPlayed.
+        //     numFramesPlayedAtUs is greater than nowUs by time more than numFramesPlayed.
         // (2) In case 3, using getPosition and adding mAudioSink->latency() to
-        //     numFramesPlayedAt, by a time amount greater than numFramesPlayed.
+        //     numFramesPlayedAtUs, by a time amount greater than numFramesPlayed.
         //
         // Both of these are transitory conditions.
         ALOGV("getPlayedOutDurationUs: negative duration %lld set to zero", (long long)durationUs);
         durationUs = 0;
     }
     ALOGV("getPlayedOutDurationUs(%lld) nowUs(%lld) frames(%u) framesAt(%lld)",
-            (long long)durationUs, (long long)nowUs, numFramesPlayed, (long long)numFramesPlayedAt);
+            (long long)durationUs, (long long)nowUs,
+            numFramesPlayed, (long long)numFramesPlayedAtUs);
     return durationUs;
 }
 
@@ -2352,7 +2374,7 @@
         return;
     }
 
-    int uid = IPCThreadState::self()->getCallingUid();
+    uid_t uid = IPCThreadState::self()->getCallingUid();
     if (uid == AID_MEDIA) {
         return;
     }
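
Note on the getPlayedOutDurationUs() hunk above: the change only renames numFramesPlayedAt to numFramesPlayedAtUs and drops the stale-timestamp clamp; the duration arithmetic itself is unchanged. A rough standalone sketch of that arithmetic, using hypothetical names in place of the AudioOutput members (plain C++, no Android dependencies):

#include <algorithm>
#include <cstdint>
#include <cstdio>

// framesPlayed: frame count reported by getTimestamp()/getPosition().
// framesPlayedAtUs: wall-clock time (microseconds) at which that count was taken.
// nowUs: current wall-clock time; sampleRateHz: output sample rate.
int64_t playedOutDurationUs(uint32_t framesPlayed, int64_t framesPlayedAtUs,
                            int64_t nowUs, uint32_t sampleRateHz) {
    // media time covered by the reported frames, plus the wall-clock time that
    // has elapsed since the report was taken
    int64_t durationUs = (int64_t)framesPlayed * 1000000LL / sampleRateHz
            + (nowUs - framesPlayedAtUs);
    // transiently negative around track start or seek; clamp like the service does
    return std::max<int64_t>(durationUs, 0);
}

int main() {
    // 48000 frames observed 10 ms ago at 48 kHz: roughly 1.01 s played out
    std::printf("%lld us\n", (long long)playedOutDurationUs(
            48000, 0 /* framesPlayedAtUs */, 10000 /* nowUs */, 48000));
    return 0;
}
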
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 601b046..db182b2 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -75,7 +75,7 @@
         class CallbackData;
 
      public:
-                                AudioOutput(audio_session_t sessionId, int uid, int pid,
+                                AudioOutput(audio_session_t sessionId, uid_t uid, int pid,
                                         const audio_attributes_t * attr);
         virtual                 ~AudioOutput();
 
@@ -152,7 +152,7 @@
         float                   mMsecsPerFrame;
         size_t                  mFrameSize;
         audio_session_t         mSessionId;
-        int                     mUid;
+        uid_t                   mUid;
         int                     mPid;
         float                   mSendLevel;
         int                     mAuxEffectId;
@@ -288,6 +288,9 @@
         virtual void            disconnect();
         virtual status_t        setVideoSurfaceTexture(
                                         const sp<IGraphicBufferProducer>& bufferProducer);
+        virtual status_t        setBufferingSettings(const BufferingSettings& buffering) override;
+        virtual status_t        getDefaultBufferingSettings(
+                                        BufferingSettings* buffering /* nonnull */) override;
         virtual status_t        prepareAsync();
         virtual status_t        start();
         virtual status_t        stop();
@@ -298,7 +301,9 @@
         virtual status_t        setSyncSettings(const AVSyncSettings& rate, float videoFpsHint);
         virtual status_t        getSyncSettings(AVSyncSettings* rate /* nonnull */,
                                                 float* videoFps /* nonnull */);
-        virtual status_t        seekTo(int msec);
+        virtual status_t        seekTo(
+                int msec,
+                MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC);
         virtual status_t        getCurrentPosition(int* msec);
         virtual status_t        getDuration(int* msec);
         virtual status_t        reset();
@@ -402,7 +407,7 @@
                     int32_t                     mConnId;
                     audio_session_t             mAudioSessionId;
                     audio_attributes_t *        mAudioAttributes;
-                    uid_t                       mUID;
+                    uid_t                       mUid;
                     sp<ANativeWindow>           mConnectedWindow;
                     sp<IBinder>                 mConnectedWindowBinder;
                     struct sockaddr_in          mRetransmitEndpoint;
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 609b00d..2a07e5b 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -52,7 +52,7 @@
     return ok;
 }
 
-status_t MediaRecorderClient::setInputSurface(const sp<IGraphicBufferConsumer>& surface)
+status_t MediaRecorderClient::setInputSurface(const sp<PersistentSurface>& surface)
 {
     ALOGV("setInputSurface");
     Mutex::Autolock lock(mLock);
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index b2d0f0e..83ef80a 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -25,7 +25,6 @@
 struct MediaRecorderBase;
 class MediaPlayerService;
 class ICameraRecordingProxy;
-class IGraphicBufferProducer;
 
 class MediaRecorderClient : public BnMediaRecorder
 {
@@ -73,7 +72,7 @@
     virtual     status_t   close();
     virtual     status_t   release();
     virtual     status_t   dump(int fd, const Vector<String16>& args);
-    virtual     status_t   setInputSurface(const sp<IGraphicBufferConsumer>& surface);
+    virtual     status_t   setInputSurface(const sp<PersistentSurface>& surface);
     virtual     sp<IGraphicBufferProducer> querySurfaceMediaSource();
 
 private:
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index cdb0a7b..279bc86 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -45,6 +45,7 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MediaCodecSource.h>
+#include <media/stagefright/PersistentSurface.h>
 #include <media/MediaProfiles.h>
 #include <camera/CameraParameters.h>
 
@@ -248,7 +249,7 @@
 }
 
 status_t StagefrightRecorder::setInputSurface(
-        const sp<IGraphicBufferConsumer>& surface) {
+        const sp<PersistentSurface>& surface) {
     mPersistentSurface = surface;
 
     return OK;
@@ -884,7 +885,10 @@
         case OUTPUT_FORMAT_RTP_AVP:
         case OUTPUT_FORMAT_MPEG2TS:
         {
-            status = mWriter->start();
+            sp<MetaData> meta = new MetaData;
+            int64_t startTimeUs = systemTime() / 1000;
+            meta->setInt64(kKeyTime, startTimeUs);
+            status = mWriter->start(meta.get());
             break;
         }
 
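
Note on the StagefrightRecorder hunk above: the RTP/MPEG2-TS path now hands the writer an explicit start time through a MetaData object instead of calling start() bare. A rough standalone sketch of that key/value handshake, with a std::map standing in for stagefright's MetaData and a made-up Writer type:

#include <chrono>
#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

using Meta = std::map<std::string, int64_t>;

struct Writer {
    int64_t mStartTimeUs = 0;
    // Stands in for MediaWriter::start(MetaData*): pick up the start time if present.
    void start(const Meta& meta) {
        auto it = meta.find("kKeyTime");
        mStartTimeUs = (it != meta.end()) ? it->second : 0;
        std::printf("writer started at %lld us\n", (long long)mStartTimeUs);
    }
};

int main() {
    // the patch uses systemTime() / 1000; steady_clock stands in for it here
    int64_t startTimeUs = std::chrono::duration_cast<std::chrono::microseconds>(
            std::chrono::steady_clock::now().time_since_epoch()).count();
    Meta meta;
    meta["kKeyTime"] = startTimeUs;
    Writer writer;
    writer.start(meta);
    return 0;
}
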
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 4dbd039..4c2e65c 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -38,9 +38,6 @@
 class MetaData;
 struct AudioSource;
 class MediaProfiles;
-class IGraphicBufferConsumer;
-class IGraphicBufferProducer;
-class SurfaceMediaSource;
 struct ALooper;
 
 struct StagefrightRecorder : public MediaRecorderBase {
@@ -57,7 +54,7 @@
     virtual status_t setVideoFrameRate(int frames_per_second);
     virtual status_t setCamera(const sp<hardware::ICamera>& camera, const sp<ICameraRecordingProxy>& proxy);
     virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface);
-    virtual status_t setInputSurface(const sp<IGraphicBufferConsumer>& surface);
+    virtual status_t setInputSurface(const sp<PersistentSurface>& surface);
     virtual status_t setOutputFile(int fd, int64_t offset, int64_t length);
     virtual status_t setParameters(const String8 &params);
     virtual status_t setListener(const sp<IMediaRecorderClient> &listener);
@@ -78,7 +75,7 @@
     sp<hardware::ICamera> mCamera;
     sp<ICameraRecordingProxy> mCameraProxy;
     sp<IGraphicBufferProducer> mPreviewSurface;
-    sp<IGraphicBufferConsumer> mPersistentSurface;
+    sp<PersistentSurface> mPersistentSurface;
     sp<IMediaRecorderClient> mListener;
     String16 mClientName;
     uid_t mClientUid;
diff --git a/media/libmediaplayerservice/TestPlayerStub.h b/media/libmediaplayerservice/TestPlayerStub.h
index 55bf2c8..11fddf6 100644
--- a/media/libmediaplayerservice/TestPlayerStub.h
+++ b/media/libmediaplayerservice/TestPlayerStub.h
@@ -87,7 +87,11 @@
     virtual status_t stop()  {return mPlayer->stop();}
     virtual status_t pause()  {return mPlayer->pause();}
     virtual bool isPlaying() {return mPlayer->isPlaying();}
-    virtual status_t seekTo(int msec) {return mPlayer->seekTo(msec);}
+    virtual status_t seekTo(
+            int msec,
+            MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) {
+        return mPlayer->seekTo(msec, mode);
+    }
     virtual status_t getCurrentPosition(int *p)  {
         return mPlayer->getCurrentPosition(p);
     }
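
Note on the seekTo() hunks above: TestPlayerStub, like the other players touched in this patch, picks up the seek mode through a defaulted parameter, so existing single-argument seekTo() call sites keep compiling. A minimal sketch of the pattern with hypothetical, non-Android types:

#include <cstdio>

enum class SeekMode { PreviousSync, NextSync, ClosestSync, Closest };

struct StubPlayer {
    // Old call sites keep using seekTo(msec); new callers may pass an explicit mode.
    int seekTo(int msec, SeekMode mode = SeekMode::PreviousSync) {
        std::printf("seek to %d ms, mode %d\n", msec, static_cast<int>(mode));
        return 0;
    }
};

int main() {
    StubPlayer p;
    p.seekTo(1500);                      // defaults to PreviousSync
    p.seekTo(1500, SeekMode::Closest);   // explicit frame-accurate seek
    return 0;
}
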
diff --git a/media/libmediaplayerservice/nuplayer/Android.mk b/media/libmediaplayerservice/nuplayer/Android.mk
index 3ea2159..a0e633c 100644
--- a/media/libmediaplayerservice/nuplayer/Android.mk
+++ b/media/libmediaplayerservice/nuplayer/Android.mk
@@ -32,8 +32,6 @@
 LOCAL_CFLAGS += -DENABLE_STAGEFRIGHT_EXPERIMENTS
 endif
 
-LOCAL_CLANG := true
-
 LOCAL_SHARED_LIBRARIES := libmedia
 
 LOCAL_MODULE:= libstagefright_nuplayer
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 57a7286..91a2b7b 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -33,18 +33,17 @@
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/Utils.h>
-#include "../../libstagefright/include/DRMExtractor.h"
 #include "../../libstagefright/include/NuCachedSource2.h"
-#include "../../libstagefright/include/WVMExtractor.h"
 #include "../../libstagefright/include/HTTPBase.h"
 
 namespace android {
 
-static int64_t kLowWaterMarkUs = 2000000ll;  // 2secs
-static int64_t kHighWaterMarkUs = 5000000ll;  // 5secs
-static int64_t kHighWaterMarkRebufferUs = 15000000ll;  // 15secs
-static const ssize_t kLowWaterMarkBytes = 40000;
-static const ssize_t kHighWaterMarkBytes = 200000;
+static const int kLowWaterMarkMs          = 2000;  // 2secs
+static const int kHighWaterMarkMs         = 5000;  // 5secs
+static const int kHighWaterMarkRebufferMs = 15000;  // 15secs
+
+static const int kLowWaterMarkKB  = 40;
+static const int kHighWaterMarkKB = 200;
 
 NuPlayer::GenericSource::GenericSource(
         const sp<AMessage> &notify,
@@ -59,7 +58,6 @@
       mFetchTimedTextDataGeneration(0),
       mDurationUs(-1ll),
       mAudioIsVorbis(false),
-      mIsWidevine(false),
       mIsSecure(false),
       mIsStreaming(false),
       mUIDValid(uidValid),
@@ -70,7 +68,6 @@
       mPendingReadBufferTypes(0) {
     mBufferingMonitor = new BufferingMonitor(notify);
     resetDataSource();
-    DataSource::RegisterDefaultSniffers();
 }
 
 void NuPlayer::GenericSource::resetDataSource() {
@@ -141,70 +138,20 @@
 
 status_t NuPlayer::GenericSource::initFromDataSource() {
     sp<IMediaExtractor> extractor;
-    String8 mimeType;
-    float confidence;
-    sp<AMessage> dummy;
-    bool isWidevineStreaming = false;
-
     CHECK(mDataSource != NULL);
 
-    if (mIsWidevine) {
-        isWidevineStreaming = SniffWVM(
-                mDataSource, &mimeType, &confidence, &dummy);
-        if (!isWidevineStreaming ||
-                strcasecmp(
-                    mimeType.string(), MEDIA_MIMETYPE_CONTAINER_WVM)) {
-            ALOGE("unsupported widevine mime: %s", mimeType.string());
-            return UNKNOWN_ERROR;
-        }
-    } else if (mIsStreaming) {
-        if (!mDataSource->sniff(&mimeType, &confidence, &dummy)) {
-            return UNKNOWN_ERROR;
-        }
-        isWidevineStreaming = !strcasecmp(
-                mimeType.string(), MEDIA_MIMETYPE_CONTAINER_WVM);
-    }
-
-    if (isWidevineStreaming) {
-        // we don't want cached source for widevine streaming.
-        mCachedSource.clear();
-        mDataSource = mHttpSource;
-        mWVMExtractor = new WVMExtractor(mDataSource);
-        mWVMExtractor->setAdaptiveStreamingMode(true);
-        if (mUIDValid) {
-            mWVMExtractor->setUID(mUID);
-        }
-        extractor = mWVMExtractor;
-    } else {
-        extractor = MediaExtractor::Create(mDataSource,
-                mimeType.isEmpty() ? NULL : mimeType.string());
-    }
+    extractor = MediaExtractor::Create(mDataSource, NULL);
 
     if (extractor == NULL) {
         return UNKNOWN_ERROR;
     }
 
-    if (extractor->getDrmFlag()) {
-        checkDrmStatus(mDataSource);
-    }
-
     mFileMeta = extractor->getMetaData();
     if (mFileMeta != NULL) {
         int64_t duration;
         if (mFileMeta->findInt64(kKeyDuration, &duration)) {
             mDurationUs = duration;
         }
-
-        if (!mIsWidevine) {
-            // Check mime to see if we actually have a widevine source.
-            // If the data source is not URL-type (eg. file source), we
-            // won't be able to tell until now.
-            const char *fileMime;
-            if (mFileMeta->findCString(kKeyMIMEType, &fileMime)
-                    && !strncasecmp(fileMime, "video/wvm", 9)) {
-                mIsWidevine = true;
-            }
-        }
     }
 
     int32_t totalBitrate = 0;
@@ -291,11 +238,24 @@
     return OK;
 }
 
+status_t NuPlayer::GenericSource::getDefaultBufferingSettings(
+        BufferingSettings* buffering /* nonnull */) {
+    mBufferingMonitor->getDefaultBufferingSettings(buffering);
+    return OK;
+}
+
+status_t NuPlayer::GenericSource::setBufferingSettings(const BufferingSettings& buffering) {
+    return mBufferingMonitor->setBufferingSettings(buffering);
+}
+
 status_t NuPlayer::GenericSource::startSources() {
     // Start the selected A/V tracks now before we start buffering.
     // Widevine sources might re-initialize crypto when starting, if we delay
     // this to start(), all data buffered during prepare would be wasted.
     // (We don't actually start reading until start().)
+    //
+    // TODO: this logic may no longer be relevant after the removal of widevine
+    // support
     if (mAudioTrack.mSource != NULL && mAudioTrack.mSource->start() != OK) {
         ALOGE("failed to start audio track!");
         return UNKNOWN_ERROR;
@@ -309,18 +269,6 @@
     return OK;
 }
 
-void NuPlayer::GenericSource::checkDrmStatus(const sp<DataSource>& dataSource) {
-    dataSource->getDrmInfo(mDecryptHandle, &mDrmManagerClient);
-    if (mDecryptHandle != NULL) {
-        CHECK(mDrmManagerClient);
-        if (RightsStatus::RIGHTS_VALID != mDecryptHandle->status) {
-            sp<AMessage> msg = dupNotify();
-            msg->setInt32("what", kWhatDrmNoLicense);
-            msg->post();
-        }
-    }
-}
-
 int64_t NuPlayer::GenericSource::getLastReadPosition() {
     if (mAudioTrack.mSource != NULL) {
         return mAudioTimeUs;
@@ -378,11 +326,8 @@
         if (!mUri.empty()) {
             const char* uri = mUri.c_str();
             String8 contentType;
-            mIsWidevine = !strncasecmp(uri, "widevine://", 11);
 
-            if (!strncasecmp("http://", uri, 7)
-                    || !strncasecmp("https://", uri, 8)
-                    || mIsWidevine) {
+            if (!strncasecmp("http://", uri, 7) || !strncasecmp("https://", uri, 8)) {
                 mHttpSource = DataSource::CreateMediaHTTP(mHTTPService);
                 if (mHttpSource == NULL) {
                     ALOGE("Failed to create http source!");
@@ -395,8 +340,6 @@
                    mHTTPService, uri, &mUriHeaders, &contentType,
                    static_cast<HTTPBase *>(mHttpSource.get()));
         } else {
-            mIsWidevine = false;
-
             mDataSource = new FileSource(mFd, mOffset, mLength);
             mFd = -1;
         }
@@ -412,13 +355,9 @@
         mCachedSource = static_cast<NuCachedSource2 *>(mDataSource.get());
     }
 
-    // For widevine or other cached streaming cases, we need to wait for
-    // enough buffering before reporting prepared.
-    // Note that even when URL doesn't start with widevine://, mIsWidevine
-    // could still be set to true later, if the streaming or file source
-    // is sniffed to be widevine. We don't want to buffer for file source
-    // in that case, so must check the flag now.
-    mIsStreaming = (mIsWidevine || mCachedSource != NULL);
+    // For cached streaming cases, we need to wait for enough
+    // buffering before reporting prepared.
+    mIsStreaming = (mCachedSource != NULL);
 
     // init extractor from data source
     status_t err = initFromDataSource();
@@ -450,6 +389,9 @@
 
     if (mIsSecure) {
         // secure decoders must be instantiated before starting widevine source
+        //
+        // TODO: mIsSecure and FLAG_SECURE may be obsolete, revisit after
+        // removing widevine
         sp<AMessage> reply = new AMessage(kWhatSecureDecodersInstantiated, this);
         notifyInstantiateSecureDecoders(reply);
     } else {
@@ -476,7 +418,7 @@
 
     if (mIsStreaming) {
         if (mBufferingMonitorLooper == NULL) {
-            mBufferingMonitor->prepare(mCachedSource, mWVMExtractor, mDurationUs, mBitrate,
+            mBufferingMonitor->prepare(mCachedSource, mDurationUs, mBitrate,
                     mIsStreaming);
 
             mBufferingMonitorLooper = new ALooper;
@@ -536,12 +478,6 @@
     // nothing to do, just account for DRM playback status
     setDrmPlaybackStatusIfNeeded(Playback::STOP, 0);
     mStarted = false;
-    if (mIsWidevine || mIsSecure) {
-        // For widevine or secure sources we need to prevent any further reads.
-        sp<AMessage> msg = new AMessage(kWhatStopWidevine, this);
-        sp<AMessage> response;
-        (void) msg->postAndAwaitResponse(&response);
-    }
 }
 
 void NuPlayer::GenericSource::pause() {
@@ -665,8 +601,10 @@
           } else {
               timeUs = mVideoLastDequeueTimeUs;
           }
-          readBuffer(trackType, timeUs, &actualTimeUs, formatChange);
-          readBuffer(counterpartType, -1, NULL, formatChange);
+          readBuffer(trackType, timeUs, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */,
+                  &actualTimeUs, formatChange);
+          readBuffer(counterpartType, -1, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */,
+                  NULL, !formatChange);
           ALOGV("timeUs %lld actualTimeUs %lld", (long long)timeUs, (long long)actualTimeUs);
 
           break;
@@ -723,20 +661,6 @@
           break;
       }
 
-      case kWhatStopWidevine:
-      {
-          // mStopRead is only used for Widevine to prevent the video source
-          // from being read while the associated video decoder is shutting down.
-          mStopRead = true;
-          if (mVideoTrack.mSource != NULL) {
-              mVideoTrack.mPackets->clear();
-          }
-          sp<AMessage> response = new AMessage;
-          sp<AReplyToken> replyID;
-          CHECK(msg->senderAwaitsResponse(&replyID));
-          response->postReply(replyID);
-          break;
-      }
       default:
           Source::onMessageReceived(msg);
           break;
@@ -765,7 +689,7 @@
     CHECK(msg->findInt64("timeUs", &timeUs));
 
     int64_t subTimeUs;
-    readBuffer(type, timeUs, &subTimeUs);
+    readBuffer(type, timeUs, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */, &subTimeUs);
 
     int64_t delayUs = subTimeUs - timeUs;
     if (msg->what() == kWhatFetchSubtitleData) {
@@ -796,7 +720,7 @@
     }
 
     int64_t nextSubTimeUs;
-    readBuffer(type, -1, &nextSubTimeUs);
+    readBuffer(type, -1, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */, &nextSubTimeUs);
 
     sp<ABuffer> buffer;
     status_t dequeueStatus = packets->dequeueAccessUnit(&buffer);
@@ -892,11 +816,6 @@
         return -EWOULDBLOCK;
     }
 
-    if (mIsWidevine && !audio) {
-        // try to read a buffer as we may not have been able to the last time
-        postReadBuffer(MEDIA_TRACK_TYPE_VIDEO);
-    }
-
     status_t finalResult;
     if (!track->mPackets->hasBufferAvailable(&finalResult)) {
         if (finalResult == OK) {
@@ -1220,9 +1139,10 @@
     return INVALID_OPERATION;
 }
 
-status_t NuPlayer::GenericSource::seekTo(int64_t seekTimeUs) {
+status_t NuPlayer::GenericSource::seekTo(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
     sp<AMessage> msg = new AMessage(kWhatSeek, this);
     msg->setInt64("seekTimeUs", seekTimeUs);
+    msg->setInt32("mode", mode);
 
     sp<AMessage> response;
     status_t err = msg->postAndAwaitResponse(&response);
@@ -1235,10 +1155,12 @@
 
 void NuPlayer::GenericSource::onSeek(const sp<AMessage>& msg) {
     int64_t seekTimeUs;
+    int32_t mode;
     CHECK(msg->findInt64("seekTimeUs", &seekTimeUs));
+    CHECK(msg->findInt32("mode", &mode));
 
     sp<AMessage> response = new AMessage;
-    status_t err = doSeek(seekTimeUs);
+    status_t err = doSeek(seekTimeUs, (MediaPlayerSeekMode)mode);
     response->setInt32("err", err);
 
     sp<AReplyToken> replyID;
@@ -1246,20 +1168,25 @@
     response->postReply(replyID);
 }
 
-status_t NuPlayer::GenericSource::doSeek(int64_t seekTimeUs) {
+status_t NuPlayer::GenericSource::doSeek(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
     mBufferingMonitor->updateDequeuedBufferTime(-1ll);
 
     // If the Widevine source is stopped, do not attempt to read any
     // more buffers.
+    //
+    // TODO: revisit after widevine is removed.  May be able to
+    // combine mStopRead with mStarted.
     if (mStopRead) {
         return INVALID_OPERATION;
     }
     if (mVideoTrack.mSource != NULL) {
         int64_t actualTimeUs;
-        readBuffer(MEDIA_TRACK_TYPE_VIDEO, seekTimeUs, &actualTimeUs);
+        readBuffer(MEDIA_TRACK_TYPE_VIDEO, seekTimeUs, mode, &actualTimeUs);
 
-        seekTimeUs = actualTimeUs;
-        mVideoLastDequeueTimeUs = seekTimeUs;
+        if (mode != MediaPlayerSeekMode::SEEK_CLOSEST) {
+            seekTimeUs = actualTimeUs;
+        }
+        mVideoLastDequeueTimeUs = actualTimeUs;
     }
 
     if (mAudioTrack.mSource != NULL) {
@@ -1283,9 +1210,7 @@
 
 sp<ABuffer> NuPlayer::GenericSource::mediaBufferToABuffer(
         MediaBuffer* mb,
-        media_track_type trackType,
-        int64_t /* seekTimeUs */,
-        int64_t *actualTimeUs) {
+        media_track_type trackType) {
     bool audio = trackType == MEDIA_TRACK_TYPE_AUDIO;
     size_t outLength = mb->range_length();
 
@@ -1322,16 +1247,6 @@
     CHECK(mb->meta_data()->findInt64(kKeyTime, &timeUs));
     meta->setInt64("timeUs", timeUs);
 
-#if 0
-    // Temporarily disable pre-roll till we have a full solution to handle
-    // both single seek and continous seek gracefully.
-    if (seekTimeUs > timeUs) {
-        sp<AMessage> extra = new AMessage;
-        extra->setInt64("resume-at-mediaTimeUs", seekTimeUs);
-        meta->setMessage("extra", extra);
-    }
-#endif
-
     if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
         int32_t layerId;
         if (mb->meta_data()->findInt32(kKeyTemporalLayerId, &layerId)) {
@@ -1371,10 +1286,6 @@
         meta->setBuffer("mpegUserData", mpegUserData);
     }
 
-    if (actualTimeUs) {
-        *actualTimeUs = timeUs;
-    }
-
     mb->release();
     mb = NULL;
 
@@ -1406,8 +1317,12 @@
 }
 
 void NuPlayer::GenericSource::readBuffer(
-        media_track_type trackType, int64_t seekTimeUs, int64_t *actualTimeUs, bool formatChange) {
+        media_track_type trackType, int64_t seekTimeUs, MediaPlayerSeekMode mode,
+        int64_t *actualTimeUs, bool formatChange) {
     // Do not read data if Widevine source is stopped
+    //
+    // TODO: revisit after widevine is removed.  May be able to
+    // combine mStopRead with mStarted.
     if (mStopRead) {
         return;
     }
@@ -1416,19 +1331,11 @@
     switch (trackType) {
         case MEDIA_TRACK_TYPE_VIDEO:
             track = &mVideoTrack;
-            if (mIsWidevine) {
-                maxBuffers = 2;
-            } else {
-                maxBuffers = 8;  // too large of a number may influence seeks
-            }
+            maxBuffers = 8;  // too large a number may adversely affect seeks
             break;
         case MEDIA_TRACK_TYPE_AUDIO:
             track = &mAudioTrack;
-            if (mIsWidevine) {
-                maxBuffers = 8;
-            } else {
-                maxBuffers = 64;
-            }
+            maxBuffers = 64;
             break;
         case MEDIA_TRACK_TYPE_SUBTITLE:
             track = &mSubtitleTrack;
@@ -1452,13 +1359,13 @@
 
     bool seeking = false;
     if (seekTimeUs >= 0) {
-        options.setSeekTo(seekTimeUs, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
+        options.setSeekTo(seekTimeUs, mode);
         seeking = true;
     }
 
-    const bool couldReadMultiple = (!mIsWidevine && track->mSource->supportReadMultiple());
+    const bool couldReadMultiple = (track->mSource->supportReadMultiple());
 
-    if (mIsWidevine || couldReadMultiple) {
+    if (couldReadMultiple) {
         options.setNonBlocking();
     }
 
@@ -1499,9 +1406,20 @@
 
             queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);
 
-            sp<ABuffer> buffer = mediaBufferToABuffer(
-                    mbuf, trackType, seekTimeUs,
-                    numBuffers == 0 ? actualTimeUs : NULL);
+            sp<ABuffer> buffer = mediaBufferToABuffer(mbuf, trackType);
+            if (numBuffers == 0 && actualTimeUs != nullptr) {
+                *actualTimeUs = timeUs;
+            }
+            if (seeking && buffer != nullptr) {
+                sp<AMessage> meta = buffer->meta();
+                if (meta != nullptr && mode == MediaPlayerSeekMode::SEEK_CLOSEST
+                        && seekTimeUs > timeUs) {
+                    sp<AMessage> extra = new AMessage;
+                    extra->setInt64("resume-at-mediaTimeUs", seekTimeUs);
+                    meta->setMessage("extra", extra);
+                }
+            }
+
             track->mPackets->queueAccessUnit(buffer);
             formatChange = false;
             seeking = false;
@@ -1562,24 +1480,66 @@
       mFirstDequeuedBufferRealUs(-1ll),
       mFirstDequeuedBufferMediaUs(-1ll),
       mlastDequeuedBufferMediaUs(-1ll) {
+      getDefaultBufferingSettings(&mSettings);
 }
 
 NuPlayer::GenericSource::BufferingMonitor::~BufferingMonitor() {
 }
 
+void NuPlayer::GenericSource::BufferingMonitor::getDefaultBufferingSettings(
+        BufferingSettings *buffering /* nonnull */) {
+    buffering->mInitialBufferingMode = BUFFERING_MODE_TIME_ONLY;
+    buffering->mRebufferingMode = BUFFERING_MODE_TIME_THEN_SIZE;
+    buffering->mInitialWatermarkMs = kHighWaterMarkMs;
+    buffering->mRebufferingWatermarkLowMs = kLowWaterMarkMs;
+    buffering->mRebufferingWatermarkHighMs = kHighWaterMarkRebufferMs;
+    buffering->mRebufferingWatermarkLowKB = kLowWaterMarkKB;
+    buffering->mRebufferingWatermarkHighKB = kHighWaterMarkKB;
+
+    ALOGV("BufferingMonitor::getDefaultBufferingSettings{%s}",
+            buffering->toString().string());
+}
+
+status_t NuPlayer::GenericSource::BufferingMonitor::setBufferingSettings(
+        const BufferingSettings &buffering) {
+    ALOGV("BufferingMonitor::setBufferingSettings{%s}",
+            buffering.toString().string());
+
+    Mutex::Autolock _l(mLock);
+    if (buffering.IsSizeBasedBufferingMode(buffering.mInitialBufferingMode)
+            || (buffering.IsTimeBasedBufferingMode(buffering.mRebufferingMode)
+                && buffering.mRebufferingWatermarkLowMs > buffering.mRebufferingWatermarkHighMs)
+            || (buffering.IsSizeBasedBufferingMode(buffering.mRebufferingMode)
+                && buffering.mRebufferingWatermarkLowKB > buffering.mRebufferingWatermarkHighKB)) {
+        return BAD_VALUE;
+    }
+    mSettings = buffering;
+    if (mSettings.mInitialBufferingMode == BUFFERING_MODE_NONE) {
+        mSettings.mInitialWatermarkMs = BufferingSettings::kNoWatermark;
+    }
+    if (!mSettings.IsTimeBasedBufferingMode(mSettings.mRebufferingMode)) {
+        mSettings.mRebufferingWatermarkLowMs = BufferingSettings::kNoWatermark;
+        mSettings.mRebufferingWatermarkHighMs = INT32_MAX;
+    }
+    if (!mSettings.IsSizeBasedBufferingMode(mSettings.mRebufferingMode)) {
+        mSettings.mRebufferingWatermarkLowKB = BufferingSettings::kNoWatermark;
+        mSettings.mRebufferingWatermarkHighKB = INT32_MAX;
+    }
+    return OK;
+}
+
 void NuPlayer::GenericSource::BufferingMonitor::prepare(
         const sp<NuCachedSource2> &cachedSource,
-        const sp<WVMExtractor> &wvmExtractor,
         int64_t durationUs,
         int64_t bitrate,
         bool isStreaming) {
     Mutex::Autolock _l(mLock);
-    prepare_l(cachedSource, wvmExtractor, durationUs, bitrate, isStreaming);
+    prepare_l(cachedSource, durationUs, bitrate, isStreaming);
 }
 
 void NuPlayer::GenericSource::BufferingMonitor::stop() {
     Mutex::Autolock _l(mLock);
-    prepare_l(NULL /* cachedSource */, NULL /* wvmExtractor */, -1 /* durationUs */,
+    prepare_l(NULL /* cachedSource */, -1 /* durationUs */,
             -1 /* bitrate */, false /* isStreaming */);
 }
 
@@ -1634,22 +1594,17 @@
 
 void NuPlayer::GenericSource::BufferingMonitor::prepare_l(
         const sp<NuCachedSource2> &cachedSource,
-        const sp<WVMExtractor> &wvmExtractor,
         int64_t durationUs,
         int64_t bitrate,
         bool isStreaming) {
-    ALOGW_IF(wvmExtractor != NULL && cachedSource != NULL,
-            "WVMExtractor and NuCachedSource are both present when "
-            "BufferingMonitor::prepare_l is called, ignore NuCachedSource");
 
     mCachedSource = cachedSource;
-    mWVMExtractor = wvmExtractor;
     mDurationUs = durationUs;
     mBitrate = bitrate;
     mIsStreaming = isStreaming;
     mAudioTimeUs = 0;
     mVideoTimeUs = 0;
-    mPrepareBuffering = (cachedSource != NULL || wvmExtractor != NULL);
+    mPrepareBuffering = (cachedSource != NULL);
     cancelPollBuffering_l();
     mOffloadAudio = false;
     mFirstDequeuedBufferRealUs = -1ll;
@@ -1733,9 +1688,7 @@
     int32_t kbps = 0;
     status_t err = UNKNOWN_ERROR;
 
-    if (mWVMExtractor != NULL) {
-        err = mWVMExtractor->getEstimatedBandwidthKbps(&kbps);
-    } else if (mCachedSource != NULL) {
+    if (mCachedSource != NULL) {
         err = mCachedSource->getEstimatedBandwidthKbps(&kbps);
     }
 
@@ -1775,10 +1728,7 @@
     int64_t cachedDurationUs = -1ll;
     ssize_t cachedDataRemaining = -1;
 
-    if (mWVMExtractor != NULL) {
-        cachedDurationUs =
-                mWVMExtractor->getCachedDurationUs(&finalStatus);
-    } else if (mCachedSource != NULL) {
+    if (mCachedSource != NULL) {
         cachedDataRemaining =
                 mCachedSource->approxDataRemaining(&finalStatus);
 
@@ -1806,7 +1756,9 @@
 
         stopBufferingIfNecessary_l();
         return;
-    } else if (cachedDurationUs >= 0ll) {
+    }
+
+    if (cachedDurationUs >= 0ll) {
         if (mDurationUs > 0ll) {
             int64_t cachedPosUs = getLastReadPosition_l() + cachedDurationUs;
             int percentage = 100.0 * cachedPosUs / mDurationUs;
@@ -1817,36 +1769,40 @@
             notifyBufferingUpdate_l(percentage);
         }
 
-        ALOGV("onPollBuffering_l: cachedDurationUs %.1f sec",
-                cachedDurationUs / 1000000.0f);
+        ALOGV("onPollBuffering_l: cachedDurationUs %.1f sec", cachedDurationUs / 1000000.0f);
 
-        if (cachedDurationUs < kLowWaterMarkUs) {
-            // Take into account the data cached in downstream components to try to avoid
-            // unnecessary pause.
-            if (mOffloadAudio && mFirstDequeuedBufferRealUs >= 0) {
-                int64_t downStreamCacheUs = mlastDequeuedBufferMediaUs - mFirstDequeuedBufferMediaUs
-                        - (ALooper::GetNowUs() - mFirstDequeuedBufferRealUs);
-                if (downStreamCacheUs > 0) {
-                    cachedDurationUs += downStreamCacheUs;
+        if (mPrepareBuffering) {
+            if (cachedDurationUs > mSettings.mInitialWatermarkMs * 1000) {
+                stopBufferingIfNecessary_l();
+            }
+        } else if (mSettings.IsTimeBasedBufferingMode(mSettings.mRebufferingMode)) {
+            if (cachedDurationUs < mSettings.mRebufferingWatermarkLowMs * 1000) {
+                // Take into account the data cached in downstream components to try to avoid
+                // unnecessary pause.
+                if (mOffloadAudio && mFirstDequeuedBufferRealUs >= 0) {
+                    int64_t downStreamCacheUs =
+                        mlastDequeuedBufferMediaUs - mFirstDequeuedBufferMediaUs
+                            - (ALooper::GetNowUs() - mFirstDequeuedBufferRealUs);
+                    if (downStreamCacheUs > 0) {
+                        cachedDurationUs += downStreamCacheUs;
+                    }
                 }
-            }
 
-            if (cachedDurationUs < kLowWaterMarkUs) {
-                startBufferingIfNecessary_l();
-            }
-        } else {
-            int64_t highWaterMark = mPrepareBuffering ? kHighWaterMarkUs : kHighWaterMarkRebufferUs;
-            if (cachedDurationUs > highWaterMark) {
+                if (cachedDurationUs < mSettings.mRebufferingWatermarkLowMs * 1000) {
+                    startBufferingIfNecessary_l();
+                }
+            } else if (cachedDurationUs > mSettings.mRebufferingWatermarkHighMs * 1000) {
                 stopBufferingIfNecessary_l();
             }
         }
-    } else if (cachedDataRemaining >= 0) {
+    } else if (cachedDataRemaining >= 0
+            && mSettings.IsSizeBasedBufferingMode(mSettings.mRebufferingMode)) {
         ALOGV("onPollBuffering_l: cachedDataRemaining %zd bytes",
                 cachedDataRemaining);
 
-        if (cachedDataRemaining < kLowWaterMarkBytes) {
+        if (cachedDataRemaining < (mSettings.mRebufferingWatermarkLowKB << 10)) {
             startBufferingIfNecessary_l();
-        } else if (cachedDataRemaining > kHighWaterMarkBytes) {
+        } else if (cachedDataRemaining > (mSettings.mRebufferingWatermarkHighKB << 10)) {
             stopBufferingIfNecessary_l();
         }
     }
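
Note on the BufferingMonitor hunks above: rebuffering decisions are now driven by the configurable BufferingSettings watermarks rather than the fixed kLow/HighWaterMark constants that the patch removes. A hedged standalone sketch of the watermark comparisons (hypothetical names; the prepare-time path and the downstream-cache credit are omitted):

#include <cstdint>
#include <cstdio>

enum class Action { None, StartBuffering, StopBuffering };

// cachedDurationUs: media time already cached ahead of the playback position.
Action pollTimeBasedBuffering(int64_t cachedDurationUs,
                              int32_t lowWatermarkMs, int32_t highWatermarkMs) {
    if (cachedDurationUs < (int64_t)lowWatermarkMs * 1000) {
        return Action::StartBuffering;   // pause rendering and refill the cache
    }
    if (cachedDurationUs > (int64_t)highWatermarkMs * 1000) {
        return Action::StopBuffering;    // enough runway, resume playback
    }
    return Action::None;                 // between the marks: leave state alone
}

// Size-based variant: watermarks are in KB, cache occupancy in bytes (KB << 10).
Action pollSizeBasedBuffering(int64_t cachedBytes, int32_t lowKB, int32_t highKB) {
    if (cachedBytes < ((int64_t)lowKB << 10)) return Action::StartBuffering;
    if (cachedBytes > ((int64_t)highKB << 10)) return Action::StopBuffering;
    return Action::None;
}

int main() {
    // 1.2 s cached against a 2 s low / 15 s high watermark: start buffering (1)
    std::printf("%d\n", static_cast<int>(pollTimeBasedBuffering(1200000, 2000, 15000)));
    return 0;
}
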
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index e92a2ae..e1949f3 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -37,7 +37,6 @@
 struct MediaSource;
 class MediaBuffer;
 struct NuCachedSource2;
-class WVMExtractor;
 
 struct NuPlayer::GenericSource : public NuPlayer::Source {
     GenericSource(const sp<AMessage> &notify, bool uidValid, uid_t uid);
@@ -51,6 +50,10 @@
 
     status_t setDataSource(const sp<DataSource>& dataSource);
 
+    virtual status_t getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) override;
+    virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
     virtual void prepareAsync();
 
     virtual void start();
@@ -71,7 +74,9 @@
     virtual sp<AMessage> getTrackInfo(size_t trackIndex) const;
     virtual ssize_t getSelectedTrack(media_track_type type) const;
     virtual status_t selectTrack(size_t trackIndex, bool select, int64_t timeUs);
-    virtual status_t seekTo(int64_t seekTimeUs);
+    virtual status_t seekTo(
+        int64_t seekTimeUs,
+        MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) override;
 
     virtual status_t setBuffers(bool audio, Vector<MediaBuffer *> &buffers);
 
@@ -101,7 +106,6 @@
         kWhatSelectTrack,
         kWhatSeek,
         kWhatReadBuffer,
-        kWhatStopWidevine,
         kWhatStart,
         kWhatResume,
         kWhatSecureDecodersInstantiated,
@@ -119,9 +123,11 @@
     public:
         explicit BufferingMonitor(const sp<AMessage> &notify);
 
+        void getDefaultBufferingSettings(BufferingSettings *buffering /* nonnull */);
+        status_t setBufferingSettings(const BufferingSettings &buffering);
+
         // Set up state.
         void prepare(const sp<NuCachedSource2> &cachedSource,
-                const sp<WVMExtractor> &wvmExtractor,
                 int64_t durationUs,
                 int64_t bitrate,
                 bool isStreaming);
@@ -155,7 +161,6 @@
         sp<AMessage> mNotify;
 
         sp<NuCachedSource2> mCachedSource;
-        sp<WVMExtractor> mWVMExtractor;
         int64_t mDurationUs;
         int64_t mBitrate;
         bool mIsStreaming;
@@ -169,13 +174,13 @@
 
         mutable Mutex mLock;
 
+        BufferingSettings mSettings;
         bool mOffloadAudio;
         int64_t mFirstDequeuedBufferRealUs;
         int64_t mFirstDequeuedBufferMediaUs;
         int64_t mlastDequeuedBufferMediaUs;
 
         void prepare_l(const sp<NuCachedSource2> &cachedSource,
-                const sp<WVMExtractor> &wvmExtractor,
                 int64_t durationUs,
                 int64_t bitrate,
                 bool isStreaming);
@@ -204,7 +209,6 @@
     int32_t mFetchTimedTextDataGeneration;
     int64_t mDurationUs;
     bool mAudioIsVorbis;
-    bool mIsWidevine;
     bool mIsSecure;
     bool mIsStreaming;
     bool mUIDValid;
@@ -219,7 +223,6 @@
     sp<DataSource> mDataSource;
     sp<NuCachedSource2> mCachedSource;
     sp<DataSource> mHttpSource;
-    sp<WVMExtractor> mWVMExtractor;
     sp<MetaData> mFileMeta;
     DrmManagerClient *mDrmManagerClient;
     sp<DecryptHandle> mDecryptHandle;
@@ -239,7 +242,6 @@
     void resetDataSource();
 
     status_t initFromDataSource();
-    void checkDrmStatus(const sp<DataSource>& dataSource);
     int64_t getLastReadPosition();
     void setDrmPlaybackStatusIfNeeded(int playbackStatus, int64_t position);
 
@@ -261,7 +263,7 @@
     status_t doSelectTrack(size_t trackIndex, bool select, int64_t timeUs);
 
     void onSeek(const sp<AMessage>& msg);
-    status_t doSeek(int64_t seekTimeUs);
+    status_t doSeek(int64_t seekTimeUs, MediaPlayerSeekMode mode);
 
     void onPrepareAsync();
 
@@ -279,15 +281,20 @@
 
     sp<ABuffer> mediaBufferToABuffer(
             MediaBuffer *mbuf,
-            media_track_type trackType,
-            int64_t seekTimeUs,
-            int64_t *actualTimeUs = NULL);
+            media_track_type trackType);
 
     void postReadBuffer(media_track_type trackType);
     void onReadBuffer(const sp<AMessage>& msg);
+    // When |mode| is MediaPlayerSeekMode::SEEK_CLOSEST, the buffer read shall
+    // carry a hint telling the renderer to skip rendering all buffers with a
+    // timestamp earlier than |seekTimeUs|.
+    // For other modes, no such hint is attached to the buffer read, in order
+    // to keep seek operations fast.
     void readBuffer(
             media_track_type trackType,
-            int64_t seekTimeUs = -1ll, int64_t *actualTimeUs = NULL, bool formatChange = false);
+            int64_t seekTimeUs = -1ll,
+            MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC,
+            int64_t *actualTimeUs = NULL, bool formatChange = false);
 
     void queueDiscontinuityIfNeeded(
             bool seeking, bool formatChange, media_track_type trackType, Track *track);
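
Note on the readBuffer() comment above: only SEEK_CLOSEST attaches the resume hint, so buffers before the requested time are decoded but not rendered. A minimal sketch of that decision (hypothetical names, std::optional in place of the AMessage "extra"):

#include <cstdint>
#include <cstdio>
#include <optional>

// Returns the media time rendering should resume at when a hint is needed,
// std::nullopt otherwise.
std::optional<int64_t> resumeAtHint(bool seekClosest,
                                    int64_t seekTimeUs, int64_t bufferTimeUs) {
    if (seekClosest && seekTimeUs > bufferTimeUs) {
        return seekTimeUs;   // decode this buffer, but do not render it
    }
    return std::nullopt;     // other seek modes keep the fast path: render everything
}

int main() {
    auto hint = resumeAtHint(true /* SEEK_CLOSEST */, 5000000, 4300000);
    std::printf("resume at %lld us\n", hint ? (long long)*hint : -1LL);
    return 0;
}
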
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index 5027e01..05e6201 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -32,6 +32,11 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/Utils.h>
 
+// default buffer prepare/ready/underflow marks
+static const int kReadyMarkMs     = 5000;  // 5 seconds
+static const int kPrepareMarkMs   = 1500;  // 1.5 seconds
+static const int kUnderflowMarkMs = 1000;  // 1 second
+
 namespace android {
 
 NuPlayer::HTTPLiveSource::HTTPLiveSource(
@@ -49,6 +54,7 @@
       mFetchMetaDataGeneration(0),
       mHasMetadata(false),
       mMetadataSelected(false) {
+    getDefaultBufferingSettings(&mBufferingSettings);
     if (headers) {
         mExtraHeaders = *headers;
 
@@ -76,6 +82,42 @@
     }
 }
 
+status_t NuPlayer::HTTPLiveSource::getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) {
+    buffering->mInitialBufferingMode = BUFFERING_MODE_TIME_ONLY;
+    buffering->mRebufferingMode = BUFFERING_MODE_TIME_ONLY;
+    buffering->mInitialWatermarkMs = kPrepareMarkMs;
+    buffering->mRebufferingWatermarkLowMs = kUnderflowMarkMs;
+    buffering->mRebufferingWatermarkHighMs = kReadyMarkMs;
+
+    return OK;
+}
+
+status_t NuPlayer::HTTPLiveSource::setBufferingSettings(const BufferingSettings& buffering) {
+    if (buffering.IsSizeBasedBufferingMode(buffering.mInitialBufferingMode)
+            || buffering.IsSizeBasedBufferingMode(buffering.mRebufferingMode)
+            || (buffering.IsTimeBasedBufferingMode(buffering.mRebufferingMode)
+                && buffering.mRebufferingWatermarkLowMs > buffering.mRebufferingWatermarkHighMs)) {
+        return BAD_VALUE;
+    }
+
+    mBufferingSettings = buffering;
+
+    if (mBufferingSettings.mInitialBufferingMode == BUFFERING_MODE_NONE) {
+        mBufferingSettings.mInitialWatermarkMs = BufferingSettings::kNoWatermark;
+    }
+    if (mBufferingSettings.mRebufferingMode == BUFFERING_MODE_NONE) {
+        mBufferingSettings.mRebufferingWatermarkLowMs = BufferingSettings::kNoWatermark;
+        mBufferingSettings.mRebufferingWatermarkHighMs = INT32_MAX;
+    }
+
+    if (mLiveSession != NULL) {
+        mLiveSession->setBufferingSettings(mBufferingSettings);
+    }
+
+    return OK;
+}
+
 void NuPlayer::HTTPLiveSource::prepareAsync() {
     if (mLiveLooper == NULL) {
         mLiveLooper = new ALooper;
@@ -94,6 +136,7 @@
 
     mLiveLooper->registerHandler(mLiveSession);
 
+    mLiveSession->setBufferingSettings(mBufferingSettings);
     mLiveSession->connectAsync(
             mURL.c_str(), mExtraHeaders.isEmpty() ? NULL : &mExtraHeaders);
 }
@@ -214,8 +257,8 @@
     return (err == OK || err == BAD_VALUE) ? (status_t)OK : err;
 }
 
-status_t NuPlayer::HTTPLiveSource::seekTo(int64_t seekTimeUs) {
-    return mLiveSession->seekTo(seekTimeUs);
+status_t NuPlayer::HTTPLiveSource::seekTo(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
+    return mLiveSession->seekTo(seekTimeUs, mode);
 }
 
 void NuPlayer::HTTPLiveSource::pollForRawData(
@@ -317,8 +360,9 @@
                 notifyVideoSizeChanged();
             }
 
-            uint32_t flags = FLAG_CAN_PAUSE;
+            uint32_t flags = 0;
             if (mLiveSession->isSeekable()) {
+                flags |= FLAG_CAN_PAUSE;
                 flags |= FLAG_CAN_SEEK;
                 flags |= FLAG_CAN_SEEK_BACKWARD;
                 flags |= FLAG_CAN_SEEK_FORWARD;
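
Note on HTTPLiveSource::setBufferingSettings above: HLS buffers by time only, so size-based modes are rejected, as is an inverted low/high rebuffering pair. A standalone approximation of that validation with simplified stand-in types (the real BufferingSettings lives in libmedia):

#include <cstdint>
#include <cstdio>

enum BufferingMode {
    BUFFERING_MODE_NONE,
    BUFFERING_MODE_TIME_ONLY,
    BUFFERING_MODE_SIZE_ONLY,
};

struct Settings {
    BufferingMode initialMode;
    BufferingMode rebufferingMode;
    int32_t rebufferLowMs;
    int32_t rebufferHighMs;
};

static bool isSizeBased(BufferingMode m) { return m == BUFFERING_MODE_SIZE_ONLY; }
static bool isTimeBased(BufferingMode m) { return m == BUFFERING_MODE_TIME_ONLY; }

// Reject size-based modes outright and inconsistent time watermarks.
bool acceptSettings(const Settings& s) {
    if (isSizeBased(s.initialMode) || isSizeBased(s.rebufferingMode)) return false;
    if (isTimeBased(s.rebufferingMode) && s.rebufferLowMs > s.rebufferHighMs) return false;
    return true;
}

int main() {
    Settings ok  = { BUFFERING_MODE_TIME_ONLY, BUFFERING_MODE_TIME_ONLY, 1000, 5000 };
    Settings bad = { BUFFERING_MODE_TIME_ONLY, BUFFERING_MODE_TIME_ONLY, 5000, 1000 };
    std::printf("%d %d\n", acceptSettings(ok), acceptSettings(bad));  // prints "1 0"
    return 0;
}
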
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
index 574937d..2866a6a 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
@@ -34,6 +34,10 @@
             const char *url,
             const KeyedVector<String8, String8> *headers);
 
+    virtual status_t getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) override;
+    virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
     virtual void prepareAsync();
     virtual void start();
 
@@ -47,7 +51,9 @@
     virtual sp<AMessage> getTrackInfo(size_t trackIndex) const;
     virtual ssize_t getSelectedTrack(media_track_type /* type */) const;
     virtual status_t selectTrack(size_t trackIndex, bool select, int64_t timeUs);
-    virtual status_t seekTo(int64_t seekTimeUs);
+    virtual status_t seekTo(
+            int64_t seekTimeUs,
+            MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) override;
 
 protected:
     virtual ~HTTPLiveSource();
@@ -78,6 +84,7 @@
     int32_t mFetchMetaDataGeneration;
     bool mHasMetadata;
     bool mMetadataSelected;
+    BufferingSettings mBufferingSettings;
 
     void onSessionNotify(const sp<AMessage> &msg);
     void pollForRawData(
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 4e16fba..4c576a5 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -16,6 +16,9 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "NuPlayer"
+
+#include <inttypes.h>
+
 #include <utils/Log.h>
 
 #include "NuPlayer.h"
@@ -39,6 +42,7 @@
 
 #include <media/AudioResamplerPublic.h>
 #include <media/AVSyncSettings.h>
+#include <media/MediaCodecBuffer.h>
 
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/foundation/ABuffer.h>
@@ -69,16 +73,18 @@
 };
 
 struct NuPlayer::SeekAction : public Action {
-    explicit SeekAction(int64_t seekTimeUs)
-        : mSeekTimeUs(seekTimeUs) {
+    explicit SeekAction(int64_t seekTimeUs, MediaPlayerSeekMode mode)
+        : mSeekTimeUs(seekTimeUs),
+          mMode(mode) {
     }
 
     virtual void execute(NuPlayer *player) {
-        player->performSeek(mSeekTimeUs);
+        player->performSeek(mSeekTimeUs, mMode);
     }
 
 private:
     int64_t mSeekTimeUs;
+    MediaPlayerSeekMode mMode;
 
     DISALLOW_EVIL_CONSTRUCTORS(SeekAction);
 };
@@ -260,9 +266,6 @@
     } else {
         sp<GenericSource> genericSource =
                 new GenericSource(notify, mUIDValid, mUID);
-        // Don't set FLAG_SECURE on mSourceFlags here for widevine.
-        // The correct flags will be updated in Source::kWhatFlagsChanged
-        // handler when  GenericSource is prepared.
 
         status_t err = genericSource->setDataSource(httpService, url, headers);
 
@@ -311,6 +314,31 @@
     msg->post();
 }
 
+status_t NuPlayer::getDefaultBufferingSettings(
+        BufferingSettings *buffering /* nonnull */) {
+    sp<AMessage> msg = new AMessage(kWhatGetDefaultBufferingSettings, this);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+        if (err == OK) {
+            readFromAMessage(response, buffering);
+        }
+    }
+    return err;
+}
+
+status_t NuPlayer::setBufferingSettings(const BufferingSettings& buffering) {
+    sp<AMessage> msg = new AMessage(kWhatSetBufferingSettings, this);
+    writeToAMessage(msg, buffering);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+    }
+    return err;
+}
+
 void NuPlayer::prepareAsync() {
     (new AMessage(kWhatPrepare, this))->post();
 }
@@ -419,9 +447,10 @@
     (new AMessage(kWhatReset, this))->post();
 }
 
-void NuPlayer::seekToAsync(int64_t seekTimeUs, bool needNotify) {
+void NuPlayer::seekToAsync(int64_t seekTimeUs, MediaPlayerSeekMode mode, bool needNotify) {
     sp<AMessage> msg = new AMessage(kWhatSeek, this);
     msg->setInt64("seekTimeUs", seekTimeUs);
+    msg->setInt32("mode", mode);
     msg->setInt32("needNotify", needNotify);
     msg->post();
 }
@@ -504,6 +533,48 @@
             break;
         }
 
+        case kWhatGetDefaultBufferingSettings:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            ALOGV("kWhatGetDefaultBufferingSettings");
+            BufferingSettings buffering;
+            status_t err = OK;
+            if (mSource != NULL) {
+                err = mSource->getDefaultBufferingSettings(&buffering);
+            } else {
+                err = INVALID_OPERATION;
+            }
+            sp<AMessage> response = new AMessage;
+            if (err == OK) {
+                writeToAMessage(response, buffering);
+            }
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatSetBufferingSettings:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            ALOGV("kWhatSetBufferingSettings");
+            BufferingSettings buffering;
+            readFromAMessage(msg, &buffering);
+            status_t err = OK;
+            if (mSource != NULL) {
+                err = mSource->setBufferingSettings(buffering);
+            } else {
+                err = INVALID_OPERATION;
+            }
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
         case kWhatPrepare:
         {
             mSource->prepareAsync();
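
Note on the two kWhat*BufferingSettings handlers above: they follow NuPlayer's usual threading pattern, where the public call posts a message to the handler thread and blocks on the reply, and the handler does the work and replies with an "err" field. A rough standalone analogue of post-and-await-response built on std::future instead of AMessage/AReplyToken (all names here are made up, nothing Android-specific):

#include <condition_variable>
#include <cstdio>
#include <functional>
#include <future>
#include <mutex>
#include <queue>
#include <thread>

// A toy single-threaded "looper": posted work items run on one worker thread.
class Looper {
public:
    Looper() : mThread([this] { run(); }) {}
    ~Looper() { post({}); mThread.join(); }   // empty task is the quit signal
    void post(std::function<void()> task) {
        std::lock_guard<std::mutex> lock(mMutex);
        mQueue.push(std::move(task));
        mCond.notify_one();
    }
private:
    void run() {
        for (;;) {
            std::function<void()> task;
            {
                std::unique_lock<std::mutex> lock(mMutex);
                mCond.wait(lock, [this] { return !mQueue.empty(); });
                task = std::move(mQueue.front());
                mQueue.pop();
            }
            if (!task) break;
            task();
        }
    }
    std::mutex mMutex;
    std::condition_variable mCond;
    std::queue<std::function<void()>> mQueue;
    std::thread mThread;
};

// Post-and-await-response: the caller blocks until the handler replies with err.
int setSettingOnLooper(Looper& looper, int value) {
    std::promise<int> reply;
    auto future = reply.get_future();
    looper.post([value, &reply] {
        // "handler" side: apply the setting, then reply with a status code
        std::printf("applied setting %d on looper thread\n", value);
        reply.set_value(0 /* OK */);
    });
    return future.get();
}

int main() {
    Looper looper;
    std::printf("err = %d\n", setSettingOnLooper(looper, 42));
    return 0;
}
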
@@ -680,7 +751,8 @@
                     int64_t currentPositionUs = 0;
                     if (getCurrentPosition(&currentPositionUs) == OK) {
                         mDeferredActions.push_back(
-                                new SeekAction(currentPositionUs));
+                                new SeekAction(currentPositionUs,
+                                        MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */));
                     }
                 }
 
@@ -1196,12 +1268,14 @@
         case kWhatSeek:
         {
             int64_t seekTimeUs;
+            int32_t mode;
             int32_t needNotify;
             CHECK(msg->findInt64("seekTimeUs", &seekTimeUs));
+            CHECK(msg->findInt32("mode", &mode));
             CHECK(msg->findInt32("needNotify", &needNotify));
 
-            ALOGV("kWhatSeek seekTimeUs=%lld us, needNotify=%d",
-                    (long long)seekTimeUs, needNotify);
+            ALOGV("kWhatSeek seekTimeUs=%lld us, mode=%d, needNotify=%d",
+                    (long long)seekTimeUs, mode, needNotify);
 
             if (!mStarted) {
                 // Seek before the player is started. In order to preview video,
@@ -1209,7 +1283,7 @@
                 // only once if needed. After the player is started, any seek
                 // operation will go through normal path.
                 // Audio-only cases are handled separately.
-                onStart(seekTimeUs);
+                onStart(seekTimeUs, (MediaPlayerSeekMode)mode);
                 if (mStarted) {
                     onPause();
                     mPausedByClient = true;
@@ -1225,7 +1299,7 @@
                                            FLUSH_CMD_FLUSH /* video */));
 
             mDeferredActions.push_back(
-                    new SeekAction(seekTimeUs));
+                    new SeekAction(seekTimeUs, (MediaPlayerSeekMode)mode));
 
             // After a flush without shutdown, decoder is paused.
             // Don't resume it until source seek is done, otherwise it could
@@ -1283,6 +1357,8 @@
     } else {
         ALOGW("resume called when renderer is gone or not set");
     }
+
+    mLastStartedPlayingTimeNs = systemTime();
 }
 
 status_t NuPlayer::onInstantiateSecureDecoders() {
@@ -1314,13 +1390,13 @@
     return OK;
 }
 
-void NuPlayer::onStart(int64_t startPositionUs) {
+void NuPlayer::onStart(int64_t startPositionUs, MediaPlayerSeekMode mode) {
     if (!mSourceStarted) {
         mSourceStarted = true;
         mSource->start();
     }
     if (startPositionUs > 0) {
-        performSeek(startPositionUs);
+        performSeek(startPositionUs, mode);
         if (mSource->getFormat(false /* audio */) == NULL) {
             return;
         }
@@ -1338,26 +1414,26 @@
         flags |= Renderer::FLAG_REAL_TIME;
     }
 
-    sp<MetaData> audioMeta = mSource->getFormatMeta(true /* audio */);
-    sp<MetaData> videoMeta = mSource->getFormatMeta(false /* audio */);
-    if (audioMeta == NULL && videoMeta == NULL) {
+    bool hasAudio = (mSource->getFormat(true /* audio */) != NULL);
+    bool hasVideo = (mSource->getFormat(false /* audio */) != NULL);
+    if (!hasAudio && !hasVideo) {
         ALOGE("no metadata for either audio or video source");
         mSource->stop();
         mSourceStarted = false;
         notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, ERROR_MALFORMED);
         return;
     }
-    ALOGV_IF(audioMeta == NULL, "no metadata for audio source");  // video only stream
+    ALOGV_IF(!hasAudio, "no metadata for audio source");  // video only stream
+
+    sp<MetaData> audioMeta = mSource->getFormatMeta(true /* audio */);
 
     audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
     if (mAudioSink != NULL) {
         streamType = mAudioSink->getAudioStreamType();
     }
 
-    sp<AMessage> videoFormat = mSource->getFormat(false /* audio */);
-
     mOffloadAudio =
-        canOffloadStream(audioMeta, (videoFormat != NULL), mSource->isStreaming(), streamType)
+        canOffloadStream(audioMeta, hasVideo, mSource->isStreaming(), streamType)
                 && (mPlaybackSettings.mSpeed == 1.f && mPlaybackSettings.mPitch == 1.f);
     if (mOffloadAudio) {
         flags |= Renderer::FLAG_OFFLOAD_AUDIO;
@@ -1392,6 +1468,8 @@
         mAudioDecoder->setRenderer(mRenderer);
     }
 
+    mLastStartedPlayingTimeNs = systemTime();
+
     postScanSources();
 }
 
@@ -1410,6 +1488,16 @@
     } else {
         ALOGW("pause called when renderer is gone or not set");
     }
+
+    sp<NuPlayerDriver> driver = mDriver.promote();
+    if (driver != NULL) {
+        int64_t now = systemTime();
+        int64_t played = now - mLastStartedPlayingTimeNs;
+        ALOGD("played from %" PRId64 " to %" PRId64 " = %" PRId64 ,
+              mLastStartedPlayingTimeNs, now, played);
+
+        driver->notifyMorePlayingTimeUs((played+500)/1000);
+    }
 }
 
 bool NuPlayer::audioDecoderStillNeeded() {
@@ -1536,7 +1624,7 @@
         mRenderer->flush(false /* audio */, false /* notifyComplete */);
     }
 
-    performSeek(currentPositionUs);
+    performSeek(currentPositionUs, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */);
 
     if (forceNonOffload) {
         mRenderer->signalDisableOffloadAudio();
@@ -1640,7 +1728,7 @@
         } else {
             mSource->setOffloadAudio(false /* offload */);
 
-            *decoder = new Decoder(notify, mSource, mPID, mRenderer);
+            *decoder = new Decoder(notify, mSource, mPID, mUID, mRenderer);
         }
     } else {
         sp<AMessage> notify = new AMessage(kWhatVideoNotify, this);
@@ -1648,15 +1736,13 @@
         notify->setInt32("generation", mVideoDecoderGeneration);
 
         *decoder = new Decoder(
-                notify, mSource, mPID, mRenderer, mSurface, mCCDecoder);
+                notify, mSource, mPID, mUID, mRenderer, mSurface, mCCDecoder);
 
         // enable FRC if high-quality AV sync is requested, even if not
         // directly queuing to display, as this will even improve textureview
         // playback.
         {
-            char value[PROPERTY_VALUE_MAX];
-            if (property_get("persist.sys.media.avsync", value, NULL) &&
-                    (!strcmp("1", value) || !strcasecmp("true", value))) {
+            if (property_get_bool("persist.sys.media.avsync", false)) {
                 format->setInt32("auto-frc", 1);
             }
         }
@@ -1664,29 +1750,6 @@
     (*decoder)->init();
     (*decoder)->configure(format);
 
-    // allocate buffers to decrypt widevine source buffers
-    if (!audio && (mSourceFlags & Source::FLAG_SECURE)) {
-        Vector<sp<ABuffer> > inputBufs;
-        CHECK_EQ((*decoder)->getInputBuffers(&inputBufs), (status_t)OK);
-
-        Vector<MediaBuffer *> mediaBufs;
-        for (size_t i = 0; i < inputBufs.size(); i++) {
-            const sp<ABuffer> &buffer = inputBufs[i];
-            MediaBuffer *mbuf = new MediaBuffer(buffer->data(), buffer->size());
-            mediaBufs.push(mbuf);
-        }
-
-        status_t err = mSource->setBuffers(audio, mediaBufs);
-        if (err != OK) {
-            for (size_t i = 0; i < mediaBufs.size(); ++i) {
-                mediaBufs[i]->release();
-            }
-            mediaBufs.clear();
-            ALOGE("Secure source didn't support secure mediaBufs.");
-            return err;
-        }
-    }
-
     if (!audio) {
         sp<AMessage> params = new AMessage();
         float rate = getFrameRate();
@@ -1718,6 +1781,16 @@
         notifyListener(MEDIA_SET_VIDEO_SIZE, 0, 0);
         return;
     }
+    int32_t err = OK;
+    inputFormat->findInt32("err", &err);
+    if (err == -EWOULDBLOCK) {
+        ALOGW("Video meta is not available yet!");
+        return;
+    }
+    if (err != OK) {
+        ALOGW("Something is wrong with video meta!");
+        return;
+    }
 
     int32_t displayWidth, displayHeight;
     if (outputFormat != NULL) {
@@ -1755,6 +1828,20 @@
         displayWidth = (displayWidth * sarWidth) / sarHeight;
 
         ALOGV("display dimensions %d x %d", displayWidth, displayHeight);
+    } else {
+        int32_t width, height;
+        if (inputFormat->findInt32("display-width", &width)
+                && inputFormat->findInt32("display-height", &height)
+                && width > 0 && height > 0
+                && displayWidth > 0 && displayHeight > 0) {
+            if (displayHeight * (int64_t)width / height > (int64_t)displayWidth) {
+                displayHeight = (int32_t)(displayWidth * (int64_t)height / width);
+            } else {
+                displayWidth = (int32_t)(displayHeight * (int64_t)width / height);
+            }
+            ALOGV("Video display width and height are overridden to %d x %d",
+                 displayWidth, displayHeight);
+        }
     }
 
     int32_t rotationDegrees;
@@ -1979,10 +2066,9 @@
     }
 }
 
-void NuPlayer::performSeek(int64_t seekTimeUs) {
-    ALOGV("performSeek seekTimeUs=%lld us (%.2f secs)",
-          (long long)seekTimeUs,
-          seekTimeUs / 1E6);
+void NuPlayer::performSeek(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
+    ALOGV("performSeek seekTimeUs=%lld us (%.2f secs), mode=%d",
+          (long long)seekTimeUs, seekTimeUs / 1E6, mode);
 
     if (mSource == NULL) {
         // This happens when reset occurs right before the loop mode
@@ -1993,7 +2079,7 @@
         return;
     }
     mPreviousSeekTimeUs = seekTimeUs;
-    mSource->seekTo(seekTimeUs);
+    mSource->seekTo(seekTimeUs, mode);
     ++mTimedTextGeneration;
 
     // everything's flushed, continue playback.
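The seek-mode plumbing above follows a single pattern: the enum is stored into the AMessage as an int32 at the call site and cast back to MediaPlayerSeekMode when kWhatSeek is handled. Below is a minimal standalone sketch of that round-trip; the Message struct and SeekMode enum are illustrative stand-ins, not the framework's AMessage or MediaPlayerSeekMode.

#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

enum class SeekMode : int32_t {
    SEEK_PREVIOUS_SYNC = 0, SEEK_NEXT_SYNC, SEEK_CLOSEST_SYNC, SEEK_CLOSEST
};

struct Message {  // stand-in for AMessage (illustration only)
    std::map<std::string, int64_t> fields;
    void setInt64(const char *k, int64_t v) { fields[k] = v; }
    void setInt32(const char *k, int32_t v) { fields[k] = v; }
    bool findInt64(const char *k, int64_t *v) const {
        auto it = fields.find(k); if (it == fields.end()) return false; *v = it->second; return true;
    }
    bool findInt32(const char *k, int32_t *v) const {
        auto it = fields.find(k); if (it == fields.end()) return false; *v = (int32_t)it->second; return true;
    }
};

static void performSeekSketch(int64_t seekTimeUs, SeekMode mode) {
    std::printf("seek to %lld us, mode=%d\n", (long long)seekTimeUs, (int)mode);
}

int main() {
    Message msg;                                            // the kWhatSeek payload
    msg.setInt64("seekTimeUs", 1500000);
    msg.setInt32("mode", (int32_t)SeekMode::SEEK_CLOSEST);  // enum travels as int32

    int64_t seekTimeUs = 0;
    int32_t mode = 0;
    if (msg.findInt64("seekTimeUs", &seekTimeUs) && msg.findInt32("mode", &mode)) {
        performSeekSketch(seekTimeUs, (SeekMode)mode);      // cast back on receipt
    }
    return 0;
}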
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index a002f6f..cc8c97a 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -50,6 +50,9 @@
 
     void setDataSourceAsync(const sp<DataSource> &source);
 
+    status_t getDefaultBufferingSettings(BufferingSettings* buffering /* nonnull */);
+    status_t setBufferingSettings(const BufferingSettings& buffering);
+
     void prepareAsync();
 
     void setVideoSurfaceTextureAsync(
@@ -70,7 +73,10 @@
 
     // Will notify the driver through "notifySeekComplete" once finished
     // and needNotify is true.
-    void seekToAsync(int64_t seekTimeUs, bool needNotify = false);
+    void seekToAsync(
+            int64_t seekTimeUs,
+            MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC,
+            bool needNotify = false);
 
     status_t setVideoScalingMode(int32_t mode);
     status_t getTrackInfo(Parcel* reply) const;
@@ -134,6 +140,8 @@
         kWhatGetTrackInfo               = 'gTrI',
         kWhatGetSelectedTrack           = 'gSel',
         kWhatSelectTrack                = 'selT',
+        kWhatGetDefaultBufferingSettings = 'gDBS',
+        kWhatSetBufferingSettings       = 'sBuS',
     };
 
     wp<NuPlayerDriver> mDriver;
@@ -155,6 +163,8 @@
     int32_t mVideoDecoderGeneration;
     int32_t mRendererGeneration;
 
+    int64_t mLastStartedPlayingTimeNs;
+
     int64_t mPreviousSeekTimeUs;
 
     List<sp<Action> > mDeferredActions;
@@ -245,7 +255,9 @@
     void handleFlushComplete(bool audio, bool isDecoder);
     void finishFlushIfPossible();
 
-    void onStart(int64_t startPositionUs = -1);
+    void onStart(
+            int64_t startPositionUs = -1,
+            MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC);
     void onResume();
     void onPause();
 
@@ -263,7 +275,7 @@
 
     void processDeferredActions();
 
-    void performSeek(int64_t seekTimeUs);
+    void performSeek(int64_t seekTimeUs, MediaPlayerSeekMode mode);
     void performDecoderFlush(FlushCommand audio, FlushCommand video);
     void performReset();
     void performScanSources();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
index 978d360..73b07bb 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
@@ -72,37 +72,37 @@
 
         if (cc->mData1 >= 0x20 && cc->mData1 <= 0x7f) {
             // 2 basic chars
-            sprintf(tmp, "[%d]Basic: %c %c", cc->mType, cc->mData1, cc->mData2);
+            snprintf(tmp, sizeof(tmp), "[%d]Basic: %c %c", cc->mType, cc->mData1, cc->mData2);
         } else if ((cc->mData1 == 0x11 || cc->mData1 == 0x19)
                  && cc->mData2 >= 0x30 && cc->mData2 <= 0x3f) {
             // 1 special char
-            sprintf(tmp, "[%d]Special: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+            snprintf(tmp, sizeof(tmp), "[%d]Special: %02x %02x", cc->mType, cc->mData1, cc->mData2);
         } else if ((cc->mData1 == 0x12 || cc->mData1 == 0x1A)
                  && cc->mData2 >= 0x20 && cc->mData2 <= 0x3f){
             // 1 Spanish/French char
-            sprintf(tmp, "[%d]Spanish: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+            snprintf(tmp, sizeof(tmp), "[%d]Spanish: %02x %02x", cc->mType, cc->mData1, cc->mData2);
         } else if ((cc->mData1 == 0x13 || cc->mData1 == 0x1B)
                  && cc->mData2 >= 0x20 && cc->mData2 <= 0x3f){
             // 1 Portuguese/German/Danish char
-            sprintf(tmp, "[%d]German: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+            snprintf(tmp, sizeof(tmp), "[%d]German: %02x %02x", cc->mType, cc->mData1, cc->mData2);
         } else if ((cc->mData1 == 0x11 || cc->mData1 == 0x19)
                  && cc->mData2 >= 0x20 && cc->mData2 <= 0x2f){
             // Mid-Row Codes (Table 69)
-            sprintf(tmp, "[%d]Mid-row: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+            snprintf(tmp, sizeof(tmp), "[%d]Mid-row: %02x %02x", cc->mType, cc->mData1, cc->mData2);
         } else if (((cc->mData1 == 0x14 || cc->mData1 == 0x1c)
                   && cc->mData2 >= 0x20 && cc->mData2 <= 0x2f)
                   ||
                    ((cc->mData1 == 0x17 || cc->mData1 == 0x1f)
                   && cc->mData2 >= 0x21 && cc->mData2 <= 0x23)){
             // Misc Control Codes (Table 70)
-            sprintf(tmp, "[%d]Ctrl: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+            snprintf(tmp, sizeof(tmp), "[%d]Ctrl: %02x %02x", cc->mType, cc->mData1, cc->mData2);
         } else if ((cc->mData1 & 0x70) == 0x10
                 && (cc->mData2 & 0x40) == 0x40
                 && ((cc->mData1 & 0x07) || !(cc->mData2 & 0x20)) ) {
             // Preamble Address Codes (Table 71)
-            sprintf(tmp, "[%d]PAC: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+            snprintf(tmp, sizeof(tmp), "[%d]PAC: %02x %02x", cc->mType, cc->mData1, cc->mData2);
         } else {
-            sprintf(tmp, "[%d]Invalid: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+            snprintf(tmp, sizeof(tmp), "[%d]Invalid: %02x %02x", cc->mType, cc->mData1, cc->mData2);
         }
 
         if (out.size() > 0) {
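The NuPlayerCCDecoder change above replaces every sprintf into the fixed tmp buffer with snprintf, so an unexpectedly long formatted string is truncated instead of overrunning the stack. Here is a small standalone sketch of that behavior; the buffer sizes are chosen only for the demonstration.

#include <cstdio>

int main() {
    char tmp[32];
    // snprintf returns the length the full string would have had (excluding the NUL),
    // while never writing past sizeof(tmp).
    int needed = std::snprintf(tmp, sizeof(tmp), "[%d]Basic: %c %c", 0, 'A', 'B');
    std::printf("wrote \"%s\" (full length %d)\n", tmp, needed);

    // With a buffer that is too small the output is truncated but still NUL-terminated.
    char small[8];
    needed = std::snprintf(small, sizeof(small), "[%d]Invalid: %02x %02x", 1, 0x14, 0x2f);
    std::printf("truncated to \"%s\", full length would be %d\n", small, needed);
    return 0;
}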
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 594128c..1d62498 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -28,6 +28,7 @@
 
 #include <cutils/properties.h>
 #include <media/ICrypto.h>
+#include <media/MediaCodecBuffer.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -57,6 +58,7 @@
         const sp<AMessage> &notify,
         const sp<Source> &source,
         pid_t pid,
+        uid_t uid,
         const sp<Renderer> &renderer,
         const sp<Surface> &surface,
         const sp<CCDecoder> &ccDecoder)
@@ -66,6 +68,7 @@
       mRenderer(renderer),
       mCCDecoder(ccDecoder),
       mPid(pid),
+      mUid(uid),
       mSkipRenderingUntilMediaTimeUs(-1ll),
       mNumFramesTotal(0ll),
       mNumInputFramesDropped(0ll),
@@ -200,6 +203,18 @@
             break;
         }
 
+        case kWhatAudioOutputFormatChanged:
+        {
+            if (!isStaleReply(msg)) {
+                status_t err;
+                if (msg->findInt32("err", &err) && err != OK) {
+                    ALOGE("Renderer reported 0x%x when changing audio output format", err);
+                    handleError(err);
+                }
+            }
+            break;
+        }
+
         case kWhatSetVideoSurface:
         {
             sp<AReplyToken> replyID;
@@ -265,7 +280,7 @@
     ALOGV("[%s] onConfigure (surface=%p)", mComponentName.c_str(), mSurface.get());
 
     mCodec = MediaCodec::CreateByType(
-            mCodecLooper, mime.c_str(), false /* encoder */, NULL /* err */, mPid);
+            mCodecLooper, mime.c_str(), false /* encoder */, NULL /* err */, mPid, mUid);
     int32_t secure = 0;
     if (format->findInt32("secure", &secure) && secure != 0) {
         if (mCodec != NULL) {
@@ -274,7 +289,7 @@
             mCodec->release();
             ALOGI("[%s] creating", mComponentName.c_str());
             mCodec = MediaCodec::CreateByComponentName(
-                    mCodecLooper, mComponentName.c_str(), NULL /* err */, mPid);
+                    mCodecLooper, mComponentName.c_str(), NULL /* err */, mPid, mUid);
         }
     }
     if (mCodec == NULL) {
@@ -408,17 +423,7 @@
 }
 
 void NuPlayer::Decoder::onSetRenderer(const sp<Renderer> &renderer) {
-    bool hadNoRenderer = (mRenderer == NULL);
     mRenderer = renderer;
-    if (hadNoRenderer && mRenderer != NULL) {
-        // this means that the widevine legacy source is ready
-        onRequestInputBuffers();
-    }
-}
-
-void NuPlayer::Decoder::onGetInputBuffers(
-        Vector<sp<ABuffer> > *dstBuffers) {
-    CHECK_EQ((status_t)OK, mCodec->getWidevineLegacyBuffers(dstBuffers));
 }
 
 void NuPlayer::Decoder::onResume(bool notifyComplete) {
@@ -515,9 +520,7 @@
  * returns true if we should request more data
  */
 bool NuPlayer::Decoder::doRequestBuffers() {
-    // mRenderer is only NULL if we have a legacy widevine source that
-    // is not yet ready. In this case we must not fetch input.
-    if (isDiscontinuityPending() || mRenderer == NULL) {
+    if (isDiscontinuityPending()) {
         return false;
     }
     status_t err = OK;
@@ -561,7 +564,7 @@
         return false;
     }
 
-    sp<ABuffer> buffer;
+    sp<MediaCodecBuffer> buffer;
     mCodec->getInputBuffer(index, &buffer);
 
     if (buffer == NULL) {
@@ -628,7 +631,7 @@
         int64_t timeUs,
         int32_t flags) {
 //    CHECK_LT(bufferIx, mOutputBuffers.size());
-    sp<ABuffer> buffer;
+    sp<MediaCodecBuffer> buffer;
     mCodec->getOutputBuffer(index, &buffer);
 
     if (index >= mOutputBuffers.size()) {
@@ -708,11 +711,10 @@
             flags = AUDIO_OUTPUT_FLAG_NONE;
         }
 
-        status_t err = mRenderer->openAudioSink(
-                format, false /* offloadOnly */, hasVideo, flags, NULL /* isOffloaed */);
-        if (err != OK) {
-            handleError(err);
-        }
+        sp<AMessage> reply = new AMessage(kWhatAudioOutputFormatChanged, this);
+        reply->setInt32("generation", mBufferGeneration);
+        mRenderer->changeAudioFormat(
+                format, false /* offloadOnly */, hasVideo, flags, reply);
     }
 }
 
@@ -865,43 +867,11 @@
     size_t bufferIx;
     CHECK(msg->findSize("buffer-ix", &bufferIx));
     CHECK_LT(bufferIx, mInputBuffers.size());
-    sp<ABuffer> codecBuffer = mInputBuffers[bufferIx];
+    sp<MediaCodecBuffer> codecBuffer = mInputBuffers[bufferIx];
 
     sp<ABuffer> buffer;
     bool hasBuffer = msg->findBuffer("buffer", &buffer);
-
-    // handle widevine classic source - that fills an arbitrary input buffer
-    MediaBuffer *mediaBuffer = NULL;
-    if (hasBuffer) {
-        mediaBuffer = (MediaBuffer *)(buffer->getMediaBufferBase());
-        if (mediaBuffer != NULL) {
-            // likely filled another buffer than we requested: adjust buffer index
-            size_t ix;
-            for (ix = 0; ix < mInputBuffers.size(); ix++) {
-                const sp<ABuffer> &buf = mInputBuffers[ix];
-                if (buf->data() == mediaBuffer->data()) {
-                    // all input buffers are dequeued on start, hence the check
-                    if (!mInputBufferIsDequeued[ix]) {
-                        ALOGV("[%s] received MediaBuffer for #%zu instead of #%zu",
-                                mComponentName.c_str(), ix, bufferIx);
-                        mediaBuffer->release();
-                        return false;
-                    }
-
-                    // TRICKY: need buffer for the metadata, so instead, set
-                    // codecBuffer to the same (though incorrect) buffer to
-                    // avoid a memcpy into the codecBuffer
-                    codecBuffer = buffer;
-                    codecBuffer->setRange(
-                            mediaBuffer->range_offset(),
-                            mediaBuffer->range_length());
-                    bufferIx = ix;
-                    break;
-                }
-            }
-            CHECK(ix < mInputBuffers.size());
-        }
-    }
+    bool needsCopy = true;
 
     if (buffer == NULL /* includes !hasBuffer */) {
         int32_t streamErr = ERROR_END_OF_STREAM;
@@ -955,7 +925,7 @@
         }
 
         // copy into codec buffer
-        if (buffer != codecBuffer) {
+        if (needsCopy) {
             if (buffer->size() > codecBuffer->capacity()) {
                 handleError(ERROR_BUFFER_TOO_SMALL);
                 mDequeuedInputBuffers.push_back(bufferIx);
@@ -972,18 +942,11 @@
                         timeUs,
                         flags);
         if (err != OK) {
-            if (mediaBuffer != NULL) {
-                mediaBuffer->release();
-            }
             ALOGE("Failed to queue input buffer for %s (err=%d)",
                     mComponentName.c_str(), err);
             handleError(err);
         } else {
             mInputBufferIsDequeued.editItemAt(bufferIx) = false;
-            if (mediaBuffer != NULL) {
-                CHECK(mMediaBuffers[bufferIx] == NULL);
-                mMediaBuffers.editItemAt(bufferIx) = mediaBuffer;
-            }
         }
     }
     return true;
@@ -998,7 +961,7 @@
 
     if (!mIsAudio) {
         int64_t timeUs;
-        sp<ABuffer> buffer = mOutputBuffers[bufferIx];
+        sp<MediaCodecBuffer> buffer = mOutputBuffers[bufferIx];
         buffer->meta()->findInt64("timeUs", &timeUs);
 
         if (mCCDecoder != NULL && mCCDecoder->isSelected()) {
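Several decoder messages, including the new kWhatAudioOutputFormatChanged above, are guarded by a buffer-generation check so that replies issued before a flush are dropped as stale. A minimal standalone sketch of that pattern follows; DecoderSketch and Reply are stand-in types, not the framework classes.

#include <cstdint>
#include <cstdio>

struct Reply { int32_t generation; int32_t err; };

struct DecoderSketch {
    int32_t mBufferGeneration = 0;
    Reply makeReply(int32_t err) const { return Reply{mBufferGeneration, err}; }
    void flush() { ++mBufferGeneration; }  // invalidates replies already in flight
    bool isStaleReply(const Reply &r) const { return r.generation != mBufferGeneration; }
};

int main() {
    DecoderSketch dec;
    Reply inFlight = dec.makeReply(0);  // reply tagged with the current generation
    dec.flush();                        // e.g. a seek flushed the decoder
    std::printf("stale: %d\n", dec.isStaleReply(inFlight));  // 1 -> drop it
    Reply fresh = dec.makeReply(0);
    std::printf("stale: %d\n", dec.isStaleReply(fresh));     // 0 -> handle it
    return 0;
}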
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
index 0c619ed..82db59c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
@@ -23,10 +23,13 @@
 
 namespace android {
 
+class MediaCodecBuffer;
+
 struct NuPlayer::Decoder : public DecoderBase {
     Decoder(const sp<AMessage> &notify,
             const sp<Source> &source,
             pid_t pid,
+            uid_t uid,
             const sp<Renderer> &renderer = NULL,
             const sp<Surface> &surface = NULL,
             const sp<CCDecoder> &ccDecoder = NULL);
@@ -44,7 +47,6 @@
     virtual void onConfigure(const sp<AMessage> &format);
     virtual void onSetParameters(const sp<AMessage> &params);
     virtual void onSetRenderer(const sp<Renderer> &renderer);
-    virtual void onGetInputBuffers(Vector<sp<ABuffer> > *dstBuffers);
     virtual void onResume(bool notifyComplete);
     virtual void onFlush();
     virtual void onShutdown(bool notifyComplete);
@@ -54,7 +56,8 @@
     enum {
         kWhatCodecNotify         = 'cdcN',
         kWhatRenderBuffer        = 'rndr',
-        kWhatSetVideoSurface     = 'sSur'
+        kWhatSetVideoSurface     = 'sSur',
+        kWhatAudioOutputFormatChanged = 'aofc'
     };
 
     enum {
@@ -74,8 +77,8 @@
 
     List<sp<AMessage> > mPendingInputMessages;
 
-    Vector<sp<ABuffer> > mInputBuffers;
-    Vector<sp<ABuffer> > mOutputBuffers;
+    Vector<sp<MediaCodecBuffer> > mInputBuffers;
+    Vector<sp<MediaCodecBuffer> > mOutputBuffers;
     Vector<sp<ABuffer> > mCSDsForCurrentFormat;
     Vector<sp<ABuffer> > mCSDsToSubmit;
     Vector<bool> mInputBufferIsDequeued;
@@ -83,6 +86,7 @@
     Vector<size_t> mDequeuedInputBuffers;
 
     const pid_t mPid;
+    const uid_t mUid;
     int64_t mSkipRenderingUntilMediaTimeUs;
     int64_t mNumFramesTotal;
     int64_t mNumInputFramesDropped;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
index 04bb61c..1210dc9 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
@@ -23,6 +23,7 @@
 
 #include "NuPlayerRenderer.h"
 
+#include <media/MediaCodecBuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 
@@ -91,14 +92,6 @@
     PostAndAwaitResponse(msg, &response);
 }
 
-status_t NuPlayer::DecoderBase::getInputBuffers(Vector<sp<ABuffer> > *buffers) const {
-    sp<AMessage> msg = new AMessage(kWhatGetInputBuffers, this);
-    msg->setPointer("buffers", buffers);
-
-    sp<AMessage> response;
-    return PostAndAwaitResponse(msg, &response);
-}
-
 void NuPlayer::DecoderBase::signalFlush() {
     (new AMessage(kWhatFlush, this))->post();
 }
@@ -165,20 +158,6 @@
             break;
         }
 
-        case kWhatGetInputBuffers:
-        {
-            sp<AReplyToken> replyID;
-            CHECK(msg->senderAwaitsResponse(&replyID));
-
-            Vector<sp<ABuffer> > *dstBuffers;
-            CHECK(msg->findPointer("buffers", (void **)&dstBuffers));
-
-            onGetInputBuffers(dstBuffers);
-
-            (new AMessage)->postReply(replyID);
-            break;
-        }
-
         case kWhatRequestInputBuffers:
         {
             mRequestInputBuffersPending = false;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
index 9966144..6811903 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
@@ -27,6 +27,7 @@
 struct ABuffer;
 struct MediaCodec;
 class MediaBuffer;
+class MediaCodecBuffer;
 class Surface;
 
 struct NuPlayer::DecoderBase : public AHandler {
@@ -42,7 +43,6 @@
     void setRenderer(const sp<Renderer> &renderer);
     virtual status_t setVideoSurface(const sp<Surface> &) { return INVALID_OPERATION; }
 
-    status_t getInputBuffers(Vector<sp<ABuffer> > *dstBuffers) const;
     void signalFlush();
     void signalResume(bool notifyComplete);
     void initiateShutdown();
@@ -70,7 +70,6 @@
     virtual void onConfigure(const sp<AMessage> &format) = 0;
     virtual void onSetParameters(const sp<AMessage> &params) = 0;
     virtual void onSetRenderer(const sp<Renderer> &renderer) = 0;
-    virtual void onGetInputBuffers(Vector<sp<ABuffer> > *dstBuffers) = 0;
     virtual void onResume(bool notifyComplete) = 0;
     virtual void onFlush() = 0;
     virtual void onShutdown(bool notifyComplete) = 0;
@@ -90,7 +89,6 @@
         kWhatSetParameters       = 'setP',
         kWhatSetRenderer         = 'setR',
         kWhatPause               = 'paus',
-        kWhatGetInputBuffers     = 'gInB',
         kWhatRequestInputBuffers = 'reqB',
         kWhatFlush               = 'flus',
         kWhatShutdown            = 'shuD',
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
index f224635..cb668e4 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
@@ -25,6 +25,7 @@
 #include "NuPlayerSource.h"
 
 #include <media/ICrypto.h>
+#include <media/MediaCodecBuffer.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -92,11 +93,6 @@
             "ignoring request to change renderer");
 }
 
-void NuPlayer::DecoderPassThrough::onGetInputBuffers(
-        Vector<sp<ABuffer> > * /* dstBuffers */) {
-    ALOGE("onGetInputBuffers() called unexpectedly");
-}
-
 bool NuPlayer::DecoderPassThrough::isStaleReply(const sp<AMessage> &msg) {
     int32_t generation;
     CHECK(msg->findInt32("generation", &generation));
@@ -319,10 +315,9 @@
     int32_t bufferSize = buffer->size();
     mCachedBytes += bufferSize;
 
+    int64_t timeUs = 0;
+    CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
     if (mSkipRenderingUntilMediaTimeUs >= 0) {
-        int64_t timeUs = 0;
-        CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
-
         if (timeUs < mSkipRenderingUntilMediaTimeUs) {
             ALOGV("[%s] dropping buffer at time %lld as requested.",
                      mComponentName.c_str(), (long long)timeUs);
@@ -343,7 +338,10 @@
     reply->setInt32("generation", mBufferGeneration);
     reply->setInt32("size", bufferSize);
 
-    mRenderer->queueBuffer(true /* audio */, buffer, reply);
+    sp<MediaCodecBuffer> mcBuffer = new MediaCodecBuffer(nullptr, buffer);
+    mcBuffer->meta()->setInt64("timeUs", timeUs);
+
+    mRenderer->queueBuffer(true /* audio */, mcBuffer, reply);
 
     ++mPendingBuffersToDrain;
     ALOGV("onInputBufferFilled: #ToDrain = %zu, cachedBytes = %zu",
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
index 5850efa..173387a 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
@@ -38,7 +38,6 @@
     virtual void onConfigure(const sp<AMessage> &format);
     virtual void onSetParameters(const sp<AMessage> &params);
     virtual void onSetRenderer(const sp<Renderer> &renderer);
-    virtual void onGetInputBuffers(Vector<sp<ABuffer> > *dstBuffers);
     virtual void onResume(bool notifyComplete);
     virtual void onFlush();
     virtual void onShutdown(bool notifyComplete);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 0f4dce9..b8bb8fe 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -31,8 +31,14 @@
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/Utils.h>
 
+#include <media/IMediaAnalyticsService.h>
+
+static const int kDumpLockRetries = 50;
+static const int kDumpLockSleepUs = 20000;
+
 namespace android {
 
+
 NuPlayerDriver::NuPlayerDriver(pid_t pid)
     : mState(STATE_IDLE),
       mIsAsyncPrepare(false),
@@ -41,14 +47,20 @@
       mDurationUs(-1),
       mPositionUs(-1),
       mSeekInProgress(false),
+      mPlayingTimeUs(0),
       mLooper(new ALooper),
       mPlayerFlags(0),
+      mAnalyticsItem(NULL),
       mAtEOS(false),
       mLooping(false),
       mAutoLoop(false) {
-    ALOGV("NuPlayerDriver(%p)", this);
+    ALOGD("NuPlayerDriver(%p) created, clientPid(%d)", this, pid);
     mLooper->setName("NuPlayerDriver Looper");
 
+    // set up an analytics record
+    mAnalyticsItem = new MediaAnalyticsItem("nuplayer");
+    mAnalyticsItem->generateSessionID();
+
     mLooper->start(
             false, /* runOnCallingThread */
             true,  /* canCallJava */
@@ -63,6 +75,15 @@
 NuPlayerDriver::~NuPlayerDriver() {
     ALOGV("~NuPlayerDriver(%p)", this);
     mLooper->stop();
+
+    // finalize any pending metrics, usually a no-op.
+    finalizeMetrics("destructor");
+    logMetrics("destructor");
+
+    if (mAnalyticsItem != NULL) {
+        delete mAnalyticsItem;
+        mAnalyticsItem = NULL;
+    }
 }
 
 status_t NuPlayerDriver::initCheck() {
@@ -183,6 +204,26 @@
     return OK;
 }
 
+status_t NuPlayerDriver::getDefaultBufferingSettings(BufferingSettings* buffering) {
+    ALOGV("getDefaultBufferingSettings(%p)", this);
+    Mutex::Autolock autoLock(mLock);
+    if (mState == STATE_IDLE) {
+        return INVALID_OPERATION;
+    }
+
+    return mPlayer->getDefaultBufferingSettings(buffering);
+}
+
+status_t NuPlayerDriver::setBufferingSettings(const BufferingSettings& buffering) {
+    ALOGV("setBufferingSettings(%p)", this);
+    Mutex::Autolock autoLock(mLock);
+    if (mState == STATE_IDLE) {
+        return INVALID_OPERATION;
+    }
+
+    return mPlayer->setBufferingSettings(buffering);
+}
+
 status_t NuPlayerDriver::prepare() {
     ALOGV("prepare(%p)", this);
     Mutex::Autolock autoLock(mLock);
@@ -208,7 +249,8 @@
             mAtEOS = false;
             mState = STATE_STOPPED_AND_PREPARING;
             mIsAsyncPrepare = false;
-            mPlayer->seekToAsync(0, true /* needNotify */);
+            mPlayer->seekToAsync(0, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */,
+                    true /* needNotify */);
             while (mState == STATE_STOPPED_AND_PREPARING) {
                 mCondition.wait(mLock);
             }
@@ -233,7 +275,8 @@
             mAtEOS = false;
             mState = STATE_STOPPED_AND_PREPARING;
             mIsAsyncPrepare = true;
-            mPlayer->seekToAsync(0, true /* needNotify */);
+            mPlayer->seekToAsync(0, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */,
+                    true /* needNotify */);
             return OK;
         default:
             return INVALID_OPERATION;
@@ -382,8 +425,8 @@
     return mPlayer->getSyncSettings(sync, videoFps);
 }
 
-status_t NuPlayerDriver::seekTo(int msec) {
-    ALOGD("seekTo(%p) %d ms at state %d", this, msec, mState);
+status_t NuPlayerDriver::seekTo(int msec, MediaPlayerSeekMode mode) {
+    ALOGD("seekTo(%p) (%d ms, %d) at state %d", this, msec, mode, mState);
     Mutex::Autolock autoLock(mLock);
 
     int64_t seekTimeUs = msec * 1000ll;
@@ -398,7 +441,7 @@
             mSeekInProgress = true;
             // seeks can take a while, so we essentially paused
             notifyListener_l(MEDIA_PAUSED);
-            mPlayer->seekToAsync(seekTimeUs, true /* needNotify */);
+            mPlayer->seekToAsync(seekTimeUs, mode, true /* needNotify */);
             break;
         }
 
@@ -448,8 +491,103 @@
     return OK;
 }
 
+void NuPlayerDriver::finalizeMetrics(const char *where) {
+    if (where == NULL) {
+        where = "unknown";
+    }
+    ALOGD("finalizeMetrics(%p) from %s at state %d", this, where, mState);
+
+    // gather the final stats for this record
+    Vector<sp<AMessage>> trackStats;
+    mPlayer->getStats(&trackStats);
+
+    if (trackStats.size() > 0) {
+        for (size_t i = 0; i < trackStats.size(); ++i) {
+            const sp<AMessage> &stats = trackStats.itemAt(i);
+
+            AString mime;
+            stats->findString("mime", &mime);
+
+            AString name;
+            stats->findString("component-name", &name);
+
+            if (mime.startsWith("video/")) {
+                int32_t width, height;
+                mAnalyticsItem->setCString("video/mime", mime.c_str());
+                if (!name.empty()) {
+                    mAnalyticsItem->setCString("video/codec", name.c_str());
+                }
+
+                if (stats->findInt32("width", &width)
+                        && stats->findInt32("height", &height)) {
+                    mAnalyticsItem->setInt32("wid", width);
+                    mAnalyticsItem->setInt32("ht", height);
+                }
+
+                int64_t numFramesTotal = 0;
+                int64_t numFramesDropped = 0;
+                stats->findInt64("frames-total", &numFramesTotal);
+                stats->findInt64("frames-dropped-output", &numFramesDropped);
+
+                mAnalyticsItem->setInt64("frames", numFramesTotal);
+                mAnalyticsItem->setInt64("dropped", numFramesDropped);
+
+
+            } else if (mime.startsWith("audio/")) {
+                mAnalyticsItem->setCString("audio/mime", mime.c_str());
+                if (!name.empty()) {
+                    mAnalyticsItem->setCString("audio/codec", name.c_str());
+                }
+            }
+        }
+
+        // getDuration() takes mLock internally -- be careful where this is called.
+        int duration_ms = -1;
+        getDuration(&duration_ms);
+        if (duration_ms != -1) {
+            mAnalyticsItem->setInt64("duration", duration_ms);
+        }
+
+        if (mPlayingTimeUs > 0) {
+            mAnalyticsItem->setInt64("playing", (mPlayingTimeUs+500)/1000 );
+        }
+    }
+}
+
+
+void NuPlayerDriver::logMetrics(const char *where) {
+    if (where == NULL) {
+        where = "unknown";
+    }
+    ALOGD("logMetrics(%p) from %s at state %d", this, where, mState);
+
+    if (mAnalyticsItem == NULL || mAnalyticsItem->isEnabled() == false) {
+        return;
+    }
+
+    // only bother to log non-empty records
+    if (mAnalyticsItem->count() > 0) {
+
+        mAnalyticsItem->setFinalized(true);
+        mAnalyticsItem->selfrecord();
+
+        // re-init in case we prepare() and start() again.
+        delete mAnalyticsItem;
+        mAnalyticsItem = new MediaAnalyticsItem("nuplayer");
+        if (mAnalyticsItem) {
+            mAnalyticsItem->generateSessionID();
+        }
+    } else {
+        ALOGV("did not have anything to record");
+    }
+}
+
 status_t NuPlayerDriver::reset() {
     ALOGD("reset(%p) at state %d", this, mState);
+
+    finalizeMetrics("reset");
+    logMetrics("reset");
+
     Mutex::Autolock autoLock(mLock);
 
     switch (mState) {
@@ -476,9 +614,7 @@
         notifyListener_l(MEDIA_STOPPED);
     }
 
-    char value[PROPERTY_VALUE_MAX];
-    if (property_get("persist.debug.sf.stats", value, NULL) &&
-            (!strcmp("1", value) || !strcasecmp("true", value))) {
+    if (property_get_bool("persist.debug.sf.stats", false)) {
         Vector<String16> args;
         dump(-1, args);
     }
@@ -493,6 +629,7 @@
     mDurationUs = -1;
     mPositionUs = -1;
     mLooping = false;
+    mPlayingTimeUs = 0;
 
     return OK;
 }
@@ -624,6 +761,11 @@
     mDurationUs = durationUs;
 }
 
+void NuPlayerDriver::notifyMorePlayingTimeUs(int64_t playingUs) {
+    Mutex::Autolock autoLock(mLock);
+    mPlayingTimeUs += playingUs;
+}
+
 void NuPlayerDriver::notifySeekComplete() {
     ALOGV("notifySeekComplete(%p)", this);
     Mutex::Autolock autoLock(mLock);
@@ -657,6 +799,24 @@
     AString logString(" NuPlayer\n");
     char buf[256] = {0};
 
+    bool locked = false;
+    for (int i = 0; i < kDumpLockRetries; ++i) {
+        if (mLock.tryLock() == NO_ERROR) {
+            locked = true;
+            break;
+        }
+        usleep(kDumpLockSleepUs);
+    }
+
+    if (locked) {
+        snprintf(buf, sizeof(buf), "  state(%d), atEOS(%d), looping(%d), autoLoop(%d)\n",
+                mState, mAtEOS, mLooping, mAutoLoop);
+        mLock.unlock();
+    } else {
+        snprintf(buf, sizeof(buf), "  NPD(%p) lock is taken\n", this);
+    }
+    logString.append(buf);
+
     for (size_t i = 0; i < trackStats.size(); ++i) {
         const sp<AMessage> &stats = trackStats.itemAt(i);
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
index 58008f0..5bfc539 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
@@ -16,6 +16,7 @@
 
 #include <media/MediaPlayerInterface.h>
 
+#include <media/MediaAnalyticsItem.h>
 #include <media/stagefright/foundation/ABase.h>
 
 namespace android {
@@ -43,6 +44,11 @@
 
     virtual status_t setVideoSurfaceTexture(
             const sp<IGraphicBufferProducer> &bufferProducer);
+
+    virtual status_t getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) override;
+    virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
     virtual status_t prepare();
     virtual status_t prepareAsync();
     virtual status_t start();
@@ -53,7 +59,8 @@
     virtual status_t getPlaybackSettings(AudioPlaybackRate *rate);
     virtual status_t setSyncSettings(const AVSyncSettings &sync, float videoFpsHint);
     virtual status_t getSyncSettings(AVSyncSettings *sync, float *videoFps);
-    virtual status_t seekTo(int msec);
+    virtual status_t seekTo(
+            int msec, MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC);
     virtual status_t getCurrentPosition(int *msec);
     virtual status_t getDuration(int *msec);
     virtual status_t reset();
@@ -74,6 +81,7 @@
     void notifyResetComplete();
     void notifySetSurfaceComplete();
     void notifyDuration(int64_t durationUs);
+    void notifyMorePlayingTimeUs(int64_t timeUs);
     void notifySeekComplete();
     void notifySeekComplete_l();
     void notifyListener(int msg, int ext1 = 0, int ext2 = 0, const Parcel *in = NULL);
@@ -111,6 +119,7 @@
     int64_t mDurationUs;
     int64_t mPositionUs;
     bool mSeekInProgress;
+    int64_t mPlayingTimeUs;
     // <<<
 
     sp<ALooper> mLooper;
@@ -118,10 +127,15 @@
     sp<AudioSink> mAudioSink;
     uint32_t mPlayerFlags;
 
+    MediaAnalyticsItem *mAnalyticsItem;
+
     bool mAtEOS;
     bool mLooping;
     bool mAutoLoop;
 
+    void finalizeMetrics(const char *where);
+    void logMetrics(const char *where);
+
     status_t prepare_l();
     status_t start_l();
     void notifyListener_l(int msg, int ext1 = 0, int ext2 = 0, const Parcel *in = NULL);
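The playing-time bookkeeping added across NuPlayer and NuPlayerDriver keeps three units in play: nanoseconds per play interval in NuPlayer, microseconds accumulated in the driver, and milliseconds in the analytics record, with each hop rounded by the same (value + half)/divisor idiom used in the patch. A small standalone sketch of that arithmetic, using made-up example timestamps:

#include <cstdint>
#include <cstdio>

static int64_t roundDiv(int64_t value, int64_t divisor) {
    return (value + divisor / 2) / divisor;  // same idiom as (played + 500) / 1000
}

int main() {
    int64_t startedNs = 1000000000;         // systemTime() at start/resume (example)
    int64_t nowNs     = 4567890123;         // systemTime() at pause (example)
    int64_t playedNs  = nowNs - startedNs;  // one play interval in nanoseconds

    int64_t playingUs = roundDiv(playedNs, 1000);   // what onPause() forwards to the driver
    int64_t playingMs = roundDiv(playingUs, 1000);  // what finalizeMetrics() records

    std::printf("played %lld ns -> %lld us -> %lld ms\n",
                (long long)playedNs, (long long)playingUs, (long long)playingMs);
    return 0;
}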
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index b742762..42e95da 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -21,7 +21,6 @@
 #include "NuPlayerRenderer.h"
 #include <algorithm>
 #include <cutils/properties.h>
-#include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/foundation/AUtils.h>
@@ -31,6 +30,7 @@
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/Utils.h>
 #include <media/stagefright/VideoFrameScheduler.h>
+#include <media/MediaCodecBuffer.h>
 
 #include <inttypes.h>
 
@@ -144,9 +144,10 @@
 
     // Try to avoid racing condition in case callback is still on.
     Mutex::Autolock autoLock(mLock);
-    mUseAudioCallback = false;
-    flushQueue(&mAudioQueue);
-    flushQueue(&mVideoQueue);
+    if (mUseAudioCallback) {
+        flushQueue(&mAudioQueue);
+        flushQueue(&mVideoQueue);
+    }
     mWakeLock.clear();
     mMediaClock.clear();
     mVideoScheduler.clear();
@@ -156,12 +157,12 @@
 
 void NuPlayer::Renderer::queueBuffer(
         bool audio,
-        const sp<ABuffer> &buffer,
+        const sp<MediaCodecBuffer> &buffer,
         const sp<AMessage> &notifyConsumed) {
     sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
     msg->setInt32("queueGeneration", getQueueGeneration(audio));
     msg->setInt32("audio", static_cast<int32_t>(audio));
-    msg->setBuffer("buffer", buffer);
+    msg->setObject("buffer", buffer);
     msg->setMessage("notifyConsumed", notifyConsumed);
     msg->post();
 }
@@ -297,7 +298,7 @@
             ++mVideoDrainGeneration;
         }
 
-        clearAnchorTime_l();
+        mMediaClock->clearAnchor();
         mVideoLateByUs = 0;
         mSyncQueues = false;
     }
@@ -374,7 +375,8 @@
     }
 }
 
-void NuPlayer::Renderer::clearAnchorTime_l() {
+// Called on renderer looper.
+void NuPlayer::Renderer::clearAnchorTime() {
     mMediaClock->clearAnchor();
     mAnchorTimeMediaUs = -1;
     mAnchorNumFramesWritten = -1;
@@ -423,6 +425,25 @@
     msg->postAndAwaitResponse(&response);
 }
 
+void NuPlayer::Renderer::changeAudioFormat(
+        const sp<AMessage> &format,
+        bool offloadOnly,
+        bool hasVideo,
+        uint32_t flags,
+        const sp<AMessage> &notify) {
+    sp<AMessage> meta = new AMessage;
+    meta->setMessage("format", format);
+    meta->setInt32("offload-only", offloadOnly);
+    meta->setInt32("has-video", hasVideo);
+    meta->setInt32("flags", flags);
+
+    sp<AMessage> msg = new AMessage(kWhatChangeAudioFormat, this);
+    msg->setInt32("queueGeneration", getQueueGeneration(true /* audio */));
+    msg->setMessage("notify", notify);
+    msg->setMessage("meta", meta);
+    msg->post();
+}
+
 void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
     switch (msg->what()) {
         case kWhatOpenAudioSink:
@@ -470,6 +491,41 @@
             break;
         }
 
+        case kWhatChangeAudioFormat:
+        {
+            int32_t queueGeneration;
+            CHECK(msg->findInt32("queueGeneration", &queueGeneration));
+
+            sp<AMessage> notify;
+            CHECK(msg->findMessage("notify", &notify));
+
+            if (offloadingAudio()) {
+                ALOGW("changeAudioFormat should NOT be called in offload mode");
+                notify->setInt32("err", INVALID_OPERATION);
+                notify->post();
+                break;
+            }
+
+            sp<AMessage> meta;
+            CHECK(msg->findMessage("meta", &meta));
+
+            if (queueGeneration != getQueueGeneration(true /* audio */)
+                    || mAudioQueue.empty()) {
+                onChangeAudioFormat(meta, notify);
+                break;
+            }
+
+            QueueEntry entry;
+            entry.mNotifyConsumed = notify;
+            entry.mMeta = meta;
+
+            Mutex::Autolock autoLock(mLock);
+            mAudioQueue.push_back(entry);
+            postDrainAudioQueue_l();
+
+            break;
+        }
+
         case kWhatDrainAudioQueue:
         {
             mDrainAudioQueuePending = false;
@@ -869,7 +925,7 @@
     while (it != mAudioQueue.end()) {
         int32_t eos;
         QueueEntry *entry = &*it++;
-        if (entry->mBuffer == NULL
+        if ((entry->mBuffer == nullptr && entry->mNotifyConsumed == nullptr)
                 || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
             itEOS = it;
             foundEOS = true;
@@ -879,9 +935,14 @@
     if (foundEOS) {
         // post all replies before EOS and drop the samples
         for (it = mAudioQueue.begin(); it != itEOS; it++) {
-            if (it->mBuffer == NULL) {
-                // delay doesn't matter as we don't even have an AudioTrack
-                notifyEOS(true /* audio */, it->mFinalResult);
+            if (it->mBuffer == nullptr) {
+                if (it->mNotifyConsumed == nullptr) {
+                    // delay doesn't matter as we don't even have an AudioTrack
+                    notifyEOS(true /* audio */, it->mFinalResult);
+                } else {
+                    // TAG for re-opening audio sink.
+                    onChangeAudioFormat(it->mMeta, it->mNotifyConsumed);
+                }
             } else {
                 it->mNotifyConsumed->post();
             }
@@ -933,9 +994,14 @@
     while (!mAudioQueue.empty()) {
         QueueEntry *entry = &*mAudioQueue.begin();
 
-        mLastAudioBufferDrained = entry->mBufferOrdinal;
-
         if (entry->mBuffer == NULL) {
+            if (entry->mNotifyConsumed != nullptr) {
+                // TAG for re-open audio sink.
+                onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
+                mAudioQueue.erase(mAudioQueue.begin());
+                continue;
+            }
+
             // EOS
             int64_t postEOSDelayUs = 0;
             if (mAudioSink->needsTrailingPadding()) {
@@ -956,6 +1022,8 @@
             return false;
         }
 
+        mLastAudioBufferDrained = entry->mBufferOrdinal;
+
         // ignore 0-sized buffer which could be EOS marker with no data
         if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
             int64_t mediaTimeUs;
@@ -1291,8 +1359,7 @@
         if (!mVideoSampleReceived && !mHasAudio) {
             // This will ensure that the first frame after a flush won't be used as anchor
             // when renderer is in paused state, because resume can happen any time after seek.
-            Mutex::Autolock autoLock(mLock);
-            clearAnchorTime_l();
+            clearAnchorTime();
         }
     }
 
@@ -1368,8 +1435,9 @@
         }
     }
 
-    sp<ABuffer> buffer;
-    CHECK(msg->findBuffer("buffer", &buffer));
+    sp<RefBase> obj;
+    CHECK(msg->findObject("buffer", &obj));
+    sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
 
     sp<AMessage> notifyConsumed;
     CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));
@@ -1395,8 +1463,8 @@
         return;
     }
 
-    sp<ABuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
-    sp<ABuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;
+    sp<MediaCodecBuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
+    sp<MediaCodecBuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;
 
     if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
         // EOS signalled on either queue.
@@ -1501,8 +1569,8 @@
         // Therefore we'll stop syncing the queues if at least one of them
         // is flushed.
         syncQueuesDone_l();
-        clearAnchorTime_l();
     }
+    clearAnchorTime();
 
     ALOGV("flushing %s", audio ? "audio" : "video");
     if (audio) {
@@ -1573,6 +1641,9 @@
 
         if (entry->mBuffer != NULL) {
             entry->mNotifyConsumed->post();
+        } else if (entry->mNotifyConsumed != nullptr) {
+            // Does the audio sink need to be (re)opened now?
+            onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
         }
 
         queue->erase(queue->begin());
@@ -1610,10 +1681,7 @@
     }
     CHECK(!mDrainAudioQueuePending);
     mNumFramesWritten = 0;
-    {
-        Mutex::Autolock autoLock(mLock);
-        mAnchorNumFramesWritten = -1;
-    }
+    mAnchorNumFramesWritten = -1;
     uint32_t written;
     if (mAudioSink->getFramesWritten(&written) == OK) {
         mNumFramesWritten = written;
@@ -1961,5 +2029,27 @@
     mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
 }
 
+void NuPlayer::Renderer::onChangeAudioFormat(
+        const sp<AMessage> &meta, const sp<AMessage> &notify) {
+    sp<AMessage> format;
+    CHECK(meta->findMessage("format", &format));
+
+    int32_t offloadOnly;
+    CHECK(meta->findInt32("offload-only", &offloadOnly));
+
+    int32_t hasVideo;
+    CHECK(meta->findInt32("has-video", &hasVideo));
+
+    uint32_t flags;
+    CHECK(meta->findInt32("flags", (int32_t *)&flags));
+
+    status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags);
+
+    if (err != OK) {
+        notify->setInt32("err", err);
+    }
+    notify->post();
+}
+
 }  // namespace android
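The renderer changes above fold a pending audio-format change into the audio queue as a tag entry, so the sink is re-opened only after everything queued before it has drained. Draining therefore distinguishes three entry shapes: real data, EOS, and the format-change tag. A standalone sketch of that three-way decode; Entry and its shared_ptr payloads are stand-ins, not QueueEntry or MediaCodecBuffer.

#include <cstdio>
#include <deque>
#include <memory>
#include <string>

struct Entry {
    std::shared_ptr<std::string> buffer;  // stand-in for sp<MediaCodecBuffer>
    std::shared_ptr<std::string> notify;  // stand-in for the format-change notify message
};

static void drainAudioQueue(std::deque<Entry> &queue) {
    while (!queue.empty()) {
        Entry &e = queue.front();
        if (!e.buffer) {
            if (e.notify) {
                std::printf("format-change tag reached: re-open sink (%s)\n", e.notify->c_str());
            } else {
                std::printf("EOS reached\n");
            }
        } else {
            std::printf("render %s\n", e.buffer->c_str());
        }
        queue.pop_front();
    }
}

int main() {
    std::deque<Entry> q;
    q.push_back({std::make_shared<std::string>("pcm buffer #1"), nullptr});
    q.push_back({std::make_shared<std::string>("pcm buffer #2"), nullptr});
    q.push_back({nullptr, std::make_shared<std::string>("48 kHz stereo")});  // the tag
    q.push_back({nullptr, nullptr});                                         // EOS
    drainAudioQueue(q);
    return 0;
}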
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index fe7f8fa..385bb06 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -25,9 +25,9 @@
 
 namespace android {
 
-struct ABuffer;
 class  AWakeLock;
 struct MediaClock;
+class MediaCodecBuffer;
 struct VideoFrameScheduler;
 
 struct NuPlayer::Renderer : public AHandler {
@@ -46,7 +46,7 @@
 
     void queueBuffer(
             bool audio,
-            const sp<ABuffer> &buffer,
+            const sp<MediaCodecBuffer> &buffer,
             const sp<AMessage> &notifyConsumed);
 
     void queueEOS(bool audio, status_t finalResult);
@@ -60,8 +60,6 @@
 
     void signalTimeDiscontinuity();
 
-    void signalAudioSinkChanged();
-
     void signalDisableOffloadAudio();
     void signalEnableOffloadAudio();
 
@@ -81,6 +79,14 @@
             bool *isOffloaded);
     void closeAudioSink();
 
+    // Re-opens the audio sink after all pending audio buffers have been played.
+    void changeAudioFormat(
+            const sp<AMessage> &format,
+            bool offloadOnly,
+            bool hasVideo,
+            uint32_t flags,
+            const sp<AMessage> &notify);
+
     enum {
         kWhatEOS                      = 'eos ',
         kWhatFlushComplete            = 'fluC',
@@ -118,14 +124,19 @@
         kWhatResume              = 'resm',
         kWhatOpenAudioSink       = 'opnA',
         kWhatCloseAudioSink      = 'clsA',
+        kWhatChangeAudioFormat   = 'chgA',
         kWhatStopAudioSink       = 'stpA',
         kWhatDisableOffloadAudio = 'noOA',
         kWhatEnableOffloadAudio  = 'enOA',
         kWhatSetVideoFrameRate   = 'sVFR',
     };
 
+    // If mBuffer != nullptr, the entry carries a buffer with real data.
+    // Otherwise, if mNotifyConsumed == nullptr, the entry marks EOS.
+    // Otherwise, it is a tag for re-opening the audio sink in a different format.
     struct QueueEntry {
-        sp<ABuffer> mBuffer;
+        sp<MediaCodecBuffer> mBuffer;
+        sp<AMessage> mMeta;
         sp<AMessage> mNotifyConsumed;
         size_t mOffset;
         status_t mFinalResult;
@@ -220,7 +231,7 @@
     int64_t getPendingAudioPlayoutDurationUs(int64_t nowUs);
     void postDrainAudioQueue_l(int64_t delayUs = 0);
 
-    void clearAnchorTime_l();
+    void clearAnchorTime();
     void clearAudioFirstAnchorTime_l();
     void setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs);
     void setVideoLateByUs(int64_t lateUs);
@@ -258,6 +269,7 @@
             bool hasVideo,
             uint32_t flags);
     void onCloseAudioSink();
+    void onChangeAudioFormat(const sp<AMessage> &meta, const sp<AMessage> &notify);
 
     void notifyEOS(bool audio, status_t finalResult, int64_t delayUs = 0);
     void notifyFlushComplete(bool audio);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index 3a96138..0429ef1 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -63,6 +63,10 @@
         : mNotify(notify) {
     }
 
+    virtual status_t getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) = 0;
+    virtual status_t setBufferingSettings(const BufferingSettings& buffering) = 0;
+
     virtual void prepareAsync() = 0;
 
     virtual void start() = 0;
@@ -77,7 +81,11 @@
     // an error or ERROR_END_OF_STREAM if not.
     virtual status_t feedMoreTSData() = 0;
 
+    // Returns a non-NULL format when the specified track exists.
+    // When the format has "err" set to -EWOULDBLOCK, the source needs more time to get valid metadata.
+    // Returns NULL if the specified track doesn't exist or is invalid.
     virtual sp<AMessage> getFormat(bool audio);
+
     virtual sp<MetaData> getFormatMeta(bool /* audio */) { return NULL; }
     virtual sp<MetaData> getFileFormatMeta() const { return NULL; }
 
@@ -104,7 +112,9 @@
         return INVALID_OPERATION;
     }
 
-    virtual status_t seekTo(int64_t /* seekTimeUs */) {
+    virtual status_t seekTo(
+            int64_t /* seekTimeUs */,
+            MediaPlayerSeekMode /* mode */ = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) {
         return INVALID_OPERATION;
     }
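The getFormat() contract documented in this header (a non-NULL format that may still carry an "err" of -EWOULDBLOCK) matches the caller-side check added to NuPlayer's video-size handling earlier in this patch. A standalone sketch of that caller logic; Format and findErr are illustrative stand-ins, not AMessage and findInt32.

#include <cerrno>
#include <cstdint>
#include <cstdio>

struct Format {               // stand-in for the AMessage returned by getFormat()
    bool hasErr = false;
    int32_t err = 0;
    bool findErr(int32_t *out) const { if (hasErr) { *out = err; } return hasErr; }
};

static void onVideoFormat(const Format *fmt) {
    if (fmt == nullptr) {
        std::printf("no such track\n");
        return;
    }
    int32_t err = 0;  // OK
    if (fmt->findErr(&err) && err == -EWOULDBLOCK) {
        std::printf("metadata not ready yet, try again later\n");
        return;
    }
    if (err != 0) {
        std::printf("track is invalid (err=%d)\n", (int)err);
        return;
    }
    std::printf("format is usable\n");
}

int main() {
    Format notReady; notReady.hasErr = true; notReady.err = -EWOULDBLOCK;
    onVideoFormat(&notReady);
    Format good;
    onVideoFormat(&good);
    onVideoFormat(nullptr);
    return 0;
}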
 
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index c4e5df7..9264e49 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -32,11 +32,11 @@
 
 const int64_t kNearEOSTimeoutUs = 2000000ll; // 2 secs
 
-// Buffer Underflow/Prepare/StartServer/Overflow Marks
-const int64_t NuPlayer::RTSPSource::kUnderflowMarkUs   =  1000000ll;
-const int64_t NuPlayer::RTSPSource::kPrepareMarkUs     =  3000000ll;
-const int64_t NuPlayer::RTSPSource::kStartServerMarkUs =  5000000ll;
-const int64_t NuPlayer::RTSPSource::kOverflowMarkUs    = 10000000ll;
+// Default Buffer Underflow/Prepare/StartServer/Overflow Marks
+static const int kUnderflowMarkMs   =  1000;  // 1 second
+static const int kPrepareMarkMs     =  3000;  // 3 seconds
+//static const int kStartServerMarkMs =  5000;
+static const int kOverflowMarkMs    = 10000;  // 10 seconds
 
 NuPlayer::RTSPSource::RTSPSource(
         const sp<AMessage> &notify,
@@ -62,6 +62,7 @@
       mSeekGeneration(0),
       mEOSTimeoutAudio(0),
       mEOSTimeoutVideo(0) {
+    getDefaultBufferingSettings(&mBufferingSettings);
     if (headers) {
         mExtraHeaders = *headers;
 
@@ -83,6 +84,34 @@
     }
 }
 
+status_t NuPlayer::RTSPSource::getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) {
+    buffering->mInitialBufferingMode = BUFFERING_MODE_TIME_ONLY;
+    buffering->mRebufferingMode = BUFFERING_MODE_TIME_ONLY;
+    buffering->mInitialWatermarkMs = kPrepareMarkMs;
+    buffering->mRebufferingWatermarkLowMs = kUnderflowMarkMs;
+    buffering->mRebufferingWatermarkHighMs = kOverflowMarkMs;
+
+    return OK;
+}
+
+status_t NuPlayer::RTSPSource::setBufferingSettings(const BufferingSettings& buffering) {
+    if (mLooper == NULL) {
+        mBufferingSettings = buffering;
+        return OK;
+    }
+
+    sp<AMessage> msg = new AMessage(kWhatSetBufferingSettings, this);
+    writeToAMessage(msg, buffering);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+    }
+
+    return err;
+}
+
 void NuPlayer::RTSPSource::prepareAsync() {
     if (mIsSDP && mHTTPService == NULL) {
         notifyPrepared(BAD_VALUE);
@@ -258,7 +287,7 @@
 }
 
 status_t NuPlayer::RTSPSource::getDuration(int64_t *durationUs) {
-    *durationUs = 0ll;
+    *durationUs = -1ll;
 
     int64_t audioDurationUs;
     if (mAudioTrack != NULL
@@ -279,10 +308,11 @@
     return OK;
 }
 
-status_t NuPlayer::RTSPSource::seekTo(int64_t seekTimeUs) {
+status_t NuPlayer::RTSPSource::seekTo(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
     sp<AMessage> msg = new AMessage(kWhatPerformSeek, this);
     msg->setInt32("generation", ++mSeekGeneration);
     msg->setInt64("timeUs", seekTimeUs);
+    msg->setInt32("mode", mode);
 
     sp<AMessage> response;
     status_t err = msg->postAndAwaitResponse(&response);
@@ -327,7 +357,8 @@
         int64_t bufferedDurationUs = src->getBufferedDurationUs(&finalResult);
 
         // isFinished when duration is 0 checks for EOS result only
-        if (bufferedDurationUs > kPrepareMarkUs || src->isFinished(/* duration */ 0)) {
+        if (bufferedDurationUs > mBufferingSettings.mInitialWatermarkMs * 1000
+                || src->isFinished(/* duration */ 0)) {
             ++preparedCount;
         }
 
@@ -335,13 +366,16 @@
             ++overflowCount;
             ++finishedCount;
         } else {
-            if (bufferedDurationUs < kUnderflowMarkUs) {
+            if (bufferedDurationUs < mBufferingSettings.mRebufferingWatermarkLowMs * 1000) {
                 ++underflowCount;
             }
-            if (bufferedDurationUs > kOverflowMarkUs) {
+            if (bufferedDurationUs > mBufferingSettings.mRebufferingWatermarkHighMs * 1000) {
                 ++overflowCount;
             }
-            if (bufferedDurationUs < kStartServerMarkUs) {
+            int64_t startServerMarkUs =
+                    (mBufferingSettings.mRebufferingWatermarkLowMs
+                        + mBufferingSettings.mRebufferingWatermarkHighMs) / 2 * 1000ll;
+            if (bufferedDurationUs < startServerMarkUs) {
                 ++startCount;
             }
         }
@@ -465,9 +499,12 @@
         }
 
         int64_t seekTimeUs;
+        int32_t mode;
         CHECK(msg->findInt64("timeUs", &seekTimeUs));
+        CHECK(msg->findInt32("mode", &mode));
 
-        performSeek(seekTimeUs);
+        // TODO: add "mode" to performSeek.
+        performSeek(seekTimeUs/*, (MediaPlayerSeekMode)mode */);
         return;
     } else if (msg->what() == kWhatPollBuffering) {
         onPollBuffering();
@@ -475,6 +512,36 @@
     } else if (msg->what() == kWhatSignalEOS) {
         onSignalEOS(msg);
         return;
+    } else if (msg->what() == kWhatSetBufferingSettings) {
+        sp<AReplyToken> replyID;
+        CHECK(msg->senderAwaitsResponse(&replyID));
+
+        BufferingSettings buffering;
+        readFromAMessage(msg, &buffering);
+
+        status_t err = OK;
+        if (buffering.IsSizeBasedBufferingMode(buffering.mInitialBufferingMode)
+                || buffering.IsSizeBasedBufferingMode(buffering.mRebufferingMode)
+                || (buffering.mRebufferingWatermarkLowMs > buffering.mRebufferingWatermarkHighMs
+                    && buffering.IsTimeBasedBufferingMode(buffering.mRebufferingMode))) {
+            err = BAD_VALUE;
+        } else {
+            if (buffering.mInitialBufferingMode == BUFFERING_MODE_NONE) {
+                buffering.mInitialWatermarkMs = BufferingSettings::kNoWatermark;
+            }
+            if (buffering.mRebufferingMode == BUFFERING_MODE_NONE) {
+                buffering.mRebufferingWatermarkLowMs = BufferingSettings::kNoWatermark;
+                buffering.mRebufferingWatermarkHighMs = INT32_MAX;
+            }
+
+            mBufferingSettings = buffering;
+        }
+
+        sp<AMessage> response = new AMessage;
+        response->setInt32("err", err);
+        response->postReply(replyID);
+
+        return;
     }
 
     CHECK_EQ(msg->what(), (int)kWhatNotify);
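
The kWhatSetBufferingSettings handler above rejects size-based modes and an inverted low/high pair before adopting the new settings, and onPollBuffering() now compares buffered duration against the millisecond watermarks scaled to microseconds. A minimal caller-side sketch, assuming a NuPlayer source pointer and using the field and constant names from this diff (the watermark values are only examples):

    BufferingSettings buffering;
    buffering.mInitialBufferingMode       = BUFFERING_MODE_TIME_ONLY;
    buffering.mRebufferingMode            = BUFFERING_MODE_TIME_ONLY;
    buffering.mInitialWatermarkMs         = 3000;   // prepare once ~3 s are buffered
    buffering.mRebufferingWatermarkLowMs  = 1000;   // rebuffer when under ~1 s
    buffering.mRebufferingWatermarkHighMs = 10000;  // back off once ~10 s are buffered
    status_t err = source->setBufferingSettings(buffering);
    // BAD_VALUE if a size-based mode is requested, or if low > high for a time-based mode.
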
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.h b/media/libmediaplayerservice/nuplayer/RTSPSource.h
index c7834ef..0812991 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.h
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.h
@@ -40,6 +40,10 @@
             uid_t uid = 0,
             bool isSDP = false);
 
+    virtual status_t getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) override;
+    virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
     virtual void prepareAsync();
     virtual void start();
     virtual void stop();
@@ -49,7 +53,9 @@
     virtual status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit);
 
     virtual status_t getDuration(int64_t *durationUs);
-    virtual status_t seekTo(int64_t seekTimeUs);
+    virtual status_t seekTo(
+            int64_t seekTimeUs,
+            MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) override;
 
     void onMessageReceived(const sp<AMessage> &msg);
 
@@ -65,6 +71,7 @@
         kWhatPerformSeek     = 'seek',
         kWhatPollBuffering   = 'poll',
         kWhatSignalEOS       = 'eos ',
+        kWhatSetBufferingSettings = 'sBuS',
     };
 
     enum State {
@@ -79,12 +86,6 @@
         kFlagIncognito = 1,
     };
 
-    // Buffer Prepare/Underflow/Overflow/Resume Marks
-    static const int64_t kPrepareMarkUs;
-    static const int64_t kUnderflowMarkUs;
-    static const int64_t kOverflowMarkUs;
-    static const int64_t kStartServerMarkUs;
-
     struct TrackInfo {
         sp<AnotherPacketSource> mSource;
 
@@ -108,6 +109,7 @@
     bool mBuffering;
     bool mInPreparationPhase;
     bool mEOSPending;
+    BufferingSettings mBufferingSettings;
 
     sp<ALooper> mLooper;
     sp<MyHandler> mHandler;
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
index 7f9f913..fc0803b 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
@@ -51,6 +51,22 @@
     }
 }
 
+status_t NuPlayer::StreamingSource::getDefaultBufferingSettings(
+        BufferingSettings *buffering /* nonnull */) {
+    *buffering = BufferingSettings();
+    return OK;
+}
+
+status_t NuPlayer::StreamingSource::setBufferingSettings(
+        const BufferingSettings &buffering) {
+    if (buffering.mInitialBufferingMode != BUFFERING_MODE_NONE
+            || buffering.mRebufferingMode != BUFFERING_MODE_NONE) {
+        return BAD_VALUE;
+    }
+
+    return OK;
+}
+
 void NuPlayer::StreamingSource::prepareAsync() {
     if (mLooper == NULL) {
         mLooper = new ALooper;
@@ -234,8 +250,7 @@
     }
     status_t err = convertMetaDataToMessage(meta, &format);
     if (err != OK) { // format may have been cleared on error
-        format = new AMessage;
-        format->setInt32("err", err);
+        return NULL;
     }
     return format;
 }
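
With the change above, StreamingSource::getFormat() returns NULL when convertMetaDataToMessage() fails, instead of handing back an AMessage that only carries an "err" entry. A hedged caller-side sketch (the getFormat(bool audio) signature is the usual NuPlayer::Source one and is assumed here):

    sp<AMessage> format = source->getFormat(true /* audio */);
    if (format == NULL) {
        // Conversion failed or no format yet; do not probe the message for an "err" field.
    }
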
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.h b/media/libmediaplayerservice/nuplayer/StreamingSource.h
index db88c7f..2e1d2b3 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.h
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.h
@@ -32,6 +32,10 @@
             const sp<AMessage> &notify,
             const sp<IStreamSource> &source);
 
+    virtual status_t getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) override;
+    virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
     virtual void prepareAsync();
     virtual void start();
 
diff --git a/media/libmediaplayerservice/tests/Android.mk b/media/libmediaplayerservice/tests/Android.mk
index ea75a97..c0b3265 100644
--- a/media/libmediaplayerservice/tests/Android.mk
+++ b/media/libmediaplayerservice/tests/Android.mk
@@ -20,7 +20,6 @@
 	frameworks/av/media/libmediaplayerservice \
 
 LOCAL_CFLAGS += -Werror -Wall
-LOCAL_CLANG := true
 
 LOCAL_32_BIT_ONLY := true
 
diff --git a/media/libnbaio/Android.bp b/media/libnbaio/Android.bp
index 615b541..fd7af4f 100644
--- a/media/libnbaio/Android.bp
+++ b/media/libnbaio/Android.bp
@@ -2,16 +2,15 @@
     name: "libnbaio",
     srcs: [
         "AudioBufferProviderSource.cpp",
-        "AudioStreamOutSink.cpp",
         "AudioStreamInSource.cpp",
-        "NBAIO.cpp",
+        "AudioStreamOutSink.cpp",
         "MonoPipe.cpp",
         "MonoPipeReader.cpp",
+        "NBAIO.cpp",
+        "NBLog.cpp",
         "Pipe.cpp",
         "PipeReader.cpp",
         "SourceAudioBufferProvider.cpp",
-
-        "NBLog.cpp",
     ],
 
     // libsndfile license is incompatible; uncomment to use for local debug only
@@ -33,4 +32,6 @@
         "-Werror",
         "-Wall",
     ],
+
+    include_dirs: ["system/media/audio_utils/include"],
 }
diff --git a/media/libnbaio/AudioStreamInSource.cpp b/media/libnbaio/AudioStreamInSource.cpp
index 2dc3050..1054b68 100644
--- a/media/libnbaio/AudioStreamInSource.cpp
+++ b/media/libnbaio/AudioStreamInSource.cpp
@@ -19,33 +19,38 @@
 
 #include <cutils/compiler.h>
 #include <utils/Log.h>
+#include <media/audiohal/StreamHalInterface.h>
 #include <media/nbaio/AudioStreamInSource.h>
 
 namespace android {
 
-AudioStreamInSource::AudioStreamInSource(audio_stream_in *stream) :
+AudioStreamInSource::AudioStreamInSource(sp<StreamInHalInterface> stream) :
         NBAIO_Source(),
         mStream(stream),
         mStreamBufferSizeBytes(0),
         mFramesOverrun(0),
         mOverruns(0)
 {
-    ALOG_ASSERT(stream != NULL);
+    ALOG_ASSERT(stream != 0);
 }
 
 AudioStreamInSource::~AudioStreamInSource()
 {
+    mStream.clear();
 }
 
 ssize_t AudioStreamInSource::negotiate(const NBAIO_Format offers[], size_t numOffers,
                                       NBAIO_Format counterOffers[], size_t& numCounterOffers)
 {
     if (!Format_isValid(mFormat)) {
-        mStreamBufferSizeBytes = mStream->common.get_buffer_size(&mStream->common);
-        audio_format_t streamFormat = mStream->common.get_format(&mStream->common);
-        uint32_t sampleRate = mStream->common.get_sample_rate(&mStream->common);
-        audio_channel_mask_t channelMask =
-                (audio_channel_mask_t) mStream->common.get_channels(&mStream->common);
+        status_t result;
+        result = mStream->getBufferSize(&mStreamBufferSizeBytes);
+        if (result != OK) return result;
+        audio_format_t streamFormat;
+        uint32_t sampleRate;
+        audio_channel_mask_t channelMask;
+        result = mStream->getAudioProperties(&sampleRate, &channelMask, &streamFormat);
+        if (result != OK) return result;
         mFormat = Format_from_SR_C(sampleRate,
                 audio_channel_count_from_in_mask(channelMask), streamFormat);
         mFrameSize = Format_frameSize(mFormat);
@@ -55,11 +60,14 @@
 
 int64_t AudioStreamInSource::framesOverrun()
 {
-    uint32_t framesOverrun = mStream->get_input_frames_lost(mStream);
-    if (framesOverrun > 0) {
+    uint32_t framesOverrun;
+    status_t result = mStream->getInputFramesLost(&framesOverrun);
+    if (result == OK && framesOverrun > 0) {
         mFramesOverrun += framesOverrun;
         // FIXME only increment for contiguous ranges
         ++mOverruns;
+    } else if (result != OK) {
+        ALOGE("Error when retrieving lost frames count from HAL: %d", result);
     }
     return mFramesOverrun;
 }
@@ -69,12 +77,14 @@
     if (CC_UNLIKELY(!Format_isValid(mFormat))) {
         return NEGOTIATE;
     }
-    ssize_t bytesRead = mStream->read(mStream, buffer, count * mFrameSize);
-    if (bytesRead > 0) {
+    size_t bytesRead;
+    status_t result = mStream->read(buffer, count * mFrameSize, &bytesRead);
+    if (result == OK && bytesRead > 0) {
         size_t framesRead = bytesRead / mFrameSize;
         mFramesRead += framesRead;
         return framesRead;
     } else {
+        ALOGE_IF(result != OK, "Error while reading data from HAL: %d", result);
         return bytesRead;
     }
 }
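
AudioStreamInSource now goes through StreamInHalInterface, whose calls return a status_t and deliver results via out-parameters rather than raw audio_stream_in function pointers. A minimal sketch of that pattern using the calls from this diff (the helper below is illustrative, not part of the source; the header path mirrors the include added above):

    #include <media/audiohal/StreamHalInterface.h>
    #include <utils/Errors.h>

    using namespace android;

    // Read frameCount frames, returning frames read or a negative status on error.
    static ssize_t readFrames(const sp<StreamInHalInterface> &stream,
                              void *buffer, size_t frameCount, size_t frameSize) {
        size_t bytesRead = 0;
        status_t status = stream->read(buffer, frameCount * frameSize, &bytesRead);
        if (status != OK) {
            return status;                       // propagate the HAL error, not a byte count
        }
        return (ssize_t)(bytesRead / frameSize); // frames actually read
    }
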
diff --git a/media/libnbaio/AudioStreamOutSink.cpp b/media/libnbaio/AudioStreamOutSink.cpp
index ee44678..cbff87d 100644
--- a/media/libnbaio/AudioStreamOutSink.cpp
+++ b/media/libnbaio/AudioStreamOutSink.cpp
@@ -18,31 +18,36 @@
 //#define LOG_NDEBUG 0
 
 #include <utils/Log.h>
+#include <media/audiohal/StreamHalInterface.h>
 #include <media/nbaio/AudioStreamOutSink.h>
 
 namespace android {
 
-AudioStreamOutSink::AudioStreamOutSink(audio_stream_out *stream) :
+AudioStreamOutSink::AudioStreamOutSink(sp<StreamOutHalInterface> stream) :
         NBAIO_Sink(),
         mStream(stream),
         mStreamBufferSizeBytes(0)
 {
-    ALOG_ASSERT(stream != NULL);
+    ALOG_ASSERT(stream != 0);
 }
 
 AudioStreamOutSink::~AudioStreamOutSink()
 {
+    mStream.clear();
 }
 
 ssize_t AudioStreamOutSink::negotiate(const NBAIO_Format offers[], size_t numOffers,
                                       NBAIO_Format counterOffers[], size_t& numCounterOffers)
 {
     if (!Format_isValid(mFormat)) {
-        mStreamBufferSizeBytes = mStream->common.get_buffer_size(&mStream->common);
-        audio_format_t streamFormat = mStream->common.get_format(&mStream->common);
-        uint32_t sampleRate = mStream->common.get_sample_rate(&mStream->common);
-        audio_channel_mask_t channelMask =
-                (audio_channel_mask_t) mStream->common.get_channels(&mStream->common);
+        status_t result;
+        result = mStream->getBufferSize(&mStreamBufferSizeBytes);
+        if (result != OK) return result;
+        audio_format_t streamFormat;
+        uint32_t sampleRate;
+        audio_channel_mask_t channelMask;
+        result = mStream->getAudioProperties(&sampleRate, &channelMask, &streamFormat);
+        if (result != OK) return result;
         mFormat = Format_from_SR_C(sampleRate,
                 audio_channel_count_from_out_mask(channelMask), streamFormat);
         mFrameSize = Format_frameSize(mFormat);
@@ -56,25 +61,24 @@
         return NEGOTIATE;
     }
     ALOG_ASSERT(Format_isValid(mFormat));
-    ssize_t ret = mStream->write(mStream, buffer, count * mFrameSize);
-    if (ret > 0) {
-        ret /= mFrameSize;
-        mFramesWritten += ret;
+    size_t written;
+    status_t ret = mStream->write(buffer, count * mFrameSize, &written);
+    if (ret == OK && written > 0) {
+        written /= mFrameSize;
+        mFramesWritten += written;
+        return written;
     } else {
         // FIXME verify HAL implementations are returning the correct error codes e.g. WOULD_BLOCK
+        ALOGE_IF(ret != OK, "Error while writing data to HAL: %d", ret);
+        return ret;
     }
-    return ret;
 }
 
 status_t AudioStreamOutSink::getTimestamp(ExtendedTimestamp &timestamp)
 {
-    if (mStream->get_presentation_position == NULL) {
-        return INVALID_OPERATION;
-    }
-
     uint64_t position64;
     struct timespec time;
-    if (mStream->get_presentation_position(mStream, &position64, &time) != OK) {
+    if (mStream->getPresentationPosition(&position64, &time) != OK) {
         return INVALID_OPERATION;
     }
     timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = position64;
diff --git a/media/libnbaio/MonoPipe.cpp b/media/libnbaio/MonoPipe.cpp
index 8d1cb0f..3c5df1a 100644
--- a/media/libnbaio/MonoPipe.cpp
+++ b/media/libnbaio/MonoPipe.cpp
@@ -19,7 +19,6 @@
 #define LOG_TAG "MonoPipe"
 //#define LOG_NDEBUG 0
 
-#include <cutils/atomic.h>
 #include <cutils/compiler.h>
 #include <utils/Log.h>
 #include <utils/Trace.h>
@@ -32,11 +31,11 @@
 
 MonoPipe::MonoPipe(size_t reqFrames, const NBAIO_Format& format, bool writeCanBlock) :
         NBAIO_Sink(format),
-        mReqFrames(reqFrames),
+        // TODO fifo now supports non-power-of-2 buffer sizes, so could remove the roundup
         mMaxFrames(roundup(reqFrames)),
         mBuffer(malloc(mMaxFrames * Format_frameSize(format))),
-        mFront(0),
-        mRear(0),
+        mFifo(mMaxFrames, Format_frameSize(format), mBuffer, true /*throttlesWriter*/),
+        mFifoWriter(mFifo),
         mWriteTsValid(false),
         // mWriteTs
         mSetpoint((reqFrames * 11) / 16),
@@ -53,14 +52,14 @@
     free(mBuffer);
 }
 
-ssize_t MonoPipe::availableToWrite() const
+ssize_t MonoPipe::availableToWrite()
 {
     if (CC_UNLIKELY(!mNegotiated)) {
         return NEGOTIATE;
     }
-    // uses mMaxFrames not mReqFrames, so allows "over-filling" the pipe beyond requested limit
-    ssize_t ret = mMaxFrames - (mRear - android_atomic_acquire_load(&mFront));
-    ALOG_ASSERT((0 <= ret) && (ret <= mMaxFrames));
+    // uses mMaxFrames not reqFrames, so allows "over-filling" the pipe beyond requested limit
+    ssize_t ret = mFifoWriter.available();
+    ALOG_ASSERT(ret <= mMaxFrames);
     return ret;
 }
 
@@ -71,38 +70,33 @@
     }
     size_t totalFramesWritten = 0;
     while (count > 0) {
-        // can't return a negative value, as we already checked for !mNegotiated
-        size_t avail = availableToWrite();
-        size_t written = avail;
-        if (CC_LIKELY(written > count)) {
-            written = count;
-        }
-        size_t rear = mRear & (mMaxFrames - 1);
-        size_t part1 = mMaxFrames - rear;
-        if (part1 > written) {
-            part1 = written;
-        }
-        if (CC_LIKELY(part1 > 0)) {
-            memcpy((char *) mBuffer + (rear * mFrameSize), buffer, part1 * mFrameSize);
-            if (CC_UNLIKELY(rear + part1 == mMaxFrames)) {
-                size_t part2 = written - part1;
-                if (CC_LIKELY(part2 > 0)) {
-                    memcpy(mBuffer, (char *) buffer + (part1 * mFrameSize), part2 * mFrameSize);
-                }
+        ssize_t actual = mFifoWriter.write(buffer, count);
+        ALOG_ASSERT(actual <= count);
+        if (actual < 0) {
+            if (totalFramesWritten == 0) {
+                return actual;
             }
-            android_atomic_release_store(written + mRear, &mRear);
-            totalFramesWritten += written;
+            break;
         }
+        size_t written = (size_t) actual;
+        totalFramesWritten += written;
         if (!mWriteCanBlock || mIsShutdown) {
             break;
         }
         count -= written;
         buffer = (char *) buffer + (written * mFrameSize);
+        // TODO Replace this whole section by audio_util_fifo's setpoint feature.
+        // TODO Replace this whole section with audio_utils_fifo's setpoint feature.
         // Simulate blocking I/O by sleeping at different rates, depending on a throttle.
         // The throttle tries to keep the mean pipe depth near the setpoint, with a slight jitter.
         uint32_t ns;
         if (written > 0) {
-            size_t filled = (mMaxFrames - avail) + written;
+            ssize_t avail = mFifoWriter.available();
+            ALOG_ASSERT(avail <= mMaxFrames);
+            if (avail < 0) {
+                // don't return avail as status, because totalFramesWritten > 0
+                break;
+            }
+            size_t filled = mMaxFrames - (size_t) avail;
             // FIXME cache these values to avoid re-computation
             if (filled <= mSetpoint / 2) {
                 // pipe is (nearly) empty, fill quickly
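
MonoPipe drops its hand-rolled mFront/mRear ring buffer in favor of audio_utils_fifo: the pipe owns the fifo and an audio_utils_fifo_writer, while MonoPipeReader (next file) attaches an audio_utils_fifo_reader to the same fifo. A standalone sketch of that pairing using only the constructor and calls seen in these diffs; the <audio_utils/fifo.h> header path is an assumption:

    #include <stdint.h>
    #include <stdlib.h>
    #include <audio_utils/fifo.h>   // assumed header for audio_utils_fifo

    int main() {
        const size_t frameCount = 256;                 // frames in the fifo
        const size_t frameSize  = 2 * sizeof(int16_t); // stereo 16-bit PCM
        void *buffer = malloc(frameCount * frameSize);

        audio_utils_fifo fifo(frameCount, frameSize, buffer, true /*throttlesWriter*/);
        audio_utils_fifo_writer writer(fifo);
        audio_utils_fifo_reader reader(fifo, true /*throttlesWriter*/);

        int16_t in[16 * 2] = {};                  // 16 silent stereo frames
        int16_t out[16 * 2];
        ssize_t written = writer.write(in, 16);   // counts are in frames, not bytes
        ssize_t avail   = reader.available();     // 16 if the write succeeded
        ssize_t got     = reader.read(out, 16);

        (void)written; (void)avail; (void)got;
        free(buffer);
        return 0;
    }
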
diff --git a/media/libnbaio/MonoPipeReader.cpp b/media/libnbaio/MonoPipeReader.cpp
index 01dc524..a9b4d18 100644
--- a/media/libnbaio/MonoPipeReader.cpp
+++ b/media/libnbaio/MonoPipeReader.cpp
@@ -25,7 +25,7 @@
 
 MonoPipeReader::MonoPipeReader(MonoPipe* pipe) :
         NBAIO_Source(pipe->mFormat),
-        mPipe(pipe)
+        mPipe(pipe), mFifoReader(mPipe->mFifo, true /*throttlesWriter*/)
 {
 }
 
@@ -38,38 +38,21 @@
     if (CC_UNLIKELY(!mNegotiated)) {
         return NEGOTIATE;
     }
-    ssize_t ret = android_atomic_acquire_load(&mPipe->mRear) - mPipe->mFront;
-    ALOG_ASSERT((0 <= ret) && ((size_t) ret <= mPipe->mMaxFrames));
+    ssize_t ret = mFifoReader.available();
+    ALOG_ASSERT(ret <= mPipe->mMaxFrames);
     return ret;
 }
 
 ssize_t MonoPipeReader::read(void *buffer, size_t count)
 {
     // count == 0 is unlikely and not worth checking for explicitly; will be handled automatically
-    ssize_t red = availableToRead();
-    if (CC_UNLIKELY(red <= 0)) {
-        return red;
+    ssize_t actual = mFifoReader.read(buffer, count);
+    ALOG_ASSERT(actual <= count);
+    if (CC_UNLIKELY(actual <= 0)) {
+        return actual;
     }
-    if (CC_LIKELY((size_t) red > count)) {
-        red = count;
-    }
-    size_t front = mPipe->mFront & (mPipe->mMaxFrames - 1);
-    size_t part1 = mPipe->mMaxFrames - front;
-    if (part1 > (size_t) red) {
-        part1 = red;
-    }
-    if (CC_LIKELY(part1 > 0)) {
-        memcpy(buffer, (char *) mPipe->mBuffer + (front * mFrameSize), part1 * mFrameSize);
-        if (CC_UNLIKELY(front + part1 == mPipe->mMaxFrames)) {
-            size_t part2 = red - part1;
-            if (CC_LIKELY(part2 > 0)) {
-                memcpy((char *) buffer + (part1 * mFrameSize), mPipe->mBuffer, part2 * mFrameSize);
-            }
-        }
-        android_atomic_release_store(red + mPipe->mFront, &mPipe->mFront);
-        mFramesRead += red;
-    }
-    return red;
+    mFramesRead += (size_t) actual;
+    return actual;
 }
 
 void MonoPipeReader::onTimestamp(const ExtendedTimestamp &timestamp)
diff --git a/media/libnbaio/NBLog.cpp b/media/libnbaio/NBLog.cpp
index 4d14904..f019df5 100644
--- a/media/libnbaio/NBLog.cpp
+++ b/media/libnbaio/NBLog.cpp
@@ -23,7 +23,7 @@
 #include <string.h>
 #include <time.h>
 #include <new>
-#include <cutils/atomic.h>
+#include <audio_utils/roundup.h>
 #include <media/nbaio/NBLog.h>
 #include <utils/Log.h>
 #include <utils/String8.h>
@@ -67,25 +67,37 @@
 /*static*/
 size_t NBLog::Timeline::sharedSize(size_t size)
 {
+    // TODO fifo now supports non-power-of-2 buffer sizes, so could remove the roundup
     return sizeof(Shared) + roundup(size);
 }
 
 // ---------------------------------------------------------------------------
 
 NBLog::Writer::Writer()
-    : mSize(0), mShared(NULL), mRear(0), mEnabled(false)
+    : mShared(NULL), mFifo(NULL), mFifoWriter(NULL), mEnabled(false)
 {
 }
 
-NBLog::Writer::Writer(size_t size, void *shared)
-    : mSize(roundup(size)), mShared((Shared *) shared), mRear(0), mEnabled(mShared != NULL)
+NBLog::Writer::Writer(void *shared, size_t size)
+    : mShared((Shared *) shared),
+      mFifo(mShared != NULL ?
+        new audio_utils_fifo(size, sizeof(uint8_t),
+            mShared->mBuffer, mShared->mRear, NULL /*throttlesFront*/) : NULL),
+      mFifoWriter(mFifo != NULL ? new audio_utils_fifo_writer(*mFifo) : NULL),
+      mEnabled(mFifoWriter != NULL)
 {
 }
 
-NBLog::Writer::Writer(size_t size, const sp<IMemory>& iMemory)
-    : mSize(roundup(size)), mShared(iMemory != 0 ? (Shared *) iMemory->pointer() : NULL),
-      mIMemory(iMemory), mRear(0), mEnabled(mShared != NULL)
+NBLog::Writer::Writer(const sp<IMemory>& iMemory, size_t size)
+    : Writer(iMemory != 0 ? (Shared *) iMemory->pointer() : NULL, size)
 {
+    mIMemory = iMemory;
+}
+
+NBLog::Writer::~Writer()
+{
+    delete mFifoWriter;
+    delete mFifo;
 }
 
 void NBLog::Writer::log(const char *string)
@@ -94,8 +106,8 @@
         return;
     }
     size_t length = strlen(string);
-    if (length > 255) {
-        length = 255;
+    if (length > Entry::kMaxLength) {
+        length = Entry::kMaxLength;
     }
     log(EVENT_STRING, string, length);
 }
@@ -116,7 +128,7 @@
     if (!mEnabled) {
         return;
     }
-    char buffer[256];
+    char buffer[Entry::kMaxLength + 1 /*NUL*/];
     int length = vsnprintf(buffer, sizeof(buffer), fmt, ap);
     if (length >= (int) sizeof(buffer)) {
         length = sizeof(buffer) - 1;
@@ -152,7 +164,10 @@
     if (!mEnabled) {
         return;
     }
-    if (data == NULL || length > 255) {
+    if (data == NULL || length > Entry::kMaxLength) {
+        // TODO Perhaps it makes sense to display truncated data, or at least a
+        //      message that the data is too long?  The current behavior can
+        //      confuse a programmer debugging their code.
         return;
     }
     switch (event) {
@@ -176,26 +191,16 @@
         log(entry->mEvent, entry->mData, entry->mLength);
         return;
     }
-    size_t rear = mRear & (mSize - 1);
-    size_t written = mSize - rear;      // written = number of bytes that have been written so far
-    size_t need = entry->mLength + 3;   // mEvent, mLength, data[length], mLength
-                                        // need = number of bytes remaining to write
-    if (written > need) {
-        written = need;
-    }
-    size_t i;
+    size_t need = entry->mLength + Entry::kOverhead;    // mEvent, mLength, data[length], mLength
+                                                        // need = number of bytes remaining to write
+
     // FIXME optimize this using memcpy for the data part of the Entry.
     // The Entry could have a method copyTo(ptr, offset, size) to optimize the copy.
-    for (i = 0; i < written; ++i) {
-        mShared->mBuffer[rear + i] = entry->readAt(i);
+    uint8_t temp[Entry::kMaxLength + Entry::kOverhead];
+    for (size_t i = 0; i < need; i++) {
+        temp[i] = entry->readAt(i);
     }
-    if (rear + written == mSize && (need -= written) > 0)  {
-        for (i = 0; i < need; ++i) {
-            mShared->mBuffer[i] = entry->readAt(written + i);
-        }
-        written += need;
-    }
-    android_atomic_release_store(mRear += written, &mShared->mRear);
+    mFifoWriter->write(temp, need);
 }
 
 bool NBLog::Writer::isEnabled() const
@@ -217,8 +222,8 @@
 {
 }
 
-NBLog::LockedWriter::LockedWriter(size_t size, void *shared)
-    : Writer(size, shared)
+NBLog::LockedWriter::LockedWriter(void *shared, size_t size)
+    : Writer(shared, size)
 {
 }
 
@@ -272,60 +277,59 @@
 
 // ---------------------------------------------------------------------------
 
-NBLog::Reader::Reader(size_t size, const void *shared)
-    : mSize(roundup(size)), mShared((const Shared *) shared), mFront(0)
+NBLog::Reader::Reader(const void *shared, size_t size)
+    : mShared((/*const*/ Shared *) shared), /*mIMemory*/
+      mFd(-1), mIndent(0),
+      mFifo(mShared != NULL ?
+        new audio_utils_fifo(size, sizeof(uint8_t),
+            mShared->mBuffer, mShared->mRear, NULL /*throttlesFront*/) : NULL),
+      mFifoReader(mFifo != NULL ? new audio_utils_fifo_reader(*mFifo) : NULL)
 {
 }
 
-NBLog::Reader::Reader(size_t size, const sp<IMemory>& iMemory)
-    : mSize(roundup(size)), mShared(iMemory != 0 ? (const Shared *) iMemory->pointer() : NULL),
-      mIMemory(iMemory), mFront(0)
+NBLog::Reader::Reader(const sp<IMemory>& iMemory, size_t size)
+    : Reader(iMemory != 0 ? (Shared *) iMemory->pointer() : NULL, size)
 {
+    mIMemory = iMemory;
+}
+
+NBLog::Reader::~Reader()
+{
+    delete mFifoReader;
+    delete mFifo;
 }
 
 void NBLog::Reader::dump(int fd, size_t indent)
 {
-    int32_t rear = android_atomic_acquire_load(&mShared->mRear);
-    size_t avail = rear - mFront;
-    if (avail == 0) {
+    if (mFifoReader == NULL) {
         return;
     }
-    size_t lost = 0;
-    if (avail > mSize) {
-        lost = avail - mSize;
-        mFront += lost;
-        avail = mSize;
-    }
-    size_t remaining = avail;       // remaining = number of bytes left to read
-    size_t front = mFront & (mSize - 1);
-    size_t read = mSize - front;    // read = number of bytes that have been read so far
-    if (read > remaining) {
-        read = remaining;
-    }
     // make a copy to avoid race condition with writer
-    uint8_t *copy = new uint8_t[avail];
-    // copy first part of circular buffer up until the wraparound point
-    memcpy(copy, &mShared->mBuffer[front], read);
-    if (front + read == mSize) {
-        if ((remaining -= read) > 0) {
-            // copy second part of circular buffer starting at beginning
-            memcpy(&copy[read], mShared->mBuffer, remaining);
-            read += remaining;
-            // remaining = 0 but not necessary
-        }
-    }
-    mFront += read;
+    size_t capacity = mFifo->capacity();
+
+    // TODO Stack-based allocation of large objects may fail.
+    //      Currently the log buffers are a page or two, which should be safe.
+    //      But if the log buffers ever get a lot larger,
+    //      then change this to allocate from heap when necessary.
+    static const size_t kReasonableStackObjectSize = 32768;
+    ALOGW_IF(capacity > kReasonableStackObjectSize, "Stack-based allocation of object may fail");
+    uint8_t copy[capacity];
+
+    size_t lost;
+    ssize_t actual = mFifoReader->read(copy, capacity, NULL /*timeout*/, &lost);
+    ALOG_ASSERT(actual <= capacity);
+    size_t avail = actual > 0 ? (size_t) actual : 0;
     size_t i = avail;
     Event event;
     size_t length;
     struct timespec ts;
     time_t maxSec = -1;
-    while (i >= 3) {
+    while (i >= Entry::kOverhead) {
         length = copy[i - 1];
-        if (length + 3 > i || copy[i - length - 2] != length) {
+        if (length + Entry::kOverhead > i || copy[i - length - 2] != length) {
             break;
         }
-        event = (Event) copy[i - length - 3];
+        event = (Event) copy[i - length - Entry::kOverhead];
         if (event == EVENT_TIMESTAMP) {
             if (length != sizeof(struct timespec)) {
                 // corrupt
@@ -336,7 +340,7 @@
                 maxSec = ts.tv_sec;
             }
         }
-        i -= length + 3;
+        i -= length + Entry::kOverhead;
     }
     mFd = fd;
     mIndent = indent;
@@ -361,7 +365,7 @@
         event = (Event) copy[i];
         length = copy[i + 1];
         const void *data = &copy[i + 2];
-        size_t advance = length + 3;
+        size_t advance = length + Entry::kOverhead;
         switch (event) {
         case EVENT_STRING:
             body.appendFormat("%.*s", (int) length, (const char *) data);
@@ -375,7 +379,7 @@
             long deltaTotal = 0;
             size_t j = i;
             for (;;) {
-                j += sizeof(struct timespec) + 3;
+                j += sizeof(struct timespec) + 3 /*Entry::kOverhead?*/;
                 if (j >= avail || (Event) copy[j] != EVENT_TIMESTAMP) {
                     break;
                 }
@@ -397,7 +401,7 @@
                 deltaTotal += delta;
                 prevNsec = tsNext.tv_nsec;
             }
-            size_t n = (j - i) / (sizeof(struct timespec) + 3);
+            size_t n = (j - i) / (sizeof(struct timespec) + 3 /*Entry::kOverhead?*/);
             if (deferredTimestamp) {
                 dumpLine(timestamp, body);
                 deferredTimestamp = false;
@@ -431,8 +435,6 @@
     if (deferredTimestamp) {
         dumpLine(timestamp, body);
     }
-    // FIXME it would be more efficient to put a char mCopy[256] as a member variable of the dumper
-    delete[] copy;
 }
 
 void NBLog::Reader::dumpLine(const String8& timestamp, String8& body)
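
The NBLog hunks replace the literal 3 with Entry::kOverhead and 255 with Entry::kMaxLength. The entry layout those constants describe, as implied by the "mEvent, mLength, data[length], mLength" comment and the reader's backward scan (the concrete values below are inferred from this diff, not quoted from a header):

    // One log entry as written into the fifo:
    //   byte 0              : mEvent   (event code)
    //   byte 1              : mLength  (payload size, at most Entry::kMaxLength = 255)
    //   bytes 2 .. length+1 : payload
    //   byte length+2       : mLength  (repeated)
    //
    // Hence Entry::kOverhead == 3. The trailing copy of mLength is what lets
    // Reader::dump() walk backwards: it reads copy[i - 1] as the length and
    // checks copy[i - length - 2] against it before accepting the entry.
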
diff --git a/media/libnbaio/Pipe.cpp b/media/libnbaio/Pipe.cpp
index 13f211d..39df3f4 100644
--- a/media/libnbaio/Pipe.cpp
+++ b/media/libnbaio/Pipe.cpp
@@ -27,9 +27,11 @@
 
 Pipe::Pipe(size_t maxFrames, const NBAIO_Format& format, void *buffer) :
         NBAIO_Sink(format),
+        // TODO fifo now supports non-power-of-2 buffer sizes, so could remove the roundup
         mMaxFrames(roundup(maxFrames)),
         mBuffer(buffer == NULL ? malloc(mMaxFrames * Format_frameSize(format)) : buffer),
-        mRear(0),
+        mFifo(mMaxFrames, Format_frameSize(format), mBuffer, false /*throttlesWriter*/),
+        mFifoWriter(mFifo),
         mReaders(0),
         mFreeBufferInDestructor(buffer == NULL)
 {
@@ -49,25 +51,13 @@
     if (CC_UNLIKELY(!mNegotiated)) {
         return NEGOTIATE;
     }
-    // write() is not multi-thread safe w.r.t. itself, so no mutex or atomic op needed to read mRear
-    size_t rear = mRear & (mMaxFrames - 1);
-    size_t written = mMaxFrames - rear;
-    if (CC_LIKELY(written > count)) {
-        written = count;
+    ssize_t actual = mFifoWriter.write(buffer, count);
+    ALOG_ASSERT(actual <= count);
+    if (actual <= 0) {
+        return actual;
     }
-    memcpy((char *) mBuffer + (rear * mFrameSize), buffer, written * mFrameSize);
-    if (CC_UNLIKELY(rear + written == mMaxFrames)) {
-        if (CC_UNLIKELY((count -= written) > rear)) {
-            count = rear;
-        }
-        if (CC_LIKELY(count > 0)) {
-            memcpy(mBuffer, (char *) buffer + (written * mFrameSize), count * mFrameSize);
-            written += count;
-        }
-    }
-    android_atomic_release_store(written + mRear, &mRear);
-    mFramesWritten += written;
-    return written;
+    mFramesWritten += (size_t) actual;
+    return actual;
 }
 
 }   // namespace android
diff --git a/media/libnbaio/PipeReader.cpp b/media/libnbaio/PipeReader.cpp
index a879647..bd468a6 100644
--- a/media/libnbaio/PipeReader.cpp
+++ b/media/libnbaio/PipeReader.cpp
@@ -25,9 +25,7 @@
 
 PipeReader::PipeReader(Pipe& pipe) :
         NBAIO_Source(pipe.mFormat),
-        mPipe(pipe),
-        // any data already in the pipe is not visible to this PipeReader
-        mFront(android_atomic_acquire_load(&pipe.mRear)),
+        mPipe(pipe), mFifoReader(mPipe.mFifo, false /*throttlesWriter*/),
         mFramesOverrun(0),
         mOverruns(0)
 {
@@ -50,51 +48,50 @@
     if (CC_UNLIKELY(!mNegotiated)) {
         return NEGOTIATE;
     }
-    int32_t rear = android_atomic_acquire_load(&mPipe.mRear);
-    // read() is not multi-thread safe w.r.t. itself, so no mutex or atomic op needed to read mFront
-    size_t avail = rear - mFront;
-    if (CC_UNLIKELY(avail > mPipe.mMaxFrames)) {
-        // Discard 1/16 of the most recent data in pipe to avoid another overrun immediately
-        int32_t oldFront = mFront;
-        mFront = rear - mPipe.mMaxFrames + (mPipe.mMaxFrames >> 4);
-        mFramesOverrun += (size_t) (mFront - oldFront);
+    size_t lost;
+    ssize_t avail = mFifoReader.available(&lost);
+    if (avail == -EOVERFLOW || lost > 0) {
+        mFramesOverrun += lost;
         ++mOverruns;
-        return OVERRUN;
+        avail = OVERRUN;
     }
     return avail;
 }
 
 ssize_t PipeReader::read(void *buffer, size_t count)
 {
-    ssize_t avail = availableToRead();
-    if (CC_UNLIKELY(avail <= 0)) {
-        return avail;
+    size_t lost;
+    ssize_t actual = mFifoReader.read(buffer, count, NULL /*timeout*/, &lost);
+    ALOG_ASSERT(actual <= count);
+    if (actual == -EOVERFLOW || lost > 0) {
+        mFramesOverrun += lost;
+        ++mOverruns;
+        actual = OVERRUN;
     }
-    // An overrun can occur from here on and be silently ignored,
-    // but it will be caught at next read()
-    if (CC_LIKELY(count > (size_t) avail)) {
-        count = avail;
+    if (actual <= 0) {
+        return actual;
     }
-    size_t front = mFront & (mPipe.mMaxFrames - 1);
-    size_t red = mPipe.mMaxFrames - front;
-    if (CC_LIKELY(red > count)) {
-        red = count;
+    mFramesRead += (size_t) actual;
+    return actual;
+}
+
+ssize_t PipeReader::flush()
+{
+    if (CC_UNLIKELY(!mNegotiated)) {
+        return NEGOTIATE;
     }
-    // In particular, an overrun during the memcpy will result in reading corrupt data
-    memcpy(buffer, (char *) mPipe.mBuffer + (front * mFrameSize), red * mFrameSize);
-    // We could re-read the rear pointer here to detect the corruption, but why bother?
-    if (CC_UNLIKELY(front + red == mPipe.mMaxFrames)) {
-        if (CC_UNLIKELY((count -= red) > front)) {
-            count = front;
-        }
-        if (CC_LIKELY(count > 0)) {
-            memcpy((char *) buffer + (red * mFrameSize), mPipe.mBuffer, count * mFrameSize);
-            red += count;
-        }
+    size_t lost;
+    ssize_t flushed = mFifoReader.flush(&lost);
+    if (flushed == -EOVERFLOW || lost > 0) {
+        mFramesOverrun += lost;
+        ++mOverruns;
+        flushed = OVERRUN;
     }
-    mFront += red;
-    mFramesRead += red;
-    return red;
+    if (flushed <= 0) {
+        return flushed;
+    }
+    mFramesRead += (size_t) flushed;  // we consider flushed frames as read, but not lost frames
+    return flushed;
 }
 
 }   // namespace android
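
PipeReader now surfaces writer overruns through the fifo reader's lost-frame count and -EOVERFLOW return, folding both into the NBAIO OVERRUN status after updating mFramesOverrun and mOverruns. A hedged caller-side fragment (the reader object, destination buffer, and counters are assumed; OVERRUN and NEGOTIATE are the existing NBAIO status codes):

    ssize_t n = pipeReader.read(dest, framesWanted);
    if (n == OVERRUN) {
        pipeReader.flush();        // overrun bookkeeping already happened inside read();
                                   // optionally drop the backlog and start fresh
    } else if (n == NEGOTIATE) {
        // format not negotiated yet; negotiate() before reading
    } else if (n > 0) {
        framesConsumed += (size_t) n;
    }
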
diff --git a/media/liboboe/Android.bp b/media/liboboe/Android.bp
new file mode 100644
index 0000000..bfcc049
--- /dev/null
+++ b/media/liboboe/Android.bp
@@ -0,0 +1,28 @@
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+ndk_headers {
+    name: "libOboe_headers",
+    from: "include",
+    to: "",
+    srcs: ["include/oboe/*.h"],
+    license: "include/oboe/NOTICE",
+}
+
+ndk_library {
+    name: "liboboe.ndk",
+    symbol_file: "liboboe.map.txt",
+    first_version: "26",
+    unversioned_until: "current",
+}
diff --git a/media/liboboe/Android.mk b/media/liboboe/Android.mk
new file mode 100644
index 0000000..5053e7d
--- /dev/null
+++ b/media/liboboe/Android.mk
@@ -0,0 +1 @@
+include $(call all-subdir-makefiles)
diff --git a/media/liboboe/README.md b/media/liboboe/README.md
new file mode 100644
index 0000000..80894c6
--- /dev/null
+++ b/media/liboboe/README.md
@@ -0,0 +1 @@
+Oboe Audio input/output API
diff --git a/media/liboboe/examples/Android.mk b/media/liboboe/examples/Android.mk
new file mode 100644
index 0000000..5053e7d
--- /dev/null
+++ b/media/liboboe/examples/Android.mk
@@ -0,0 +1 @@
+include $(call all-subdir-makefiles)
diff --git a/media/liboboe/examples/write_sine/Android.mk b/media/liboboe/examples/write_sine/Android.mk
new file mode 100644
index 0000000..b56328b
--- /dev/null
+++ b/media/liboboe/examples/write_sine/Android.mk
@@ -0,0 +1,6 @@
+# include $(call all-subdir-makefiles)
+
+# Just include static/ for now.
+LOCAL_PATH := $(call my-dir)
+#include $(LOCAL_PATH)/jni/Android.mk
+include $(LOCAL_PATH)/static/Android.mk
diff --git a/media/liboboe/examples/write_sine/README.md b/media/liboboe/examples/write_sine/README.md
new file mode 100644
index 0000000..9f7ee87
--- /dev/null
+++ b/media/liboboe/examples/write_sine/README.md
@@ -0,0 +1,7 @@
+# cd to this directory
+mkdir -p jni/include/oboe
+ln -s $PLATFORM/frameworks/av/media/liboboe/include/oboe/*.h jni/include/oboe
+ln -s $PLATFORM/out/target/product/$TARGET_PRODUCT/symbols/out/soong/ndk/platforms/android-current/arch-arm64/usr/lib/liboboe.so jni
+$NDK/ndk-build
+adb push libs/arm64-v8a/write_sine_threaded /data
+adb shell /data/write_sine_threaded
diff --git a/media/liboboe/examples/write_sine/jni/Android.mk b/media/liboboe/examples/write_sine/jni/Android.mk
new file mode 100644
index 0000000..51a5a85
--- /dev/null
+++ b/media/liboboe/examples/write_sine/jni/Android.mk
@@ -0,0 +1,35 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/liboboe/include
+
+LOCAL_SRC_FILES:= frameworks/av/media/liboboe/src/write_sine.cpp
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia libtinyalsa \
+        libbinder libcutils libutils
+LOCAL_STATIC_LIBRARIES := libsndfile
+LOCAL_MODULE := write_sine_ndk
+LOCAL_SHARED_LIBRARIES += liboboe_prebuilt
+include $(BUILD_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/liboboe/include
+
+LOCAL_SRC_FILES:= frameworks/av/media/liboboe/src/write_sine_threaded.cpp
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia libtinyalsa \
+        libbinder libcutils libutils
+LOCAL_STATIC_LIBRARIES := libsndfile
+LOCAL_MODULE := write_sine_threaded_ndk
+LOCAL_SHARED_LIBRARIES += liboboe_prebuilt
+include $(BUILD_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := liboboe_prebuilt
+LOCAL_SRC_FILES := liboboe.so
+LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
+include $(PREBUILT_SHARED_LIBRARY)
diff --git a/media/liboboe/examples/write_sine/jni/Application.mk b/media/liboboe/examples/write_sine/jni/Application.mk
new file mode 100644
index 0000000..e74475c
--- /dev/null
+++ b/media/liboboe/examples/write_sine/jni/Application.mk
@@ -0,0 +1,3 @@
+# TODO remove this when we support other architectures
+APP_ABI := arm64-v8a
+APP_CPPFLAGS += -std=c++11
diff --git a/media/liboboe/examples/write_sine/src/SineGenerator.h b/media/liboboe/examples/write_sine/src/SineGenerator.h
new file mode 100644
index 0000000..ade7527
--- /dev/null
+++ b/media/liboboe/examples/write_sine/src/SineGenerator.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SINE_GENERATOR_H
+#define SINE_GENERATOR_H
+
+#include <math.h>
+
+class SineGenerator
+{
+public:
+    SineGenerator() {}
+    virtual ~SineGenerator() = default;
+
+    void setup(double frequency, double frameRate) {
+        mFrameRate = frameRate;
+        mPhaseIncrement = frequency * M_PI * 2 / frameRate;
+    }
+
+    void setSweep(double frequencyLow, double frequencyHigh, double seconds) {
+        mPhaseIncrementLow = frequencyLow * M_PI * 2 / mFrameRate;
+        mPhaseIncrementHigh = frequencyHigh * M_PI * 2 / mFrameRate;
+
+        double numFrames = seconds * mFrameRate;
+        mUpScaler = pow((frequencyHigh / frequencyLow), (1.0 / numFrames));
+        mDownScaler = 1.0 / mUpScaler;
+        mGoingUp = true;
+        mSweeping = true;
+    }
+
+    void render(int16_t *buffer, int32_t channelStride, int32_t numFrames) {
+        int sampleIndex = 0;
+        for (int i = 0; i < numFrames; i++) {
+            buffer[sampleIndex] = (int16_t) (32767 * sin(mPhase) * mAmplitude);
+            sampleIndex += channelStride;
+            advancePhase();
+        }
+    }
+    void render(float *buffer, int32_t channelStride, int32_t numFrames) {
+        int sampleIndex = 0;
+        for (int i = 0; i < numFrames; i++) {
+            buffer[sampleIndex] = sin(mPhase) * mAmplitude;
+            sampleIndex += channelStride;
+            advancePhase();
+        }
+    }
+
+private:
+    void advancePhase() {
+        mPhase += mPhaseIncrement;
+        if (mPhase > M_PI * 2) {
+            mPhase -= M_PI * 2;
+        }
+        if (mSweeping) {
+            if (mGoingUp) {
+                mPhaseIncrement *= mUpScaler;
+                if (mPhaseIncrement > mPhaseIncrementHigh) {
+                    mGoingUp = false;
+                }
+            } else {
+                mPhaseIncrement *= mDownScaler;
+                if (mPhaseIncrement < mPhaseIncrementLow) {
+                    mGoingUp = true;
+                }
+            }
+        }
+    }
+
+    double mAmplitude = 0.01;
+    double mPhase = 0.0;
+    double mPhaseIncrement = 440 * M_PI * 2 / 48000;
+    double mFrameRate = 48000;
+    double mPhaseIncrementLow;
+    double mPhaseIncrementHigh;
+    double mUpScaler = 1.0;
+    double mDownScaler = 1.0;
+    bool   mGoingUp = false;
+    bool   mSweeping = false;
+};
+
+#endif /* SINE_GENERATOR_H */
+
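
A minimal usage sketch for the SineGenerator added above: one generator per channel, rendering into an interleaved stereo buffer with channelStride equal to the channel count (this mirrors how the write_sine examples below call it; the buffer sizes here are arbitrary):

    #include <stdint.h>
    #include "SineGenerator.h"

    int main() {
        const int32_t sampleRate = 48000;
        const int32_t channels   = 2;
        const int32_t numFrames  = 480;        // 10 ms at 48 kHz
        int16_t buffer[480 * 2];               // interleaved L/R samples

        SineGenerator left, right;
        left.setup(440.0, sampleRate);         // A4 on the left channel
        right.setup(660.0, sampleRate);        // E5 on the right channel

        left.render(&buffer[0], channels, numFrames);   // writes every other sample
        right.render(&buffer[1], channels, numFrames);
        return 0;
    }
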
diff --git a/media/liboboe/examples/write_sine/src/write_sine.cpp b/media/liboboe/examples/write_sine/src/write_sine.cpp
new file mode 100644
index 0000000..084665c
--- /dev/null
+++ b/media/liboboe/examples/write_sine/src/write_sine.cpp
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Play sine waves using Oboe.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <oboe/OboeDefinitions.h>
+#include <oboe/OboeAudio.h>
+#include "SineGenerator.h"
+
+#define SAMPLE_RATE   48000
+#define NUM_SECONDS   10
+
+static const char *getSharingModeText(oboe_sharing_mode_t mode) {
+    const char *modeText = "unknown";
+    switch (mode) {
+    case OBOE_SHARING_MODE_EXCLUSIVE:
+        modeText = "EXCLUSIVE";
+        break;
+    case OBOE_SHARING_MODE_LEGACY:
+        modeText = "LEGACY";
+        break;
+    case OBOE_SHARING_MODE_SHARED:
+        modeText = "SHARED";
+        break;
+    case OBOE_SHARING_MODE_PUBLIC_MIX:
+        modeText = "PUBLIC_MIX";
+        break;
+    default:
+        break;
+    }
+    return modeText;
+}
+
+int main(int argc, char **argv)
+{
+    (void)argc; // unused
+
+    oboe_result_t result = OBOE_OK;
+
+    const int requestedSamplesPerFrame = 2;
+    int actualSamplesPerFrame = 0;
+    const int requestedSampleRate = SAMPLE_RATE;
+    int actualSampleRate = 0;
+    const oboe_audio_format_t requestedDataFormat = OBOE_AUDIO_FORMAT_PCM16;
+    oboe_audio_format_t actualDataFormat = OBOE_AUDIO_FORMAT_PCM16;
+
+    const oboe_sharing_mode_t requestedSharingMode = OBOE_SHARING_MODE_EXCLUSIVE;
+    //const oboe_sharing_mode_t requestedSharingMode = OBOE_SHARING_MODE_LEGACY;
+    oboe_sharing_mode_t actualSharingMode = OBOE_SHARING_MODE_LEGACY;
+
+    OboeStreamBuilder oboeBuilder = OBOE_STREAM_BUILDER_NONE;
+    OboeStream oboeStream = OBOE_STREAM_NONE;
+    oboe_stream_state_t state = OBOE_STREAM_STATE_UNINITIALIZED;
+    oboe_size_frames_t framesPerBurst = 0;
+    oboe_size_frames_t framesToPlay = 0;
+    oboe_size_frames_t framesLeft = 0;
+    int32_t xRunCount = 0;
+    int16_t *data = nullptr;
+
+    SineGenerator sineOsc1;
+    SineGenerator sineOsc2;
+
+    // Make printf print immediately so that debug info is not stuck
+    // in a buffer if we hang or crash.
+    setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+    printf("%s - Play a sine wave using Oboe\n", argv[0]);
+
+    // Use an OboeStreamBuilder to contain requested parameters.
+    result = Oboe_createStreamBuilder(&oboeBuilder);
+    if (result != OBOE_OK) {
+        goto finish;
+    }
+
+    // Request stream properties.
+    result = OboeStreamBuilder_setSampleRate(oboeBuilder, requestedSampleRate);
+    if (result != OBOE_OK) {
+        goto finish;
+    }
+    result = OboeStreamBuilder_setSamplesPerFrame(oboeBuilder, requestedSamplesPerFrame);
+    if (result != OBOE_OK) {
+        goto finish;
+    }
+    result = OboeStreamBuilder_setFormat(oboeBuilder, requestedDataFormat);
+    if (result != OBOE_OK) {
+        goto finish;
+    }
+    result = OboeStreamBuilder_setSharingMode(oboeBuilder, requestedSharingMode);
+    if (result != OBOE_OK) {
+        goto finish;
+    }
+
+    // Create an OboeStream using the Builder.
+    result = OboeStreamBuilder_openStream(oboeBuilder, &oboeStream);
+    printf("oboeStream 0x%08x\n", oboeStream);
+    if (result != OBOE_OK) {
+        goto finish;
+    }
+
+    result = OboeStream_getState(oboeStream, &state);
+    printf("after open, state = %s\n", Oboe_convertStreamStateToText(state));
+
+    // Check to see what kind of stream we actually got.
+    result = OboeStream_getSampleRate(oboeStream, &actualSampleRate);
+    printf("SampleRate: requested = %d, actual = %d\n", requestedSampleRate, actualSampleRate);
+
+    sineOsc1.setup(440.0, actualSampleRate);
+    sineOsc2.setup(660.0, actualSampleRate);
+
+    result = OboeStream_getSamplesPerFrame(oboeStream, &actualSamplesPerFrame);
+    printf("SamplesPerFrame: requested = %d, actual = %d\n",
+            requestedSamplesPerFrame, actualSamplesPerFrame);
+
+    result = OboeStream_getSharingMode(oboeStream, &actualSharingMode);
+    printf("SharingMode: requested = %s, actual = %s\n",
+            getSharingModeText(requestedSharingMode),
+            getSharingModeText(actualSharingMode));
+
+    // This is the number of frames that are read in one chunk by a DMA controller
+    // or a DSP or a mixer.
+    result = OboeStream_getFramesPerBurst(oboeStream, &framesPerBurst);
+    printf("DataFormat: original framesPerBurst = %d\n",framesPerBurst);
+    if (result != OBOE_OK) {
+        fprintf(stderr, "ERROR - OboeStream_getFramesPerBurst() returned %d\n", result);
+        goto finish;
+    }
+    // Some DMA might use very short bursts of 16 frames. We don't need to write such small
+    // buffers. But it helps to use a multiple of the burst size for predictable scheduling.
+    while (framesPerBurst < 48) {
+        framesPerBurst *= 2;
+    }
+    printf("DataFormat: final framesPerBurst = %d\n",framesPerBurst);
+
+    OboeStream_getFormat(oboeStream, &actualDataFormat);
+    printf("DataFormat: requested = %d, actual = %d\n", requestedDataFormat, actualDataFormat);
+    // TODO handle other data formats
+
+    // Allocate a buffer for the audio data.
+    data = new int16_t[framesPerBurst * actualSamplesPerFrame];
+    if (data == nullptr) {
+        fprintf(stderr, "ERROR - could not allocate data buffer\n");
+        result = OBOE_ERROR_NO_MEMORY;
+        goto finish;
+    }
+
+    // Start the stream.
+    printf("call OboeStream_requestStart()\n");
+    result = OboeStream_requestStart(oboeStream);
+    if (result != OBOE_OK) {
+        fprintf(stderr, "ERROR - OboeStream_requestStart() returned %d\n", result);
+        goto finish;
+    }
+
+    result = OboeStream_getState(oboeStream, &state);
+    printf("after start, state = %s\n", Oboe_convertStreamStateToText(state));
+
+    // Play for a while.
+    framesToPlay = actualSampleRate * NUM_SECONDS;
+    framesLeft = framesToPlay;
+    while (framesLeft > 0) {
+        // Render sine waves to left and right channels.
+        sineOsc1.render(&data[0], actualSamplesPerFrame, framesPerBurst);
+        if (actualSamplesPerFrame > 1) {
+            sineOsc2.render(&data[1], actualSamplesPerFrame, framesPerBurst);
+        }
+
+        // Write audio data to the stream.
+        oboe_nanoseconds_t timeoutNanos = 100 * OBOE_NANOS_PER_MILLISECOND;
+        int minFrames = (framesToPlay < framesPerBurst) ? framesToPlay : framesPerBurst;
+        int actual = OboeStream_write(oboeStream, data, minFrames, timeoutNanos);
+        if (actual < 0) {
+            fprintf(stderr, "ERROR - OboeStream_write() returned %d\n", actual);
+            goto finish;
+        } else if (actual == 0) {
+            fprintf(stderr, "WARNING - OboeStream_write() returned %d\n", actual);
+            goto finish;
+        }
+        framesLeft -= actual;
+    }
+
+    result = OboeStream_getXRunCount(oboeStream, &xRunCount);
+    printf("OboeStream_getXRunCount %d\n", xRunCount);
+
+finish:
+    delete[] data;
+    OboeStream_close(oboeStream);
+    OboeStreamBuilder_delete(oboeBuilder);
+    printf("exiting - Oboe result = %d = %s\n", result, Oboe_convertResultToText(result));
+    return (result != OBOE_OK) ? EXIT_FAILURE : EXIT_SUCCESS;
+}
+
diff --git a/media/liboboe/examples/write_sine/src/write_sine_threaded.cpp b/media/liboboe/examples/write_sine/src/write_sine_threaded.cpp
new file mode 100644
index 0000000..aedcc6e
--- /dev/null
+++ b/media/liboboe/examples/write_sine/src/write_sine_threaded.cpp
@@ -0,0 +1,315 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Play sine waves using an Oboe background thread.
+
+#include <assert.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <math.h>
+#include <time.h>
+#include <oboe/OboeDefinitions.h>
+#include <oboe/OboeAudio.h>
+#include "SineGenerator.h"
+
+#define NUM_SECONDS   10
+#define SHARING_MODE  OBOE_SHARING_MODE_EXCLUSIVE
+//#define SHARING_MODE  OBOE_SHARING_MODE_LEGACY
+
+// Prototype for a callback.
+typedef int audio_callback_proc_t(float *outputBuffer,
+                                     oboe_size_frames_t numFrames,
+                                     void *userContext);
+
+static void *SimpleOboePlayerThreadProc(void *arg);
+
+/**
+ * Simple wrapper for Oboe that opens a default stream and then calls
+ * a callback function to fill the output buffers.
+ */
+class SimpleOboePlayer {
+public:
+    SimpleOboePlayer() {}
+    virtual ~SimpleOboePlayer() {
+        close();
+    };
+
+    void setSharingMode(oboe_sharing_mode_t requestedSharingMode) {
+        mRequestedSharingMode = requestedSharingMode;
+    }
+
+    /** Also known as "sample rate"
+     */
+    int32_t getFramesPerSecond() {
+        return mFramesPerSecond;
+    }
+
+    int32_t getSamplesPerFrame() {
+        return mSamplesPerFrame;
+    }
+
+    /**
+     * Open a stream
+     */
+    oboe_result_t open(audio_callback_proc_t *proc, void *userContext) {
+        mCallbackProc = proc;
+        mUserContext = userContext;
+        oboe_result_t result = OBOE_OK;
+
+        // Use an OboeStreamBuilder to contain requested parameters.
+        result = Oboe_createStreamBuilder(&mBuilder);
+        if (result != OBOE_OK) return result;
+
+        result = OboeStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
+        if (result != OBOE_OK) goto finish1;
+
+        // Open an OboeStream using the Builder.
+        result = OboeStreamBuilder_openStream(mBuilder, &mStream);
+        if (result != OBOE_OK) goto finish1;
+
+        // Check to see what kind of stream we actually got.
+        result = OboeStream_getSampleRate(mStream, &mFramesPerSecond);
+        printf("open() mFramesPerSecond = %d\n", mFramesPerSecond);
+        if (result != OBOE_OK) goto finish2;
+        result = OboeStream_getSamplesPerFrame(mStream, &mSamplesPerFrame);
+        printf("open() mSamplesPerFrame = %d\n", mSamplesPerFrame);
+        if (result != OBOE_OK) goto finish2;
+
+        // This is the number of frames that are read in one chunk by a DMA controller
+        // or a DSP or a mixer.
+        result = OboeStream_getFramesPerBurst(mStream, &mFramesPerBurst);
+        if (result != OBOE_OK) goto finish2;
+        // Some DMA might use very short bursts. We don't need to write such small
+        // buffers. But it helps to use a multiple of the burst size for predictable scheduling.
+        while (mFramesPerBurst < 48) {
+            mFramesPerBurst *= 2;
+        }
+        printf("DataFormat: final framesPerBurst = %d\n",mFramesPerBurst);
+
+        result = OboeStream_getFormat(mStream, &mDataFormat);
+        if (result != OBOE_OK) {
+            fprintf(stderr, "ERROR - OboeStream_getFormat() returned %d\n", result);
+            goto finish2;
+        }
+
+        // Allocate a buffer for the audio data.
+        mOutputBuffer = new float[mFramesPerBurst * mSamplesPerFrame];
+        if (mOutputBuffer == nullptr) {
+            fprintf(stderr, "ERROR - could not allocate data buffer\n");
+            result = OBOE_ERROR_NO_MEMORY;
+        }
+
+        // If needed allocate a buffer for converting float to int16_t.
+        if (mDataFormat == OBOE_AUDIO_FORMAT_PCM16) {
+            mConversionBuffer = new int16_t[mFramesPerBurst * mSamplesPerFrame];
+            if (mConversionBuffer == nullptr) {
+                fprintf(stderr, "ERROR - could not allocate conversion buffer\n");
+                result = OBOE_ERROR_NO_MEMORY;
+            }
+        }
+        return result;
+
+     finish2:
+        OboeStream_close(mStream);
+        mStream = OBOE_HANDLE_INVALID;
+     finish1:
+        OboeStreamBuilder_delete(mBuilder);
+        mBuilder = OBOE_HANDLE_INVALID;
+        return result;
+    }
+
+    oboe_result_t close() {
+        stop();
+        OboeStream_close(mStream);
+        mStream = OBOE_HANDLE_INVALID;
+        OboeStreamBuilder_delete(mBuilder);
+        mBuilder = OBOE_HANDLE_INVALID;
+        delete[] mOutputBuffer;
+        mOutputBuffer = nullptr;
+        delete[] mConversionBuffer;
+        mConversionBuffer = nullptr;
+        return OBOE_OK;
+    }
+
+    // Start a thread that will call the callback proc.
+    oboe_result_t start() {
+        mEnabled = true;
+        oboe_nanoseconds_t nanosPerBurst = mFramesPerBurst * OBOE_NANOS_PER_SECOND
+                                           / mFramesPerSecond;
+        return OboeStream_createThread(mStream, nanosPerBurst,
+                                       SimpleOboePlayerThreadProc,
+                                       this);
+    }
+
+    // Tell the thread to stop.
+    oboe_result_t stop() {
+        mEnabled = false;
+        return OboeStream_joinThread(mStream, nullptr, 2 * OBOE_NANOS_PER_SECOND);
+    }
+
+    oboe_result_t callbackLoop() {
+        int32_t framesWritten = 0;
+        int32_t xRunCount = 0;
+        oboe_result_t result = OBOE_OK;
+
+        result = OboeStream_requestStart(mStream);
+        if (result != OBOE_OK) {
+            fprintf(stderr, "ERROR - OboeStream_requestStart() returned %d\n", result);
+            return result;
+        }
+
+        // Give up after several burst periods have passed.
+        const int burstsPerTimeout = 8;
+        oboe_nanoseconds_t nanosPerTimeout =
+                        burstsPerTimeout * mFramesPerBurst * OBOE_NANOS_PER_SECOND
+                        / mFramesPerSecond;
+
+        while (mEnabled && result >= 0) {
+            // Call application's callback function to fill the buffer.
+            if (mCallbackProc(mOutputBuffer, mFramesPerBurst, mUserContext)) {
+                mEnabled = false;
+            }
+            // if needed, convert from float to int16_t PCM
+            if (mConversionBuffer != nullptr) {
+                int32_t numSamples = mFramesPerBurst * mSamplesPerFrame;
+                for (int i = 0; i < numSamples; i++) {
+                    mConversionBuffer[i] = (int16_t)(32767.0 * mOutputBuffer[i]);
+                }
+                // Write the application data to stream.
+                result = OboeStream_write(mStream, mConversionBuffer, mFramesPerBurst, nanosPerTimeout);
+            } else {
+                // Write the application data to stream.
+                result = OboeStream_write(mStream, mOutputBuffer, mFramesPerBurst, nanosPerTimeout);
+            }
+            if (result < 0) {
+                fprintf(stderr, "ERROR - OboeStream_write() returned %d\n", result);
+            } else {
+                framesWritten += result;
+            }
+        }
+
+        result = OboeStream_getXRunCount(mStream, &xRunCount);
+        printf("OboeStream_getXRunCount %d\n", xRunCount);
+
+        result = OboeStream_requestStop(mStream);
+        if (result != OBOE_OK) {
+            fprintf(stderr, "ERROR - OboeStream_requestStart() returned %d\n", result);
+            return result;
+        }
+
+        return result;
+    }
+
+private:
+    OboeStreamBuilder   mBuilder = OBOE_HANDLE_INVALID;
+    OboeStream          mStream = OBOE_HANDLE_INVALID;
+    float            *  mOutputBuffer = nullptr;
+    int16_t          *  mConversionBuffer = nullptr;
+
+    audio_callback_proc_t * mCallbackProc = nullptr;
+    void             *  mUserContext = nullptr;
+    oboe_sharing_mode_t mRequestedSharingMode = SHARING_MODE;
+    int32_t             mSamplesPerFrame = 0;
+    int32_t             mFramesPerSecond = 0;
+    oboe_size_frames_t  mFramesPerBurst = 0;
+    oboe_audio_format_t mDataFormat = OBOE_AUDIO_FORMAT_PCM16;
+
+    volatile bool       mEnabled = false; // used to request that callback exit its loop
+};
+
+static void *SimpleOboePlayerThreadProc(void *arg) {
+    SimpleOboePlayer *player = (SimpleOboePlayer *) arg;
+    player->callbackLoop();
+    return nullptr;
+}
+
+// Application data that gets passed to the callback.
+typedef struct SineThreadedData_s {
+    SineGenerator  sineOsc1;
+    SineGenerator  sineOsc2;
+    int32_t        samplesPerFrame = 0;
+} SineThreadedData_t;
+
+// Callback function that fills the audio output buffer.
+int MyCallbackProc(float *outputBuffer, int32_t numFrames, void *userContext) {
+    SineThreadedData_t *data = (SineThreadedData_t *) userContext;
+    // Render sine waves to left and right channels.
+    data->sineOsc1.render(&outputBuffer[0], data->samplesPerFrame, numFrames);
+    if (data->samplesPerFrame > 1) {
+        data->sineOsc2.render(&outputBuffer[1], data->samplesPerFrame, numFrames);
+    }
+    return 0;
+}
+
+int main(int argc, char **argv)
+{
+    (void)argc; // unused
+    SimpleOboePlayer player;
+    SineThreadedData_t myData;
+    oboe_result_t result;
+
+    // Make printf print immediately so that debug info is not stuck
+    // in a buffer if we hang or crash.
+    setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+    printf("%s - Play a sine wave using an Oboe Thread\n", argv[0]);
+
+    result = player.open(MyCallbackProc, &myData);
+    if (result != OBOE_OK) {
+        fprintf(stderr, "ERROR -  player.open() returned %d\n", result);
+        goto error;
+    }
+    printf("player.getFramesPerSecond() = %d\n", player.getFramesPerSecond());
+    printf("player.getSamplesPerFrame() = %d\n", player.getSamplesPerFrame());
+    // Use the sample rate actually granted by the stream; it may not be 48000.
+    myData.sineOsc1.setup(440.0, player.getFramesPerSecond());
+    myData.sineOsc1.setSweep(300.0, 2000.0, 5.0);
+    myData.sineOsc2.setup(660.0, player.getFramesPerSecond());
+    myData.sineOsc2.setSweep(400.0, 3000.0, 7.0);
+    myData.samplesPerFrame = player.getSamplesPerFrame();
+
+    result = player.start();
+    if (result != OBOE_OK) {
+        fprintf(stderr, "ERROR -  player.start() returned %d\n", result);
+        goto error;
+    }
+
+    printf("Sleep for %d seconds while audio plays in a background thread.\n", NUM_SECONDS);
+    {
+        // FIXME sleep is not an NDK API
+        // sleep(NUM_SECONDS);
+        const struct timespec request = { .tv_sec = NUM_SECONDS, .tv_nsec = 0 };
+        (void) clock_nanosleep(CLOCK_MONOTONIC, 0 /*flags*/, &request, NULL /*remain*/);
+    }
+    printf("Woke up now.\n");
+
+    result = player.stop();
+    if (result != OBOE_OK) {
+        fprintf(stderr, "ERROR -  player.stop() returned %d\n", result);
+        goto error;
+    }
+    result = player.close();
+    if (result != OBOE_OK) {
+        fprintf(stderr, "ERROR -  player.close() returned %d\n", result);
+        goto error;
+    }
+
+    printf("SUCCESS\n");
+    return EXIT_SUCCESS;
+error:
+    player.close();
+    printf("exiting - Oboe result = %d = %s\n", result, Oboe_convertResultToText(result));
+    return EXIT_FAILURE;
+}
+
diff --git a/media/liboboe/examples/write_sine/static/Android.mk b/media/liboboe/examples/write_sine/static/Android.mk
new file mode 100644
index 0000000..7c8d17c
--- /dev/null
+++ b/media/liboboe/examples/write_sine/static/Android.mk
@@ -0,0 +1,34 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/liboboe/include
+
+# TODO reorganize folders to avoid using ../
+LOCAL_SRC_FILES:= ../src/write_sine.cpp
+
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
+                          libbinder libcutils libutils \
+                          libaudioclient liblog libtinyalsa
+LOCAL_STATIC_LIBRARIES := liboboe
+
+LOCAL_MODULE := write_sine
+include $(BUILD_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/liboboe/include
+
+LOCAL_SRC_FILES:= ../src/write_sine_threaded.cpp
+
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
+                          libbinder libcutils libutils \
+                          libaudioclient liblog libtinyalsa
+LOCAL_STATIC_LIBRARIES := liboboe
+
+LOCAL_MODULE := write_sine_threaded
+include $(BUILD_EXECUTABLE)
diff --git a/media/liboboe/examples/write_sine/static/README.md b/media/liboboe/examples/write_sine/static/README.md
new file mode 100644
index 0000000..768f4cb
--- /dev/null
+++ b/media/liboboe/examples/write_sine/static/README.md
@@ -0,0 +1,2 @@
+Makefile for building simple command line examples.
+They link with Oboe as a static library.
diff --git a/media/liboboe/include/oboe/NOTICE b/media/liboboe/include/oboe/NOTICE
new file mode 100644
index 0000000..d6c0922
--- /dev/null
+++ b/media/liboboe/include/oboe/NOTICE
@@ -0,0 +1,13 @@
+Copyright (C) 2016 The Android Open Source Project
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/media/liboboe/include/oboe/OboeAudio.h b/media/liboboe/include/oboe/OboeAudio.h
new file mode 100644
index 0000000..52e3f69
--- /dev/null
+++ b/media/liboboe/include/oboe/OboeAudio.h
@@ -0,0 +1,574 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This is the 'C' ABI for Oboe.
+ */
+#ifndef OBOE_OBOEAUDIO_H
+#define OBOE_OBOEAUDIO_H
+
+#include "OboeDefinitions.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef oboe_handle_t OboeStream;
+typedef oboe_handle_t OboeStreamBuilder;
+
+#define OBOE_STREAM_NONE         ((OboeStream)OBOE_HANDLE_INVALID)
+#define OBOE_STREAM_BUILDER_NONE ((OboeStreamBuilder)OBOE_HANDLE_INVALID)
+
+/* OBOE_API will probably get defined in a Makefile for a specific platform. */
+#ifndef OBOE_API
+#define OBOE_API /* for exporting symbols */
+#endif
+
+// ============================================================
+// Audio System
+// ============================================================
+
+/**
+ * @return time in the same clock domain as the timestamps
+ */
+OBOE_API oboe_nanoseconds_t Oboe_getNanoseconds(oboe_clockid_t clockid);
+
+/**
+ * The text is the ASCII symbol corresponding to the returnCode,
+ * or an English message saying the returnCode is unrecognized.
+ * This is intended for developers to use when debugging.
+ * It is not for display to users.
+ *
+ * @return pointer to a text representation of an Oboe result code.
+ */
+OBOE_API const char * Oboe_convertResultToText(oboe_result_t returnCode);
+
+/**
+ * The text is the ASCII symbol corresponding to the stream state,
+ * or an English message saying the state is unrecognized.
+ * This is intended for developers to use when debugging.
+ * It is not for display to users.
+ *
+ * @return pointer to a text representation of an Oboe state.
+ */
+OBOE_API const char * Oboe_convertStreamStateToText(oboe_stream_state_t state);
+
+// ============================================================
+// StreamBuilder
+// ============================================================
+
+/**
+ * Create a StreamBuilder that can be used to open a Stream.
+ *
+ * The deviceId is initially unspecified, meaning that the current default device will be used.
+ *
+ * The default direction is OBOE_DIRECTION_OUTPUT.
+ * The default sharing mode is OBOE_SHARING_MODE_LEGACY.
+ * The data format, samplesPerFrame and sampleRate are unspecified and will be
+ * chosen by the device when the stream is opened.
+ *
+ * OboeStreamBuilder_delete() must be called when you are done using the builder.
+ */
+OBOE_API oboe_result_t Oboe_createStreamBuilder(OboeStreamBuilder *builder);
+
+/**
+ * Request an audio device identified by an ID.
+ * The ID is platform specific.
+ * On Android, for example, the ID could be obtained from the Java AudioManager.
+ *
+ * By default, the primary device will be used.
+ *
+ * @param builder handle provided by Oboe_createStreamBuilder()
+ * @param deviceId platform specific identifier or OBOE_DEVICE_UNSPECIFIED
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStreamBuilder_setDeviceId(OboeStreamBuilder builder,
+                                                     oboe_device_id_t deviceId);
+/**
+ * Passes back requested device ID.
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStreamBuilder_getDeviceId(OboeStreamBuilder builder,
+                                                     oboe_device_id_t *deviceId);
+
+/**
+ * Request a sample rate in Hz.
+ * The stream may be opened with a different sample rate.
+ * So the application should query for the actual rate after the stream is opened.
+ *
+ * Technically, this should be called the "frame rate" or "frames per second",
+ * because it refers to the number of complete frames transferred per second.
+ * But it is traditionally called "sample rate". Se we use that term.
+ *
+ * Default is OBOE_UNSPECIFIED.
+ *
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStreamBuilder_setSampleRate(OboeStreamBuilder builder,
+                                                       oboe_sample_rate_t sampleRate);
+
+/**
+ * Passes back the requested sample rate in Hertz (samples per second).
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStreamBuilder_getSampleRate(OboeStreamBuilder builder,
+                                                       oboe_sample_rate_t *sampleRate);
+
+
+/**
+ * Request a number of samples per frame.
+ * The stream may be opened with a different value.
+ * So the application should query for the actual value after the stream is opened.
+ *
+ * Default is OBOE_UNSPECIFIED.
+ *
+ * Note, this quantity is sometimes referred to as "channel count".
+ *
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStreamBuilder_setSamplesPerFrame(OboeStreamBuilder builder,
+                                                   int32_t samplesPerFrame);
+
+/**
+ * Note, this quantity is sometimes referred to as "channel count".
+ *
+ * @param builder handle provided by Oboe_createStreamBuilder()
+ * @param samplesPerFrame pointer to a variable to be set to samplesPerFrame.
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStreamBuilder_getSamplesPerFrame(OboeStreamBuilder builder,
+                                                   int32_t *samplesPerFrame);
+
+
+/**
+ * Request a sample data format, for example OBOE_AUDIO_FORMAT_PCM16.
+ * The application should query for the actual format after the stream is opened.
+ *
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStreamBuilder_setFormat(OboeStreamBuilder builder,
+                                                   oboe_audio_format_t format);
+
+/**
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStreamBuilder_getFormat(OboeStreamBuilder builder,
+                                                   oboe_audio_format_t *format);
+
+/**
+ * Request a mode for sharing the device.
+ * The requested sharing mode may not be available.
+ * So the application should query for the actual mode after the stream is opened.
+ *
+ * @param builder handle provided by Oboe_createStreamBuilder()
+ * @param sharingMode OBOE_SHARING_MODE_LEGACY or OBOE_SHARING_MODE_EXCLUSIVE
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStreamBuilder_setSharingMode(OboeStreamBuilder builder,
+                                                        oboe_sharing_mode_t sharingMode);
+
+/**
+ * Return requested sharing mode.
+ * @return OBOE_OK or a negative error
+ */
+OBOE_API oboe_result_t OboeStreamBuilder_getSharingMode(OboeStreamBuilder builder,
+                                                        oboe_sharing_mode_t *sharingMode);
+
+/**
+ * Request the direction for a stream. The default is OBOE_DIRECTION_OUTPUT.
+ *
+ * @param builder handle provided by Oboe_createStreamBuilder()
+ * @param direction OBOE_DIRECTION_OUTPUT or OBOE_DIRECTION_INPUT
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStreamBuilder_setDirection(OboeStreamBuilder builder,
+                                                      oboe_direction_t direction);
+
+/**
+ * @param builder handle provided by Oboe_createStreamBuilder()
+ * @param direction pointer to a variable to be set to the currently requested direction.
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStreamBuilder_getDirection(OboeStreamBuilder builder,
+                                                      oboe_direction_t *direction);
+
+/**
+ * Open a stream based on the options in the StreamBuilder.
+ *
+ * OboeStream_close must be called when finished with the stream to recover
+ * the memory and to free the associated resources.
+ *
+ * @param builder handle provided by Oboe_createStreamBuilder()
+ * @param stream pointer to a variable to receive the new stream handle
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t  OboeStreamBuilder_openStream(OboeStreamBuilder builder,
+                                                     OboeStream *stream);
+
+/**
+ * Delete the resources associated with the StreamBuilder.
+ *
+ * @param builder handle provided by Oboe_createStreamBuilder()
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t  OboeStreamBuilder_delete(OboeStreamBuilder builder);
+
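+/*
+ * A minimal usage sketch (not part of the API itself): create a builder,
+ * request a rate and format, open the stream, then delete the builder.
+ * The requested values are only illustrative; an application should query
+ * the opened stream for the values actually granted, and call
+ * OboeStream_close() when it is done with the stream.
+ *
+ * <pre><code>
+ * OboeStreamBuilder builder = OBOE_STREAM_BUILDER_NONE;
+ * OboeStream stream = OBOE_STREAM_NONE;
+ * oboe_result_t result = Oboe_createStreamBuilder(&builder);
+ * if (result == OBOE_OK) {
+ *     OboeStreamBuilder_setSampleRate(builder, 48000);
+ *     OboeStreamBuilder_setFormat(builder, OBOE_AUDIO_FORMAT_PCM_FLOAT);
+ *     result = OboeStreamBuilder_openStream(builder, &stream);
+ *     OboeStreamBuilder_delete(builder);
+ * }
+ * </code></pre>
+ */
+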
+// ============================================================
+// Stream Control
+// ============================================================
+
+/**
+ * Free the resources associated with a stream created by OboeStreamBuilder_openStream()
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t  OboeStream_close(OboeStream stream);
+
+/**
+ * Asynchronously request to start playing the stream. For output streams, one should
+ * write to the stream to fill the buffer before starting.
+ * Otherwise it will underflow.
+ * After this call the state will be in OBOE_STREAM_STATE_STARTING or OBOE_STREAM_STATE_STARTED.
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t  OboeStream_requestStart(OboeStream stream);
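+
+/*
+ * Sketch of priming an output stream before starting, as suggested above.
+ * Writing one burst of silence is illustrative only; the 'silence' buffer and
+ * 'framesPerBurst' are assumed to have been set up by the application.
+ *
+ * <pre><code>
+ * // 'silence' holds framesPerBurst frames of zeros in the stream's format.
+ * OboeStream_write(stream, silence, framesPerBurst, 0);  // timeout 0: do not wait
+ * result = OboeStream_requestStart(stream);
+ * </code></pre>
+ */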
+
+/**
+ * Asynchronous request for the stream to pause.
+ * Pausing a stream will freeze the data flow but not flush any buffers.
+ * Use OboeStream_requestStart() to resume playback after a pause.
+ * After this call the state will be in OBOE_STREAM_STATE_PAUSING or OBOE_STREAM_STATE_PAUSED.
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t  OboeStream_requestPause(OboeStream stream);
+
+/**
+ * Asynchronous request for the stream to flush.
+ * Flushing will discard any pending data.
+ * This call only works if the stream is pausing or paused. TODO review
+ * Frame counters are not reset by a flush. They may be advanced.
+ * After this call the state will be in OBOE_STREAM_STATE_FLUSHING or OBOE_STREAM_STATE_FLUSHED.
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t  OboeStream_requestFlush(OboeStream stream);
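+
+/*
+ * Sketch (illustrative only): since flush is only valid while pausing or
+ * paused, request a pause, wait until the PAUSING state has been left, then
+ * request the flush. MY_TIMEOUT_NANOS is a placeholder, as in the example
+ * shown for OboeStream_waitForStateChange() below.
+ *
+ * <pre><code>
+ * oboe_stream_state_t state = OBOE_STREAM_STATE_PAUSING;
+ * OboeStream_requestPause(stream);
+ * OboeStream_waitForStateChange(stream, OBOE_STREAM_STATE_PAUSING,
+ *                               &state, MY_TIMEOUT_NANOS);
+ * if (state == OBOE_STREAM_STATE_PAUSED) {
+ *     OboeStream_requestFlush(stream);
+ * }
+ * </code></pre>
+ */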
+
+/**
+ * Asynchronous request for the stream to stop.
+ * The stream will stop after all of the data currently buffered has been played.
+ * After this call the state will be in OBOE_STREAM_STATE_STOPPING or OBOE_STREAM_STATE_STOPPED.
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t  OboeStream_requestStop(OboeStream stream);
+
+/**
+ * Query the current state, e.g. OBOE_STREAM_STATE_PAUSING
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param state pointer to a variable that will be set to the current state
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStream_getState(OboeStream stream, oboe_stream_state_t *state);
+
+/**
+ * Wait until the current state no longer matches the input state.
+ *
+ * <pre><code>
+ * oboe_stream_state_t currentState;
+ * oboe_result_t result = OboeStream_getState(stream, &currentState);
+ * while (result == OBOE_OK && currentState != OBOE_STREAM_STATE_PAUSING) {
+ *     result = OboeStream_waitForStateChange(
+ *                                   stream, currentState, &currentState, MY_TIMEOUT_NANOS);
+ * }
+ * </code></pre>
+ *
+ * @param stream A handle provided by OboeStreamBuilder_openStream()
+ * @param inputState The state we want to avoid.
+ * @param nextState Pointer to a variable that will be set to the new state.
+ * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStream_waitForStateChange(OboeStream stream,
+                                            oboe_stream_state_t inputState,
+                                            oboe_stream_state_t *nextState,
+                                            oboe_nanoseconds_t timeoutNanoseconds);
+
+// ============================================================
+// Stream I/O
+// ============================================================
+
+/**
+ * Read data from the stream.
+ *
+ * The call will wait until the read is complete or until it runs out of time.
+ * If timeoutNanos is zero then this call will not wait.
+ *
+ * Note that timeoutNanoseconds is a relative duration in wall clock time.
+ * Time will not stop if the thread is asleep.
+ * So it will be implemented using CLOCK_BOOTTIME.
+ *
+ * This call is "strong non-blocking" unless it has to wait for data.
+ *
+ * @param stream A stream created using OboeStreamBuilder_openStream().
+ * @param buffer The address of the first sample.
+ * @param numFrames Number of frames to read. Only complete frames will be read.
+ * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
+ * @return The number of frames actually read or a negative error.
+ */
+OBOE_API oboe_result_t OboeStream_read(OboeStream stream,
+                               void *buffer,
+                               oboe_size_frames_t numFrames,
+                               oboe_nanoseconds_t timeoutNanoseconds);
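+
+/*
+ * Sketch of a simple blocking capture loop step for an input stream. The
+ * FRAMES_PER_BURST and SAMPLES_PER_FRAME constants are illustrative
+ * assumptions, not part of this API.
+ *
+ * <pre><code>
+ * int16_t buffer[FRAMES_PER_BURST * SAMPLES_PER_FRAME];
+ * oboe_result_t framesRead = OboeStream_read(stream, buffer, FRAMES_PER_BURST,
+ *                                            OBOE_NANOS_PER_SECOND);
+ * if (framesRead < 0) {
+ *     // handle the error
+ * } else {
+ *     // framesRead frames are now valid in buffer
+ * }
+ * </code></pre>
+ */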
+
+/**
+ * Write data to the stream.
+ *
+ * The call will wait until the write is complete or until it runs out of time.
+ * If timeoutNanos is zero then this call will not wait.
+ *
+ * Note that timeoutNanoseconds is a relative duration in wall clock time.
+ * Time will not stop if the thread is asleep.
+ * So it will be implemented using CLOCK_BOOTTIME.
+ *
+ * This call is "strong non-blocking" unless it has to wait for room in the buffer.
+ *
+ * @param stream A stream created using OboeStreamBuilder_openStream().
+ * @param buffer The address of the first sample.
+ * @param numFrames Number of frames to write. Only complete frames will be written.
+ * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
+ * @return The number of frames actually written or a negative error.
+ */
+OBOE_API oboe_result_t OboeStream_write(OboeStream stream,
+                               const void *buffer,
+                               oboe_size_frames_t numFrames,
+                               oboe_nanoseconds_t timeoutNanoseconds);
+
+
+// ============================================================
+// High priority audio threads
+// ============================================================
+
+typedef void *(oboe_audio_thread_proc_t)(void *);
+
+/**
+ * Create a thread associated with a stream. The thread has special properties for
+ * low latency audio performance. This thread can be used to implement a callback API.
+ *
+ * Only one thread may be associated with a stream.
+ *
+ * Note that this API is in flux.
+ *
+ * @param stream A stream created using OboeStreamBuilder_openStream().
+ * @param periodNanoseconds the estimated period at which the audio thread will need to wake up
+ * @param threadProc your thread entry point
+ * @param arg an argument that will be passed to your thread entry point
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStream_createThread(OboeStream stream,
+                                     oboe_nanoseconds_t periodNanoseconds,
+                                     oboe_audio_thread_proc_t *threadProc,
+                                     void *arg);
+
+/**
+ * Wait until the thread exits or an error occurs.
+ * The thread handle will be deleted.
+ *
+ * @param stream A stream created using OboeStreamBuilder_openStream().
+ * @param returnArg a pointer to a variable to receive the return value
+ * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStream_joinThread(OboeStream stream,
+                                   void **returnArg,
+                                   oboe_nanoseconds_t timeoutNanoseconds);
+
+// ============================================================
+// Stream - queries
+// ============================================================
+
+
+/**
+ * This can be used to adjust the latency of the buffer by changing
+ * the threshold where blocking will occur.
+ * By combining this with OboeStream_getXRunCount(), the latency can be tuned
+ * at run-time for each device.
+ *
+ * This cannot be set higher than OboeStream_getBufferCapacity().
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param requestedFrames requested number of frames that can be filled without blocking
+ * @param actualFrames receives final number of frames
+ * @return OBOE_OK or a negative error
+ */
+OBOE_API oboe_result_t OboeStream_setBufferSize(OboeStream stream,
+                                                oboe_size_frames_t requestedFrames,
+                                                oboe_size_frames_t *actualFrames);
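+
+/*
+ * Illustrative tuning sketch: grow the buffer by one burst whenever new
+ * underruns appear. 'previousXRuns' and 'framesPerBurst' are assumed to be
+ * maintained by the application.
+ *
+ * <pre><code>
+ * int32_t xRuns = 0;
+ * OboeStream_getXRunCount(stream, &xRuns);
+ * if (xRuns > previousXRuns) {
+ *     oboe_size_frames_t size = 0;
+ *     OboeStream_getBufferSize(stream, &size);
+ *     OboeStream_setBufferSize(stream, size + framesPerBurst, &size);
+ *     previousXRuns = xRuns;
+ * }
+ * </code></pre>
+ */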
+
+/**
+ * Query the maximum number of frames that can be filled without blocking.
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param frames pointer to variable to receive the buffer size
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStream_getBufferSize(OboeStream stream, oboe_size_frames_t *frames);
+
+/**
+ * Query the number of frames that are read or written by the endpoint at one time.
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param frames pointer to variable to receive the burst size
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStream_getFramesPerBurst(OboeStream stream, oboe_size_frames_t *frames);
+
+/**
+ * Query maximum buffer capacity in frames.
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param frames pointer to variable to receive the buffer capacity
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStream_getBufferCapacity(OboeStream stream, oboe_size_frames_t *frames);
+
+/**
+ * An XRun is an Underrun or an Overrun.
+ * During playing, an underrun will occur if the stream is not written in time
+ * and the system runs out of valid data.
+ * During recording, an overrun will occur if the stream is not read in time
+ * and there is no place to put the incoming data so it is discarded.
+ *
+ * An underrun or overrun can cause an audible "pop" or "glitch".
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param xRunCount pointer to variable to receive the underrun or overrun count
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStream_getXRunCount(OboeStream stream, int32_t *xRunCount);
+
+/**
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param sampleRate pointer to variable to receive the actual sample rate
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStream_getSampleRate(OboeStream stream, oboe_sample_rate_t *sampleRate);
+
+/**
+ * The samplesPerFrame is also known as channelCount.
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param samplesPerFrame pointer to variable to receive the actual samples per frame
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStream_getSamplesPerFrame(OboeStream stream, int32_t *samplesPerFrame);
+
+/**
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param deviceId pointer to variable to receive the actual device ID
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStream_getDeviceId(OboeStream stream, oboe_device_id_t *deviceId);
+
+/**
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param format pointer to variable to receive the actual data format
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStream_getFormat(OboeStream stream, oboe_audio_format_t *format);
+
+/**
+ * Provide actual sharing mode.
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param sharingMode pointer to variable to receive the actual sharing mode
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStream_getSharingMode(OboeStream stream,
+                                        oboe_sharing_mode_t *sharingMode);
+
+/**
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param direction pointer to a variable to be set to the current direction.
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStream_getDirection(OboeStream stream, oboe_direction_t *direction);
+
+/**
+ * Passes back the number of frames that have been written since the stream was created.
+ * For an output stream, this will be advanced by the application calling write().
+ * For an input stream, this will be advanced by the device or service.
+ *
+ * The frame position is monotonically increasing.
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param frames pointer to variable to receive the frames written
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStream_getFramesWritten(OboeStream stream,
+                                                   oboe_position_frames_t *frames);
+
+/**
+ * Passes back the number of frames that have been read since the stream was created.
+ * For an output stream, this will be advanced by the device or service.
+ * For an input stream, this will be advanced by the application calling read().
+ *
+ * The frame position is monotonically increasing.
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param frames pointer to variable to receive the frames read
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStream_getFramesRead(OboeStream stream, oboe_position_frames_t *frames);
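+
+/*
+ * Sketch (illustrative, not prescribed by this API): for an output stream,
+ * the difference between frames written by the application and frames read
+ * by the device gives a rough estimate of how much audio is still buffered.
+ *
+ * <pre><code>
+ * oboe_position_frames_t written = 0;
+ * oboe_position_frames_t read = 0;
+ * OboeStream_getFramesWritten(stream, &written);
+ * OboeStream_getFramesRead(stream, &read);
+ * oboe_position_frames_t buffered = written - read;
+ * </code></pre>
+ */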
+
+/**
+ * Passes back the time at which a particular frame was presented.
+ * This can be used to synchronize audio with video or MIDI.
+ * It can also be used to align a recorded stream with a playback stream.
+ *
+ * Timestamps are only valid when the stream is in OBOE_STREAM_STATE_STARTED.
+ * OBOE_ERROR_INVALID_STATE will be returned if the stream is not started.
+ * Note that because requestStart() is asynchronous, timestamps will not be valid until
+ * a short time after calling requestStart().
+ * So OBOE_ERROR_INVALID_STATE should not be considered a fatal error.
+ * Just try calling again later.
+ *
+ * If an error occurs, then the position and time will not be modified.
+ *
+ * The position and time passed back are monotonically increasing.
+ *
+ * @param stream A handle provided by OboeStreamBuilder_openStream()
+ * @param clockid OBOE_CLOCK_MONOTONIC or OBOE_CLOCK_BOOTTIME
+ * @param framePosition pointer to a variable to receive the position
+ * @param timeNanoseconds pointer to a variable to receive the time
+ * @return OBOE_OK or a negative error
+ */
+OBOE_API oboe_result_t OboeStream_getTimestamp(OboeStream stream,
+                                      oboe_clockid_t clockid,
+                                      oboe_position_frames_t *framePosition,
+                                      oboe_nanoseconds_t *timeNanoseconds);
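+
+/*
+ * Sketch (illustrative) of polling for a timestamp shortly after starting the
+ * stream; OBOE_ERROR_INVALID_STATE is treated as "not ready yet", as
+ * described above.
+ *
+ * <pre><code>
+ * oboe_position_frames_t position = 0;
+ * oboe_nanoseconds_t time = 0;
+ * oboe_result_t result = OboeStream_getTimestamp(stream, OBOE_CLOCK_MONOTONIC,
+ *                                                &position, &time);
+ * if (result == OBOE_ERROR_INVALID_STATE) {
+ *     // The stream has not been running long enough; try again later.
+ * }
+ * </code></pre>
+ */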
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //OBOE_OBOEAUDIO_H
diff --git a/media/liboboe/include/oboe/OboeDefinitions.h b/media/liboboe/include/oboe/OboeDefinitions.h
new file mode 100644
index 0000000..9d56a24
--- /dev/null
+++ b/media/liboboe/include/oboe/OboeDefinitions.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_OBOEDEFINITIONS_H
+#define OBOE_OBOEDEFINITIONS_H
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef int32_t  oboe_handle_t; // negative handles are error codes
+typedef int32_t  oboe_result_t;
+/**
+ * A platform specific identifier for a device.
+ */
+typedef int32_t  oboe_device_id_t;
+typedef int32_t  oboe_sample_rate_t;
+/** This is used for small quantities such as the number of frames in a buffer. */
+typedef int32_t  oboe_size_frames_t;
+/** This is used for small quantities such as the number of bytes in a frame. */
+typedef int32_t  oboe_size_bytes_t;
+/**
+ * This is used for large quantities, such as the number of frames that have
+ * been played since a stream was started.
+ * At 48000 Hz, a 32-bit integer would wrap around in just over 12 hours.
+ */
+typedef int64_t  oboe_position_frames_t;
+
+typedef int64_t  oboe_nanoseconds_t;
+
+/**
+ * This is used to represent a value that has not been specified.
+ * For example, an application could use OBOE_UNSPECIFIED to indicate
+ * that it did not care what the specific value of a parameter was
+ * and would accept whatever it was given.
+ */
+#define OBOE_UNSPECIFIED           0
+#define OBOE_DEVICE_UNSPECIFIED    ((oboe_device_id_t) -1)
+#define OBOE_NANOS_PER_MICROSECOND ((int64_t)1000)
+#define OBOE_NANOS_PER_MILLISECOND (OBOE_NANOS_PER_MICROSECOND * 1000)
+#define OBOE_MILLIS_PER_SECOND     1000
+#define OBOE_NANOS_PER_SECOND      (OBOE_NANOS_PER_MILLISECOND * OBOE_MILLIS_PER_SECOND)
+
+#define OBOE_HANDLE_INVALID     ((oboe_handle_t)-1)
+
+typedef enum oboe_direction_t {
+    OBOE_DIRECTION_OUTPUT,
+    OBOE_DIRECTION_INPUT,
+    OBOE_DIRECTION_COUNT // This should always be last.
+} oboe_direction_t; // typedef so the bare name can be used as a type in C
+
+typedef enum oboe_audio_format_t {
+    OBOE_AUDIO_FORMAT_INVALID = -1,
+    OBOE_AUDIO_FORMAT_UNSPECIFIED = 0,
+    OBOE_AUDIO_FORMAT_PCM16, // TODO rename to _PCM_I16
+    OBOE_AUDIO_FORMAT_PCM_FLOAT,
+    OBOE_AUDIO_FORMAT_PCM824, // TODO rename to _PCM_I8_24
+    OBOE_AUDIO_FORMAT_PCM32  // TODO rename to _PCM_I32
+} oboe_audio_format_t; // typedef so the bare name can be used as a type in C
+
+enum {
+    OBOE_OK,
+    OBOE_ERROR_BASE = -900, // TODO review
+    OBOE_ERROR_DISCONNECTED,
+    OBOE_ERROR_ILLEGAL_ARGUMENT,
+    OBOE_ERROR_INCOMPATIBLE,
+    OBOE_ERROR_INTERNAL, // an underlying API returned an error code
+    OBOE_ERROR_INVALID_STATE,
+    OBOE_ERROR_UNEXPECTED_STATE,
+    OBOE_ERROR_UNEXPECTED_VALUE,
+    OBOE_ERROR_INVALID_HANDLE,
+    OBOE_ERROR_INVALID_QUERY,
+    OBOE_ERROR_UNIMPLEMENTED,
+    OBOE_ERROR_UNAVAILABLE,
+    OBOE_ERROR_NO_FREE_HANDLES,
+    OBOE_ERROR_NO_MEMORY,
+    OBOE_ERROR_NULL,
+    OBOE_ERROR_TIMEOUT,
+    OBOE_ERROR_WOULD_BLOCK,
+    OBOE_ERROR_INVALID_ORDER,
+    OBOE_ERROR_OUT_OF_RANGE
+};
+
+typedef enum {
+    OBOE_CLOCK_MONOTONIC, // Clock since booted, pauses when CPU is sleeping.
+    OBOE_CLOCK_BOOTTIME,  // Clock since booted, runs all the time.
+    OBOE_CLOCK_COUNT // This should always be last.
+} oboe_clockid_t;
+
+typedef enum
+{
+    OBOE_STREAM_STATE_UNINITIALIZED = 0,
+    OBOE_STREAM_STATE_OPEN,
+    OBOE_STREAM_STATE_STARTING,
+    OBOE_STREAM_STATE_STARTED,
+    OBOE_STREAM_STATE_PAUSING,
+    OBOE_STREAM_STATE_PAUSED,
+    OBOE_STREAM_STATE_FLUSHING,
+    OBOE_STREAM_STATE_FLUSHED,
+    OBOE_STREAM_STATE_STOPPING,
+    OBOE_STREAM_STATE_STOPPED,
+    OBOE_STREAM_STATE_CLOSING,
+    OBOE_STREAM_STATE_CLOSED,
+} oboe_stream_state_t;
+
+// TODO review API
+typedef enum {
+    /**
+     * This will use an AudioTrack object for playing audio
+     * and an AudioRecord for recording data.
+     */
+    OBOE_SHARING_MODE_LEGACY,
+    /**
+     * This will be the only stream using a particular source or sink.
+     * This mode will provide the lowest possible latency.
+     * You should close EXCLUSIVE streams immediately when you are not using them.
+     */
+    OBOE_SHARING_MODE_EXCLUSIVE,
+    /**
+     * Multiple applications will be mixed by the Oboe Server.
+     * This will have higher latency than the EXCLUSIVE mode.
+     */
+    OBOE_SHARING_MODE_SHARED,
+    /**
+     * Multiple applications will do their own mixing into a memory mapped buffer.
+     * It may be possible for malicious applications to read the data produced by
+     * other apps. So do not use this for private data such as telephony or messaging.
+     */
+    OBOE_SHARING_MODE_PUBLIC_MIX,
+    OBOE_SHARING_MODE_COUNT // This should always be last.
+} oboe_sharing_mode_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // OBOE_OBOEDEFINITIONS_H
diff --git a/media/liboboe/include/oboe/README.md b/media/liboboe/include/oboe/README.md
new file mode 100644
index 0000000..de60d03
--- /dev/null
+++ b/media/liboboe/include/oboe/README.md
@@ -0,0 +1,4 @@
+Oboe Audio headers
+
+This folder contains the public header files.
+
diff --git a/media/liboboe/liboboe.map.txt b/media/liboboe/liboboe.map.txt
new file mode 100644
index 0000000..9be7fe1
--- /dev/null
+++ b/media/liboboe/liboboe.map.txt
@@ -0,0 +1,46 @@
+LIBOBOE {
+  global:
+    Oboe_getNanoseconds;
+    Oboe_convertResultToText;
+    Oboe_convertStreamStateToText;
+    Oboe_createStreamBuilder;
+    OboeStreamBuilder_setDeviceId;
+    OboeStreamBuilder_setSampleRate;
+    OboeStreamBuilder_getSampleRate;
+    OboeStreamBuilder_setSamplesPerFrame;
+    OboeStreamBuilder_getSamplesPerFrame;
+    OboeStreamBuilder_setFormat;
+    OboeStreamBuilder_getFormat;
+    OboeStreamBuilder_setSharingMode;
+    OboeStreamBuilder_getSharingMode;
+    OboeStreamBuilder_setDirection;
+    OboeStreamBuilder_getDirection;
+    OboeStreamBuilder_openStream;
+    OboeStreamBuilder_delete;
+    OboeStream_close;
+    OboeStream_requestStart;
+    OboeStream_requestPause;
+    OboeStream_requestFlush;
+    OboeStream_requestStop;
+    OboeStream_getState;
+    OboeStream_waitForStateChange;
+    OboeStream_read;
+    OboeStream_write;
+    OboeStream_createThread;
+    OboeStream_joinThread;
+    OboeStream_setBufferSize;
+    OboeStream_getBufferSize;
+    OboeStream_getFramesPerBurst;
+    OboeStream_getBufferCapacity;
+    OboeStream_getXRunCount;
+    OboeStream_getSampleRate;
+    OboeStream_getSamplesPerFrame;
+    OboeStream_getFormat;
+    OboeStream_getSharingMode;
+    OboeStream_getDirection;
+    OboeStream_getFramesWritten;
+    OboeStream_getFramesRead;
+    OboeStream_getTimestamp;
+  local:
+    *;
+};
diff --git a/media/liboboe/src/Android.mk b/media/liboboe/src/Android.mk
new file mode 100644
index 0000000..59edcb2
--- /dev/null
+++ b/media/liboboe/src/Android.mk
@@ -0,0 +1,108 @@
+LOCAL_PATH:= $(call my-dir)
+
+# ======================= STATIC LIBRARY ==========================
+# This is being built because it makes Oboe testing very easy with a complete executable.
+# TODO Remove this target later, when not needed.
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := liboboe
+LOCAL_MODULE_TAGS := optional
+
+LIBOBOE_DIR := $(TOP)/frameworks/av/media/liboboe
+LIBOBOE_SRC_DIR := $(LIBOBOE_DIR)/src
+
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/native/include \
+    system/core/base/include \
+    frameworks/native/media/liboboe/include/include \
+    frameworks/av/media/liboboe/include \
+    $(LOCAL_PATH) \
+    $(LOCAL_PATH)/binding \
+    $(LOCAL_PATH)/client \
+    $(LOCAL_PATH)/core \
+    $(LOCAL_PATH)/fifo \
+    $(LOCAL_PATH)/legacy \
+    $(LOCAL_PATH)/utility
+
+LOCAL_SRC_FILES = \
+    core/AudioStream.cpp \
+    core/AudioStreamBuilder.cpp \
+    core/OboeAudio.cpp \
+    legacy/AudioStreamRecord.cpp \
+    legacy/AudioStreamTrack.cpp \
+    utility/HandleTracker.cpp \
+    utility/OboeUtilities.cpp \
+    fifo/FifoBuffer.cpp \
+    fifo/FifoControllerBase.cpp \
+    client/AudioEndpoint.cpp \
+    client/AudioStreamInternal.cpp \
+    client/IsochronousClockModel.cpp \
+    binding/SharedMemoryParcelable.cpp \
+    binding/SharedRegionParcelable.cpp \
+    binding/RingBufferParcelable.cpp \
+    binding/AudioEndpointParcelable.cpp \
+    binding/OboeStreamRequest.cpp \
+    binding/OboeStreamConfiguration.cpp \
+    binding/IOboeAudioService.cpp
+
+LOCAL_CFLAGS += -Wno-unused-parameter -Wall -Werror
+
+# By default, all symbols are hidden.
+# LOCAL_CFLAGS += -fvisibility=hidden
+# OBOE_API is used to explicitly export a function or a variable as a visible symbol.
+LOCAL_CFLAGS += -DOBOE_API='__attribute__((visibility("default")))'
+
+include $(BUILD_STATIC_LIBRARY)
+
+# ======================= SHARED LIBRARY ==========================
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := liboboe
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/native/include \
+    system/core/base/include \
+    frameworks/native/media/liboboe/include/include \
+    frameworks/av/media/liboboe/include \
+    $(LOCAL_PATH) \
+    $(LOCAL_PATH)/binding \
+    $(LOCAL_PATH)/client \
+    $(LOCAL_PATH)/core \
+    $(LOCAL_PATH)/fifo \
+    $(LOCAL_PATH)/legacy \
+    $(LOCAL_PATH)/utility
+
+LOCAL_SRC_FILES = core/AudioStream.cpp \
+    core/AudioStreamBuilder.cpp \
+    core/OboeAudio.cpp \
+    legacy/AudioStreamRecord.cpp \
+    legacy/AudioStreamTrack.cpp \
+    utility/HandleTracker.cpp \
+    utility/OboeUtilities.cpp \
+    fifo/FifoBuffer.cpp \
+    fifo/FifoControllerBase.cpp \
+    client/AudioEndpoint.cpp \
+    client/AudioStreamInternal.cpp \
+    client/IsochronousClockModel.cpp \
+    binding/SharedMemoryParcelable.cpp \
+    binding/SharedRegionParcelable.cpp \
+    binding/RingBufferParcelable.cpp \
+    binding/AudioEndpointParcelable.cpp \
+    binding/OboeStreamRequest.cpp \
+    binding/OboeStreamConfiguration.cpp \
+    binding/IOboeAudioService.cpp
+
+LOCAL_CFLAGS += -Wno-unused-parameter -Wall -Werror
+
+# By default, all symbols are hidden.
+# LOCAL_CFLAGS += -fvisibility=hidden
+# OBOE_API is used to explicitly export a function or a variable as a visible symbol.
+LOCAL_CFLAGS += -DOBOE_API='__attribute__((visibility("default")))'
+
+LOCAL_SHARED_LIBRARIES := libaudioclient liblog libcutils libutils libbinder
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/liboboe/src/binding/AudioEndpointParcelable.cpp b/media/liboboe/src/binding/AudioEndpointParcelable.cpp
new file mode 100644
index 0000000..096a819
--- /dev/null
+++ b/media/liboboe/src/binding/AudioEndpointParcelable.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+#include "binding/OboeServiceDefinitions.h"
+#include "binding/RingBufferParcelable.h"
+#include "binding/AudioEndpointParcelable.h"
+
+using android::NO_ERROR;
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+using namespace oboe;
+
+/**
+ * Container for information about the message queues plus
+ * general stream information needed by Oboe clients.
+ * It contains no addresses, just sizes, offsets and file descriptors for
+ * shared memory that can be passed through Binder.
+ */
+AudioEndpointParcelable::AudioEndpointParcelable() {}
+
+AudioEndpointParcelable::~AudioEndpointParcelable() {}
+
+/**
+ * Add the file descriptor to the table.
+ * @return index in table or negative error
+ */
+int32_t AudioEndpointParcelable::addFileDescriptor(int fd, int32_t sizeInBytes) {
+    if (mNumSharedMemories >= MAX_SHARED_MEMORIES) {
+        return OBOE_ERROR_OUT_OF_RANGE;
+    }
+    int32_t index = mNumSharedMemories++;
+    mSharedMemories[index].setup(fd, sizeInBytes);
+    return index;
+}
+
+/**
+ * The read and write must be symmetric.
+ */
+status_t AudioEndpointParcelable::writeToParcel(Parcel* parcel) const {
+    parcel->writeInt32(mNumSharedMemories);
+    for (int i = 0; i < mNumSharedMemories; i++) {
+        mSharedMemories[i].writeToParcel(parcel);
+    }
+    mUpMessageQueueParcelable.writeToParcel(parcel);
+    mDownMessageQueueParcelable.writeToParcel(parcel);
+    mUpDataQueueParcelable.writeToParcel(parcel);
+    mDownDataQueueParcelable.writeToParcel(parcel);
+    return NO_ERROR; // TODO check for errors above
+}
+
+status_t AudioEndpointParcelable::readFromParcel(const Parcel* parcel) {
+    parcel->readInt32(&mNumSharedMemories);
+    for (int i = 0; i < mNumSharedMemories; i++) {
+        mSharedMemories[i].readFromParcel(parcel);
+    }
+    mUpMessageQueueParcelable.readFromParcel(parcel);
+    mDownMessageQueueParcelable.readFromParcel(parcel);
+    mUpDataQueueParcelable.readFromParcel(parcel);
+    mDownDataQueueParcelable.readFromParcel(parcel);
+    return NO_ERROR; // TODO check for errors above
+}
+
+oboe_result_t AudioEndpointParcelable::resolve(EndpointDescriptor *descriptor) {
+    // TODO error check
+    mUpMessageQueueParcelable.resolve(mSharedMemories, &descriptor->upMessageQueueDescriptor);
+    mDownMessageQueueParcelable.resolve(mSharedMemories,
+                                        &descriptor->downMessageQueueDescriptor);
+    mUpDataQueueParcelable.resolve(mSharedMemories, &descriptor->upDataQueueDescriptor);
+    mDownDataQueueParcelable.resolve(mSharedMemories, &descriptor->downDataQueueDescriptor);
+    return OBOE_OK;
+}
+
+oboe_result_t AudioEndpointParcelable::validate() {
+    oboe_result_t result;
+    if (mNumSharedMemories < 0 || mNumSharedMemories > MAX_SHARED_MEMORIES) {
+        ALOGE("AudioEndpointParcelable invalid mNumSharedMemories = %d", mNumSharedMemories);
+        return OBOE_ERROR_INTERNAL;
+    }
+    for (int i = 0; i < mNumSharedMemories; i++) {
+        result = mSharedMemories[i].validate();
+        if (result != OBOE_OK) {
+            return result;
+        }
+    }
+    if ((result = mUpMessageQueueParcelable.validate()) != OBOE_OK) {
+        ALOGE("AudioEndpointParcelable invalid mUpMessageQueueParcelable = %d", result);
+        return result;
+    }
+    if ((result = mDownMessageQueueParcelable.validate()) != OBOE_OK) {
+        ALOGE("AudioEndpointParcelable invalid mDownMessageQueueParcelable = %d", result);
+        return result;
+    }
+    if ((result = mUpDataQueueParcelable.validate()) != OBOE_OK) {
+        ALOGE("AudioEndpointParcelable invalid mUpDataQueueParcelable = %d", result);
+        return result;
+    }
+    if ((result = mDownDataQueueParcelable.validate()) != OBOE_OK) {
+        ALOGE("AudioEndpointParcelable invalid mDownDataQueueParcelable = %d", result);
+        return result;
+    }
+    return OBOE_OK;
+}
+
+void AudioEndpointParcelable::dump() {
+    ALOGD("AudioEndpointParcelable ======================================= BEGIN");
+    ALOGD("AudioEndpointParcelable mNumSharedMemories = %d", mNumSharedMemories);
+    for (int i = 0; i < mNumSharedMemories; i++) {
+        mSharedMemories[i].dump();
+    }
+    ALOGD("AudioEndpointParcelable mUpMessageQueueParcelable =========");
+    mUpMessageQueueParcelable.dump();
+    ALOGD("AudioEndpointParcelable mDownMessageQueueParcelable =======");
+    mDownMessageQueueParcelable.dump();
+    ALOGD("AudioEndpointParcelable mUpDataQueueParcelable ============");
+    mUpDataQueueParcelable.dump();
+    ALOGD("AudioEndpointParcelable mDownDataQueueParcelable ==========");
+    mDownDataQueueParcelable.dump();
+    ALOGD("AudioEndpointParcelable ======================================= END");
+}
+
diff --git a/media/liboboe/src/binding/AudioEndpointParcelable.h b/media/liboboe/src/binding/AudioEndpointParcelable.h
new file mode 100644
index 0000000..6bdd8a4
--- /dev/null
+++ b/media/liboboe/src/binding/AudioEndpointParcelable.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_AUDIOENDPOINTPARCELABLE_H
+#define BINDING_AUDIOENDPOINTPARCELABLE_H
+
+#include <stdint.h>
+
+//#include <sys/mman.h>
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+#include "binding/OboeServiceDefinitions.h"
+#include "binding/RingBufferParcelable.h"
+
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+namespace oboe {
+
+/**
+ * Container for information about the message queues plus
+ * general stream information needed by Oboe clients.
+ * It contains no addresses, just sizes, offsets and file descriptors for
+ * shared memory that can be passed through Binder.
+ */
+class AudioEndpointParcelable : public Parcelable {
+public:
+    AudioEndpointParcelable();
+    virtual ~AudioEndpointParcelable();
+
+    /**
+     * Add the file descriptor to the table.
+     * @return index in table or negative error
+     */
+    int32_t addFileDescriptor(int fd, int32_t sizeInBytes);
+
+    virtual status_t writeToParcel(Parcel* parcel) const override;
+
+    virtual status_t readFromParcel(const Parcel* parcel) override;
+
+    oboe_result_t resolve(EndpointDescriptor *descriptor);
+
+    oboe_result_t validate();
+
+    void dump();
+
+public: // TODO add getters
+    // Set capacityInFrames to zero if Queue is unused.
+    RingBufferParcelable    mUpMessageQueueParcelable;   // server to client
+    RingBufferParcelable    mDownMessageQueueParcelable; // client to server
+    RingBufferParcelable    mUpDataQueueParcelable;      // e.g. record, could share same queue
+    RingBufferParcelable    mDownDataQueueParcelable;    // eg. playback
+
+private:
+    int32_t                 mNumSharedMemories = 0;
+    SharedMemoryParcelable  mSharedMemories[MAX_SHARED_MEMORIES];
+};
+
+} /* namespace oboe */
+
+#endif //BINDING_AUDIOENDPOINTPARCELABLE_H
diff --git a/media/liboboe/src/binding/IOboeAudioService.cpp b/media/liboboe/src/binding/IOboeAudioService.cpp
new file mode 100644
index 0000000..a3437b2
--- /dev/null
+++ b/media/liboboe/src/binding/IOboeAudioService.cpp
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <oboe/OboeDefinitions.h>
+
+#include "binding/AudioEndpointParcelable.h"
+#include "binding/OboeStreamRequest.h"
+#include "binding/OboeStreamConfiguration.h"
+#include "binding/IOboeAudioService.h"
+
+namespace android {
+
+/**
+ * This is used by the Oboe Client to talk to the Oboe Service.
+ *
+ * The order of parameters in the Parcels must match with code in OboeAudioService.cpp.
+ */
+class BpOboeAudioService : public BpInterface<IOboeAudioService>
+{
+public:
+    explicit BpOboeAudioService(const sp<IBinder>& impl)
+        : BpInterface<IOboeAudioService>(impl)
+    {
+    }
+
+    virtual oboe_handle_t openStream(oboe::OboeStreamRequest &request,
+                                     oboe::OboeStreamConfiguration &configuration) override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
+        request.writeToParcel(&data);
+        status_t err = remote()->transact(OPEN_STREAM, data, &reply);
+        if (err != NO_ERROR) {
+            return OBOE_ERROR_INTERNAL; // TODO consider another error
+        }
+        // parse reply
+        oboe_handle_t stream;
+        reply.readInt32(&stream);
+        configuration.readFromParcel(&reply);
+        return stream;
+    }
+
+    virtual oboe_result_t closeStream(int32_t streamHandle) override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
+        data.writeInt32(streamHandle);
+        status_t err = remote()->transact(CLOSE_STREAM, data, &reply);
+        if (err != NO_ERROR) {
+            return OBOE_ERROR_INTERNAL; // TODO consider another error
+        }
+        // parse reply
+        oboe_result_t res;
+        reply.readInt32(&res);
+        return res;
+    }
+
+    virtual oboe_result_t getStreamDescription(oboe_handle_t streamHandle,
+                                               AudioEndpointParcelable &parcelable) override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
+        data.writeInt32(streamHandle);
+        status_t err = remote()->transact(GET_STREAM_DESCRIPTION, data, &reply);
+        if (err != NO_ERROR) {
+            return OBOE_ERROR_INTERNAL; // TODO consider another error
+        }
+        // parse reply
+        parcelable.readFromParcel(&reply);
+        parcelable.dump();
+        oboe_result_t result = parcelable.validate();
+        if (result != OBOE_OK) {
+            return result;
+        }
+        reply.readInt32(&result);
+        return result;
+    }
+
+    // TODO should we wait for a reply?
+    virtual oboe_result_t startStream(oboe_handle_t streamHandle) override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
+        data.writeInt32(streamHandle);
+        status_t err = remote()->transact(START_STREAM, data, &reply);
+        if (err != NO_ERROR) {
+            return OBOE_ERROR_INTERNAL; // TODO consider another error
+        }
+        // parse reply
+        oboe_result_t res;
+        reply.readInt32(&res);
+        return res;
+    }
+
+    virtual oboe_result_t pauseStream(oboe_handle_t streamHandle) override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
+        data.writeInt32(streamHandle);
+        status_t err = remote()->transact(PAUSE_STREAM, data, &reply);
+        if (err != NO_ERROR) {
+            return OBOE_ERROR_INTERNAL; // TODO consider another error
+        }
+        // parse reply
+        oboe_result_t res;
+        reply.readInt32(&res);
+        return res;
+    }
+
+    virtual oboe_result_t flushStream(oboe_handle_t streamHandle) override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
+        data.writeInt32(streamHandle);
+        status_t err = remote()->transact(FLUSH_STREAM, data, &reply);
+        if (err != NO_ERROR) {
+            return OBOE_ERROR_INTERNAL; // TODO consider another error
+        }
+        // parse reply
+        oboe_result_t res;
+        reply.readInt32(&res);
+        return res;
+    }
+
+    virtual void tickle() override { // TODO remove after service thread implemented
+        Parcel data;
+        // send command
+        data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
+        remote()->transact(TICKLE, data, nullptr);
+    }
+
+    virtual oboe_result_t registerAudioThread(oboe_handle_t streamHandle, pid_t clientThreadId,
+                                              oboe_nanoseconds_t periodNanoseconds)
+    override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
+        data.writeInt32(streamHandle);
+        data.writeInt32((int32_t) clientThreadId);
+        data.writeInt64(periodNanoseconds);
+        status_t err = remote()->transact(REGISTER_AUDIO_THREAD, data, &reply);
+        if (err != NO_ERROR) {
+            return OBOE_ERROR_INTERNAL; // TODO consider another error
+        }
+        // parse reply
+        oboe_result_t res;
+        reply.readInt32(&res);
+        return res;
+    }
+
+    virtual oboe_result_t unregisterAudioThread(oboe_handle_t streamHandle, pid_t clientThreadId)
+    override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
+        data.writeInt32(streamHandle);
+        data.writeInt32((int32_t) clientThreadId);
+        status_t err = remote()->transact(UNREGISTER_AUDIO_THREAD, data, &reply);
+        if (err != NO_ERROR) {
+            return OBOE_ERROR_INTERNAL; // TODO consider another error
+        }
+        // parse reply
+        oboe_result_t res;
+        reply.readInt32(&res);
+        return res;
+    }
+
+};
+
+// Implement an interface to the service.
+// This is here so that you don't have to link with the liboboe static library.
+IMPLEMENT_META_INTERFACE(OboeAudioService, "IOboeAudioService");
+
+// The order of parameters in the Parcels must match the code in BpOboeAudioService.
+
+status_t BnOboeAudioService::onTransact(uint32_t code, const Parcel& data,
+                                        Parcel* reply, uint32_t flags) {
+    OboeStream stream;
+    OboeStreamRequest request;
+    OboeStreamConfiguration configuration;
+    pid_t pid;
+    oboe_nanoseconds_t nanoseconds;
+    oboe_result_t result;
+    ALOGV("BnOboeAudioService::onTransact(%i) %i", code, flags);
+    data.checkInterface(this);
+
+    switch(code) {
+        case OPEN_STREAM: {
+            request.readFromParcel(&data);
+            stream = openStream(request, configuration);
+            ALOGD("BnOboeAudioService::onTransact OPEN_STREAM 0x%08X", stream);
+            reply->writeInt32(stream);
+            configuration.writeToParcel(reply);
+            return NO_ERROR;
+        } break;
+
+        case CLOSE_STREAM: {
+            data.readInt32(&stream);
+            ALOGD("BnOboeAudioService::onTransact CLOSE_STREAM 0x%08X", stream);
+            result = closeStream(stream);
+            reply->writeInt32(result);
+            return NO_ERROR;
+        } break;
+
+        case GET_STREAM_DESCRIPTION: {
+            data.readInt32(&stream);
+            ALOGD("BnOboeAudioService::onTransact GET_STREAM_DESCRIPTION 0x%08X", stream);
+            oboe::AudioEndpointParcelable parcelable;
+            result = getStreamDescription(stream, parcelable);
+            if (result != OBOE_OK) {
+                return -1; // FIXME
+            }
+            parcelable.dump();
+            result = parcelable.validate();
+            if (result != OBOE_OK) {
+                return -1; // FIXME
+            }
+            parcelable.writeToParcel(reply);
+            reply->writeInt32(result);
+            return NO_ERROR;
+        } break;
+
+        case START_STREAM: {
+            data.readInt32(&stream);
+            result = startStream(stream);
+            ALOGD("BnOboeAudioService::onTransact START_STREAM 0x%08X, result = %d",
+                    stream, result);
+            reply->writeInt32(result);
+            return NO_ERROR;
+        } break;
+
+        case PAUSE_STREAM: {
+            data.readInt32(&stream);
+            result = pauseStream(stream);
+            ALOGD("BnOboeAudioService::onTransact PAUSE_STREAM 0x%08X, result = %d",
+                    stream, result);
+            reply->writeInt32(result);
+            return NO_ERROR;
+        } break;
+
+        case FLUSH_STREAM: {
+            data.readInt32(&stream);
+            result = flushStream(stream);
+            ALOGD("BnOboeAudioService::onTransact FLUSH_STREAM 0x%08X, result = %d",
+                    stream, result);
+            reply->writeInt32(result);
+            return NO_ERROR;
+        } break;
+
+        case REGISTER_AUDIO_THREAD: {
+            data.readInt32(&stream);
+            data.readInt32(&pid);
+            data.readInt64(&nanoseconds);
+            result = registerAudioThread(stream, pid, nanoseconds);
+            ALOGD("BnOboeAudioService::onTransact REGISTER_AUDIO_THREAD 0x%08X, result = %d",
+                    stream, result);
+            reply->writeInt32(result);
+            return NO_ERROR;
+        } break;
+
+        case UNREGISTER_AUDIO_THREAD: {
+            data.readInt32(&stream);
+            data.readInt32(&pid);
+            result = unregisterAudioThread(stream, pid);
+            ALOGD("BnOboeAudioService::onTransact UNREGISTER_AUDIO_THREAD 0x%08X, result = %d",
+                    stream, result);
+            reply->writeInt32(result);
+            return NO_ERROR;
+        } break;
+
+        case TICKLE: {
+            ALOGV("BnOboeAudioService::onTransact TICKLE");
+            tickle();
+            return NO_ERROR;
+        } break;
+
+        default:
+            // ALOGW("BnOboeAudioService::onTransact not handled %u", code);
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+} /* namespace android */
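
A minimal sketch, not part of the change above, of how a server process could publish this interface under the "OboeAudioService" name that the client looks up later in this patch. The OboeAudioServiceStub class, its placeholder return values, and the main() scaffolding are hypothetical; only IOboeAudioService, BnOboeAudioService, and the oboe types come from the patch.

#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <binder/ProcessState.h>
#include <utils/String16.h>

#include "binding/IOboeAudioService.h"

namespace android {

class OboeAudioServiceStub : public BnOboeAudioService {
public:
    oboe_handle_t openStream(OboeStreamRequest & /*request*/,
                             OboeStreamConfiguration & /*configuration*/) override {
        return 1; // a real service would allocate and return a stream handle
    }
    oboe_result_t closeStream(int32_t /*streamHandle*/) override { return OBOE_OK; }
    oboe_result_t getStreamDescription(oboe_handle_t /*streamHandle*/,
                                       AudioEndpointParcelable & /*parcelable*/) override {
        return OBOE_OK; // a real service would fill in the shared-memory queue regions
    }
    oboe_result_t startStream(oboe_handle_t /*streamHandle*/) override { return OBOE_OK; }
    oboe_result_t pauseStream(oboe_handle_t /*streamHandle*/) override { return OBOE_OK; }
    oboe_result_t flushStream(oboe_handle_t /*streamHandle*/) override { return OBOE_OK; }
    oboe_result_t registerAudioThread(oboe_handle_t /*streamHandle*/, pid_t /*clientThreadId*/,
                                      oboe_nanoseconds_t /*periodNanoseconds*/) override {
        return OBOE_OK;
    }
    oboe_result_t unregisterAudioThread(oboe_handle_t /*streamHandle*/,
                                        pid_t /*clientThreadId*/) override { return OBOE_OK; }
};

} // namespace android

int main() {
    // Register the stub with the ServiceManager, then serve incoming transactions.
    android::defaultServiceManager()->addService(
            android::String16("OboeAudioService"), new android::OboeAudioServiceStub());
    android::ProcessState::self()->startThreadPool();
    android::IPCThreadState::self()->joinThreadPool();
    return 0;
}
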
diff --git a/media/liboboe/src/binding/IOboeAudioService.h b/media/liboboe/src/binding/IOboeAudioService.h
new file mode 100644
index 0000000..4b4c99c
--- /dev/null
+++ b/media/liboboe/src/binding/IOboeAudioService.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_IOBOEAUDIOSERVICE_H
+#define BINDING_IOBOEAUDIOSERVICE_H
+
+#include <stdint.h>
+#include <utils/RefBase.h>
+#include <binder/TextOutput.h>
+#include <binder/IInterface.h>
+
+#include <oboe/OboeAudio.h>
+
+#include "binding/OboeServiceDefinitions.h"
+#include "binding/AudioEndpointParcelable.h"
+#include "binding/OboeStreamRequest.h"
+#include "binding/OboeStreamConfiguration.h"
+
+//using android::status_t;
+//using android::IInterface;
+//using android::BnInterface;
+
+using oboe::AudioEndpointParcelable;
+using oboe::OboeStreamRequest;
+using oboe::OboeStreamConfiguration;
+
+namespace android {
+
+// Interface (our AIDL) - Shared by server and client
+class IOboeAudioService : public IInterface {
+public:
+
+    DECLARE_META_INTERFACE(OboeAudioService);
+
+    virtual oboe_handle_t openStream(OboeStreamRequest &request,
+                                     OboeStreamConfiguration &configuration) = 0;
+
+    virtual oboe_result_t closeStream(int32_t streamHandle) = 0;
+
+    /**
+     * Get an immutable description of the in-memory queues
+     * used to communicate with the underlying HAL or Service.
+     */
+    virtual oboe_result_t getStreamDescription(oboe_handle_t streamHandle,
+                                               AudioEndpointParcelable &parcelable) = 0;
+
+    /**
+     * Start the flow of data.
+     */
+    virtual oboe_result_t startStream(oboe_handle_t streamHandle) = 0;
+
+    /**
+     * Stop the flow of data such that start() can resume without loss of data.
+     */
+    virtual oboe_result_t pauseStream(oboe_handle_t streamHandle) = 0;
+
+    /**
+     *  Discard any data held by the underlying HAL or Service.
+     */
+    virtual oboe_result_t flushStream(oboe_handle_t streamHandle) = 0;
+
+    /**
+     * Manage the specified thread as a low latency audio thread.
+     */
+    virtual oboe_result_t registerAudioThread(oboe_handle_t streamHandle, pid_t clientThreadId,
+                                              oboe_nanoseconds_t periodNanoseconds) = 0;
+
+    virtual oboe_result_t unregisterAudioThread(oboe_handle_t streamHandle,
+                                                pid_t clientThreadId) = 0;
+
+    /**
+     * Poke server instead of running a background thread.
+     * Cooperative multi-tasking for early development only.
+     * TODO remove tickle() when service has its own thread.
+     */
+    virtual void tickle() { };
+
+};
+
+class BnOboeAudioService : public BnInterface<IOboeAudioService> {
+public:
+    virtual status_t onTransact(uint32_t code, const Parcel& data,
+                                Parcel* reply, uint32_t flags = 0);
+
+};
+
+} /* namespace android */
+
+#endif //BINDING_IOBOEAUDIOSERVICE_H
diff --git a/media/liboboe/src/binding/OboeServiceDefinitions.h b/media/liboboe/src/binding/OboeServiceDefinitions.h
new file mode 100644
index 0000000..ad00fe2
--- /dev/null
+++ b/media/liboboe/src/binding/OboeServiceDefinitions.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_OBOESERVICEDEFINITIONS_H
+#define BINDING_OBOESERVICEDEFINITIONS_H
+
+#include <stdint.h>
+#include <utils/RefBase.h>
+#include <binder/TextOutput.h>
+#include <binder/IInterface.h>
+
+#include <oboe/OboeAudio.h>
+
+using android::NO_ERROR;
+using android::IBinder;
+
+namespace android {
+
+enum oboe_commands_t {
+    OPEN_STREAM = IBinder::FIRST_CALL_TRANSACTION,
+    CLOSE_STREAM,
+    GET_STREAM_DESCRIPTION,
+    START_STREAM,
+    PAUSE_STREAM,
+    FLUSH_STREAM,
+    REGISTER_AUDIO_THREAD,
+    UNREGISTER_AUDIO_THREAD,
+    TICKLE
+};
+
+} // namespace android
+
+namespace oboe {
+
+enum oboe_commands_t {
+    OPEN_STREAM = IBinder::FIRST_CALL_TRANSACTION,
+    CLOSE_STREAM,
+    GET_STREAM_DESCRIPTION,
+    START_STREAM,
+    PAUSE_STREAM,
+    FLUSH_STREAM,
+    REGISTER_AUDIO_THREAD,
+    UNREGISTER_AUDIO_THREAD,
+    TICKLE
+};
+
+// TODO Expand this to include all the open parameters.
+typedef struct OboeServiceStreamInfo_s {
+    int32_t             deviceId;
+    int32_t             samplesPerFrame;  // number of channels
+    oboe_sample_rate_t  sampleRate;
+    oboe_audio_format_t audioFormat;
+} OboeServiceStreamInfo;
+
+// This must be a fixed width so it can be in shared memory.
+enum RingbufferFlags : uint32_t {
+    NONE = 0,
+    RATE_ISOCHRONOUS = 0x0001,
+    RATE_ASYNCHRONOUS = 0x0002,
+    COHERENCY_DMA = 0x0004,
+    COHERENCY_ACQUIRE_RELEASE = 0x0008,
+    COHERENCY_AUTO = 0x0010,
+};
+
+// This is not passed through Binder.
+// Client side code will convert Binder data and fill this descriptor.
+typedef struct RingBufferDescriptor_s {
+    uint8_t* dataAddress;       // offset from read or write block
+    int64_t* writeCounterAddress;
+    int64_t* readCounterAddress;
+    int32_t  bytesPerFrame;     // index is in frames
+    int32_t  framesPerBurst;    // for ISOCHRONOUS queues
+    int32_t  capacityInFrames;  // zero if unused
+    RingbufferFlags flags;
+} RingBufferDescriptor;
+
+// This is not passed through Binder.
+// Client side code will convert Binder data and fill this descriptor.
+typedef struct EndpointDescriptor_s {
+    // Set capacityInFrames to zero if Queue is unused.
+    RingBufferDescriptor upMessageQueueDescriptor;   // server to client
+    RingBufferDescriptor downMessageQueueDescriptor; // client to server
+    RingBufferDescriptor upDataQueueDescriptor;      // eg. record
+    RingBufferDescriptor downDataQueueDescriptor;    // eg. playback
+} EndpointDescriptor;
+
+} // namespace oboe
+
+#endif //BINDING_OBOESERVICEDEFINITIONS_H
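
A short sketch, not part of the change above, of how client code might turn a resolved RingBufferDescriptor into a write address. The helper name and the wrap-around arithmetic are assumptions for illustration; only the struct fields come from the patch.

#include <stdint.h>

#include "binding/OboeServiceDefinitions.h"

namespace oboe {

// Assumes the counters hold monotonically increasing frame counts and that the
// buffer wraps at capacityInFrames.
static uint8_t *nextWriteAddress(const RingBufferDescriptor &rb) {
    int64_t framesWritten = *rb.writeCounterAddress;
    int64_t frameIndex = framesWritten % rb.capacityInFrames; // wrap into the buffer
    return rb.dataAddress + (frameIndex * rb.bytesPerFrame);
}

} // namespace oboe
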
diff --git a/media/liboboe/src/binding/OboeServiceMessage.h b/media/liboboe/src/binding/OboeServiceMessage.h
new file mode 100644
index 0000000..aa13571
--- /dev/null
+++ b/media/liboboe/src/binding/OboeServiceMessage.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_OBOE_SERVICE_MESSAGE_H
+#define OBOE_OBOE_SERVICE_MESSAGE_H
+
+#include <stdint.h>
+
+#include <oboe/OboeDefinitions.h>
+
+namespace oboe {
+
+// TODO move this to an "include" folder for the service.
+
+struct OboeMessageTimestamp {
+    oboe_position_frames_t position;
+    int64_t                deviceOffset; // add to client position to get device position
+    oboe_nanoseconds_t     timestamp;
+};
+
+typedef enum oboe_service_event_e : uint32_t {
+    OBOE_SERVICE_EVENT_STARTED,
+    OBOE_SERVICE_EVENT_PAUSED,
+    OBOE_SERVICE_EVENT_FLUSHED,
+    OBOE_SERVICE_EVENT_CLOSED,
+    OBOE_SERVICE_EVENT_DISCONNECTED
+} oboe_service_event_t;
+
+struct OboeMessageEvent {
+    oboe_service_event_t event;
+    int32_t data1;
+    int64_t data2;
+};
+
+typedef struct OboeServiceMessage_s {
+    enum class code : uint32_t {
+        NOTHING,
+        TIMESTAMP,
+        EVENT,
+    };
+
+    code what;
+    union {
+        OboeMessageTimestamp timestamp;
+        OboeMessageEvent event;
+    };
+} OboeServiceMessage;
+
+
+} /* namespace oboe */
+
+#endif //OBOE_OBOE_SERVICE_MESSAGE_H
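
A brief sketch, not part of the change above, of how a client might dispatch one message pulled from the up-command queue. The handler bodies are placeholders; only the message layout comes from the patch.

#define LOG_TAG "OboeAudio"
#include <utils/Log.h>

#include "binding/OboeServiceMessage.h"

namespace oboe {

static void handleServiceMessage(const OboeServiceMessage &message) {
    switch (message.what) {
    case OboeServiceMessage::code::TIMESTAMP:
        // Only the timestamp member of the union is valid for this code.
        ALOGD("timestamp position = %lld", (long long) message.timestamp.position);
        break;
    case OboeServiceMessage::code::EVENT:
        // Only the event member of the union is valid for this code.
        ALOGD("event = %d, data1 = %d", (int) message.event.event, message.event.data1);
        break;
    case OboeServiceMessage::code::NOTHING:
    default:
        break;
    }
}

} // namespace oboe
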
diff --git a/media/liboboe/src/binding/OboeStreamConfiguration.cpp b/media/liboboe/src/binding/OboeStreamConfiguration.cpp
new file mode 100644
index 0000000..4b8b5b2
--- /dev/null
+++ b/media/liboboe/src/binding/OboeStreamConfiguration.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+#include <oboe/OboeDefinitions.h>
+
+#include "binding/OboeStreamConfiguration.h"
+
+using android::NO_ERROR;
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+using namespace oboe;
+
+OboeStreamConfiguration::OboeStreamConfiguration() {}
+OboeStreamConfiguration::~OboeStreamConfiguration() {}
+
+status_t OboeStreamConfiguration::writeToParcel(Parcel* parcel) const {
+    parcel->writeInt32(mDeviceId);
+    parcel->writeInt32(mSampleRate);
+    parcel->writeInt32(mSamplesPerFrame);
+    parcel->writeInt32((int32_t) mAudioFormat);
+    return NO_ERROR; // TODO check for errors above
+}
+
+status_t OboeStreamConfiguration::readFromParcel(const Parcel* parcel) {
+    int32_t temp;
+    parcel->readInt32(&mDeviceId);
+    parcel->readInt32(&mSampleRate);
+    parcel->readInt32(&mSamplesPerFrame);
+    parcel->readInt32(&temp);
+    mAudioFormat = (oboe_audio_format_t) temp;
+    return NO_ERROR; // TODO check for errors above
+}
+
+oboe_result_t OboeStreamConfiguration::validate() {
+    // Validate results of the open.
+    if (mSampleRate < 0 || mSampleRate >= 8 * 48000) { // TODO review limits
+        ALOGE("OboeStreamConfiguration.validate(): invalid sampleRate = %d", mSampleRate);
+        return OBOE_ERROR_INTERNAL;
+    }
+
+    if (mSamplesPerFrame < 1 || mSamplesPerFrame >= 32) { // TODO review limits
+        ALOGE("OboeStreamConfiguration.validate() invalid samplesPerFrame = %d", mSamplesPerFrame);
+        return OBOE_ERROR_INTERNAL;
+    }
+
+    switch (mAudioFormat) {
+    case OBOE_AUDIO_FORMAT_PCM16:
+    case OBOE_AUDIO_FORMAT_PCM_FLOAT:
+    case OBOE_AUDIO_FORMAT_PCM824:
+    case OBOE_AUDIO_FORMAT_PCM32:
+        break;
+    default:
+        ALOGE("OboeStreamConfiguration.validate() invalid audioFormat = %d", mAudioFormat);
+        return OBOE_ERROR_INTERNAL;
+    }
+    return OBOE_OK;
+}
+
+void OboeStreamConfiguration::dump() {
+    ALOGD("OboeStreamConfiguration mSampleRate = %d -----", mSampleRate);
+    ALOGD("OboeStreamConfiguration mSamplesPerFrame = %d", mSamplesPerFrame);
+    ALOGD("OboeStreamConfiguration mAudioFormat = %d", (int)mAudioFormat);
+}
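
A small sketch, not part of the change above, that round-trips a configuration through a Parcel the way BpOboeAudioService and BnOboeAudioService do on either side of a transaction. The sample values are arbitrary.

#include <binder/Parcel.h>

#include "binding/OboeStreamConfiguration.h"

static oboe_result_t roundTripConfiguration() {
    oboe::OboeStreamConfiguration sent;
    sent.setSampleRate(48000);
    sent.setSamplesPerFrame(2);
    sent.setAudioFormat(OBOE_AUDIO_FORMAT_PCM16);

    android::Parcel parcel;
    sent.writeToParcel(&parcel);
    parcel.setDataPosition(0); // rewind, as the receiving side effectively does

    oboe::OboeStreamConfiguration received;
    received.readFromParcel(&parcel);
    return received.validate(); // OBOE_OK if the fields survived the trip
}
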
diff --git a/media/liboboe/src/binding/OboeStreamConfiguration.h b/media/liboboe/src/binding/OboeStreamConfiguration.h
new file mode 100644
index 0000000..6bc1924
--- /dev/null
+++ b/media/liboboe/src/binding/OboeStreamConfiguration.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_OBOE_STREAM_CONFIGURATION_H
+#define BINDING_OBOE_STREAM_CONFIGURATION_H
+
+#include <stdint.h>
+
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+#include <oboe/OboeDefinitions.h>
+
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+namespace oboe {
+
+class OboeStreamConfiguration : public Parcelable {
+public:
+    OboeStreamConfiguration();
+    virtual ~OboeStreamConfiguration();
+
+    oboe_device_id_t getDeviceId() const {
+        return mDeviceId;
+    }
+
+    void setDeviceId(oboe_device_id_t deviceId) {
+        mDeviceId = deviceId;
+    }
+
+    oboe_sample_rate_t getSampleRate() const {
+        return mSampleRate;
+    }
+
+    void setSampleRate(oboe_sample_rate_t sampleRate) {
+        mSampleRate = sampleRate;
+    }
+
+    int32_t getSamplesPerFrame() const {
+        return mSamplesPerFrame;
+    }
+
+    void setSamplesPerFrame(int32_t samplesPerFrame) {
+        mSamplesPerFrame = samplesPerFrame;
+    }
+
+    oboe_audio_format_t getAudioFormat() const {
+        return mAudioFormat;
+    }
+
+    void setAudioFormat(oboe_audio_format_t audioFormat) {
+        mAudioFormat = audioFormat;
+    }
+
+    virtual status_t writeToParcel(Parcel* parcel) const override;
+
+    virtual status_t readFromParcel(const Parcel* parcel) override;
+
+    oboe_result_t validate();
+
+    void dump();
+
+protected:
+    oboe_device_id_t    mDeviceId        = OBOE_DEVICE_UNSPECIFIED;
+    oboe_sample_rate_t  mSampleRate      = OBOE_UNSPECIFIED;
+    int32_t             mSamplesPerFrame = OBOE_UNSPECIFIED;
+    oboe_audio_format_t mAudioFormat     = OBOE_AUDIO_FORMAT_UNSPECIFIED;
+};
+
+} /* namespace oboe */
+
+#endif //BINDING_OBOE_STREAM_CONFIGURATION_H
diff --git a/media/liboboe/src/binding/OboeStreamRequest.cpp b/media/liboboe/src/binding/OboeStreamRequest.cpp
new file mode 100644
index 0000000..5d521d0
--- /dev/null
+++ b/media/liboboe/src/binding/OboeStreamRequest.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+#include <oboe/OboeDefinitions.h>
+
+#include "binding/OboeStreamConfiguration.h"
+#include "binding/OboeStreamRequest.h"
+
+using android::NO_ERROR;
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+using namespace oboe;
+
+OboeStreamRequest::OboeStreamRequest()
+    : mConfiguration()
+    {}
+
+OboeStreamRequest::~OboeStreamRequest() {}
+
+status_t OboeStreamRequest::writeToParcel(Parcel* parcel) const {
+    parcel->writeInt32((int32_t) mUserId);
+    parcel->writeInt32((int32_t) mProcessId);
+    mConfiguration.writeToParcel(parcel);
+    return NO_ERROR; // TODO check for errors above
+}
+
+status_t OboeStreamRequest::readFromParcel(const Parcel* parcel) {
+    int32_t temp;
+    parcel->readInt32(&temp);
+    mUserId = (uid_t) temp;
+    parcel->readInt32(&temp);
+    mProcessId = (pid_t) temp;
+    mConfiguration.readFromParcel(parcel);
+    return NO_ERROR; // TODO check for errors above
+}
+
+oboe_result_t OboeStreamRequest::validate() {
+    return mConfiguration.validate();
+}
+
+void OboeStreamRequest::dump() {
+    ALOGD("OboeStreamRequest mUserId = %d -----", mUserId);
+    ALOGD("OboeStreamRequest mProcessId = %d", mProcessId);
+    mConfiguration.dump();
+}
diff --git a/media/liboboe/src/binding/OboeStreamRequest.h b/media/liboboe/src/binding/OboeStreamRequest.h
new file mode 100644
index 0000000..aab3c97
--- /dev/null
+++ b/media/liboboe/src/binding/OboeStreamRequest.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_OBOE_STREAM_REQUEST_H
+#define BINDING_OBOE_STREAM_REQUEST_H
+
+#include <stdint.h>
+
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+#include <oboe/OboeDefinitions.h>
+
+#include "binding/OboeStreamConfiguration.h"
+
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+namespace oboe {
+
+class OboeStreamRequest : public Parcelable {
+public:
+    OboeStreamRequest();
+    virtual ~OboeStreamRequest();
+
+    uid_t getUserId() const {
+        return mUserId;
+    }
+
+    void setUserId(uid_t userId) {
+        mUserId = userId;
+    }
+
+    pid_t getProcessId() const {
+        return mProcessId;
+    }
+
+    void setProcessId(pid_t processId) {
+        mProcessId = processId;
+    }
+
+    OboeStreamConfiguration &getConfiguration() {
+        return mConfiguration;
+    }
+
+    virtual status_t writeToParcel(Parcel* parcel) const override;
+
+    virtual status_t readFromParcel(const Parcel* parcel) override;
+
+    oboe_result_t validate();
+
+    void dump();
+
+protected:
+    OboeStreamConfiguration  mConfiguration;
+    uid_t    mUserId;
+    pid_t    mProcessId;
+};
+
+} /* namespace oboe */
+
+#endif //BINDING_OBOE_STREAM_REQUEST_H
diff --git a/media/liboboe/src/binding/RingBufferParcelable.cpp b/media/liboboe/src/binding/RingBufferParcelable.cpp
new file mode 100644
index 0000000..f097655
--- /dev/null
+++ b/media/liboboe/src/binding/RingBufferParcelable.cpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <binder/Parcelable.h>
+
+#include "binding/OboeServiceDefinitions.h"
+#include "binding/SharedRegionParcelable.h"
+#include "binding/RingBufferParcelable.h"
+
+using namespace oboe;
+
+RingBufferParcelable::RingBufferParcelable() {}
+RingBufferParcelable::~RingBufferParcelable() {}
+
+// TODO This assumes that all three use the same SharedMemoryParcelable
+void RingBufferParcelable::setupMemory(int32_t sharedMemoryIndex,
+                 int32_t dataMemoryOffset,
+                 int32_t dataSizeInBytes,
+                 int32_t readCounterOffset,
+                 int32_t writeCounterOffset,
+                 int32_t counterSizeBytes) {
+    mReadCounterParcelable.setup(sharedMemoryIndex, readCounterOffset, counterSizeBytes);
+    mWriteCounterParcelable.setup(sharedMemoryIndex, writeCounterOffset, counterSizeBytes);
+    mDataParcelable.setup(sharedMemoryIndex, dataMemoryOffset, dataSizeInBytes);
+}
+
+void RingBufferParcelable::setupMemory(int32_t sharedMemoryIndex,
+                 int32_t dataMemoryOffset,
+                 int32_t dataSizeInBytes) {
+    mReadCounterParcelable.setup(sharedMemoryIndex, 0, 0);
+    mWriteCounterParcelable.setup(sharedMemoryIndex, 0, 0);
+    mDataParcelable.setup(sharedMemoryIndex, dataMemoryOffset, dataSizeInBytes);
+}
+
+int32_t RingBufferParcelable::getBytesPerFrame() {
+    return mBytesPerFrame;
+}
+
+void RingBufferParcelable::setBytesPerFrame(int32_t bytesPerFrame) {
+    mBytesPerFrame = bytesPerFrame;
+}
+
+int32_t RingBufferParcelable::getFramesPerBurst() {
+    return mFramesPerBurst;
+}
+
+void RingBufferParcelable::setFramesPerBurst(int32_t framesPerBurst) {
+    mFramesPerBurst = framesPerBurst;
+}
+
+int32_t RingBufferParcelable::getCapacityInFrames() {
+    return mCapacityInFrames;
+}
+
+void RingBufferParcelable::setCapacityInFrames(int32_t capacityInFrames) {
+    mCapacityInFrames = capacityInFrames;
+}
+
+/**
+ * writeToParcel() and readFromParcel() must stay symmetric.
+ */
+status_t RingBufferParcelable::writeToParcel(Parcel* parcel) const {
+    parcel->writeInt32(mCapacityInFrames);
+    if (mCapacityInFrames > 0) {
+        parcel->writeInt32(mBytesPerFrame);
+        parcel->writeInt32(mFramesPerBurst);
+        parcel->writeInt32(mFlags);
+        mReadCounterParcelable.writeToParcel(parcel);
+        mWriteCounterParcelable.writeToParcel(parcel);
+        mDataParcelable.writeToParcel(parcel);
+    }
+    return NO_ERROR; // TODO check for errors above
+}
+
+status_t RingBufferParcelable::readFromParcel(const Parcel* parcel) {
+    parcel->readInt32(&mCapacityInFrames);
+    if (mCapacityInFrames > 0) {
+        parcel->readInt32(&mBytesPerFrame);
+        parcel->readInt32(&mFramesPerBurst);
+        parcel->readInt32((int32_t *)&mFlags);
+        mReadCounterParcelable.readFromParcel(parcel);
+        mWriteCounterParcelable.readFromParcel(parcel);
+        mDataParcelable.readFromParcel(parcel);
+    }
+    return NO_ERROR; // TODO check for errors above
+}
+
+oboe_result_t RingBufferParcelable::resolve(SharedMemoryParcelable *memoryParcels, RingBufferDescriptor *descriptor) {
+    oboe_result_t result;
+
+    result = mReadCounterParcelable.resolve(memoryParcels,
+                                            (void **) &descriptor->readCounterAddress);
+    if (result != OBOE_OK) {
+        return result;
+    }
+
+    result = mWriteCounterParcelable.resolve(memoryParcels,
+                                             (void **) &descriptor->writeCounterAddress);
+    if (result != OBOE_OK) {
+        return result;
+    }
+
+    result = mDataParcelable.resolve(memoryParcels, (void **) &descriptor->dataAddress);
+    if (result != OBOE_OK) {
+        return result;
+    }
+
+    descriptor->bytesPerFrame = mBytesPerFrame;
+    descriptor->framesPerBurst = mFramesPerBurst;
+    descriptor->capacityInFrames = mCapacityInFrames;
+    descriptor->flags = mFlags;
+    return OBOE_OK;
+}
+
+oboe_result_t RingBufferParcelable::validate() {
+    oboe_result_t result;
+    if (mCapacityInFrames < 0 || mCapacityInFrames >= 32 * 1024) {
+        ALOGE("RingBufferParcelable invalid mCapacityInFrames = %d", mCapacityInFrames);
+        return OBOE_ERROR_INTERNAL;
+    }
+    if (mBytesPerFrame < 0 || mBytesPerFrame >= 256) {
+        ALOGE("RingBufferParcelable invalid mBytesPerFrame = %d", mBytesPerFrame);
+        return OBOE_ERROR_INTERNAL;
+    }
+    if (mFramesPerBurst < 0 || mFramesPerBurst >= 1024) {
+        ALOGE("RingBufferParcelable invalid mFramesPerBurst = %d", mFramesPerBurst);
+        return OBOE_ERROR_INTERNAL;
+    }
+    if ((result = mReadCounterParcelable.validate()) != OBOE_OK) {
+        ALOGE("RingBufferParcelable invalid mReadCounterParcelable = %d", result);
+        return result;
+    }
+    if ((result = mWriteCounterParcelable.validate()) != OBOE_OK) {
+        ALOGE("RingBufferParcelable invalid mWriteCounterParcelable = %d", result);
+        return result;
+    }
+    if ((result = mDataParcelable.validate()) != OBOE_OK) {
+        ALOGE("RingBufferParcelable invalid mDataParcelable = %d", result);
+        return result;
+    }
+    return OBOE_OK;
+}
+
+
+void RingBufferParcelable::dump() {
+    ALOGD("RingBufferParcelable mCapacityInFrames = %d ---------", mCapacityInFrames);
+    if (mCapacityInFrames > 0) {
+        ALOGD("RingBufferParcelable mBytesPerFrame = %d", mBytesPerFrame);
+        ALOGD("RingBufferParcelable mFramesPerBurst = %d", mFramesPerBurst);
+        ALOGD("RingBufferParcelable mFlags = %u", mFlags);
+        mReadCounterParcelable.dump();
+        mWriteCounterParcelable.dump();
+        mDataParcelable.dump();
+    }
+}
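
A sketch, not part of the change above, of how a service might describe one data queue that lives in shared memory region 0. The layout offsets and sizes are arbitrary example values; only the setters come from the patch.

#include <stdint.h>

#include "binding/RingBufferParcelable.h"

static void describeDataQueue(oboe::RingBufferParcelable &ringBuffer) {
    const int32_t bytesPerFrame     = 4;    // stereo PCM16
    const int32_t capacityInFrames  = 2048;
    const int32_t counterSizeBytes  = sizeof(int64_t);

    // Region 0 layout: [readCounter][writeCounter][data ...]
    const int32_t readCounterOffset  = 0;
    const int32_t writeCounterOffset = counterSizeBytes;
    const int32_t dataOffset         = 2 * counterSizeBytes;

    ringBuffer.setupMemory(0 /* sharedMemoryIndex */,
                           dataOffset,
                           capacityInFrames * bytesPerFrame,
                           readCounterOffset,
                           writeCounterOffset,
                           counterSizeBytes);
    ringBuffer.setBytesPerFrame(bytesPerFrame);
    ringBuffer.setFramesPerBurst(64);
    ringBuffer.setCapacityInFrames(capacityInFrames);
}
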
diff --git a/media/liboboe/src/binding/RingBufferParcelable.h b/media/liboboe/src/binding/RingBufferParcelable.h
new file mode 100644
index 0000000..9bb695a
--- /dev/null
+++ b/media/liboboe/src/binding/RingBufferParcelable.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_RINGBUFFER_PARCELABLE_H
+#define BINDING_RINGBUFFER_PARCELABLE_H
+
+#include <stdint.h>
+
+#include <binder/Parcelable.h>
+
+#include "binding/OboeServiceDefinitions.h"
+#include "binding/SharedRegionParcelable.h"
+
+namespace oboe {
+
+class RingBufferParcelable : public Parcelable {
+public:
+    RingBufferParcelable();
+    virtual ~RingBufferParcelable();
+
+    // TODO This assumes that all three use the same SharedMemoryParcelable
+    void setupMemory(int32_t sharedMemoryIndex,
+                     int32_t dataMemoryOffset,
+                     int32_t dataSizeInBytes,
+                     int32_t readCounterOffset,
+                     int32_t writeCounterOffset,
+                     int32_t counterSizeBytes);
+
+    void setupMemory(int32_t sharedMemoryIndex,
+                     int32_t dataMemoryOffset,
+                     int32_t dataSizeInBytes);
+
+    int32_t getBytesPerFrame();
+
+    void setBytesPerFrame(int32_t bytesPerFrame);
+
+    int32_t getFramesPerBurst();
+
+    void setFramesPerBurst(int32_t framesPerBurst);
+
+    int32_t getCapacityInFrames();
+
+    void setCapacityInFrames(int32_t capacityInFrames);
+
+    /**
+     * writeToParcel() and readFromParcel() must stay symmetric.
+     */
+    virtual status_t writeToParcel(Parcel* parcel) const override;
+
+    virtual status_t readFromParcel(const Parcel* parcel) override;
+
+    oboe_result_t resolve(SharedMemoryParcelable *memoryParcels, RingBufferDescriptor *descriptor);
+
+    oboe_result_t validate();
+
+    void dump();
+
+private:
+    SharedRegionParcelable  mReadCounterParcelable;
+    SharedRegionParcelable  mWriteCounterParcelable;
+    SharedRegionParcelable  mDataParcelable;
+    int32_t                 mBytesPerFrame = 0;     // index is in frames
+    int32_t                 mFramesPerBurst = 0;    // for ISOCHRONOUS queues
+    int32_t                 mCapacityInFrames = 0;  // zero if unused
+    RingbufferFlags         mFlags = RingbufferFlags::NONE;
+};
+
+} /* namespace oboe */
+
+#endif //BINDING_RINGBUFFER_PARCELABLE_H
diff --git a/media/liboboe/src/binding/SharedMemoryParcelable.cpp b/media/liboboe/src/binding/SharedMemoryParcelable.cpp
new file mode 100644
index 0000000..5b739c0
--- /dev/null
+++ b/media/liboboe/src/binding/SharedMemoryParcelable.cpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <oboe/OboeDefinitions.h>
+
+#include <binder/Parcelable.h>
+
+#include "binding/SharedMemoryParcelable.h"
+
+using android::NO_ERROR;
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+using namespace oboe;
+
+SharedMemoryParcelable::SharedMemoryParcelable() {}
+SharedMemoryParcelable::~SharedMemoryParcelable() {}
+
+void SharedMemoryParcelable::setup(int fd, int32_t sizeInBytes) {
+    mFd = fd;
+    mSizeInBytes = sizeInBytes;
+}
+
+status_t SharedMemoryParcelable::writeToParcel(Parcel* parcel) const {
+    parcel->writeInt32(mSizeInBytes);
+    if (mSizeInBytes > 0) {
+        parcel->writeDupFileDescriptor(mFd);
+    }
+    return NO_ERROR; // TODO check for errors above
+}
+
+status_t SharedMemoryParcelable::readFromParcel(const Parcel* parcel) {
+    parcel->readInt32(&mSizeInBytes);
+    if (mSizeInBytes > 0) {
+        mFd = dup(parcel->readFileDescriptor());
+    }
+    return NO_ERROR; // TODO check for errors above
+}
+
+// TODO Add code to munmap() the region when it is no longer needed.
+
+oboe_result_t SharedMemoryParcelable::resolve(int32_t offsetInBytes, int32_t sizeInBytes,
+                                              void **regionAddressPtr) {
+    if (offsetInBytes < 0) {
+        ALOGE("SharedMemoryParcelable illegal offsetInBytes = %d", offsetInBytes);
+        return OBOE_ERROR_OUT_OF_RANGE;
+    } else if ((offsetInBytes + sizeInBytes) > mSizeInBytes) {
+        ALOGE("SharedMemoryParcelable out of range, offsetInBytes = %d, "
+              "sizeInBytes = %d, mSizeInBytes = %d",
+              offsetInBytes, sizeInBytes, mSizeInBytes);
+        return OBOE_ERROR_OUT_OF_RANGE;
+    }
+    if (mResolvedAddress == nullptr) {
+        mResolvedAddress = (uint8_t *) mmap(0, mSizeInBytes, PROT_READ|PROT_WRITE,
+                                          MAP_SHARED, mFd, 0);
+        if (mResolvedAddress == MAP_FAILED) {
+            ALOGE("SharedMemoryParcelable mmap failed for fd = %d", mFd);
+            mResolvedAddress = nullptr;
+            return OBOE_ERROR_INTERNAL;
+        }
+    }
+    *regionAddressPtr = mResolvedAddress + offsetInBytes;
+    ALOGD("SharedMemoryParcelable mResolvedAddress = %p", mResolvedAddress);
+    ALOGD("SharedMemoryParcelable offset by %d, *regionAddressPtr = %p",
+          offsetInBytes, *regionAddressPtr);
+    return OBOE_OK;
+}
+
+int32_t SharedMemoryParcelable::getSizeInBytes() {
+    return mSizeInBytes;
+}
+
+oboe_result_t SharedMemoryParcelable::validate() {
+    if (mSizeInBytes < 0 || mSizeInBytes >= MAX_MMAP_SIZE) {
+        ALOGE("SharedMemoryParcelable invalid mSizeInBytes = %d", mSizeInBytes);
+        return OBOE_ERROR_INTERNAL;
+    }
+    if (mSizeInBytes > 0) {
+        if (mFd == -1) {
+            ALOGE("SharedMemoryParcelable uninitialized mFd = %d", mFd);
+            return OBOE_ERROR_INTERNAL;
+        }
+    }
+    return OBOE_OK;
+}
+
+void SharedMemoryParcelable::dump() {
+    ALOGD("SharedMemoryParcelable mFd = %d", mFd);
+    ALOGD("SharedMemoryParcelable mSizeInBytes = %d", mSizeInBytes);
+    ALOGD("SharedMemoryParcelable mResolvedAddress = %p", mResolvedAddress);
+}
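
A sketch, not part of the change above, of filling a SharedMemoryParcelable on the service side and resolving a region on the client side. It assumes the usual <cutils/ashmem.h> helper ashmem_create_region() is available to allocate the backing file descriptor; the sizes and offsets are arbitrary.

#include <cutils/ashmem.h>

#include <oboe/OboeDefinitions.h>
#include "binding/SharedMemoryParcelable.h"

static oboe_result_t shareAndResolve() {
    const int32_t sizeInBytes = 8 * 1024;
    int fd = ashmem_create_region("OboeSketch", sizeInBytes);
    if (fd < 0) {
        return OBOE_ERROR_INTERNAL;
    }

    oboe::SharedMemoryParcelable memory;
    memory.setup(fd, sizeInBytes);      // service side, before writeToParcel()

    void *regionAddress = nullptr;      // client side, after readFromParcel()
    return memory.resolve(0 /* offsetInBytes */, 1024 /* sizeInBytes */, &regionAddress);
}
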
diff --git a/media/liboboe/src/binding/SharedMemoryParcelable.h b/media/liboboe/src/binding/SharedMemoryParcelable.h
new file mode 100644
index 0000000..9585779
--- /dev/null
+++ b/media/liboboe/src/binding/SharedMemoryParcelable.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_SHAREDMEMORYPARCELABLE_H
+#define BINDING_SHAREDMEMORYPARCELABLE_H
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+namespace oboe {
+
+// Arbitrary limits for sanity checks. TODO remove after debugging.
+#define MAX_SHARED_MEMORIES (32)
+#define MAX_MMAP_OFFSET (32 * 1024)
+#define MAX_MMAP_SIZE (32 * 1024)
+
+/**
+ * This is a parcelable description of a shared memory referenced by a file descriptor.
+ * It may be divided into several regions.
+ */
+class SharedMemoryParcelable : public Parcelable {
+public:
+    SharedMemoryParcelable();
+    virtual ~SharedMemoryParcelable();
+
+    void setup(int fd, int32_t sizeInBytes);
+
+    virtual status_t writeToParcel(Parcel* parcel) const override;
+
+    virtual status_t readFromParcel(const Parcel* parcel) override;
+
+    oboe_result_t resolve(int32_t offsetInBytes, int32_t sizeInBytes, void **regionAddressPtr);
+
+    int32_t getSizeInBytes();
+
+    oboe_result_t validate();
+
+    void dump();
+
+protected:
+    int mFd = -1;
+    int32_t mSizeInBytes = 0;
+    uint8_t *mResolvedAddress = nullptr;
+};
+
+} /* namespace oboe */
+
+#endif //BINDING_SHAREDMEMORYPARCELABLE_H
diff --git a/media/liboboe/src/binding/SharedRegionParcelable.cpp b/media/liboboe/src/binding/SharedRegionParcelable.cpp
new file mode 100644
index 0000000..86ce8f3
--- /dev/null
+++ b/media/liboboe/src/binding/SharedRegionParcelable.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcelable.h>
+
+#include <oboe/OboeDefinitions.h>
+
+#include "binding/SharedMemoryParcelable.h"
+#include "binding/SharedRegionParcelable.h"
+
+using android::NO_ERROR;
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+using namespace oboe;
+
+SharedRegionParcelable::SharedRegionParcelable() {}
+SharedRegionParcelable::~SharedRegionParcelable() {}
+
+void SharedRegionParcelable::setup(int32_t sharedMemoryIndex,
+                                   int32_t offsetInBytes,
+                                   int32_t sizeInBytes) {
+    mSharedMemoryIndex = sharedMemoryIndex;
+    mOffsetInBytes = offsetInBytes;
+    mSizeInBytes = sizeInBytes;
+}
+
+status_t SharedRegionParcelable::writeToParcel(Parcel* parcel) const {
+    parcel->writeInt32(mSizeInBytes);
+    if (mSizeInBytes > 0) {
+        parcel->writeInt32(mSharedMemoryIndex);
+        parcel->writeInt32(mOffsetInBytes);
+    }
+    return NO_ERROR; // TODO check for errors above
+}
+
+status_t SharedRegionParcelable::readFromParcel(const Parcel* parcel) {
+    parcel->readInt32(&mSizeInBytes);
+    if (mSizeInBytes > 0) {
+        parcel->readInt32(&mSharedMemoryIndex);
+        parcel->readInt32(&mOffsetInBytes);
+    }
+    return NO_ERROR; // TODO check for errors above
+}
+
+oboe_result_t SharedRegionParcelable::resolve(SharedMemoryParcelable *memoryParcels,
+                                              void **regionAddressPtr) {
+    if (mSizeInBytes == 0) {
+        *regionAddressPtr = nullptr;
+        return OBOE_OK;
+    }
+    if (mSharedMemoryIndex < 0) {
+        ALOGE("SharedRegionParcelable invalid mSharedMemoryIndex = %d", mSharedMemoryIndex);
+        return OBOE_ERROR_INTERNAL;
+    }
+    SharedMemoryParcelable *memoryParcel = &memoryParcels[mSharedMemoryIndex];
+    return memoryParcel->resolve(mOffsetInBytes, mSizeInBytes, regionAddressPtr);
+}
+
+oboe_result_t SharedRegionParcelable::validate() {
+    if (mSizeInBytes < 0 || mSizeInBytes >= MAX_MMAP_SIZE) {
+        ALOGE("SharedRegionParcelable invalid mSizeInBytes = %d", mSizeInBytes);
+        return OBOE_ERROR_INTERNAL;
+    }
+    if (mSizeInBytes > 0) {
+        if (mOffsetInBytes < 0 || mOffsetInBytes >= MAX_MMAP_OFFSET) {
+            ALOGE("SharedRegionParcelable invalid mOffsetInBytes = %d", mOffsetInBytes);
+            return OBOE_ERROR_INTERNAL;
+        }
+        if (mSharedMemoryIndex < 0 || mSharedMemoryIndex >= MAX_SHARED_MEMORIES) {
+            ALOGE("SharedRegionParcelable invalid mSharedMemoryIndex = %d", mSharedMemoryIndex);
+            return OBOE_ERROR_INTERNAL;
+        }
+    }
+    return OBOE_OK;
+}
+
+void SharedRegionParcelable::dump() {
+    ALOGD("SharedRegionParcelable mSizeInBytes = %d -----", mSizeInBytes);
+    if (mSizeInBytes > 0) {
+        ALOGD("SharedRegionParcelable mSharedMemoryIndex = %d", mSharedMemoryIndex);
+        ALOGD("SharedRegionParcelable mOffsetInBytes = %d", mOffsetInBytes);
+    }
+}
diff --git a/media/liboboe/src/binding/SharedRegionParcelable.h b/media/liboboe/src/binding/SharedRegionParcelable.h
new file mode 100644
index 0000000..bccdaa8
--- /dev/null
+++ b/media/liboboe/src/binding/SharedRegionParcelable.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_SHAREDREGIONPARCELABLE_H
+#define BINDING_SHAREDREGIONPARCELABLE_H
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcelable.h>
+
+#include <oboe/OboeDefinitions.h>
+
+#include "binding/SharedMemoryParcelable.h"
+
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+namespace oboe {
+
+class SharedRegionParcelable : public Parcelable {
+public:
+    SharedRegionParcelable();
+    virtual ~SharedRegionParcelable();
+
+    void setup(int32_t sharedMemoryIndex, int32_t offsetInBytes, int32_t sizeInBytes);
+
+    virtual status_t writeToParcel(Parcel* parcel) const override;
+
+    virtual status_t readFromParcel(const Parcel* parcel) override;
+
+    oboe_result_t resolve(SharedMemoryParcelable *memoryParcels, void **regionAddressPtr);
+
+    oboe_result_t validate();
+
+    void dump();
+
+protected:
+    int32_t mSharedMemoryIndex = -1;
+    int32_t mOffsetInBytes     = 0;
+    int32_t mSizeInBytes       = 0;
+};
+
+} /* namespace oboe */
+
+#endif //BINDING_SHAREDREGIONPARCELABLE_H
diff --git a/media/liboboe/src/client/AudioEndpoint.cpp b/media/liboboe/src/client/AudioEndpoint.cpp
new file mode 100644
index 0000000..160c37e
--- /dev/null
+++ b/media/liboboe/src/client/AudioEndpoint.cpp
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "OboeAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <cassert>
+#include <oboe/OboeDefinitions.h>
+
+#include "AudioEndpointParcelable.h"
+#include "AudioEndpoint.h"
+#include "OboeServiceMessage.h"
+
+using namespace android;
+using namespace oboe;
+
+AudioEndpoint::AudioEndpoint()
+    : mOutputFreeRunning(false)
+    , mDataReadCounter(0)
+    , mDataWriteCounter(0)
+{
+}
+
+AudioEndpoint::~AudioEndpoint()
+{
+}
+
+static void AudioEndpoint_validateQueueDescriptor(const char *type,
+                                                  const RingBufferDescriptor *descriptor) {
+    assert(descriptor->capacityInFrames > 0);
+    assert(descriptor->bytesPerFrame > 1);
+    assert(descriptor->dataAddress != nullptr);
+    ALOGD("AudioEndpoint_validateQueueDescriptor %s, dataAddress at %p ====================",
+          type,
+          descriptor->dataAddress);
+    ALOGD("AudioEndpoint_validateQueueDescriptor  readCounter at %p, writeCounter at %p",
+          descriptor->readCounterAddress,
+          descriptor->writeCounterAddress);
+
+    // Try to READ from the data area.
+    uint8_t value = descriptor->dataAddress[0];
+    ALOGD("AudioEndpoint_validateQueueDescriptor() dataAddress[0] = %d, then try to write",
+        (int) value);
+    // Try to WRITE to the data area.
+    descriptor->dataAddress[0] = value;
+    ALOGD("AudioEndpoint_validateQueueDescriptor() wrote successfully");
+
+    if (descriptor->readCounterAddress) {
+        fifo_counter_t counter = *descriptor->readCounterAddress;
+        ALOGD("AudioEndpoint_validateQueueDescriptor() *readCounterAddress = %d, now write",
+              (int) counter);
+        *descriptor->readCounterAddress = counter;
+        ALOGD("AudioEndpoint_validateQueueDescriptor() wrote readCounterAddress successfully");
+    }
+    if (descriptor->writeCounterAddress) {
+        fifo_counter_t counter = *descriptor->writeCounterAddress;
+        ALOGD("AudioEndpoint_validateQueueDescriptor() *writeCounterAddress = %d, now write",
+              (int) counter);
+        *descriptor->writeCounterAddress = counter;
+        ALOGD("AudioEndpoint_validateQueueDescriptor() wrote writeCounterAddress successfully");
+    }
+}
+
+void AudioEndpoint_validateDescriptor(const EndpointDescriptor *pEndpointDescriptor) {
+    AudioEndpoint_validateQueueDescriptor("msg", &pEndpointDescriptor->upMessageQueueDescriptor);
+    AudioEndpoint_validateQueueDescriptor("data", &pEndpointDescriptor->downDataQueueDescriptor);
+}
+
+oboe_result_t AudioEndpoint::configure(const EndpointDescriptor *pEndpointDescriptor)
+{
+    oboe_result_t result = OBOE_OK;
+    AudioEndpoint_validateDescriptor(pEndpointDescriptor); // FIXME remove after debugging
+
+    const RingBufferDescriptor *descriptor = &pEndpointDescriptor->upMessageQueueDescriptor;
+    assert(descriptor->bytesPerFrame == sizeof(OboeServiceMessage));
+    assert(descriptor->readCounterAddress != nullptr);
+    assert(descriptor->writeCounterAddress != nullptr);
+    mUpCommandQueue = new FifoBuffer(
+            descriptor->bytesPerFrame,
+            descriptor->capacityInFrames,
+            descriptor->readCounterAddress,
+            descriptor->writeCounterAddress,
+            descriptor->dataAddress
+    );
+    /* TODO mDownCommandQueue
+    if (descriptor->capacityInFrames > 0) {
+        descriptor = &pEndpointDescriptor->downMessageQueueDescriptor;
+        mDownCommandQueue = new FifoBuffer(
+                descriptor->capacityInFrames,
+                descriptor->bytesPerFrame,
+                descriptor->readCounterAddress,
+                descriptor->writeCounterAddress,
+                descriptor->dataAddress
+        );
+    }
+     */
+    descriptor = &pEndpointDescriptor->downDataQueueDescriptor;
+    assert(descriptor->capacityInFrames > 0);
+    assert(descriptor->bytesPerFrame > 1);
+    assert(descriptor->bytesPerFrame < 4 * 16); // FIXME just for initial debugging
+    assert(descriptor->framesPerBurst > 0);
+    assert(descriptor->framesPerBurst < 8 * 1024); // FIXME just for initial debugging
+    assert(descriptor->dataAddress != nullptr);
+    ALOGD("AudioEndpoint::configure() data framesPerBurst = %d", descriptor->framesPerBurst);
+    ALOGD("AudioEndpoint::configure() data readCounterAddress = %p", descriptor->readCounterAddress);
+    mOutputFreeRunning = descriptor->readCounterAddress == nullptr;
+    ALOGD("AudioEndpoint::configure() mOutputFreeRunning = %d", mOutputFreeRunning ? 1 : 0);
+    int64_t *readCounterAddress = (descriptor->readCounterAddress == nullptr)
+                                  ? &mDataReadCounter
+                                  : descriptor->readCounterAddress;
+    int64_t *writeCounterAddress = (descriptor->writeCounterAddress == nullptr)
+                                  ? &mDataWriteCounter
+                                  : descriptor->writeCounterAddress;
+    mDownDataQueue = new FifoBuffer(
+            descriptor->bytesPerFrame,
+            descriptor->capacityInFrames,
+            readCounterAddress,
+            writeCounterAddress,
+            descriptor->dataAddress
+    );
+    uint32_t threshold = descriptor->capacityInFrames / 2;
+    mDownDataQueue->setThreshold(threshold);
+    return result;
+}
+
+oboe_result_t AudioEndpoint::readUpCommand(OboeServiceMessage *commandPtr)
+{
+    return mUpCommandQueue->read(commandPtr, 1);
+}
+
+oboe_result_t AudioEndpoint::writeDataNow(const void *buffer, int32_t numFrames)
+{
+    return mDownDataQueue->write(buffer, numFrames);
+}
+
+void AudioEndpoint::setDownDataReadCounter(fifo_counter_t framesRead)
+{
+    mDownDataQueue->setReadCounter(framesRead);
+}
+
+fifo_counter_t AudioEndpoint::getDownDataReadCounter()
+{
+    return mDownDataQueue->getReadCounter();
+}
+
+void AudioEndpoint::setDownDataWriteCounter(fifo_counter_t framesRead)
+{
+    mDownDataQueue->setWriteCounter(framesRead);
+}
+
+fifo_counter_t AudioEndpoint::getDownDataWriteCounter()
+{
+    return mDownDataQueue->getWriteCounter();
+}
+
+oboe_size_frames_t AudioEndpoint::setBufferSizeInFrames(oboe_size_frames_t requestedFrames,
+                                            oboe_size_frames_t *actualFrames)
+{
+    if (requestedFrames < ENDPOINT_DATA_QUEUE_SIZE_MIN) {
+        requestedFrames = ENDPOINT_DATA_QUEUE_SIZE_MIN;
+    }
+    mDownDataQueue->setThreshold(requestedFrames);
+    *actualFrames = mDownDataQueue->getThreshold();
+    return OBOE_OK;
+}
+
+int32_t AudioEndpoint::getBufferSizeInFrames() const
+{
+    return mDownDataQueue->getThreshold();
+}
+
+int32_t AudioEndpoint::getBufferCapacityInFrames() const
+{
+    return (int32_t)mDownDataQueue->getBufferCapacityInFrames();
+}
+
+int32_t AudioEndpoint::getFullFramesAvailable()
+{
+    return mDownDataQueue->getFifoControllerBase()->getFullFramesAvailable();
+}
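
A sketch, not part of the change above, of a non-blocking write through a configured AudioEndpoint. The caller and the buffer are hypothetical, and the room estimate ignores the threshold set by setBufferSizeInFrames(), which may further limit what fits.

#include <stdint.h>

#include "AudioEndpoint.h"

static int32_t writeBurst(oboe::AudioEndpoint &endpoint,
                          const int16_t *audioData,
                          int32_t numFrames) {
    // writeDataNow() does not block, so only offer as many frames as appear to fit.
    int32_t roomInFrames = endpoint.getBufferCapacityInFrames()
                           - endpoint.getFullFramesAvailable();
    int32_t framesToWrite = (numFrames < roomInFrames) ? numFrames : roomInFrames;
    return endpoint.writeDataNow(audioData, framesToWrite); // frames written or a negative error
}
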
diff --git a/media/liboboe/src/client/AudioEndpoint.h b/media/liboboe/src/client/AudioEndpoint.h
new file mode 100644
index 0000000..6ae8b72
--- /dev/null
+++ b/media/liboboe/src/client/AudioEndpoint.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_AUDIO_ENDPOINT_H
+#define OBOE_AUDIO_ENDPOINT_H
+
+#include <oboe/OboeAudio.h>
+
+#include "OboeServiceMessage.h"
+#include "AudioEndpointParcelable.h"
+#include "fifo/FifoBuffer.h"
+
+namespace oboe {
+
+#define ENDPOINT_DATA_QUEUE_SIZE_MIN   64
+
+/**
+ * A sink for audio.
+ * Used by the client code.
+ */
+class AudioEndpoint {
+
+public:
+    AudioEndpoint();
+    virtual ~AudioEndpoint();
+
+    /**
+     * Configure based on the EndPointDescriptor_t.
+     */
+    oboe_result_t configure(const EndpointDescriptor *pEndpointDescriptor);
+
+    /**
+     * Read from a command passed up from the Server.
+     * @return 1 if command received, 0 for no command, or negative error.
+     */
+    oboe_result_t readUpCommand(OboeServiceMessage *commandPtr);
+
+    /**
+     * Non-blocking write.
+     * @return framesWritten or a negative error code.
+     */
+    oboe_result_t writeDataNow(const void *buffer, int32_t numFrames);
+
+    /**
+     * Set the read index in the downData queue.
+     * This is needed if the reader is not updating the index itself.
+     */
+    void setDownDataReadCounter(fifo_counter_t framesRead);
+    fifo_counter_t getDownDataReadCounter();
+
+    void setDownDataWriteCounter(fifo_counter_t framesWritten);
+    fifo_counter_t getDownDataWriteCounter();
+
+    /**
+     * The result is not valid until after configure() is called.
+     *
+     * @return true if the output buffer read position is not updated, eg. DMA
+     */
+    bool isOutputFreeRunning() const { return mOutputFreeRunning; }
+
+    int32_t setBufferSizeInFrames(oboe_size_frames_t requestedFrames,
+                                  oboe_size_frames_t *actualFrames);
+    oboe_size_frames_t getBufferSizeInFrames() const;
+
+    oboe_size_frames_t getBufferCapacityInFrames() const;
+
+    oboe_size_frames_t getFullFramesAvailable();
+
+private:
+    FifoBuffer   * mUpCommandQueue;
+    FifoBuffer   * mDownDataQueue;
+    bool           mOutputFreeRunning;
+    fifo_counter_t mDataReadCounter; // only used if free-running
+    fifo_counter_t mDataWriteCounter; // only used if free-running
+};
+
+} // namespace oboe
+
+#endif //OBOE_AUDIO_ENDPOINT_H
diff --git a/media/liboboe/src/client/AudioStreamInternal.cpp b/media/liboboe/src/client/AudioStreamInternal.cpp
new file mode 100644
index 0000000..0d169e1
--- /dev/null
+++ b/media/liboboe/src/client/AudioStreamInternal.cpp
@@ -0,0 +1,528 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "OboeAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <assert.h>
+
+#include <binder/IServiceManager.h>
+
+#include <oboe/OboeAudio.h>
+
+#include "AudioClock.h"
+#include "AudioEndpointParcelable.h"
+#include "binding/OboeStreamRequest.h"
+#include "binding/OboeStreamConfiguration.h"
+#include "binding/IOboeAudioService.h"
+#include "binding/OboeServiceMessage.h"
+
+#include "AudioStreamInternal.h"
+
+#define LOG_TIMESTAMPS   0
+
+using android::String16;
+using android::IServiceManager;
+using android::defaultServiceManager;
+using android::interface_cast;
+
+using namespace oboe;
+
+// Helper function to get access to the "OboeAudioService" service.
+static sp<IOboeAudioService> getOboeAudioService() {
+    sp<IServiceManager> sm = defaultServiceManager();
+    sp<IBinder> binder = sm->getService(String16("OboeAudioService"));
+    // TODO: If the "OboeAudioService" service is not running, getService times out and binder == 0.
+    sp<IOboeAudioService> service = interface_cast<IOboeAudioService>(binder);
+    return service;
+}
+
+AudioStreamInternal::AudioStreamInternal()
+        : AudioStream()
+        , mClockModel()
+        , mAudioEndpoint()
+        , mServiceStreamHandle(OBOE_HANDLE_INVALID)
+        , mFramesPerBurst(16)
+{
+    // TODO protect against mService being NULL;
+    // TODO Model access to the service on frameworks/av/media/libaudioclient/AudioSystem.cpp
+    mService = getOboeAudioService();
+}
+
+AudioStreamInternal::~AudioStreamInternal() {
+}
+
+oboe_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
+
+    oboe_result_t result = OBOE_OK;
+    OboeStreamRequest request;
+    OboeStreamConfiguration configuration;
+
+    result = AudioStream::open(builder);
+    if (result < 0) {
+        return result;
+    }
+
+    // Build the request.
+    request.setUserId(getuid());
+    request.setProcessId(getpid());
+    request.getConfiguration().setDeviceId(getDeviceId());
+    request.getConfiguration().setSampleRate(getSampleRate());
+    request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
+    request.getConfiguration().setAudioFormat(getFormat());
+    request.dump();
+
+    mServiceStreamHandle = mService->openStream(request, configuration);
+    ALOGD("AudioStreamInternal.open(): openStream returned mServiceStreamHandle = 0x%08X",
+         (unsigned int)mServiceStreamHandle);
+    if (mServiceStreamHandle < 0) {
+        result = mServiceStreamHandle;
+        ALOGE("AudioStreamInternal.open(): acquireRealtimeStream oboe_result_t = 0x%08X", result);
+    } else {
+        result = configuration.validate();
+        if (result != OBOE_OK) {
+            close();
+            return result;
+        }
+        // Save results of the open.
+        setSampleRate(configuration.getSampleRate());
+        setSamplesPerFrame(configuration.getSamplesPerFrame());
+        setFormat(configuration.getAudioFormat());
+
+        oboe::AudioEndpointParcelable parcelable;
+        result = mService->getStreamDescription(mServiceStreamHandle, parcelable);
+        if (result != OBOE_OK) {
+            ALOGE("AudioStreamInternal.open(): getStreamDescription returned %d", result);
+            mService->closeStream(mServiceStreamHandle);
+            return result;
+        }
+        // resolve parcelable into a descriptor
+        parcelable.resolve(&mEndpointDescriptor);
+
+        // Configure endpoint based on descriptor.
+        mAudioEndpoint.configure(&mEndpointDescriptor);
+
+
+        mFramesPerBurst = mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
+        assert(mFramesPerBurst >= 16);
+        assert(mEndpointDescriptor.downDataQueueDescriptor.capacityInFrames < 10 * 1024);
+
+        mClockModel.setSampleRate(getSampleRate());
+        mClockModel.setFramesPerBurst(mFramesPerBurst);
+
+        setState(OBOE_STREAM_STATE_OPEN);
+    }
+    return result;
+}
+
+oboe_result_t AudioStreamInternal::close() {
+    ALOGD("AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X", mServiceStreamHandle);
+    if (mServiceStreamHandle != OBOE_HANDLE_INVALID) {
+        mService->closeStream(mServiceStreamHandle);
+        mServiceStreamHandle = OBOE_HANDLE_INVALID;
+        return OBOE_OK;
+    } else {
+        return OBOE_ERROR_INVALID_STATE;
+    }
+}
+
+oboe_result_t AudioStreamInternal::requestStart()
+{
+    oboe_nanoseconds_t startTime;
+    ALOGD("AudioStreamInternal(): start()");
+    if (mServiceStreamHandle == OBOE_HANDLE_INVALID) {
+        return OBOE_ERROR_INVALID_STATE;
+    }
+    startTime = Oboe_getNanoseconds(OBOE_CLOCK_MONOTONIC);
+    mClockModel.start(startTime);
+    processTimestamp(0, startTime);
+    setState(OBOE_STREAM_STATE_STARTING);
+    return mService->startStream(mServiceStreamHandle);
+}
+
+oboe_result_t AudioStreamInternal::requestPause()
+{
+    ALOGD("AudioStreamInternal(): pause()");
+    if (mServiceStreamHandle == OBOE_HANDLE_INVALID) {
+        return OBOE_ERROR_INVALID_STATE;
+    }
+    mClockModel.stop(Oboe_getNanoseconds(OBOE_CLOCK_MONOTONIC));
+    setState(OBOE_STREAM_STATE_PAUSING);
+    return mService->pauseStream(mServiceStreamHandle);
+}
+
+oboe_result_t AudioStreamInternal::requestFlush() {
+    ALOGD("AudioStreamInternal(): flush()");
+    if (mServiceStreamHandle == OBOE_HANDLE_INVALID) {
+        return OBOE_ERROR_INVALID_STATE;
+    }
+    setState(OBOE_STREAM_STATE_FLUSHING);
+    return mService->flushStream(mServiceStreamHandle);
+}
+
+void AudioStreamInternal::onFlushFromServer() {
+    ALOGD("AudioStreamInternal(): onFlushFromServer()");
+    oboe_position_frames_t readCounter = mAudioEndpoint.getDownDataReadCounter();
+    oboe_position_frames_t writeCounter = mAudioEndpoint.getDownDataWriteCounter();
+    // Bump offset so caller does not see the retrograde motion in getFramesRead().
+    oboe_position_frames_t framesFlushed = writeCounter - readCounter;
+    mFramesOffsetFromService += framesFlushed;
+    // Flush written frames by forcing writeCounter to readCounter.
+    // This is because we cannot move the read counter in the hardware.
+    mAudioEndpoint.setDownDataWriteCounter(readCounter);
+}
+
+oboe_result_t AudioStreamInternal::requestStop()
+{
+    // TODO better implementation of requestStop()
+    oboe_result_t result = requestPause();
+    if (result == OBOE_OK) {
+        oboe_stream_state_t state;
+        result = waitForStateChange(OBOE_STREAM_STATE_PAUSING,
+                                    &state,
+                                    500 * OBOE_NANOS_PER_MILLISECOND);// TODO temporary code
+        if (result == OBOE_OK) {
+            result = requestFlush();
+        }
+    }
+    return result;
+}
+
+oboe_result_t AudioStreamInternal::registerThread() {
+    ALOGD("AudioStreamInternal(): registerThread()");
+    if (mServiceStreamHandle == OBOE_HANDLE_INVALID) {
+        return OBOE_ERROR_INVALID_STATE;
+    }
+    return mService->registerAudioThread(mServiceStreamHandle,
+                                         gettid(),
+                                         getPeriodNanoseconds());
+}
+
+oboe_result_t AudioStreamInternal::unregisterThread() {
+    ALOGD("AudioStreamInternal(): unregisterThread()");
+    if (mServiceStreamHandle == OBOE_HANDLE_INVALID) {
+        return OBOE_ERROR_INVALID_STATE;
+    }
+    return mService->unregisterAudioThread(mServiceStreamHandle, gettid());
+}
+
+// TODO use oboe_clockid_t all the way down to AudioClock
+oboe_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
+                           oboe_position_frames_t *framePosition,
+                           oboe_nanoseconds_t *timeNanoseconds) {
+// TODO implement using real HAL
+    oboe_nanoseconds_t time = AudioClock::getNanoseconds();
+    *framePosition = mClockModel.convertTimeToPosition(time);
+    *timeNanoseconds = time + (10 * OBOE_NANOS_PER_MILLISECOND); // Fake hardware delay
+    return OBOE_OK;
+}
+
+oboe_result_t AudioStreamInternal::updateState() {
+    return processCommands();
+}
+
+#if LOG_TIMESTAMPS
+static void AudioStreamInternal_LogTimestamp(OboeServiceMessage &command) {
+    static int64_t oldPosition = 0;
+    static oboe_nanoseconds_t oldTime = 0;
+    int64_t framePosition = command.timestamp.position;
+    oboe_nanoseconds_t nanoTime = command.timestamp.timestamp;
+    ALOGD("AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu",
+         (long long) framePosition,
+         (long long) nanoTime);
+    int64_t nanosDelta = nanoTime - oldTime;
+    if (nanosDelta > 0 && oldTime > 0) {
+        int64_t framesDelta = framePosition - oldPosition;
+        int64_t rate = (framesDelta * OBOE_NANOS_PER_SECOND) / nanosDelta;
+        ALOGD("AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
+        ALOGD("AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
+        ALOGD("AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate);
+    }
+    oldPosition = framePosition;
+    oldTime = nanoTime;
+}
+#endif
+
+oboe_result_t AudioStreamInternal::onTimestampFromServer(OboeServiceMessage *message) {
+    oboe_position_frames_t framePosition = 0;
+#if LOG_TIMESTAMPS
+    AudioStreamInternal_LogTimestamp(*message);
+#endif
+    framePosition = message->timestamp.position;
+    processTimestamp(framePosition, message->timestamp.timestamp);
+    return OBOE_OK;
+}
+
+oboe_result_t AudioStreamInternal::onEventFromServer(OboeServiceMessage *message) {
+    oboe_result_t result = OBOE_OK;
+    ALOGD("processCommands() got event %d", message->event.event);
+    switch (message->event.event) {
+        case OBOE_SERVICE_EVENT_STARTED:
+            ALOGD("processCommands() got OBOE_SERVICE_EVENT_STARTED");
+            setState(OBOE_STREAM_STATE_STARTED);
+            break;
+        case OBOE_SERVICE_EVENT_PAUSED:
+            ALOGD("processCommands() got OBOE_SERVICE_EVENT_PAUSED");
+            setState(OBOE_STREAM_STATE_PAUSED);
+            break;
+        case OBOE_SERVICE_EVENT_FLUSHED:
+            ALOGD("processCommands() got OBOE_SERVICE_EVENT_FLUSHED");
+            setState(OBOE_STREAM_STATE_FLUSHED);
+            onFlushFromServer();
+            break;
+        case OBOE_SERVICE_EVENT_CLOSED:
+            ALOGD("processCommands() got OBOE_SERVICE_EVENT_CLOSED");
+            setState(OBOE_STREAM_STATE_CLOSED);
+            break;
+        case OBOE_SERVICE_EVENT_DISCONNECTED:
+            result = OBOE_ERROR_DISCONNECTED;
+            ALOGW("WARNING - processCommands() OBOE_SERVICE_EVENT_DISCONNECTED");
+            break;
+        default:
+            ALOGW("WARNING - processCommands() Unrecognized event = %d",
+                 (int) message->event.event);
+            break;
+    }
+    return result;
+}
+
+// Process all the commands coming from the server.
+oboe_result_t AudioStreamInternal::processCommands() {
+    oboe_result_t result = OBOE_OK;
+
+    // Let the service run in case it is a fake service simulator.
+    mService->tickle(); // TODO use real service thread
+
+    while (result == OBOE_OK) {
+        OboeServiceMessage message;
+        if (mAudioEndpoint.readUpCommand(&message) != 1) {
+            break; // no command this time, no problem
+        }
+        switch (message.what) {
+        case OboeServiceMessage::code::TIMESTAMP:
+            result = onTimestampFromServer(&message);
+            break;
+
+        case OboeServiceMessage::code::EVENT:
+            result = onEventFromServer(&message);
+            break;
+
+        default:
+            ALOGW("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
+                 (int) message.what);
+            result = OBOE_ERROR_UNEXPECTED_VALUE;
+            break;
+        }
+    }
+    return result;
+}
+
+// Write the data, blocking if needed while timeoutNanoseconds > 0.
+oboe_result_t AudioStreamInternal::write(const void *buffer, int32_t numFrames,
+                                         oboe_nanoseconds_t timeoutNanoseconds)
+{
+    oboe_result_t result = OBOE_OK;
+    uint8_t* source = (uint8_t*)buffer;
+    oboe_nanoseconds_t currentTimeNanos = AudioClock::getNanoseconds();
+    oboe_nanoseconds_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
+    int32_t framesLeft = numFrames;
+//    ALOGD("AudioStreamInternal::write(%p, %d) at time %08llu , mState = %d ------------------",
+//         buffer, numFrames, (unsigned long long) currentTimeNanos, mState);
+
+    // Write until all the data has been written or until a timeout occurs.
+    while (framesLeft > 0) {
+        // The call to writeNow() will not block. It will just write as much as it can.
+        oboe_nanoseconds_t wakeTimeNanos = 0;
+        oboe_result_t framesWritten = writeNow(source, framesLeft,
+                                               currentTimeNanos, &wakeTimeNanos);
+//        ALOGD("AudioStreamInternal::write() writeNow() framesLeft = %d --> framesWritten = %d", framesLeft, framesWritten);
+        if (framesWritten < 0) {
+            result = framesWritten;
+            break;
+        }
+        framesLeft -= (int32_t) framesWritten;
+        source += framesWritten * getBytesPerFrame();
+
+        // Should we block?
+        if (timeoutNanoseconds == 0) {
+            break; // don't block
+        } else if (framesLeft > 0) {
+            //ALOGD("AudioStreamInternal:: original wakeTimeNanos %lld", (long long) wakeTimeNanos);
+            // clip the wake time to something reasonable
+            if (wakeTimeNanos < currentTimeNanos) {
+                wakeTimeNanos = currentTimeNanos;
+            }
+            if (wakeTimeNanos > deadlineNanos) {
+                // If we time out, just return the framesWritten so far.
+                ALOGE("AudioStreamInternal::write(): timed out after %lld nanos", (long long) timeoutNanoseconds);
+                break;
+            }
+
+            //ALOGD("AudioStreamInternal:: sleep until %lld, dur = %lld", (long long) wakeTimeNanos,
+            //        (long long) (wakeTimeNanos - currentTimeNanos));
+            AudioClock::sleepForNanos(wakeTimeNanos - currentTimeNanos);
+            currentTimeNanos = AudioClock::getNanoseconds();
+        }
+    }
+
+    // return error or framesWritten
+    return (result < 0) ? result : numFrames - framesLeft;
+}
+
+// Write as much data as we can without blocking.
+oboe_result_t AudioStreamInternal::writeNow(const void *buffer, int32_t numFrames,
+                                         oboe_nanoseconds_t currentNanoTime, oboe_nanoseconds_t *wakeTimePtr) {
+    {
+        oboe_result_t result = processCommands();
+        if (result != OBOE_OK) {
+            return result;
+        }
+    }
+
+    if (mAudioEndpoint.isOutputFreeRunning()) {
+        // Update data queue based on the timing model.
+        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
+        mAudioEndpoint.setDownDataReadCounter(estimatedReadCounter);
+        // If the read index passed the write index then consider it an underrun.
+        if (mAudioEndpoint.getFullFramesAvailable() < 0) {
+            mXRunCount++;
+        }
+    }
+    // TODO else maybe query the read counter from the endpoint, since the actual reader updates it
+
+    // Write some data to the buffer.
+    int32_t framesWritten = mAudioEndpoint.writeDataNow(buffer, numFrames);
+    if (framesWritten > 0) {
+        incrementFramesWritten(framesWritten);
+    }
+    //ALOGD("AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
+    //    numFrames, framesWritten);
+
+    // Calculate an ideal time to wake up.
+    if (wakeTimePtr != nullptr && framesWritten >= 0) {
+        // By default wake up a few milliseconds from now.  // TODO review
+        oboe_nanoseconds_t wakeTime = currentNanoTime + (2 * OBOE_NANOS_PER_MILLISECOND);
+        switch (getState()) {
+            case OBOE_STREAM_STATE_OPEN:
+            case OBOE_STREAM_STATE_STARTING:
+                if (framesWritten != 0) {
+                    // Don't wait to write more data. Just prime the buffer.
+                    wakeTime = currentNanoTime;
+                }
+                break;
+            case OBOE_STREAM_STATE_STARTED:   // When do we expect the next read burst to occur?
+                {
+                    uint32_t burstSize = mFramesPerBurst;
+                    if (burstSize < 32) {
+                        burstSize = 32; // TODO review
+                    }
+
+                    uint64_t nextReadPosition = mAudioEndpoint.getDownDataReadCounter() + burstSize;
+                    wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
+                }
+                break;
+            default:
+                break;
+        }
+        *wakeTimePtr = wakeTime;
+
+    }
+//    ALOGD("AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
+//         (unsigned long long)currentNanoTime,
+//         (unsigned long long)mAudioEndpoint.getDownDataReadCounter(),
+//         (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
+    return framesWritten;
+}
+
+oboe_result_t AudioStreamInternal::waitForStateChange(oboe_stream_state_t currentState,
+                                                      oboe_stream_state_t *nextState,
+                                                      oboe_nanoseconds_t timeoutNanoseconds)
+{
+    oboe_result_t result = processCommands();
+//    ALOGD("AudioStreamInternal::waitForStateChange() - processCommands() returned %d", result);
+    if (result != OBOE_OK) {
+        return result;
+    }
+    // TODO replace this polling with a timed sleep on a futex on the message queue
+    oboe_nanoseconds_t durationNanos = 5 * OBOE_NANOS_PER_MILLISECOND;
+    oboe_stream_state_t state = getState();
+//    ALOGD("AudioStreamInternal::waitForStateChange() - state = %d", state);
+    while (state == currentState && timeoutNanoseconds > 0) {
+        // TODO use futex from service message queue
+        if (durationNanos > timeoutNanoseconds) {
+            durationNanos = timeoutNanoseconds;
+        }
+        AudioClock::sleepForNanos(durationNanos);
+        timeoutNanoseconds -= durationNanos;
+
+        result = processCommands();
+        if (result != OBOE_OK) {
+            return result;
+        }
+
+        state = getState();
+//        ALOGD("AudioStreamInternal::waitForStateChange() - state = %d", state);
+    }
+    if (nextState != nullptr) {
+        *nextState = state;
+    }
+    return (state == currentState) ? OBOE_ERROR_TIMEOUT : OBOE_OK;
+}
+
+
+void AudioStreamInternal::processTimestamp(uint64_t position, oboe_nanoseconds_t time) {
+    mClockModel.processTimestamp(position, time);
+}
+
+oboe_result_t AudioStreamInternal::setBufferSize(oboe_size_frames_t requestedFrames,
+                                        oboe_size_frames_t *actualFrames) {
+    return mAudioEndpoint.setBufferSizeInFrames(requestedFrames, actualFrames);
+}
+
+oboe_size_frames_t AudioStreamInternal::getBufferSize() const
+{
+    return mAudioEndpoint.getBufferSizeInFrames();
+}
+
+oboe_size_frames_t AudioStreamInternal::getBufferCapacity() const
+{
+    return mAudioEndpoint.getBufferCapacityInFrames();
+}
+
+oboe_size_frames_t AudioStreamInternal::getFramesPerBurst() const
+{
+    return mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
+}
+
+oboe_position_frames_t AudioStreamInternal::getFramesRead()
+{
+    oboe_position_frames_t framesRead =
+            mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
+            + mFramesOffsetFromService;
+    // Prevent retrograde motion.
+    if (framesRead < mLastFramesRead) {
+        framesRead = mLastFramesRead;
+    } else {
+        mLastFramesRead = framesRead;
+    }
+    ALOGD("AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
+    return framesRead;
+}
+
+// TODO implement getTimestamp
diff --git a/media/liboboe/src/client/AudioStreamInternal.h b/media/liboboe/src/client/AudioStreamInternal.h
new file mode 100644
index 0000000..6f37761
--- /dev/null
+++ b/media/liboboe/src/client/AudioStreamInternal.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_AUDIOSTREAMINTERNAL_H
+#define OBOE_AUDIOSTREAMINTERNAL_H
+
+#include <stdint.h>
+#include <oboe/OboeAudio.h>
+
+#include "binding/IOboeAudioService.h"
+#include "binding/AudioEndpointParcelable.h"
+#include "client/IsochronousClockModel.h"
+#include "client/AudioEndpoint.h"
+#include "core/AudioStream.h"
+
+using android::sp;
+using android::IOboeAudioService;
+
+namespace oboe {
+
+// A stream that talks to the OboeService or directly to a HAL.
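+// Commands and timestamps from the service are read through the AudioEndpoint's
+// up command queue (see processCommands()); audio data is written through its
+// down data queue (see writeNow()). An IsochronousClockModel estimates the read
+// position between timestamps so the client knows when to wake up and write more.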
+class AudioStreamInternal : public AudioStream {
+
+public:
+    AudioStreamInternal();
+    virtual ~AudioStreamInternal();
+
+    // =========== Begin ABSTRACT methods ===========================
+    virtual oboe_result_t requestStart() override;
+
+    virtual oboe_result_t requestPause() override;
+
+    virtual oboe_result_t requestFlush() override;
+
+    virtual oboe_result_t requestStop() override;
+
+    // TODO use oboe_clockid_t all the way down to AudioClock
+    virtual oboe_result_t getTimestamp(clockid_t clockId,
+                                       oboe_position_frames_t *framePosition,
+                                       oboe_nanoseconds_t *timeNanoseconds) override;
+
+
+    virtual oboe_result_t updateState() override;
+    // =========== End ABSTRACT methods ===========================
+
+    virtual oboe_result_t open(const AudioStreamBuilder &builder) override;
+
+    virtual oboe_result_t close() override;
+
+    virtual oboe_result_t write(const void *buffer,
+                             int32_t numFrames,
+                             oboe_nanoseconds_t timeoutNanoseconds) override;
+
+    virtual oboe_result_t waitForStateChange(oboe_stream_state_t currentState,
+                                          oboe_stream_state_t *nextState,
+                                          oboe_nanoseconds_t timeoutNanoseconds) override;
+
+    virtual oboe_result_t setBufferSize(oboe_size_frames_t requestedFrames,
+                                        oboe_size_frames_t *actualFrames) override;
+
+    virtual oboe_size_frames_t getBufferSize() const override;
+
+    virtual oboe_size_frames_t getBufferCapacity() const override;
+
+    virtual oboe_size_frames_t getFramesPerBurst() const override;
+
+    virtual oboe_position_frames_t getFramesRead() override;
+
+    virtual int32_t getXRunCount() const override {
+        return mXRunCount;
+    }
+
+    virtual oboe_result_t registerThread() override;
+
+    virtual oboe_result_t unregisterThread() override;
+
+protected:
+
+    oboe_result_t processCommands();
+
+    /**
+     * Low-level write that will not block. It will just write as much as it can.
+     *
+     * It passes back a recommended time to wake up if wakeTimePtr is not NULL.
+     *
+     * @return the number of frames written or a negative error code.
+     */
+    virtual oboe_result_t writeNow(const void *buffer,
+                                int32_t numFrames,
+                                oboe_nanoseconds_t currentTimeNanos,
+                                oboe_nanoseconds_t *wakeTimePtr);
+
+    void onFlushFromServer();
+
+    oboe_result_t onEventFromServer(OboeServiceMessage *message);
+
+    oboe_result_t onTimestampFromServer(OboeServiceMessage *message);
+
+private:
+    IsochronousClockModel    mClockModel;
+    AudioEndpoint            mAudioEndpoint;
+    oboe_handle_t            mServiceStreamHandle;
+    EndpointDescriptor       mEndpointDescriptor;
+    sp<IOboeAudioService>    mService;
+    // Offset from underlying frame position.
+    oboe_position_frames_t   mFramesOffsetFromService = 0;
+    oboe_position_frames_t   mLastFramesRead = 0;
+    oboe_size_frames_t       mFramesPerBurst;
+    int32_t                  mXRunCount = 0;
+
+    void processTimestamp(uint64_t position, oboe_nanoseconds_t time);
+};
+
+} /* namespace oboe */
+
+#endif //OBOE_AUDIOSTREAMINTERNAL_H
diff --git a/media/liboboe/src/client/IsochronousClockModel.cpp b/media/liboboe/src/client/IsochronousClockModel.cpp
new file mode 100644
index 0000000..b8e5538
--- /dev/null
+++ b/media/liboboe/src/client/IsochronousClockModel.cpp
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "OboeAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <oboe/OboeDefinitions.h>
+
+#include "IsochronousClockModel.h"
+
+#define MIN_LATENESS_NANOS (10 * OBOE_NANOS_PER_MICROSECOND)
+
+using namespace android;
+using namespace oboe;
+
+IsochronousClockModel::IsochronousClockModel()
+        : mSampleRate(48000)
+        , mFramesPerBurst(64)
+        , mMaxLatenessInNanos(0)
+        , mMarkerFramePosition(0)
+        , mMarkerNanoTime(0)
+        , mTimestampCount(0)
+        , mState(STATE_STOPPED)
+{
+}
+
+IsochronousClockModel::~IsochronousClockModel() {
+}
+
+void IsochronousClockModel::start(oboe_nanoseconds_t nanoTime)
+{
+    mMarkerNanoTime = nanoTime;
+    mState = STATE_STARTING;
+}
+
+void IsochronousClockModel::stop(oboe_nanoseconds_t nanoTime)
+{
+    mMarkerNanoTime = nanoTime;
+    mMarkerFramePosition = convertTimeToPosition(nanoTime); // TODO should we do this?
+    mState = STATE_STOPPED;
+}
+
+void IsochronousClockModel::processTimestamp(oboe_position_frames_t framePosition,
+                                             oboe_nanoseconds_t nanoTime) {
+    int64_t framesDelta = framePosition - mMarkerFramePosition;
+    int64_t nanosDelta = nanoTime - mMarkerNanoTime;
+    if (nanosDelta < 1000) {
+        return;
+    }
+
+//    ALOGI("processTimestamp() - mMarkerFramePosition = %lld at mMarkerNanoTime %llu",
+//         (long long)mMarkerFramePosition,
+//         (long long)mMarkerNanoTime);
+//    ALOGI("processTimestamp() - framePosition = %lld at nanoTime %llu",
+//         (long long)framePosition,
+//         (long long)nanoTime);
+
+    int64_t expectedNanosDelta = convertDeltaPositionToTime(framesDelta);
+//    ALOGI("processTimestamp() - expectedNanosDelta = %lld, nanosDelta = %llu",
+//         (long long)expectedNanosDelta,
+//         (long long)nanosDelta);
+
+//    ALOGI("processTimestamp() - mSampleRate = %d", mSampleRate);
+//    ALOGI("processTimestamp() - mState = %d", mState);
+    switch (mState) {
+    case STATE_STOPPED:
+        break;
+    case STATE_STARTING:
+        mMarkerFramePosition = framePosition;
+        mMarkerNanoTime = nanoTime;
+        mState = STATE_SYNCING;
+        break;
+    case STATE_SYNCING:
+        // This will handle a burst of rapid consumption in the beginning.
+        if (nanosDelta < expectedNanosDelta) {
+            mMarkerFramePosition = framePosition;
+            mMarkerNanoTime = nanoTime;
+        } else {
+            ALOGI("processTimestamp() - advance to STATE_RUNNING");
+            mState = STATE_RUNNING;
+        }
+        break;
+    case STATE_RUNNING:
+        if (nanosDelta < expectedNanosDelta) {
+            // Earlier than expected timestamp.
+            // This data is probably more accurate so use it.
+            // or we may be drifting due to a slow HW clock.
+            mMarkerFramePosition = framePosition;
+            mMarkerNanoTime = nanoTime;
+            ALOGI("processTimestamp() - STATE_RUNNING - %d < %d micros - EARLY",
+                 (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000));
+        } else if (nanosDelta > (expectedNanosDelta + mMaxLatenessInNanos)) {
+            // Later than expected timestamp.
+            mMarkerFramePosition = framePosition;
+            mMarkerNanoTime = nanoTime - mMaxLatenessInNanos;
+            ALOGI("processTimestamp() - STATE_RUNNING - %d > %d + %d micros - LATE",
+                 (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000),
+                 (int) (mMaxLatenessInNanos / 1000));
+        }
+        break;
+    default:
+        break;
+    }
+    ++mTimestampCount;
+}
+
+void IsochronousClockModel::setSampleRate(int32_t sampleRate) {
+    mSampleRate = sampleRate;
+    update();
+}
+
+void IsochronousClockModel::setFramesPerBurst(int32_t framesPerBurst) {
+    mFramesPerBurst = framesPerBurst;
+    update();
+}
+
+void IsochronousClockModel::update() {
+    int64_t nanosLate = convertDeltaPositionToTime(mFramesPerBurst); // uses mSampleRate
+    mMaxLatenessInNanos = (nanosLate > MIN_LATENESS_NANOS) ? nanosLate : MIN_LATENESS_NANOS;
+}
+
+oboe_nanoseconds_t IsochronousClockModel::convertDeltaPositionToTime(
+        oboe_position_frames_t framesDelta) const {
+    return (OBOE_NANOS_PER_SECOND * framesDelta) / mSampleRate;
+}
+
+int64_t IsochronousClockModel::convertDeltaTimeToPosition(oboe_nanoseconds_t nanosDelta) const {
+    return (mSampleRate * nanosDelta) / OBOE_NANOS_PER_SECOND;
+}
+
+oboe_nanoseconds_t IsochronousClockModel::convertPositionToTime(
+        oboe_position_frames_t framePosition) const {
+    if (mState == STATE_STOPPED) {
+        return mMarkerNanoTime;
+    }
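+    // Round the position up to the next burst boundary: data is consumed a whole
+    // burst at a time, so a position is reached only when the burst containing it
+    // is read.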
+    oboe_position_frames_t nextBurstIndex = (framePosition + mFramesPerBurst - 1) / mFramesPerBurst;
+    oboe_position_frames_t nextBurstPosition = mFramesPerBurst * nextBurstIndex;
+    oboe_position_frames_t framesDelta = nextBurstPosition - mMarkerFramePosition;
+    oboe_nanoseconds_t nanosDelta = convertDeltaPositionToTime(framesDelta);
+    oboe_nanoseconds_t time = (oboe_nanoseconds_t) (mMarkerNanoTime + nanosDelta);
+//    ALOGI("IsochronousClockModel::convertPositionToTime: pos = %llu --> time = %llu",
+//         (unsigned long long)framePosition,
+//         (unsigned long long)time);
+    return time;
+}
+
+oboe_position_frames_t IsochronousClockModel::convertTimeToPosition(
+        oboe_nanoseconds_t nanoTime) const {
+    if (mState == STATE_STOPPED) {
+        return mMarkerFramePosition;
+    }
+    oboe_nanoseconds_t nanosDelta = nanoTime - mMarkerNanoTime;
+    oboe_position_frames_t framesDelta = convertDeltaTimeToPosition(nanosDelta);
+    oboe_position_frames_t nextBurstPosition = mMarkerFramePosition + framesDelta;
+    oboe_position_frames_t nextBurstIndex = nextBurstPosition / mFramesPerBurst;
+    oboe_position_frames_t position = nextBurstIndex * mFramesPerBurst;
+//    ALOGI("IsochronousClockModel::convertTimeToPosition: time = %llu --> pos = %llu",
+//         (unsigned long long)nanoTime,
+//         (unsigned long long)position);
+//    ALOGI("IsochronousClockModel::convertTimeToPosition: framesDelta = %llu, mFramesPerBurst = %d",
+//         (long long) framesDelta, mFramesPerBurst);
+    return position;
+}
diff --git a/media/liboboe/src/client/IsochronousClockModel.h b/media/liboboe/src/client/IsochronousClockModel.h
new file mode 100644
index 0000000..97be325
--- /dev/null
+++ b/media/liboboe/src/client/IsochronousClockModel.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_ISOCHRONOUSCLOCKMODEL_H
+#define OBOE_ISOCHRONOUSCLOCKMODEL_H
+
+#include <stdint.h>
+#include <oboe/OboeAudio.h>
+
+namespace oboe {
+
+/**
+ * Model an isochronous data stream using occasional timestamps as input.
+ * This can be used to predict the position of the stream at a given time.
+ *
+ * This class is not thread safe and should only be called from one thread.
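+ *
+ * Typical use (as in AudioStreamInternal): call start() when the stream starts,
+ * feed each timestamp reported by the service into processTimestamp(), then use
+ * convertTimeToPosition() and convertPositionToTime() to estimate the current
+ * read position or the time of the next burst.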
+ */
+class IsochronousClockModel {
+
+public:
+    IsochronousClockModel();
+    virtual ~IsochronousClockModel();
+
+    void start(oboe_nanoseconds_t nanoTime);
+    void stop(oboe_nanoseconds_t nanoTime);
+
+    void processTimestamp(oboe_position_frames_t framePosition, oboe_nanoseconds_t nanoTime);
+
+    /**
+     * @param sampleRate rate of the stream in frames per second
+     */
+    void setSampleRate(oboe_sample_rate_t sampleRate);
+
+    oboe_sample_rate_t getSampleRate() const {
+        return mSampleRate;
+    }
+
+    /**
+     * This must be set accurately in order to track the isochronous stream.
+     *
+     * @param framesPerBurst number of frames that the stream advances at one time.
+     */
+    void setFramesPerBurst(oboe_size_frames_t framesPerBurst);
+
+    oboe_size_frames_t getFramesPerBurst() const {
+        return mFramesPerBurst;
+    }
+
+    /**
+     * Calculate an estimated time when the stream will be at that position.
+     *
+     * @param framePosition position of the stream in frames
+     * @return time in nanoseconds
+     */
+    oboe_nanoseconds_t convertPositionToTime(oboe_position_frames_t framePosition) const;
+
+    /**
+     * Calculate an estimated position where the stream will be at the specified time.
+     *
+     * @param nanoTime time of interest
+     * @return position in frames
+     */
+    oboe_position_frames_t convertTimeToPosition(oboe_nanoseconds_t nanoTime) const;
+
+    /**
+     * @param framesDelta difference in frames
+     * @return duration in nanoseconds
+     */
+    oboe_nanoseconds_t convertDeltaPositionToTime(oboe_position_frames_t framesDelta) const;
+
+    /**
+     * @param nanosDelta duration in nanoseconds
+     * @return frames that the stream will advance in that time
+     */
+    oboe_position_frames_t convertDeltaTimeToPosition(oboe_nanoseconds_t nanosDelta) const;
+
+private:
+    enum clock_model_state_t {
+        STATE_STOPPED,
+        STATE_STARTING,
+        STATE_SYNCING,
+        STATE_RUNNING
+    };
+
+    oboe_sample_rate_t     mSampleRate;
+    oboe_size_frames_t     mFramesPerBurst;
+    int32_t                mMaxLatenessInNanos;
+    oboe_position_frames_t mMarkerFramePosition;
+    oboe_nanoseconds_t     mMarkerNanoTime;
+    int32_t                mTimestampCount;
+    clock_model_state_t     mState;
+
+    void update();
+};
+
+} /* namespace oboe */
+
+#endif //OBOE_ISOCHRONOUSCLOCKMODEL_H
diff --git a/media/liboboe/src/core/AudioStream.cpp b/media/liboboe/src/core/AudioStream.cpp
new file mode 100644
index 0000000..cc654c3
--- /dev/null
+++ b/media/liboboe/src/core/AudioStream.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "OboeAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <atomic>
+#include <stdint.h>
+#include <oboe/OboeAudio.h>
+
+#include "AudioStreamBuilder.h"
+#include "AudioStream.h"
+#include "AudioClock.h"
+
+using namespace oboe;
+
+AudioStream::AudioStream() {
+    // mThread is a pthread_t of unknown size so we need memset.
+    memset(&mThread, 0, sizeof(mThread));
+    setPeriodNanoseconds(0);
+}
+
+oboe_result_t AudioStream::open(const AudioStreamBuilder& builder)
+{
+    // TODO validate parameters.
+    // Copy parameters from the Builder because the Builder may be deleted after this call.
+    mSamplesPerFrame = builder.getSamplesPerFrame();
+    mSampleRate = builder.getSampleRate();
+    mDeviceId = builder.getDeviceId();
+    mFormat = builder.getFormat();
+    mSharingMode = builder.getSharingMode();
+    return OBOE_OK;
+}
+
+AudioStream::~AudioStream() {
+    close();
+}
+
+oboe_result_t AudioStream::waitForStateTransition(oboe_stream_state_t startingState,
+                                               oboe_stream_state_t endingState,
+                                               oboe_nanoseconds_t timeoutNanoseconds)
+{
+    oboe_stream_state_t state = getState();
+    oboe_stream_state_t nextState = state;
+    if (state == startingState && state != endingState) {
+        oboe_result_t result = waitForStateChange(state, &nextState, timeoutNanoseconds);
+        if (result != OBOE_OK) {
+            return result;
+        }
+    }
+    // It's OK if the expected transition has already occurred.
+    // But if we reach an unexpected state then that is an error.
+    if (nextState != endingState) {
+        return OBOE_ERROR_UNEXPECTED_STATE;
+    } else {
+        return OBOE_OK;
+    }
+}
+
+oboe_result_t AudioStream::waitForStateChange(oboe_stream_state_t currentState,
+                                                oboe_stream_state_t *nextState,
+                                                oboe_nanoseconds_t timeoutNanoseconds)
+{
+    // TODO replace this when similar functionality added to AudioTrack.cpp
+    oboe_nanoseconds_t durationNanos = 20 * OBOE_NANOS_PER_MILLISECOND;
+    oboe_stream_state_t state = getState();
+    while (state == currentState && timeoutNanoseconds > 0) {
+        if (durationNanos > timeoutNanoseconds) {
+            durationNanos = timeoutNanoseconds;
+        }
+        AudioClock::sleepForNanos(durationNanos);
+        timeoutNanoseconds -= durationNanos;
+
+        oboe_result_t result = updateState();
+        if (result != OBOE_OK) {
+            return result;
+        }
+
+        state = getState();
+    }
+    if (nextState != nullptr) {
+        *nextState = state;
+    }
+    return (state == currentState) ? OBOE_ERROR_TIMEOUT : OBOE_OK;
+}
+
+// This registers the app's background audio thread with the server before
+// passing control to the app. This gives the server an opportunity to boost
+// the thread's performance characteristics.
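+// The flow is: createThread() -> pthread_create() -> AudioStream_internalThreadProc()
+// -> wrapUserThread() -> registerThread(), then the application's threadProc runs,
+// and unregisterThread() is called when it returns.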
+void* AudioStream::wrapUserThread() {
+    void* procResult = nullptr;
+    mThreadRegistrationResult = registerThread();
+    if (mThreadRegistrationResult == OBOE_OK) {
+        // Call application procedure. This may take a very long time.
+        procResult = mThreadProc(mThreadArg);
+        ALOGD("AudioStream::mThreadProc() returned");
+        mThreadRegistrationResult = unregisterThread();
+    }
+    return procResult;
+}
+
+// This is the entry point for the new thread created by createThread().
+// It converts the 'C' function call to a C++ method call.
+static void* AudioStream_internalThreadProc(void* threadArg) {
+    AudioStream *audioStream = (AudioStream *) threadArg;
+    return audioStream->wrapUserThread();
+}
+
+oboe_result_t AudioStream::createThread(oboe_nanoseconds_t periodNanoseconds,
+                                     oboe_audio_thread_proc_t *threadProc,
+                                     void* threadArg)
+{
+    if (mHasThread) {
+        return OBOE_ERROR_INVALID_STATE;
+    }
+    if (threadProc == nullptr) {
+        return OBOE_ERROR_NULL;
+    }
+    // Pass input parameters to the background thread.
+    mThreadProc = threadProc;
+    mThreadArg = threadArg;
+    setPeriodNanoseconds(periodNanoseconds);
+    int err = pthread_create(&mThread, nullptr, AudioStream_internalThreadProc, this);
+    if (err != 0) {
+        // TODO convert errno to oboe_result_t
+        return OBOE_ERROR_INTERNAL;
+    } else {
+        mHasThread = true;
+        return OBOE_OK;
+    }
+}
+
+oboe_result_t AudioStream::joinThread(void** returnArg, oboe_nanoseconds_t timeoutNanoseconds)
+{
+    if (!mHasThread) {
+        return OBOE_ERROR_INVALID_STATE;
+    }
+#if 0
+    // TODO implement equivalent of pthread_timedjoin_np()
+    struct timespec abstime;
+    int err = pthread_timedjoin_np(mThread, returnArg, &abstime);
+#else
+    int err = pthread_join(mThread, returnArg);
+#endif
+    mHasThread = false;
+    // TODO convert errno to oboe_result_t
+    return err ? OBOE_ERROR_INTERNAL : mThreadRegistrationResult;
+}
+
diff --git a/media/liboboe/src/core/AudioStream.h b/media/liboboe/src/core/AudioStream.h
new file mode 100644
index 0000000..c13ae9f
--- /dev/null
+++ b/media/liboboe/src/core/AudioStream.h
@@ -0,0 +1,280 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_AUDIOSTREAM_H
+#define OBOE_AUDIOSTREAM_H
+
+#include <atomic>
+#include <stdint.h>
+#include <oboe/OboeDefinitions.h>
+#include <oboe/OboeAudio.h>
+
+#include "OboeUtilities.h"
+#include "MonotonicCounter.h"
+
+namespace oboe {
+
+class AudioStreamBuilder;
+
+/**
+ * Oboe audio stream.
+ */
+class AudioStream {
+public:
+
+    AudioStream();
+
+    virtual ~AudioStream();
+
+
+    // =========== Begin ABSTRACT methods ===========================
+
+    /* Asynchronous requests.
+     * Use waitForStateChange() to wait for completion.
+     */
+    virtual oboe_result_t requestStart() = 0;
+    virtual oboe_result_t requestPause() = 0;
+    virtual oboe_result_t requestFlush() = 0;
+    virtual oboe_result_t requestStop() = 0;
+
+    // TODO use oboe_clockid_t all the way down to AudioClock
+    virtual oboe_result_t getTimestamp(clockid_t clockId,
+                                       oboe_position_frames_t *framePosition,
+                                       oboe_nanoseconds_t *timeNanoseconds) = 0;
+
+
+    virtual oboe_result_t updateState() = 0;
+
+
+    // =========== End ABSTRACT methods ===========================
+
+    virtual oboe_result_t waitForStateChange(oboe_stream_state_t currentState,
+                                          oboe_stream_state_t *nextState,
+                                          oboe_nanoseconds_t timeoutNanoseconds);
+
+    /**
+     * Open the stream using the parameters in the builder.
+     * Allocate the necessary resources.
+     */
+    virtual oboe_result_t open(const AudioStreamBuilder& builder);
+
+    /**
+     * Close the stream and deallocate any resources from the open() call.
+     * It is safe to call close() multiple times.
+     */
+    virtual oboe_result_t close() {
+        return OBOE_OK;
+    }
+
+    virtual oboe_result_t setBufferSize(oboe_size_frames_t requestedFrames,
+                                        oboe_size_frames_t *actualFrames) {
+        return OBOE_ERROR_UNIMPLEMENTED;
+    }
+
+    virtual oboe_result_t createThread(oboe_nanoseconds_t periodNanoseconds,
+                                       oboe_audio_thread_proc_t *threadProc,
+                                       void *threadArg);
+
+    virtual oboe_result_t joinThread(void **returnArg, oboe_nanoseconds_t timeoutNanoseconds);
+
+    virtual oboe_result_t registerThread() {
+        return OBOE_OK;
+    }
+
+    virtual oboe_result_t unregisterThread() {
+        return OBOE_OK;
+    }
+
+    /**
+     * Internal function used to call the audio thread passed by the user.
+     * It is unfortunately public because it needs to be called by a static 'C' function.
+     */
+    void* wrapUserThread();
+
+    // ============== Queries ===========================
+
+    virtual oboe_stream_state_t getState() const {
+        return mState;
+    }
+
+    virtual oboe_size_frames_t getBufferSize() const {
+        return OBOE_ERROR_UNIMPLEMENTED;
+    }
+
+    virtual oboe_size_frames_t getBufferCapacity() const {
+        return OBOE_ERROR_UNIMPLEMENTED;
+    }
+
+    virtual oboe_size_frames_t getFramesPerBurst() const {
+        return OBOE_ERROR_UNIMPLEMENTED;
+    }
+
+    virtual int32_t getXRunCount() const {
+        return OBOE_ERROR_UNIMPLEMENTED;
+    }
+
+    bool isPlaying() const {
+        return mState == OBOE_STREAM_STATE_STARTING || mState == OBOE_STREAM_STATE_STARTED;
+    }
+
+    oboe_sample_rate_t getSampleRate() const {
+        return mSampleRate;
+    }
+
+    oboe_audio_format_t getFormat()  const {
+        return mFormat;
+    }
+
+    int32_t getSamplesPerFrame() const {
+        return mSamplesPerFrame;
+    }
+
+    oboe_device_id_t getDeviceId() const {
+        return mDeviceId;
+    }
+
+    oboe_sharing_mode_t getSharingMode() const {
+        return mSharingMode;
+    }
+
+    oboe_direction_t getDirection() const {
+        return mDirection;
+    }
+
+    oboe_size_bytes_t getBytesPerFrame() const {
+        return mSamplesPerFrame * getBytesPerSample();
+    }
+
+    oboe_size_bytes_t getBytesPerSample() const {
+        return OboeConvert_formatToSizeInBytes(mFormat);
+    }
+
+    virtual oboe_position_frames_t getFramesWritten() {
+        return mFramesWritten.get();
+    }
+
+    virtual oboe_position_frames_t getFramesRead() {
+        return mFramesRead.get();
+    }
+
+
+    // ============== I/O ===========================
+    // A Stream will only implement read() or write() depending on its direction.
+    virtual oboe_result_t write(const void *buffer,
+                             oboe_size_frames_t numFrames,
+                             oboe_nanoseconds_t timeoutNanoseconds) {
+        return OBOE_ERROR_UNIMPLEMENTED;
+    }
+
+    virtual oboe_result_t read(void *buffer,
+                            oboe_size_frames_t numFrames,
+                            oboe_nanoseconds_t timeoutNanoseconds) {
+        return OBOE_ERROR_UNIMPLEMENTED;
+    }
+
+protected:
+
+    virtual oboe_position_frames_t incrementFramesWritten(oboe_size_frames_t frames) {
+        return static_cast<oboe_position_frames_t>(mFramesWritten.increment(frames));
+    }
+
+    virtual oboe_position_frames_t incrementFramesRead(oboe_size_frames_t frames) {
+        return static_cast<oboe_position_frames_t>(mFramesRead.increment(frames));
+    }
+
+    /**
+     * Wait for a transition from one state to another.
+     * @return OBOE_OK if the endingState was observed, or OBOE_ERROR_UNEXPECTED_STATE
+     *   if any state that was not the startingState or endingState was observed
+     *   or OBOE_ERROR_TIMEOUT
+     */
+    virtual oboe_result_t waitForStateTransition(oboe_stream_state_t startingState,
+                                              oboe_stream_state_t endingState,
+                                              oboe_nanoseconds_t timeoutNanoseconds);
+
+    /**
+     * This should not be called after the open() call.
+     */
+    void setSampleRate(oboe_sample_rate_t sampleRate) {
+        mSampleRate = sampleRate;
+    }
+
+    /**
+     * This should not be called after the open() call.
+     */
+    void setSamplesPerFrame(int32_t samplesPerFrame) {
+        mSamplesPerFrame = samplesPerFrame;
+    }
+
+    /**
+     * This should not be called after the open() call.
+     */
+    void setSharingMode(oboe_sharing_mode_t sharingMode) {
+        mSharingMode = sharingMode;
+    }
+
+    /**
+     * This should not be called after the open() call.
+     */
+    void setFormat(oboe_audio_format_t format) {
+        mFormat = format;
+    }
+
+    void setState(oboe_stream_state_t state) {
+        mState = state;
+    }
+
+
+
+protected:
+    MonotonicCounter     mFramesWritten;
+    MonotonicCounter     mFramesRead;
+
+    void setPeriodNanoseconds(oboe_nanoseconds_t periodNanoseconds) {
+        mPeriodNanoseconds.store(periodNanoseconds, std::memory_order_release);
+    }
+
+    oboe_nanoseconds_t getPeriodNanoseconds() {
+        return mPeriodNanoseconds.load(std::memory_order_acquire);
+    }
+
+private:
+    // These do not change after open().
+    int32_t              mSamplesPerFrame = OBOE_UNSPECIFIED;
+    oboe_sample_rate_t   mSampleRate = OBOE_UNSPECIFIED;
+    oboe_stream_state_t  mState = OBOE_STREAM_STATE_UNINITIALIZED;
+    oboe_device_id_t     mDeviceId = OBOE_UNSPECIFIED;
+    oboe_sharing_mode_t  mSharingMode = OBOE_SHARING_MODE_LEGACY;
+    oboe_audio_format_t  mFormat = OBOE_AUDIO_FORMAT_UNSPECIFIED;
+    oboe_direction_t     mDirection = OBOE_DIRECTION_OUTPUT;
+
+    // background thread ----------------------------------
+    bool                 mHasThread = false;
+    pthread_t            mThread; // initialized in constructor
+
+    // These are set by the application thread and then read by the audio pthread.
+    std::atomic<oboe_nanoseconds_t>  mPeriodNanoseconds; // for tuning SCHED_FIFO threads
+    // TODO make atomic?
+    oboe_audio_thread_proc_t* mThreadProc = nullptr;
+    void*                mThreadArg = nullptr;
+    oboe_result_t        mThreadRegistrationResult = OBOE_OK;
+
+
+};
+
+} /* namespace oboe */
+
+#endif /* OBOE_AUDIOSTREAM_H */
diff --git a/media/liboboe/src/core/AudioStreamBuilder.cpp b/media/liboboe/src/core/AudioStreamBuilder.cpp
new file mode 100644
index 0000000..37e1378
--- /dev/null
+++ b/media/liboboe/src/core/AudioStreamBuilder.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "OboeAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <new>
+#include <stdint.h>
+
+#include <oboe/OboeDefinitions.h>
+#include <oboe/OboeAudio.h>
+
+#include "client/AudioStreamInternal.h"
+#include "core/AudioStream.h"
+#include "core/AudioStreamBuilder.h"
+#include "legacy/AudioStreamRecord.h"
+#include "legacy/AudioStreamTrack.h"
+
+using namespace oboe;
+
+/*
+ * AudioStreamBuilder
+ */
+AudioStreamBuilder::AudioStreamBuilder() {
+}
+
+AudioStreamBuilder::~AudioStreamBuilder() {
+}
+
+oboe_result_t AudioStreamBuilder::build(AudioStream** streamPtr) {
+    // TODO Is there a better place to put the code that decides which class to use?
+    AudioStream* audioStream = nullptr;
+    const oboe_sharing_mode_t sharingMode = getSharingMode();
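+    // LEGACY sharing maps to the legacy AudioStreamRecord / AudioStreamTrack
+    // classes; EXCLUSIVE output maps to the service-backed AudioStreamInternal.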
+    switch (getDirection()) {
+    case OBOE_DIRECTION_INPUT:
+        switch (sharingMode) {
+            case OBOE_SHARING_MODE_LEGACY:
+                audioStream = new(std::nothrow) AudioStreamRecord();
+                break;
+            default:
+                ALOGE("AudioStreamBuilder(): bad sharing mode = %d", sharingMode);
+                return OBOE_ERROR_ILLEGAL_ARGUMENT;
+                break;
+        }
+        break;
+    case OBOE_DIRECTION_OUTPUT:
+        switch (sharingMode) {
+            case OBOE_SHARING_MODE_LEGACY:
+                audioStream = new(std::nothrow) AudioStreamTrack();
+                break;
+            case OBOE_SHARING_MODE_EXCLUSIVE:
+                audioStream = new(std::nothrow) AudioStreamInternal();
+                break;
+            default:
+                ALOGE("AudioStreamBuilder(): bad sharing mode = %d", sharingMode);
+                return OBOE_ERROR_ILLEGAL_ARGUMENT;
+                break;
+        }
+        break;
+    default:
+        ALOGE("AudioStreamBuilder(): bad direction = %d", getDirection());
+        return OBOE_ERROR_ILLEGAL_ARGUMENT;
+        break;
+    }
+    if (audioStream == nullptr) {
+        return OBOE_ERROR_NO_MEMORY;
+    }
+    ALOGD("AudioStreamBuilder(): created audioStream = %p", audioStream);
+
+    // TODO maybe move this out of build and pass the builder to the constructors
+    // Open the stream using the parameters from the builder.
+    const oboe_result_t result = audioStream->open(*this);
+    if (result != OBOE_OK) {
+        delete audioStream;
+    } else {
+        *streamPtr = audioStream;
+    }
+    return result;
+}
diff --git a/media/liboboe/src/core/AudioStreamBuilder.h b/media/liboboe/src/core/AudioStreamBuilder.h
new file mode 100644
index 0000000..ec17eb6
--- /dev/null
+++ b/media/liboboe/src/core/AudioStreamBuilder.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_AUDIOSTREAMBUILDER_H
+#define OBOE_AUDIOSTREAMBUILDER_H
+
+#include <stdint.h>
+
+#include <oboe/OboeDefinitions.h>
+#include <oboe/OboeAudio.h>
+
+#include "AudioStream.h"
+
+namespace oboe {
+
+/**
+ * Factory class for an AudioStream.
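+ *
+ * Illustrative use (setters return 'this' so calls can be chained):
+ *     AudioStreamBuilder builder;
+ *     builder.setSampleRate(48000)->setSamplesPerFrame(2)->setDirection(OBOE_DIRECTION_OUTPUT);
+ *     AudioStream *stream = nullptr;
+ *     oboe_result_t result = builder.build(&stream);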
+ */
+class AudioStreamBuilder {
+public:
+    AudioStreamBuilder();
+
+    ~AudioStreamBuilder();
+
+    int getSamplesPerFrame() const {
+        return mSamplesPerFrame;
+    }
+
+    /**
+     * This is also known as channelCount.
+     */
+    AudioStreamBuilder* setSamplesPerFrame(int samplesPerFrame) {
+        mSamplesPerFrame = samplesPerFrame;
+        return this;
+    }
+
+    oboe_direction_t getDirection() const {
+        return mDirection;
+    }
+
+    AudioStreamBuilder* setDirection(oboe_direction_t direction) {
+        mDirection = direction;
+        return this;
+    }
+
+    oboe_sample_rate_t getSampleRate() const {
+        return mSampleRate;
+    }
+
+    AudioStreamBuilder* setSampleRate(oboe_sample_rate_t sampleRate) {
+        mSampleRate = sampleRate;
+        return this;
+    }
+
+    oboe_audio_format_t getFormat() const {
+        return mFormat;
+    }
+
+    AudioStreamBuilder *setFormat(oboe_audio_format_t format) {
+        mFormat = format;
+        return this;
+    }
+
+    oboe_sharing_mode_t getSharingMode() const {
+        return mSharingMode;
+    }
+
+    AudioStreamBuilder* setSharingMode(oboe_sharing_mode_t sharingMode) {
+        mSharingMode = sharingMode;
+        return this;
+    }
+
+    oboe_device_id_t getDeviceId() const {
+        return mDeviceId;
+    }
+
+    AudioStreamBuilder* setDeviceId(oboe_device_id_t deviceId) {
+        mDeviceId = deviceId;
+        return this;
+    }
+
+    oboe_result_t build(AudioStream **streamPtr);
+
+private:
+    int32_t              mSamplesPerFrame = OBOE_UNSPECIFIED;
+    oboe_sample_rate_t   mSampleRate = OBOE_UNSPECIFIED;
+    oboe_device_id_t     mDeviceId = OBOE_DEVICE_UNSPECIFIED;
+    oboe_sharing_mode_t  mSharingMode = OBOE_SHARING_MODE_LEGACY;
+    oboe_audio_format_t  mFormat = OBOE_AUDIO_FORMAT_UNSPECIFIED;
+    oboe_direction_t     mDirection = OBOE_DIRECTION_OUTPUT;
+};
+
+} /* namespace oboe */
+
+#endif /* OBOE_AUDIOSTREAMBUILDER_H */
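
The setters above return `this`, so a stream can be configured with a chain of calls before `build()` allocates and opens the matching AudioStream subclass. A minimal sketch of an internal caller, assuming only the declarations in this header (the 48 kHz stereo parameters are illustrative, not defaults):

```cpp
#include "AudioStreamBuilder.h"

// Hypothetical internal caller; a minimal sketch using only the declarations above.
oboe_result_t openStereoOutput(oboe::AudioStream **streamOut) {
    oboe::AudioStreamBuilder builder;
    builder.setDirection(OBOE_DIRECTION_OUTPUT)
           ->setSampleRate(48000)            // illustrative values, not defaults
           ->setSamplesPerFrame(2)
           ->setSharingMode(OBOE_SHARING_MODE_LEGACY);
    // build() creates the matching AudioStream subclass, opens it with these
    // parameters, and transfers ownership of the stream to the caller.
    return builder.build(streamOut);
}
```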
diff --git a/media/liboboe/src/core/OboeAudio.cpp b/media/liboboe/src/core/OboeAudio.cpp
new file mode 100644
index 0000000..d98ca36
--- /dev/null
+++ b/media/liboboe/src/core/OboeAudio.cpp
@@ -0,0 +1,562 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "OboeAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <time.h>
+#include <pthread.h>
+
+#include <oboe/OboeDefinitions.h>
+#include <oboe/OboeAudio.h>
+
+#include "AudioStreamBuilder.h"
+#include "AudioStream.h"
+#include "AudioClock.h"
+#include "client/AudioStreamInternal.h"
+#include "HandleTracker.h"
+
+using namespace oboe;
+
+// This is not the maximum theoretically possible number of handles that the HandleTracker
+// class could support; instead it is the maximum number of handles that we are configuring
+// for our HandleTracker instance (sHandleTracker).
+#define OBOE_MAX_HANDLES  64
+
+// Macros for common code that includes a return.
+// TODO Consider using do{}while(0) construct. I tried but it hung AndroidStudio
+#define CONVERT_BUILDER_HANDLE_OR_RETURN() \
+    convertOboeBuilderToStreamBuilder(builder); \
+    if (streamBuilder == nullptr) { \
+        return OBOE_ERROR_INVALID_HANDLE; \
+    }
+
+#define COMMON_GET_FROM_BUILDER_OR_RETURN(resultPtr) \
+    CONVERT_BUILDER_HANDLE_OR_RETURN() \
+    if ((resultPtr) == nullptr) { \
+        return OBOE_ERROR_NULL; \
+    }
+
+#define CONVERT_STREAM_HANDLE_OR_RETURN() \
+    convertOboeStreamToAudioStream(stream); \
+    if (audioStream == nullptr) { \
+        return OBOE_ERROR_INVALID_HANDLE; \
+    }
+
+#define COMMON_GET_FROM_STREAM_OR_RETURN(resultPtr) \
+    CONVERT_STREAM_HANDLE_OR_RETURN(); \
+    if ((resultPtr) == nullptr) { \
+        return OBOE_ERROR_NULL; \
+    }
+
+// Static data.
+// TODO static constructors are discouraged, alternatives?
+static HandleTracker sHandleTracker(OBOE_MAX_HANDLES);
+
+typedef enum
+{
+    OBOE_HANDLE_TYPE_STREAM,
+    OBOE_HANDLE_TYPE_STREAM_BUILDER,
+    OBOE_HANDLE_TYPE_COUNT
+} oboe_handle_type_t;
+static_assert(OBOE_HANDLE_TYPE_COUNT <= HANDLE_TRACKER_MAX_TYPES, "Too many handle types.");
+
+
+#define OBOE_CASE_ENUM(name) case name: return #name
+
+OBOE_API const char * Oboe_convertResultToText(oboe_result_t returnCode) {
+    switch (returnCode) {
+        OBOE_CASE_ENUM(OBOE_OK);
+        OBOE_CASE_ENUM(OBOE_ERROR_ILLEGAL_ARGUMENT);
+        OBOE_CASE_ENUM(OBOE_ERROR_INCOMPATIBLE);
+        OBOE_CASE_ENUM(OBOE_ERROR_INTERNAL);
+        OBOE_CASE_ENUM(OBOE_ERROR_INVALID_STATE);
+        OBOE_CASE_ENUM(OBOE_ERROR_INVALID_HANDLE);
+        OBOE_CASE_ENUM(OBOE_ERROR_INVALID_QUERY);
+        OBOE_CASE_ENUM(OBOE_ERROR_UNIMPLEMENTED);
+        OBOE_CASE_ENUM(OBOE_ERROR_UNAVAILABLE);
+        OBOE_CASE_ENUM(OBOE_ERROR_NO_FREE_HANDLES);
+        OBOE_CASE_ENUM(OBOE_ERROR_NO_MEMORY);
+        OBOE_CASE_ENUM(OBOE_ERROR_NULL);
+        OBOE_CASE_ENUM(OBOE_ERROR_TIMEOUT);
+        OBOE_CASE_ENUM(OBOE_ERROR_WOULD_BLOCK);
+        OBOE_CASE_ENUM(OBOE_ERROR_INVALID_ORDER);
+        OBOE_CASE_ENUM(OBOE_ERROR_OUT_OF_RANGE);
+    }
+    return "Unrecognized Oboe error.";
+}
+
+OBOE_API const char * Oboe_convertStreamStateToText(oboe_stream_state_t state) {
+    switch (state) {
+        OBOE_CASE_ENUM(OBOE_STREAM_STATE_UNINITIALIZED);
+        OBOE_CASE_ENUM(OBOE_STREAM_STATE_OPEN);
+        OBOE_CASE_ENUM(OBOE_STREAM_STATE_STARTING);
+        OBOE_CASE_ENUM(OBOE_STREAM_STATE_STARTED);
+        OBOE_CASE_ENUM(OBOE_STREAM_STATE_PAUSING);
+        OBOE_CASE_ENUM(OBOE_STREAM_STATE_PAUSED);
+        OBOE_CASE_ENUM(OBOE_STREAM_STATE_FLUSHING);
+        OBOE_CASE_ENUM(OBOE_STREAM_STATE_FLUSHED);
+        OBOE_CASE_ENUM(OBOE_STREAM_STATE_STOPPING);
+        OBOE_CASE_ENUM(OBOE_STREAM_STATE_STOPPED);
+        OBOE_CASE_ENUM(OBOE_STREAM_STATE_CLOSING);
+        OBOE_CASE_ENUM(OBOE_STREAM_STATE_CLOSED);
+    }
+    return "Unrecognized Oboe state.";
+}
+
+#undef OBOE_CASE_ENUM
+
+static AudioStream *convertOboeStreamToAudioStream(OboeStream stream)
+{
+    return (AudioStream *) sHandleTracker.get(OBOE_HANDLE_TYPE_STREAM,
+                                              (oboe_handle_t) stream);
+}
+
+static AudioStreamBuilder *convertOboeBuilderToStreamBuilder(OboeStreamBuilder builder)
+{
+    return (AudioStreamBuilder *) sHandleTracker.get(OBOE_HANDLE_TYPE_STREAM_BUILDER,
+                                                     (oboe_handle_t) builder);
+}
+
+OBOE_API oboe_result_t Oboe_createStreamBuilder(OboeStreamBuilder *builder)
+{
+    ALOGD("Oboe_createStreamBuilder(): check sHandleTracker.isInitialized ()");
+    if (!sHandleTracker.isInitialized()) {
+        return OBOE_ERROR_NO_MEMORY;
+    }
+    AudioStreamBuilder *audioStreamBuilder = new(std::nothrow) AudioStreamBuilder();
+    if (audioStreamBuilder == nullptr) {
+        return OBOE_ERROR_NO_MEMORY;
+    }
+    ALOGD("Oboe_createStreamBuilder(): created AudioStreamBuilder = %p", audioStreamBuilder);
+    // TODO protect the put() with a Mutex
+    OboeStreamBuilder handle = sHandleTracker.put(OBOE_HANDLE_TYPE_STREAM_BUILDER,
+            audioStreamBuilder);
+    if (handle < 0) {
+        delete audioStreamBuilder;
+        return static_cast<oboe_result_t>(handle);
+    } else {
+        *builder = handle;
+    }
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStreamBuilder_setDeviceId(OboeStreamBuilder builder,
+                                                     oboe_device_id_t deviceId)
+{
+    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+    streamBuilder->setDeviceId(deviceId);
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStreamBuilder_getDeviceId(OboeStreamBuilder builder,
+                                              oboe_device_id_t *deviceId)
+{
+    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(deviceId);
+    *deviceId = streamBuilder->getDeviceId();
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStreamBuilder_setSampleRate(OboeStreamBuilder builder,
+                                              oboe_sample_rate_t sampleRate)
+{
+    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+    streamBuilder->setSampleRate(sampleRate);
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStreamBuilder_getSampleRate(OboeStreamBuilder builder,
+                                              oboe_sample_rate_t *sampleRate)
+{
+    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(sampleRate);
+    *sampleRate = streamBuilder->getSampleRate();
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStreamBuilder_setSamplesPerFrame(OboeStreamBuilder builder,
+                                                   int32_t samplesPerFrame)
+{
+    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+    streamBuilder->setSamplesPerFrame(samplesPerFrame);
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStreamBuilder_getSamplesPerFrame(OboeStreamBuilder builder,
+                                                   int32_t *samplesPerFrame)
+{
+    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(samplesPerFrame);
+    *samplesPerFrame = streamBuilder->getSamplesPerFrame();
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStreamBuilder_setDirection(OboeStreamBuilder builder,
+                                             oboe_direction_t direction)
+{
+    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+    streamBuilder->setDirection(direction);
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStreamBuilder_getDirection(OboeStreamBuilder builder,
+                                             oboe_direction_t *direction)
+{
+    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(direction);
+    *direction = streamBuilder->getDirection();
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStreamBuilder_setFormat(OboeStreamBuilder builder,
+                                                   oboe_audio_format_t format)
+{
+    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+    streamBuilder->setFormat(format);
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStreamBuilder_getFormat(OboeStreamBuilder builder,
+                                                   oboe_audio_format_t *format)
+{
+    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(format);
+    *format = streamBuilder->getFormat();
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStreamBuilder_setSharingMode(OboeStreamBuilder builder,
+                                                        oboe_sharing_mode_t sharingMode)
+{
+    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+    if ((sharingMode < 0) || (sharingMode >= OBOE_SHARING_MODE_COUNT)) {
+        return OBOE_ERROR_ILLEGAL_ARGUMENT;
+    } else {
+        streamBuilder->setSharingMode(sharingMode);
+        return OBOE_OK;
+    }
+}
+
+OBOE_API oboe_result_t OboeStreamBuilder_getSharingMode(OboeStreamBuilder builder,
+                                                        oboe_sharing_mode_t *sharingMode)
+{
+    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(sharingMode);
+    *sharingMode = streamBuilder->getSharingMode();
+    return OBOE_OK;
+}
+
+static oboe_result_t  OboeInternal_openStream(AudioStreamBuilder *streamBuilder,
+                                              OboeStream *streamPtr)
+{
+    AudioStream *audioStream = nullptr;
+    oboe_result_t result = streamBuilder->build(&audioStream);
+    if (result != OBOE_OK) {
+        return result;
+    } else {
+        // Create a handle for referencing the object.
+        // TODO protect the put() with a Mutex
+        OboeStream handle = sHandleTracker.put(OBOE_HANDLE_TYPE_STREAM, audioStream);
+        if (handle < 0) {
+            delete audioStream;
+            return static_cast<oboe_result_t>(handle);
+        }
+        *streamPtr = handle;
+        return OBOE_OK;
+    }
+}
+
+OBOE_API oboe_result_t  OboeStreamBuilder_openStream(OboeStreamBuilder builder,
+                                                     OboeStream *streamPtr)
+{
+    ALOGD("OboeStreamBuilder_openStream(): builder = 0x%08X", builder);
+    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(streamPtr);
+    return OboeInternal_openStream(streamBuilder, streamPtr);
+}
+
+OBOE_API oboe_result_t  OboeStreamBuilder_delete(OboeStreamBuilder builder)
+{
+    // TODO protect the remove() with a Mutex
+    AudioStreamBuilder *streamBuilder = (AudioStreamBuilder *)
+            sHandleTracker.remove(OBOE_HANDLE_TYPE_STREAM_BUILDER, builder);
+    if (streamBuilder != nullptr) {
+        delete streamBuilder;
+        return OBOE_OK;
+    }
+    return OBOE_ERROR_INVALID_HANDLE;
+}
+
+OBOE_API oboe_result_t  OboeStream_close(OboeStream stream)
+{
+    // TODO protect the remove() with a Mutex
+    AudioStream *audioStream = (AudioStream *)
+            sHandleTracker.remove(OBOE_HANDLE_TYPE_STREAM, (oboe_handle_t)stream);
+    if (audioStream != nullptr) {
+        audioStream->close();
+        delete audioStream;
+        return OBOE_OK;
+    }
+    return OBOE_ERROR_INVALID_HANDLE;
+}
+
+OBOE_API oboe_result_t  OboeStream_requestStart(OboeStream stream)
+{
+    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    ALOGD("OboeStream_requestStart(0x%08X), audioStream = %p", stream, audioStream);
+    return audioStream->requestStart();
+}
+
+OBOE_API oboe_result_t  OboeStream_requestPause(OboeStream stream)
+{
+    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    ALOGD("OboeStream_requestPause(0x%08X), audioStream = %p", stream, audioStream);
+    return audioStream->requestPause();
+}
+
+OBOE_API oboe_result_t  OboeStream_requestFlush(OboeStream stream)
+{
+    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    ALOGD("OboeStream_requestFlush(0x%08X), audioStream = %p", stream, audioStream);
+    return audioStream->requestFlush();
+}
+
+OBOE_API oboe_result_t  OboeStream_requestStop(OboeStream stream)
+{
+    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    ALOGD("OboeStream_requestStop(0x%08X), audioStream = %p", stream, audioStream);
+    return audioStream->requestStop();
+}
+
+OBOE_API oboe_result_t OboeStream_waitForStateChange(OboeStream stream,
+                                            oboe_stream_state_t inputState,
+                                            oboe_stream_state_t *nextState,
+                                            oboe_nanoseconds_t timeoutNanoseconds)
+{
+
+    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    return audioStream->waitForStateChange(inputState, nextState, timeoutNanoseconds);
+}
+
+// ============================================================
+// Stream - non-blocking I/O
+// ============================================================
+
+OBOE_API oboe_result_t OboeStream_read(OboeStream stream,
+                               void *buffer,
+                               oboe_size_frames_t numFrames,
+                               oboe_nanoseconds_t timeoutNanoseconds)
+{
+    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    if (buffer == nullptr) {
+        return OBOE_ERROR_NULL;
+    }
+    if (numFrames < 0) {
+        return OBOE_ERROR_ILLEGAL_ARGUMENT;
+    } else if (numFrames == 0) {
+        return 0;
+    }
+
+    oboe_result_t result = audioStream->read(buffer, numFrames, timeoutNanoseconds);
+    // ALOGD("OboeStream_read(): read returns %d", result);
+
+    return result;
+}
+
+OBOE_API oboe_result_t OboeStream_write(OboeStream stream,
+                               const void *buffer,
+                               oboe_size_frames_t numFrames,
+                               oboe_nanoseconds_t timeoutNanoseconds)
+{
+    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    if (buffer == nullptr) {
+        return OBOE_ERROR_NULL;
+    }
+    if (numFrames < 0) {
+        return OBOE_ERROR_ILLEGAL_ARGUMENT;
+    } else if (numFrames == 0) {
+        return 0;
+    }
+
+    oboe_result_t result = audioStream->write(buffer, numFrames, timeoutNanoseconds);
+    // ALOGD("OboeStream_write(): write returns %d", result);
+
+    return result;
+}
+
+// ============================================================
+// Miscellaneous
+// ============================================================
+
+OBOE_API oboe_result_t OboeStream_createThread(OboeStream stream,
+                                     oboe_nanoseconds_t periodNanoseconds,
+                                     oboe_audio_thread_proc_t *threadProc, void *arg)
+{
+    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    return audioStream->createThread(periodNanoseconds, threadProc, arg);
+}
+
+OBOE_API oboe_result_t OboeStream_joinThread(OboeStream stream,
+                                   void **returnArg,
+                                   oboe_nanoseconds_t timeoutNanoseconds)
+{
+    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    return audioStream->joinThread(returnArg, timeoutNanoseconds);
+}
+
+// ============================================================
+// Stream - queries
+// ============================================================
+
+// TODO Use oboe_clockid_t all the way down through the C++ streams.
+static clockid_t OboeConvert_fromOboeClockId(oboe_clockid_t clockid)
+{
+    clockid_t hostClockId;
+    switch (clockid) {
+        case OBOE_CLOCK_MONOTONIC:
+            hostClockId = CLOCK_MONOTONIC;
+            break;
+        case OBOE_CLOCK_BOOTTIME:
+            hostClockId = CLOCK_BOOTTIME;
+            break;
+        default:
+            hostClockId = 0; // TODO review
+    }
+    return hostClockId;
+}
+
+oboe_nanoseconds_t Oboe_getNanoseconds(oboe_clockid_t clockid)
+{
+    clockid_t hostClockId = OboeConvert_fromOboeClockId(clockid);
+    return AudioClock::getNanoseconds(hostClockId);
+}
+
+OBOE_API oboe_result_t OboeStream_getSampleRate(OboeStream stream, oboe_sample_rate_t *sampleRate)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(sampleRate);
+    *sampleRate = audioStream->getSampleRate();
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStream_getSamplesPerFrame(OboeStream stream, int32_t *samplesPerFrame)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(samplesPerFrame);
+    *samplesPerFrame = audioStream->getSamplesPerFrame();
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStream_getState(OboeStream stream, oboe_stream_state_t *state)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(state);
+    *state = audioStream->getState();
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStream_getFormat(OboeStream stream, oboe_audio_format_t *format)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(format);
+    *format = audioStream->getFormat();
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStream_setBufferSize(OboeStream stream,
+                                                oboe_size_frames_t requestedFrames,
+                                                oboe_size_frames_t *actualFrames)
+{
+    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    return audioStream->setBufferSize(requestedFrames, actualFrames);
+}
+
+OBOE_API oboe_result_t OboeStream_getBufferSize(OboeStream stream, oboe_size_frames_t *frames)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(frames);
+    *frames = audioStream->getBufferSize();
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStream_getDirection(OboeStream stream, int32_t *direction)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(direction);
+    *direction = audioStream->getDirection();
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStream_getFramesPerBurst(OboeStream stream,
+                                                    oboe_size_frames_t *framesPerBurst)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(framesPerBurst);
+    *framesPerBurst = audioStream->getFramesPerBurst();
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStream_getBufferCapacity(OboeStream stream,
+                                           oboe_size_frames_t *capacity)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(capacity);
+    *capacity = audioStream->getBufferCapacity();
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStream_getXRunCount(OboeStream stream, int32_t *xRunCount)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(xRunCount);
+    *xRunCount = audioStream->getXRunCount();
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStream_getDeviceId(OboeStream stream,
+                                                 oboe_device_id_t *deviceId)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(deviceId);
+    *deviceId = audioStream->getDeviceId();
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStream_getSharingMode(OboeStream stream,
+                                                 oboe_sharing_mode_t *sharingMode)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(sharingMode);
+    *sharingMode = audioStream->getSharingMode();
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStream_getFramesWritten(OboeStream stream,
+                                                   oboe_position_frames_t *frames)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(frames);
+    *frames = audioStream->getFramesWritten();
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStream_getFramesRead(OboeStream stream, oboe_position_frames_t *frames)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(frames);
+    *frames = audioStream->getFramesRead();
+    return OBOE_OK;
+}
+
+OBOE_API oboe_result_t OboeStream_getTimestamp(OboeStream stream,
+                                      oboe_clockid_t clockid,
+                                      oboe_position_frames_t *framePosition,
+                                      oboe_nanoseconds_t *timeNanoseconds)
+{
+    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    if (framePosition == nullptr) {
+        return OBOE_ERROR_NULL;
+    } else if (timeNanoseconds == nullptr) {
+        return OBOE_ERROR_NULL;
+    } else if (clockid != OBOE_CLOCK_MONOTONIC && clockid != OBOE_CLOCK_BOOTTIME) {
+        return OBOE_ERROR_ILLEGAL_ARGUMENT;
+    }
+
+    clockid_t hostClockId = OboeConvert_fromOboeClockId(clockid);
+    return audioStream->getTimestamp(hostClockId, framePosition, timeNanoseconds);
+}
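
Because clients hold opaque OboeStreamBuilder and OboeStream handles rather than pointers, the whole lifecycle runs through the functions above: create a builder, open a stream, use it, then delete the builder handle and close the stream handle. A minimal sketch under that assumption, with error handling abbreviated and the sample data left hypothetical:

```cpp
#include <oboe/OboeAudio.h>

// Minimal sketch: open an output stream, write one buffer, then tear everything down.
oboe_result_t playOnce(const float *samples, oboe_size_frames_t numFrames) {
    OboeStreamBuilder builder;
    OboeStream stream;

    oboe_result_t result = Oboe_createStreamBuilder(&builder);
    if (result != OBOE_OK) return result;

    OboeStreamBuilder_setDirection(builder, OBOE_DIRECTION_OUTPUT);
    OboeStreamBuilder_setSampleRate(builder, 48000);   // illustrative rate

    result = OboeStreamBuilder_openStream(builder, &stream);
    OboeStreamBuilder_delete(builder);                 // the builder handle is no longer needed
    if (result != OBOE_OK) return result;

    OboeStream_requestStart(stream);
    OboeStream_write(stream, samples, numFrames,
                     1000 * 1000 * 1000LL);            // 1 s timeout in nanoseconds
    OboeStream_requestStop(stream);
    return OboeStream_close(stream);                   // removes the handle and deletes the stream
}
```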
diff --git a/media/liboboe/src/core/README.md b/media/liboboe/src/core/README.md
new file mode 100644
index 0000000..dd99286
--- /dev/null
+++ b/media/liboboe/src/core/README.md
@@ -0,0 +1,2 @@
+The core folder contains the essential Oboe files common to all implementations.
+OboeAudio.cpp contains the 'C' API.
diff --git a/media/liboboe/src/core/VersionExperiment.txt b/media/liboboe/src/core/VersionExperiment.txt
new file mode 100644
index 0000000..071239b
--- /dev/null
+++ b/media/liboboe/src/core/VersionExperiment.txt
@@ -0,0 +1,55 @@
+
+// TODO Experiment with versioning. This may be removed or changed dramatically.
+// Please ignore for now. Do not review.
+#define OBOE_VERSION_EXPERIMENT  0
+#if OBOE_VERSION_EXPERIMENT
+
+#define OBOE_EARLIEST_SUPPORTED_VERSION  1
+#define OBOE_CURRENT_VERSION  2
+
+typedef struct OboeInterface_s {
+    int32_t size; // do not use size_t because its size can vary
+    int32_t version;
+    int32_t reserved1;
+    void *  reserved2;
+    oboe_result_t (*createStreamBuilder)(OboeStreamBuilder *);
+} OboeInterface_t;
+
+OboeInterface_t s_oboe_template = {
+        .size = sizeof(OboeInterface_t),
+        .version = OBOE_CURRENT_VERSION,
+        .reserved1 = 0,
+        .reserved2 = NULL,
+        .createStreamBuilder = Oboe_createStreamBuilder
+};
+
+oboe_result_t Oboe_Unimplemented(OboeInterface_t *oboe) {
+    (void) oboe;
+    return OBOE_ERROR_UNIMPLEMENTED;
+}
+
+typedef oboe_result_t (*OboeFunction_t)(OboeInterface_t *oboe);
+
+int32_t Oboe_Initialize(OboeInterface_t *oboe, uint32_t flags) {
+    if (oboe->version < OBOE_EARLIEST_SUPPORTED_VERSION) {
+        return OBOE_ERROR_INCOMPATIBLE;
+    }
+    // Fill in callers vector table.
+    uint8_t *start = (uint8_t*)&oboe->reserved1;
+    uint8_t *end;
+    if (oboe->size <= s_oboe_template.size) {
+        end = ((uint8_t *)oboe) + oboe->size;
+    } else {
+        end = ((uint8_t *)oboe) + s_oboe_template.size;
+        // Assume the rest of the structure is vectors.
+        // Point them all to Oboe_Unimplemented().
+        // Point to first vector past end of the known structure.
+        OboeFunction_t *next = (OboeFunction_t*)end;
+        while ((((uint8_t *)next) - ((uint8_t *)oboe)) < oboe->size) {
+            *next++ = Oboe_Unimplemented;
+        }
+    }
+    memcpy(&oboe->reserved1, &s_oboe_template.reserved1, end - start);
+    return OBOE_OK;
+}
+#endif /* OBOE_VERSION_EXPERIMENT -------------------------- */
diff --git a/media/liboboe/src/fifo/FifoBuffer.cpp b/media/liboboe/src/fifo/FifoBuffer.cpp
new file mode 100644
index 0000000..c5489f1
--- /dev/null
+++ b/media/liboboe/src/fifo/FifoBuffer.cpp
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstring>
+#include <unistd.h>
+
+#define LOG_TAG "FifoBuffer"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include "FifoControllerBase.h"
+#include "FifoController.h"
+#include "FifoControllerIndirect.h"
+#include "FifoBuffer.h"
+
+FifoBuffer::FifoBuffer(int32_t bytesPerFrame, fifo_frames_t capacityInFrames)
+        : mFrameCapacity(capacityInFrames)
+        , mBytesPerFrame(bytesPerFrame)
+        , mStorage(nullptr)
+        , mFramesReadCount(0)
+        , mFramesUnderrunCount(0)
+        , mUnderrunCount(0)
+{
+    // TODO Handle possible failures to allocate. Move out of constructor?
+    mFifo = new FifoController(capacityInFrames, capacityInFrames);
+    // allocate buffer
+    int32_t bytesPerBuffer = bytesPerFrame * capacityInFrames;
+    mStorage = new uint8_t[bytesPerBuffer];
+    mStorageOwned = true;
+    ALOGD("FifoBuffer: capacityInFrames = %d, bytesPerFrame = %d",
+          capacityInFrames, bytesPerFrame);
+}
+
+FifoBuffer::FifoBuffer( int32_t   bytesPerFrame,
+                        fifo_frames_t   capacityInFrames,
+                        fifo_counter_t *  readIndexAddress,
+                        fifo_counter_t *  writeIndexAddress,
+                        void *  dataStorageAddress
+                        )
+        : mFrameCapacity(capacityInFrames)
+        , mBytesPerFrame(bytesPerFrame)
+        , mStorage(static_cast<uint8_t *>(dataStorageAddress))
+        , mFramesReadCount(0)
+        , mFramesUnderrunCount(0)
+        , mUnderrunCount(0)
+{
+    // TODO Handle possible failures to allocate. Move out of constructor?
+    mFifo = new FifoControllerIndirect(capacityInFrames,
+                                       capacityInFrames,
+                                       readIndexAddress,
+                                       writeIndexAddress);
+    mStorageOwned = false;
+    ALOGD("FifoProcessor: capacityInFrames = %d, bytesPerFrame = %d",
+          capacityInFrames, bytesPerFrame);
+}
+
+FifoBuffer::~FifoBuffer() {
+    if (mStorageOwned) {
+        delete[] mStorage;
+    }
+    delete mFifo;
+}
+
+
+int32_t FifoBuffer::convertFramesToBytes(fifo_frames_t frames) {
+    return frames * mBytesPerFrame;
+}
+
+fifo_frames_t FifoBuffer::read(void *buffer, fifo_frames_t numFrames) {
+    size_t numBytes;
+    fifo_frames_t framesAvailable = mFifo->getFullFramesAvailable();
+    fifo_frames_t framesToRead = numFrames;
+    // Is there enough data in the FIFO?
+    if (framesToRead > framesAvailable) {
+        framesToRead = framesAvailable;
+    }
+    if (framesToRead == 0) {
+        return 0;
+    }
+
+    fifo_frames_t readIndex = mFifo->getReadIndex();
+    uint8_t *destination = (uint8_t *) buffer;
+    uint8_t *source = &mStorage[convertFramesToBytes(readIndex)];
+    if ((readIndex + framesToRead) > mFrameCapacity) {
+        // read in two parts, first part here
+        fifo_frames_t frames1 = mFrameCapacity - readIndex;
+        numBytes = convertFramesToBytes(frames1);
+        memcpy(destination, source, numBytes);
+        destination += numBytes;
+        // read second part
+        source = &mStorage[0];
+        fifo_frames_t frames2 = framesToRead - frames1;
+        numBytes = convertFramesToBytes(frames2);
+        memcpy(destination, source, numBytes);
+    } else {
+        // just read in one shot
+        numBytes = convertFramesToBytes(framesToRead);
+        memcpy(destination, source, numBytes);
+    }
+    mFifo->advanceReadIndex(framesToRead);
+
+    return framesToRead;
+}
+
+fifo_frames_t FifoBuffer::write(const void *buffer, fifo_frames_t framesToWrite) {
+    fifo_frames_t framesAvailable = mFifo->getEmptyFramesAvailable();
+//    ALOGD("FifoBuffer::write() framesToWrite = %d, framesAvailable = %d",
+//         framesToWrite, framesAvailable);
+    if (framesToWrite > framesAvailable) {
+        framesToWrite = framesAvailable;
+    }
+    if (framesToWrite <= 0) {
+        return 0;
+    }
+
+    size_t numBytes;
+    fifo_frames_t writeIndex = mFifo->getWriteIndex();
+    int byteIndex = convertFramesToBytes(writeIndex);
+    const uint8_t *source = (const uint8_t *) buffer;
+    uint8_t *destination = &mStorage[byteIndex];
+    if ((writeIndex + framesToWrite) > mFrameCapacity) {
+        // write in two parts, first part here
+        fifo_frames_t frames1 = mFrameCapacity - writeIndex;
+        numBytes = convertFramesToBytes(frames1);
+        memcpy(destination, source, numBytes);
+//        ALOGD("FifoBuffer::write(%p to %p, numBytes = %d", source, destination, numBytes);
+        // write second part
+        source += convertFramesToBytes(frames1);
+        destination = &mStorage[0];
+        fifo_frames_t framesLeft = framesToWrite - frames1;
+        numBytes = convertFramesToBytes(framesLeft);
+//        ALOGD("FifoBuffer::write(%p to %p, numBytes = %d", source, destination, numBytes);
+        memcpy(destination, source, numBytes);
+    } else {
+        // just write in one shot
+        numBytes = convertFramesToBytes(framesToWrite);
+//        ALOGD("FifoBuffer::write(%p to %p, numBytes = %d", source, destination, numBytes);
+        memcpy(destination, source, numBytes);
+    }
+    mFifo->advanceWriteIndex(framesToWrite);
+
+    return framesToWrite;
+}
+
+fifo_frames_t FifoBuffer::readNow(void *buffer, fifo_frames_t numFrames) {
+    mLastReadSize = numFrames;
+    fifo_frames_t framesLeft = numFrames;
+    fifo_frames_t framesRead = read(buffer, numFrames);
+    framesLeft -= framesRead;
+    mFramesReadCount += framesRead;
+    mFramesUnderrunCount += framesLeft;
+    // Zero out any samples we could not set.
+    if (framesLeft > 0) {
+        mUnderrunCount++;
+        int32_t bytesToZero = convertFramesToBytes(framesLeft);
+        memset(buffer, 0, bytesToZero);
+    }
+
+    return framesRead;
+}
+
+fifo_frames_t FifoBuffer::getThreshold() {
+    return mFifo->getThreshold();
+}
+
+void FifoBuffer::setThreshold(fifo_frames_t threshold) {
+    mFifo->setThreshold(threshold);
+}
+
+fifo_frames_t FifoBuffer::getBufferCapacityInFrames() {
+    return mFifo->getCapacity();
+}
+
diff --git a/media/liboboe/src/fifo/FifoBuffer.h b/media/liboboe/src/fifo/FifoBuffer.h
new file mode 100644
index 0000000..faa9ae2
--- /dev/null
+++ b/media/liboboe/src/fifo/FifoBuffer.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FIFO_FIFO_BUFFER_H
+#define FIFO_FIFO_BUFFER_H
+
+#include <stdint.h>
+
+#include "FifoControllerBase.h"
+
+class FifoBuffer {
+public:
+    FifoBuffer(int32_t bytesPerFrame, fifo_frames_t capacityInFrames);
+
+    FifoBuffer(int32_t   bytesPerFrame,
+               fifo_frames_t   capacityInFrames,
+               fifo_counter_t * readCounterAddress,
+               fifo_counter_t * writeCounterAddress,
+               void * dataStorageAddress);
+
+    ~FifoBuffer();
+
+    int32_t convertFramesToBytes(fifo_frames_t frames);
+
+    fifo_frames_t read(void *destination, fifo_frames_t framesToRead);
+
+    fifo_frames_t write(const void *source, fifo_frames_t framesToWrite);
+
+    fifo_frames_t getThreshold();
+    void setThreshold(fifo_frames_t threshold);
+
+    fifo_frames_t getBufferCapacityInFrames();
+
+    fifo_frames_t readNow(void *buffer, fifo_frames_t numFrames);
+
+    int64_t getNextReadTime(int32_t frameRate);
+
+    int32_t getUnderrunCount() const { return mUnderrunCount; }
+
+    FifoControllerBase *getFifoControllerBase() { return mFifo; }
+
+    int32_t getBytesPerFrame() {
+        return mBytesPerFrame;
+    }
+
+    fifo_counter_t getReadCounter() {
+        return mFifo->getReadCounter();
+    }
+
+    void setReadCounter(fifo_counter_t n) {
+        mFifo->setReadCounter(n);
+    }
+
+    fifo_counter_t getWriteCounter() {
+        return mFifo->getWriteCounter();
+    }
+
+    void setWriteCounter(fifo_counter_t n) {
+        mFifo->setWriteCounter(n);
+    }
+
+private:
+    const fifo_frames_t mFrameCapacity;
+    const int32_t       mBytesPerFrame;
+    uint8_t *           mStorage;
+    bool                mStorageOwned; // did this object allocate the storage?
+    FifoControllerBase *mFifo;
+    fifo_counter_t      mFramesReadCount;
+    fifo_counter_t      mFramesUnderrunCount;
+    int32_t             mUnderrunCount; // need? just use frames
+    int32_t             mLastReadSize;
+};
+
+#endif //FIFO_FIFO_BUFFER_H
diff --git a/media/liboboe/src/fifo/FifoController.h b/media/liboboe/src/fifo/FifoController.h
new file mode 100644
index 0000000..7434634
--- /dev/null
+++ b/media/liboboe/src/fifo/FifoController.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FIFO_FIFO_CONTROLLER_H
+#define FIFO_FIFO_CONTROLLER_H
+
+#include <stdint.h>
+#include <atomic>
+
+#include "FifoControllerBase.h"
+
+/**
+ * A FIFO with counters contained in the class.
+ */
+class FifoController : public FifoControllerBase
+{
+public:
+    FifoController(fifo_counter_t bufferSize, fifo_counter_t threshold)
+    : FifoControllerBase(bufferSize, threshold)
+    , mReadCounter(0)
+    , mWriteCounter(0)
+    {}
+
+    virtual ~FifoController() {}
+
+    // TODO review use of memory barriers, probably incorrect
+    virtual fifo_counter_t getReadCounter() override {
+        return mReadCounter.load(std::memory_order_acquire);
+    }
+    virtual void setReadCounter(fifo_counter_t n) override {
+        mReadCounter.store(n, std::memory_order_release);
+    }
+    virtual fifo_counter_t getWriteCounter() override {
+        return mWriteCounter.load(std::memory_order_acquire);
+    }
+    virtual void setWriteCounter(fifo_counter_t n) override {
+        mWriteCounter.store(n, std::memory_order_release);
+    }
+
+private:
+    std::atomic<fifo_counter_t> mReadCounter;
+    std::atomic<fifo_counter_t> mWriteCounter;
+};
+
+
+#endif //FIFO_FIFO_CONTROLLER_H
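
The TODO above flags the memory ordering for review; the intent is the usual single-writer-per-counter pattern, where the side that owns a counter publishes its progress with a release store and the other side observes it with an acquire load before touching the corresponding frames. A small standalone illustration of that pairing, under the assumption of one producer thread and one consumer thread (not part of the class):

```cpp
#include <atomic>
#include <cstdint>

// Sketch of the intended publication pattern: each counter has exactly one writer.
static int16_t gFrames[8];
static std::atomic<int64_t> gWriteCounter{0};   // written only by the producer

void producerPush(int16_t value, int64_t alreadyWritten) {
    gFrames[alreadyWritten % 8] = value;                                  // fill the frame first
    gWriteCounter.store(alreadyWritten + 1, std::memory_order_release);   // then publish it
}

bool consumerPop(int64_t alreadyRead, int16_t *out) {
    if (gWriteCounter.load(std::memory_order_acquire) <= alreadyRead) {
        return false;                                                     // nothing published yet
    }
    *out = gFrames[alreadyRead % 8];                                      // safe after the acquire load
    return true;
}
```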
diff --git a/media/liboboe/src/fifo/FifoControllerBase.cpp b/media/liboboe/src/fifo/FifoControllerBase.cpp
new file mode 100644
index 0000000..33a253e
--- /dev/null
+++ b/media/liboboe/src/fifo/FifoControllerBase.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "FifoControllerBase"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include "FifoControllerBase.h"
+
+FifoControllerBase::FifoControllerBase(fifo_frames_t capacity, fifo_frames_t threshold)
+        : mCapacity(capacity)
+        , mThreshold(threshold)
+{
+}
+
+FifoControllerBase::~FifoControllerBase() {
+}
+
+fifo_frames_t FifoControllerBase::getFullFramesAvailable() {
+    return (fifo_frames_t) (getWriteCounter() - getReadCounter());
+}
+
+fifo_frames_t FifoControllerBase::getReadIndex() {
+    // % works with non-power of two sizes
+    return (fifo_frames_t) (getReadCounter() % mCapacity);
+}
+
+void FifoControllerBase::advanceReadIndex(fifo_frames_t numFrames) {
+    setReadCounter(getReadCounter() + numFrames);
+}
+
+fifo_frames_t FifoControllerBase::getEmptyFramesAvailable() {
+    return (int32_t)(mThreshold - getFullFramesAvailable());
+}
+
+fifo_frames_t FifoControllerBase::getWriteIndex() {
+    // % works with non-power of two sizes
+    return (fifo_frames_t) (getWriteCounter() % mCapacity);
+}
+
+void FifoControllerBase::advanceWriteIndex(fifo_frames_t numFrames) {
+    setWriteCounter(getWriteCounter() + numFrames);
+}
+
+void FifoControllerBase::setThreshold(fifo_frames_t threshold) {
+    mThreshold = threshold;
+}
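
The counters only ever grow; the read and write indices wrap with a modulo, while getFullFramesAvailable() and getEmptyFramesAvailable() come from the difference of the counters and the threshold. A small worked example of that arithmetic using the concrete FifoController subclass (values chosen only for illustration):

```cpp
#include "FifoController.h"

// Worked example of the counter arithmetic; values are illustrative only.
void counterMathExample() {
    FifoController fifo(8 /* capacity */, 8 /* threshold */);
    fifo.setWriteCounter(13);            // the writer has produced 13 frames in total
    fifo.setReadCounter(10);             // the reader has consumed 10 frames in total

    fifo.getFullFramesAvailable();       // 13 - 10 = 3 frames ready to read
    fifo.getEmptyFramesAvailable();      // 8 - 3  = 5 frames of free space
    fifo.getReadIndex();                 // 10 % 8 = 2, index of the next frame to read
    fifo.getWriteIndex();                // 13 % 8 = 5, index of the next frame to write
}
```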
diff --git a/media/liboboe/src/fifo/FifoControllerBase.h b/media/liboboe/src/fifo/FifoControllerBase.h
new file mode 100644
index 0000000..c543519
--- /dev/null
+++ b/media/liboboe/src/fifo/FifoControllerBase.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FIFO_FIFO_CONTROLLER_BASE_H
+#define FIFO_FIFO_CONTROLLER_BASE_H
+
+#include <stdint.h>
+
+typedef int64_t fifo_counter_t;
+typedef int32_t fifo_frames_t;
+
+/**
+ * Manage the read/write indices of a circular buffer.
+ *
+ * The caller is responsible for reading and writing the actual data.
+ * Note that the span of available frames may not be contiguous. They
+ * may wrap around from the end to the beginning of the buffer. In that
+ * case the data must be read or written in at least two blocks of frames.
+ *
+ */
+class FifoControllerBase {
+
+public:
+    /**
+     * Constructor for FifoControllerBase
+     * @param capacity Total size of the circular buffer in frames.
+     * @param threshold Number of frames to fill. Must not exceed the capacity.
+     */
+    FifoControllerBase(fifo_frames_t capacity, fifo_frames_t threshold);
+
+    virtual ~FifoControllerBase();
+
+    // Abstract methods to be implemented in subclasses.
+    /**
+     * @return Counter used by the reader of the FIFO.
+     */
+    virtual fifo_counter_t getReadCounter() = 0;
+
+    /**
+     * This is normally only used internally.
+     * @param count Number of frames that have been read.
+     */
+    virtual void setReadCounter(fifo_counter_t count) = 0;
+
+    /**
+     * @return Counter used by the writer of the FIFO.
+     */
+    virtual fifo_counter_t getWriteCounter() = 0;
+
+    /**
+     * This is normally only used internally.
+     * @param count Number of frames that have been written.
+     */
+    virtual void setWriteCounter(fifo_counter_t count) = 0;
+
+    /**
+     * This may be negative if an unthrottled reader has read beyond the available data.
+     * @return number of valid frames available to read. Never read more than this.
+     */
+    fifo_frames_t getFullFramesAvailable();
+
+    /**
+     * The index in a circular buffer of the next frame to read.
+     */
+    fifo_frames_t getReadIndex();
+
+    /**
+     * @param numFrames number of frames to advance the read index
+     */
+    void advanceReadIndex(fifo_frames_t numFrames);
+
+    /**
+     * @return number of frames that can be written. Never write more than this.
+     */
+    fifo_frames_t getEmptyFramesAvailable();
+
+    /**
+     * The index in a circular buffer of the next frame to write.
+     */
+    fifo_frames_t getWriteIndex();
+
+    /**
+     * @param numFrames number of frames to advance the write index
+     */
+    void advanceWriteIndex(fifo_frames_t numFrames);
+
+    /**
+     * You can request that the buffer not be filled above a maximum
+     * number of frames.
+     * @param threshold effective size of the buffer
+     */
+    void setThreshold(fifo_frames_t threshold);
+
+    fifo_frames_t getThreshold() const {
+        return mThreshold;
+    }
+
+    fifo_frames_t getCapacity() const {
+        return mCapacity;
+    }
+
+
+private:
+    fifo_frames_t mCapacity;
+    fifo_frames_t mThreshold;
+};
+
+#endif // FIFO_FIFO_CONTROLLER_BASE_H
diff --git a/media/liboboe/src/fifo/FifoControllerIndirect.h b/media/liboboe/src/fifo/FifoControllerIndirect.h
new file mode 100644
index 0000000..1aaf9ea
--- /dev/null
+++ b/media/liboboe/src/fifo/FifoControllerIndirect.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FIFO_FIFO_CONTROLLER_INDIRECT_H
+#define FIFO_FIFO_CONTROLLER_INDIRECT_H
+
+#include <stdint.h>
+#include <atomic>
+
+#include "FifoControllerBase.h"
+
+/**
+ * A FifoControllerBase with counters external to the class.
+ *
+ * The actual counters may be stored in separate regions of shared memory
+ * with different access rights.
+ */
+class FifoControllerIndirect : public FifoControllerBase {
+
+public:
+    FifoControllerIndirect(fifo_frames_t capacity,
+                           fifo_frames_t threshold,
+                           fifo_counter_t * readCounterAddress,
+                           fifo_counter_t * writeCounterAddress)
+        : FifoControllerBase(capacity, threshold)
+        , mReadCounterAddress((std::atomic<fifo_counter_t> *) readCounterAddress)
+        , mWriteCounterAddress((std::atomic<fifo_counter_t> *) writeCounterAddress)
+    {
+        setReadCounter(0);
+        setWriteCounter(0);
+    }
+    virtual ~FifoControllerIndirect() {};
+
+    // TODO review use of memory barriers, probably incorrect
+    virtual fifo_counter_t getReadCounter() override {
+        return mReadCounterAddress->load(std::memory_order_acquire);
+    }
+
+    virtual void setReadCounter(fifo_counter_t count) override {
+        mReadCounterAddress->store(count, std::memory_order_release);
+    }
+
+    virtual fifo_counter_t getWriteCounter() override {
+        return mWriteCounterAddress->load(std::memory_order_acquire);
+    }
+
+    virtual void setWriteCounter(fifo_counter_t count) override {
+        mWriteCounterAddress->store(count, std::memory_order_release);
+    }
+
+private:
+    std::atomic<fifo_counter_t> * mReadCounterAddress;
+    std::atomic<fifo_counter_t> * mWriteCounterAddress;
+};
+
+#endif //FIFO_FIFO_CONTROLLER_INDIRECT_H
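
FifoControllerIndirect exists so the two counters (and the data they guard) can live in caller-provided memory, for example a region shared between processes; FifoBuffer's second constructor wires that up. A hedged sketch of that wiring, with the layout of the shared region entirely hypothetical:

```cpp
#include "FifoBuffer.h"

// Hypothetical shared region; in practice this would come from mapped shared memory.
struct SharedFifoRegion {
    fifo_counter_t readCounter;
    fifo_counter_t writeCounter;
    uint8_t        data[256 * 4];        // 256 frames of 4 bytes each
};

FifoBuffer *attachToSharedRegion(SharedFifoRegion *region) {
    // The FifoBuffer does not own this storage; it only indexes into it.
    return new FifoBuffer(4 /* bytesPerFrame */, 256 /* capacityInFrames */,
                          &region->readCounter,
                          &region->writeCounter,
                          region->data);
}
```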
diff --git a/media/liboboe/src/fifo/README.md b/media/liboboe/src/fifo/README.md
new file mode 100644
index 0000000..61ffbae
--- /dev/null
+++ b/media/liboboe/src/fifo/README.md
@@ -0,0 +1,9 @@
+Simple atomic FIFO for passing data between threads or processes.
+This does not require mutexes.
+
+One thread modifies the readCounter and the other thread modifies the writeCounter.
+
+TODO The internal low-level implementation might be merged in some form with audio_utils fifo
+and/or FMQ [after confirming that requirements are met].
+The higher-level parts related to Oboe's use of the FIFO, such as the API, fds, the relative
+location of indices and data buffer, mapping, and allocation of memory, will probably be kept as-is.
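
A minimal single-producer/single-consumer sketch of the API described above, with the frame size and counts chosen only for illustration; in real use the two ends run on different threads (or in different processes via the external-counter constructor):

```cpp
#include "FifoBuffer.h"

// Single-producer / single-consumer sketch; normally each side runs on its own thread.
void fifoSketch() {
    const int32_t bytesPerFrame = sizeof(float) * 2;   // hypothetical stereo float frames
    FifoBuffer fifo(bytesPerFrame, 256 /* capacity in frames */);

    float produced[64 * 2] = {};
    fifo.write(produced, 64);      // producer side: writes up to 64 frames, returns frames written

    float consumed[64 * 2];
    fifo.readNow(consumed, 64);    // consumer side: reads what is available and zero-fills the rest
}
```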
diff --git a/media/liboboe/src/legacy/AudioStreamRecord.cpp b/media/liboboe/src/legacy/AudioStreamRecord.cpp
new file mode 100644
index 0000000..5854974
--- /dev/null
+++ b/media/liboboe/src/legacy/AudioStreamRecord.cpp
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioStreamRecord"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <utils/String16.h>
+#include <media/AudioRecord.h>
+#include <oboe/OboeAudio.h>
+
+#include "AudioClock.h"
+#include "AudioStreamRecord.h"
+
+using namespace android;
+using namespace oboe;
+
+AudioStreamRecord::AudioStreamRecord()
+    : AudioStream()
+{
+}
+
+AudioStreamRecord::~AudioStreamRecord()
+{
+    const oboe_stream_state_t state = getState();
+    bool bad = !(state == OBOE_STREAM_STATE_UNINITIALIZED || state == OBOE_STREAM_STATE_CLOSED);
+    ALOGE_IF(bad, "stream not closed, in state %d", state);
+}
+
+oboe_result_t AudioStreamRecord::open(const AudioStreamBuilder& builder)
+{
+    oboe_result_t result = OBOE_OK;
+
+    result = AudioStream::open(builder);
+    if (result != OBOE_OK) {
+        return result;
+    }
+
+    // Try to create an AudioRecord
+
+    // TODO Support UNSPECIFIED in AudioRecord. For now, use stereo if unspecified.
+    int32_t samplesPerFrame = (getSamplesPerFrame() == OBOE_UNSPECIFIED)
+                              ? 2 : getSamplesPerFrame();
+    audio_channel_mask_t channelMask = audio_channel_in_mask_from_count(samplesPerFrame);
+
+    AudioRecord::callback_t callback = nullptr;
+    audio_input_flags_t flags = (audio_input_flags_t) AUDIO_INPUT_FLAG_NONE;
+
+    // TODO implement an unspecified Android format then use that.
+    audio_format_t format = (getFormat() == OBOE_UNSPECIFIED)
+            ? AUDIO_FORMAT_PCM_FLOAT
+            : OboeConvert_oboeToAndroidDataFormat(getFormat());
+
+    mAudioRecord = new AudioRecord(
+            AUDIO_SOURCE_DEFAULT,
+            getSampleRate(),
+            format,
+            channelMask,
+
+            mOpPackageName, // const String16& opPackageName TODO does not compile
+
+            0,    //    size_t frameCount = 0,
+            callback,
+            nullptr, //    void* user = nullptr,
+            0,    //    uint32_t notificationFrames = 0,
+            AUDIO_SESSION_ALLOCATE,
+            AudioRecord::TRANSFER_DEFAULT,
+            flags
+             //   int uid = -1,
+             //   pid_t pid = -1,
+             //   const audio_attributes_t* pAttributes = nullptr
+             );
+
+    // Did we get a valid record?
+    status_t status = mAudioRecord->initCheck();
+    if (status != OK) {
+        close();
+        ALOGE("AudioStreamRecord::open(), initCheck() returned %d", status);
+        return OboeConvert_androidToOboeError(status);
+    }
+
+    // Get the actual rate.
+    setSampleRate(mAudioRecord->getSampleRate());
+    setSamplesPerFrame(mAudioRecord->channelCount());
+    setFormat(OboeConvert_androidToOboeDataFormat(mAudioRecord->format()));
+
+    setState(OBOE_STREAM_STATE_OPEN);
+
+    return OBOE_OK;
+}
+
+oboe_result_t AudioStreamRecord::close()
+{
+    // TODO add close() or release() to AudioRecord API then call it from here
+    if (getState() != OBOE_STREAM_STATE_CLOSED) {
+        mAudioRecord.clear();
+        setState(OBOE_STREAM_STATE_CLOSED);
+    }
+    return OBOE_OK;
+}
+
+oboe_result_t AudioStreamRecord::requestStart()
+{
+    if (mAudioRecord.get() == nullptr) {
+        return OBOE_ERROR_INVALID_STATE;
+    }
+    // Get current position so we can detect when the track is playing.
+    status_t err = mAudioRecord->getPosition(&mPositionWhenStarting);
+    if (err != OK) {
+        return OboeConvert_androidToOboeError(err);
+    }
+    err = mAudioRecord->start();
+    if (err != OK) {
+        return OboeConvert_androidToOboeError(err);
+    } else {
+        setState(OBOE_STREAM_STATE_STARTING);
+    }
+    return OBOE_OK;
+}
+
+oboe_result_t AudioStreamRecord::requestPause()
+{
+    return OBOE_ERROR_UNIMPLEMENTED;
+}
+
+oboe_result_t AudioStreamRecord::requestFlush() {
+    return OBOE_ERROR_UNIMPLEMENTED;
+}
+
+oboe_result_t AudioStreamRecord::requestStop() {
+    if (mAudioRecord.get() == nullptr) {
+        return OBOE_ERROR_INVALID_STATE;
+    }
+    setState(OBOE_STREAM_STATE_STOPPING);
+    mAudioRecord->stop();
+    return OBOE_OK;
+}
+
+oboe_result_t AudioStreamRecord::updateState()
+{
+    oboe_result_t result = OBOE_OK;
+    oboe_wrapping_frames_t position;
+    status_t err;
+    switch (getState()) {
+    // TODO add better state visibility to AudioRecord
+    case OBOE_STREAM_STATE_STARTING:
+        err = mAudioRecord->getPosition(&position);
+        if (err != OK) {
+            result = OboeConvert_androidToOboeError(err);
+        } else if (position != mPositionWhenStarting) {
+            setState(OBOE_STREAM_STATE_STARTED);
+        }
+        break;
+    case OBOE_STREAM_STATE_STOPPING:
+        if (mAudioRecord->stopped()) {
+            setState(OBOE_STREAM_STATE_STOPPED);
+        }
+        break;
+    default:
+        break;
+    }
+    return result;
+}
+
+oboe_result_t AudioStreamRecord::read(void *buffer,
+                                      oboe_size_frames_t numFrames,
+                                      oboe_nanoseconds_t timeoutNanoseconds)
+{
+    oboe_size_frames_t bytesPerFrame = getBytesPerFrame();
+    oboe_size_bytes_t numBytes;
+    oboe_result_t result = OboeConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
+    if (result != OBOE_OK) {
+        return result;
+    }
+
+    // TODO add timeout to AudioRecord
+    bool blocking = (timeoutNanoseconds > 0);
+    ssize_t bytesRead = mAudioRecord->read(buffer, numBytes, blocking);
+    if (bytesRead == WOULD_BLOCK) {
+        return 0;
+    } else if (bytesRead < 0) {
+        return OboeConvert_androidToOboeError(bytesRead);
+    }
+    oboe_size_frames_t framesRead = (oboe_size_frames_t)(bytesRead / bytesPerFrame);
+    return (oboe_result_t) framesRead;
+}
+
+oboe_result_t AudioStreamRecord::setBufferSize(oboe_size_frames_t requestedFrames,
+                                             oboe_size_frames_t *actualFrames)
+{
+    *actualFrames = getBufferCapacity();
+    return OBOE_OK;
+}
+
+oboe_size_frames_t AudioStreamRecord::getBufferSize() const
+{
+    return getBufferCapacity(); // TODO implement in AudioRecord?
+}
+
+oboe_size_frames_t AudioStreamRecord::getBufferCapacity() const
+{
+    return static_cast<oboe_size_frames_t>(mAudioRecord->frameCount());
+}
+
+int32_t AudioStreamRecord::getXRunCount() const
+{
+    return OBOE_ERROR_UNIMPLEMENTED; // TODO implement when AudioRecord supports it
+}
+
+oboe_size_frames_t AudioStreamRecord::getFramesPerBurst() const
+{
+    return 192; // TODO add query to AudioRecord.cpp
+}
+
+// TODO implement getTimestamp
+
diff --git a/media/liboboe/src/legacy/AudioStreamRecord.h b/media/liboboe/src/legacy/AudioStreamRecord.h
new file mode 100644
index 0000000..02ff220
--- /dev/null
+++ b/media/liboboe/src/legacy/AudioStreamRecord.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LEGACY_AUDIOSTREAMRECORD_H
+#define LEGACY_AUDIOSTREAMRECORD_H
+
+#include <media/AudioRecord.h>
+#include <oboe/OboeAudio.h>
+
+#include "AudioStreamBuilder.h"
+#include "AudioStream.h"
+#include "OboeLegacy.h"
+
+namespace oboe {
+
+/**
+ * Internal stream that uses the legacy AudioRecord path.
+ */
+class AudioStreamRecord : public AudioStream {
+public:
+    AudioStreamRecord();
+
+    virtual ~AudioStreamRecord();
+
+    virtual oboe_result_t open(const AudioStreamBuilder & builder) override;
+    virtual oboe_result_t close() override;
+
+    virtual oboe_result_t requestStart() override;
+    virtual oboe_result_t requestPause() override;
+    virtual oboe_result_t requestFlush() override;
+    virtual oboe_result_t requestStop() override;
+
+    virtual oboe_result_t getTimestamp(clockid_t clockId,
+                                       oboe_position_frames_t *framePosition,
+                                       oboe_nanoseconds_t *timeNanoseconds) override {
+        return OBOE_ERROR_UNIMPLEMENTED; // TODO
+    }
+
+    virtual oboe_result_t read(void *buffer,
+                             oboe_size_frames_t numFrames,
+                             oboe_nanoseconds_t timeoutNanoseconds) override;
+
+    virtual oboe_result_t setBufferSize(oboe_size_frames_t requestedFrames,
+                                             oboe_size_frames_t *actualFrames) override;
+
+    virtual oboe_size_frames_t getBufferSize() const override;
+
+    virtual oboe_size_frames_t getBufferCapacity() const override;
+
+    virtual int32_t getXRunCount() const override;
+
+    virtual oboe_size_frames_t getFramesPerBurst() const override;
+
+    virtual oboe_result_t updateState() override;
+
+private:
+    android::sp<android::AudioRecord> mAudioRecord;
+    // TODO add 64-bit position reporting to AudioRecord and use it.
+    oboe_wrapping_frames_t   mPositionWhenStarting = 0;
+    android::String16        mOpPackageName;
+};
+
+} /* namespace oboe */
+
+#endif /* LEGACY_AUDIOSTREAMRECORD_H */
diff --git a/media/liboboe/src/legacy/AudioStreamTrack.cpp b/media/liboboe/src/legacy/AudioStreamTrack.cpp
new file mode 100644
index 0000000..b2c4ee1
--- /dev/null
+++ b/media/liboboe/src/legacy/AudioStreamTrack.cpp
@@ -0,0 +1,297 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioStreamTrack"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <media/AudioTrack.h>
+
+#include <oboe/OboeAudio.h>
+#include "AudioClock.h"
+#include "AudioStreamTrack.h"
+
+
+using namespace android;
+using namespace oboe;
+
+/*
+ * Create a stream that uses the AudioTrack.
+ */
+AudioStreamTrack::AudioStreamTrack()
+    : AudioStream()
+{
+}
+
+AudioStreamTrack::~AudioStreamTrack()
+{
+    const oboe_stream_state_t state = getState();
+    bool bad = !(state == OBOE_STREAM_STATE_UNINITIALIZED || state == OBOE_STREAM_STATE_CLOSED);
+    ALOGE_IF(bad, "stream not closed, in state %d", state);
+}
+
+oboe_result_t AudioStreamTrack::open(const AudioStreamBuilder& builder)
+{
+    oboe_result_t result = OBOE_OK;
+
+    result = AudioStream::open(builder);
+    if (result != OK) {
+        return result;
+    }
+
+    // Try to create an AudioTrack
+    // TODO Support UNSPECIFIED in AudioTrack. For now, use stereo if unspecified.
+    int32_t samplesPerFrame = (getSamplesPerFrame() == OBOE_UNSPECIFIED)
+                              ? 2 : getSamplesPerFrame();
+    audio_channel_mask_t channelMask = audio_channel_out_mask_from_count(samplesPerFrame);
+    ALOGD("AudioStreamTrack::open(), samplesPerFrame = %d, channelMask = 0x%08x",
+            samplesPerFrame, channelMask);
+
+    AudioTrack::callback_t callback = nullptr;
+    // TODO add more performance options
+    audio_output_flags_t flags = (audio_output_flags_t) AUDIO_OUTPUT_FLAG_FAST;
+    size_t frameCount = 0;
+    // TODO implement an unspecified AudioTrack format then use that.
+    audio_format_t format = (getFormat() == OBOE_UNSPECIFIED)
+            ? AUDIO_FORMAT_PCM_FLOAT
+            : OboeConvert_oboeToAndroidDataFormat(getFormat());
+
+    mAudioTrack = new AudioTrack(
+            (audio_stream_type_t) AUDIO_STREAM_MUSIC,
+            getSampleRate(),
+            format,
+            channelMask,
+            frameCount,
+            flags,
+            callback,
+            nullptr,    // user callback data
+            0,          // notificationFrames
+            AUDIO_SESSION_ALLOCATE,
+            AudioTrack::transfer_type::TRANSFER_SYNC // TODO - this does not allow FAST
+            );
+
+    // Did we get a valid track?
+    status_t status = mAudioTrack->initCheck();
+    ALOGD("AudioStreamTrack::open(), initCheck() returned %d", status);
+    // FIXME - this should work - if (status != NO_ERROR) {
+    //         But initCheck() is returning 1 !
+    if (status < 0) {
+        close();
+        ALOGE("AudioStreamTrack::open(), initCheck() returned %d", status);
+        return OboeConvert_androidToOboeError(status);
+    }
+
+    // Get the actual values from the AudioTrack.
+    setSamplesPerFrame(mAudioTrack->channelCount());
+    setSampleRate(mAudioTrack->getSampleRate());
+    setFormat(OboeConvert_androidToOboeDataFormat(mAudioTrack->format()));
+
+    setState(OBOE_STREAM_STATE_OPEN);
+
+    return OBOE_OK;
+}
+
+oboe_result_t AudioStreamTrack::close()
+{
+    // TODO maybe add close() or release() to AudioTrack API then call it from here
+    if (getState() != OBOE_STREAM_STATE_CLOSED) {
+        mAudioTrack.clear(); // TODO is this right?
+        setState(OBOE_STREAM_STATE_CLOSED);
+    }
+    return OBOE_OK;
+}
+
+oboe_result_t AudioStreamTrack::requestStart()
+{
+    if (mAudioTrack.get() == nullptr) {
+        return OBOE_ERROR_INVALID_STATE;
+    }
+    // Get current position so we can detect when the track is playing.
+    status_t err = mAudioTrack->getPosition(&mPositionWhenStarting);
+    if (err != OK) {
+        return OboeConvert_androidToOboeError(err);
+    }
+    err = mAudioTrack->start();
+    if (err != OK) {
+        return OboeConvert_androidToOboeError(err);
+    } else {
+        setState(OBOE_STREAM_STATE_STARTING);
+    }
+    return OBOE_OK;
+}
+
+oboe_result_t AudioStreamTrack::requestPause()
+{
+    if (mAudioTrack.get() == nullptr) {
+        return OBOE_ERROR_INVALID_STATE;
+    } else if (getState() != OBOE_STREAM_STATE_STARTING
+            && getState() != OBOE_STREAM_STATE_STARTED) {
+        ALOGE("requestPause(), called when state is %s", Oboe_convertStreamStateToText(getState()));
+        return OBOE_ERROR_INVALID_STATE;
+    }
+    setState(OBOE_STREAM_STATE_PAUSING);
+    mAudioTrack->pause();
+    status_t err = mAudioTrack->getPosition(&mPositionWhenPausing);
+    if (err != OK) {
+        return OboeConvert_androidToOboeError(err);
+    }
+    return OBOE_OK;
+}
+
+oboe_result_t AudioStreamTrack::requestFlush() {
+    if (mAudioTrack.get() == nullptr) {
+        return OBOE_ERROR_INVALID_STATE;
+    } else if (getState() != OBOE_STREAM_STATE_PAUSED) {
+        return OBOE_ERROR_INVALID_STATE;
+    }
+    setState(OBOE_STREAM_STATE_FLUSHING);
+    incrementFramesRead(getFramesWritten() - getFramesRead());
+    mAudioTrack->flush();
+    mFramesWritten.reset32();
+    return OBOE_OK;
+}
+
+oboe_result_t AudioStreamTrack::requestStop() {
+    if (mAudioTrack.get() == nullptr) {
+        return OBOE_ERROR_INVALID_STATE;
+    }
+    setState(OBOE_STREAM_STATE_STOPPING);
+    incrementFramesRead(getFramesWritten() - getFramesRead()); // TODO review
+    mAudioTrack->stop();
+    mFramesWritten.reset32();
+    return OBOE_OK;
+}
+
+oboe_result_t AudioStreamTrack::updateState()
+{
+    status_t err;
+    oboe_wrapping_frames_t position;
+    switch (getState()) {
+    // TODO add better state visibility to AudioTrack
+    case OBOE_STREAM_STATE_STARTING:
+        if (mAudioTrack->hasStarted()) {
+            setState(OBOE_STREAM_STATE_STARTED);
+        }
+        break;
+    case OBOE_STREAM_STATE_PAUSING:
+        if (mAudioTrack->stopped()) {
+            err = mAudioTrack->getPosition(&position);
+            if (err != OK) {
+                return OboeConvert_androidToOboeError(err);
+            } else if (position == mPositionWhenPausing) {
+                // Has stream really stopped advancing?
+                setState(OBOE_STREAM_STATE_PAUSED);
+            }
+            mPositionWhenPausing = position;
+        }
+        break;
+    case OBOE_STREAM_STATE_FLUSHING:
+        {
+            err = mAudioTrack->getPosition(&position);
+            if (err != OK) {
+                return OboeConvert_androidToOboeError(err);
+            } else if (position == 0) {
+                // Advance frames read to match written.
+                setState(OBOE_STREAM_STATE_FLUSHED);
+            }
+        }
+        break;
+    case OBOE_STREAM_STATE_STOPPING:
+        if (mAudioTrack->stopped()) {
+            setState(OBOE_STREAM_STATE_STOPPED);
+        }
+        break;
+    default:
+        break;
+    }
+    return OBOE_OK;
+}
+
+oboe_result_t AudioStreamTrack::write(const void *buffer,
+                                      oboe_size_frames_t numFrames,
+                                      oboe_nanoseconds_t timeoutNanoseconds)
+{
+    oboe_size_frames_t bytesPerFrame = getBytesPerFrame();
+    oboe_size_bytes_t numBytes;
+    oboe_result_t result = OboeConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
+    if (result != OBOE_OK) {
+        return result;
+    }
+
+    // TODO add timeout to AudioTrack
+    bool blocking = timeoutNanoseconds > 0;
+    ssize_t bytesWritten = mAudioTrack->write(buffer, numBytes, blocking);
+    if (bytesWritten == WOULD_BLOCK) {
+        return 0;
+    } else if (bytesWritten < 0) {
+        ALOGE("invalid write, returned %d", (int)bytesWritten);
+        return OboeConvert_androidToOboeError(bytesWritten);
+    }
+    oboe_size_frames_t framesWritten = (oboe_size_frames_t)(bytesWritten / bytesPerFrame);
+    incrementFramesWritten(framesWritten);
+    return framesWritten;
+}
+
+oboe_result_t AudioStreamTrack::setBufferSize(oboe_size_frames_t requestedFrames,
+                                             oboe_size_frames_t *actualFrames)
+{
+    ssize_t result = mAudioTrack->setBufferSizeInFrames(requestedFrames);
+    if (result != OK) {
+        return OboeConvert_androidToOboeError(result);
+    } else {
+        *actualFrames = result;
+        return OBOE_OK;
+    }
+}
+
+oboe_size_frames_t AudioStreamTrack::getBufferSize() const
+{
+    return static_cast<oboe_size_frames_t>(mAudioTrack->getBufferSizeInFrames());
+}
+
+oboe_size_frames_t AudioStreamTrack::getBufferCapacity() const
+{
+    return static_cast<oboe_size_frames_t>(mAudioTrack->frameCount());
+}
+
+int32_t AudioStreamTrack::getXRunCount() const
+{
+    return static_cast<int32_t>(mAudioTrack->getUnderrunCount());
+}
+
+oboe_size_frames_t AudioStreamTrack::getFramesPerBurst() const
+{
+    return 192; // TODO add query to AudioTrack.cpp
+}
+
+oboe_position_frames_t AudioStreamTrack::getFramesRead() {
+    oboe_wrapping_frames_t position;
+    status_t result;
+    switch (getState()) {
+    case OBOE_STREAM_STATE_STARTING:
+    case OBOE_STREAM_STATE_STARTED:
+    case OBOE_STREAM_STATE_STOPPING:
+        result = mAudioTrack->getPosition(&position);
+        if (result == OK) {
+            mFramesRead.update32(position);
+        }
+        break;
+    default:
+        break;
+    }
+    return AudioStream::getFramesRead();
+}
diff --git a/media/liboboe/src/legacy/AudioStreamTrack.h b/media/liboboe/src/legacy/AudioStreamTrack.h
new file mode 100644
index 0000000..8c40884
--- /dev/null
+++ b/media/liboboe/src/legacy/AudioStreamTrack.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LEGACY_AUDIOSTREAMTRACK_H
+#define LEGACY_AUDIOSTREAMTRACK_H
+
+#include <media/AudioTrack.h>
+#include <oboe/OboeAudio.h>
+
+#include "AudioStreamBuilder.h"
+#include "AudioStream.h"
+#include "OboeLegacy.h"
+
+namespace oboe {
+
+
+/**
+ * Internal stream that uses the legacy AudioTrack path.
+ */
+class AudioStreamTrack : public AudioStream {
+public:
+    AudioStreamTrack();
+
+    virtual ~AudioStreamTrack();
+
+
+    virtual oboe_result_t open(const AudioStreamBuilder & builder) override;
+    virtual oboe_result_t close() override;
+
+    virtual oboe_result_t requestStart() override;
+    virtual oboe_result_t requestPause() override;
+    virtual oboe_result_t requestFlush() override;
+    virtual oboe_result_t requestStop() override;
+
+    virtual oboe_result_t getTimestamp(clockid_t clockId,
+                                       oboe_position_frames_t *framePosition,
+                                       oboe_nanoseconds_t *timeNanoseconds) override {
+        return OBOE_ERROR_UNIMPLEMENTED; // TODO call getTimestamp(ExtendedTimestamp *timestamp);
+    }
+
+    virtual oboe_result_t write(const void *buffer,
+                             oboe_size_frames_t numFrames,
+                             oboe_nanoseconds_t timeoutNanoseconds) override;
+
+    virtual oboe_result_t setBufferSize(oboe_size_frames_t requestedFrames,
+                                             oboe_size_frames_t *actualFrames) override;
+    virtual oboe_size_frames_t getBufferSize() const override;
+    virtual oboe_size_frames_t getBufferCapacity() const override;
+    virtual oboe_size_frames_t getFramesPerBurst() const override;
+    virtual int32_t getXRunCount() const override;
+
+    virtual oboe_position_frames_t getFramesRead() override;
+
+    virtual oboe_result_t updateState() override;
+
+private:
+    android::sp<android::AudioTrack> mAudioTrack;
+    // TODO add 64-bit position reporting to AudioTrack and use it.
+    oboe_wrapping_frames_t           mPositionWhenStarting = 0;
+    oboe_wrapping_frames_t           mPositionWhenPausing = 0;
+};
+
+} /* namespace oboe */
+
+#endif /* LEGACY_AUDIOSTREAMTRACK_H */
diff --git a/media/libstagefright/CodecBase.cpp b/media/liboboe/src/legacy/OboeLegacy.h
similarity index 60%
rename from media/libstagefright/CodecBase.cpp
rename to media/liboboe/src/legacy/OboeLegacy.h
index f729d4d..6803837 100644
--- a/media/libstagefright/CodecBase.cpp
+++ b/media/liboboe/src/legacy/OboeLegacy.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 The Android Open Source Project
+ * Copyright 2016 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,25 +14,17 @@
  * limitations under the License.
  */
 
-//#define LOG_NDEBUG 0
-#define LOG_TAG "CodecBase"
+#ifndef OBOE_LEGACY_H
+#define OBOE_LEGACY_H
 
-#include <inttypes.h>
+#include <stdint.h>
+#include <oboe/OboeAudio.h>
 
-#include <media/stagefright/CodecBase.h>
+/**
+ * Common code for legacy classes.
+ */
 
-namespace android {
+/* AudioTrack uses a 32-bit frame counter that can wrap around in about a day. */
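+/* For example, at 48000 frames per second a 32-bit counter wraps after
+ * 2^32 / 48000 ~= 89478 seconds, roughly 24.9 hours. */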
+typedef uint32_t oboe_wrapping_frames_t;
 
-CodecBase::CodecBase() {
-}
-
-CodecBase::~CodecBase() {
-}
-
-CodecBase::PortDescription::PortDescription() {
-}
-
-CodecBase::PortDescription::~PortDescription() {
-}
-
-}  // namespace android
+#endif /* OBOE_LEGACY_H */
diff --git a/media/liboboe/src/legacy/README.md b/media/liboboe/src/legacy/README.md
new file mode 100644
index 0000000..b51c44b
--- /dev/null
+++ b/media/liboboe/src/legacy/README.md
@@ -0,0 +1,2 @@
+The legacy folder contains the classes that implement Oboe AudioStream on top of
+Android AudioTrack and AudioRecord.
diff --git a/media/liboboe/src/utility/AudioClock.h b/media/liboboe/src/utility/AudioClock.h
new file mode 100644
index 0000000..1a5c209
--- /dev/null
+++ b/media/liboboe/src/utility/AudioClock.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef UTILITY_AUDIOCLOCK_H
+#define UTILITY_AUDIOCLOCK_H
+
+#include <stdint.h>
+#include <time.h>
+
+#include <oboe/OboeDefinitions.h>
+
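+/**
+ * Static helpers for reading the monotonic clock and for sleeping.
+ *
+ * Example usage (illustrative only):
+ *
+ *     oboe_nanoseconds_t wakeTime = AudioClock::getNanoseconds()
+ *             + (20 * OBOE_NANOS_PER_MILLISECOND);
+ *     AudioClock::sleepUntilNanoTime(wakeTime);
+ */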
+class AudioClock {
+public:
+    static oboe_nanoseconds_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
+        struct timespec time;
+        int result = clock_gettime(clockId, &time);
+        if (result < 0) {
+            return -errno;
+        }
+        return (time.tv_sec * OBOE_NANOS_PER_SECOND) + time.tv_nsec;
+    }
+
+    /**
+     * Sleep until the specified absolute time.
+     * Return immediately with OBOE_ERROR_ILLEGAL_ARGUMENT if the specified
+     * nanoTime is not positive.
+     *
+     * @param nanoTime time to wake up
+     * @param clockId CLOCK_MONOTONIC is default
+     * @return 0, a negative error, or 1 if the call is interrupted by a signal handler (EINTR)
+     */
+    static int sleepUntilNanoTime(oboe_nanoseconds_t nanoTime,
+                                  clockid_t clockId = CLOCK_MONOTONIC) {
+        if (nanoTime > 0) {
+            struct timespec time;
+            time.tv_sec = nanoTime / OBOE_NANOS_PER_SECOND;
+            // Calculate the fractional nanoseconds. Avoids expensive % operation.
+            time.tv_nsec = nanoTime - (time.tv_sec * OBOE_NANOS_PER_SECOND);
+            int err = clock_nanosleep(clockId, TIMER_ABSTIME, &time, nullptr);
+            switch (err) {
+            case EINTR:
+                return 1;
+            case 0:
+                return 0;
+            default:
+                // Subtract because clock_nanosleep() returns a positive error number!
+                return 0 - err;
+            }
+        } else {
+            return OBOE_ERROR_ILLEGAL_ARGUMENT;
+        }
+    }
+
+    /**
+     * Sleep for the specified number of relative nanoseconds in real-time.
+     * Return immediately with 0 if nanoseconds is not positive.
+     *
+     * @param nanoseconds time to sleep
+     * @param clockId CLOCK_MONOTONIC is default
+     * @return 0, a negative error, or 1 if the call is interrupted by a signal handler (EINTR)
+     */
+    static int sleepForNanos(oboe_nanoseconds_t nanoseconds, clockid_t clockId = CLOCK_MONOTONIC) {
+        if (nanoseconds > 0) {
+            struct timespec time;
+            time.tv_sec = nanoseconds / OBOE_NANOS_PER_SECOND;
+            // Calculate the fractional nanoseconds. Avoids expensive % operation.
+            time.tv_nsec = nanoseconds - (time.tv_sec * OBOE_NANOS_PER_SECOND);
+            const int flags = 0; // documented as relative sleep
+            int err = clock_nanosleep(clockId, flags, &time, nullptr);
+            switch (err) {
+            case EINTR:
+                return 1;
+            case 0:
+                return 0;
+            default:
+                // Subtract because clock_nanosleep() returns a positive error number!
+                return 0 - err;
+            }
+        }
+        return 0;
+    }
+};
+
+
+#endif // UTILITY_AUDIOCLOCK_H
diff --git a/media/liboboe/src/utility/HandleTracker.cpp b/media/liboboe/src/utility/HandleTracker.cpp
new file mode 100644
index 0000000..bf5fb63
--- /dev/null
+++ b/media/liboboe/src/utility/HandleTracker.cpp
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#define LOG_TAG "OboeAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <new>
+#include <stdint.h>
+#include <assert.h>
+
+#include <oboe/OboeDefinitions.h>
+#include "HandleTracker.h"
+
+// Handle format is: tgggiiii
+// where each letter is 4 bits, t=type, g=generation, i=index
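+//
+// For example, the handle 0x35A40007 decodes as type = 0x3,
+// generation = 0x5A4 and index = 0x0007.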
+
+#define TYPE_SIZE           4
+#define GENERATION_SIZE    12
+#define INDEX_SIZE         16
+
+#define GENERATION_INVALID  0
+#define GENERATION_SHIFT    INDEX_SIZE
+
+#define TYPE_MASK           ((1 << TYPE_SIZE) - 1)
+#define GENERATION_MASK     ((1 << GENERATION_SIZE) - 1)
+#define INDEX_MASK          ((1 << INDEX_SIZE) - 1)
+
+#define SLOT_UNAVAILABLE    (-1)
+
+// A negative handle indicates an error, so the type field is limited to the bottom half.
+#define HANDLE_INVALID_TYPE TYPE_MASK
+
+static_assert(HANDLE_TRACKER_MAX_TYPES == (1 << (TYPE_SIZE - 1)),
+    "Mismatch between header and cpp.");
+static_assert(HANDLE_TRACKER_MAX_HANDLES == (1 << (INDEX_SIZE)),
+    "Mismatch between header and cpp.");
+
+HandleTracker::HandleTracker(uint32_t maxHandles)
+        : mMaxHandleCount(maxHandles)
+        , mHandleHeaders(nullptr)
+{
+    assert(maxHandles <= HANDLE_TRACKER_MAX_HANDLES);
+    // Allocate arrays to hold addresses and validation info.
+    mHandleAddresses = (handle_tracker_address_t *)
+            new(std::nothrow) handle_tracker_address_t[maxHandles];
+    if (mHandleAddresses != nullptr) {
+        mHandleHeaders = new(std::nothrow) handle_tracker_header_t[maxHandles];
+
+        if (mHandleHeaders != nullptr) {
+            handle_tracker_header_t initialHeader = buildHeader(0, 1);
+            // Initialize linked list of free nodes. nullptr terminated.
+            for (uint32_t i = 0; i < (maxHandles - 1); i++) {
+                mHandleAddresses[i] = &mHandleAddresses[i + 1]; // point to next node
+                mHandleHeaders[i] = initialHeader;
+            }
+            mNextFreeAddress = &mHandleAddresses[0];
+            mHandleAddresses[maxHandles - 1] = nullptr;
+            mHandleHeaders[maxHandles - 1] = 0;
+        } else {
+            delete[] mHandleAddresses; // so the class appears uninitialized
+            mHandleAddresses = nullptr;
+        }
+    }
+}
+
+HandleTracker::~HandleTracker()
+{
+    delete[] mHandleAddresses;
+    delete[] mHandleHeaders;
+}
+
+bool HandleTracker::isInitialized() const {
+    return mHandleAddresses != nullptr;
+}
+
+handle_tracker_slot_t HandleTracker::allocateSlot() {
+    void **allocated = mNextFreeAddress;
+    if (allocated == nullptr) {
+        return SLOT_UNAVAILABLE;
+    }
+    // Remove this slot from the head of the linked list.
+    mNextFreeAddress = (void **) *allocated;
+    return (allocated - mHandleAddresses);
+}
+
+handle_tracker_generation_t HandleTracker::nextGeneration(handle_tracker_slot_t index) {
+    handle_tracker_generation_t generation = (mHandleHeaders[index] + 1) & GENERATION_MASK;
+    // Avoid generation zero so that 0x0 is not a valid handle.
+    if (generation == GENERATION_INVALID) {
+        generation++;
+    }
+    return generation;
+}
+
+oboe_handle_t HandleTracker::put(handle_tracker_type_t type, void *address)
+{
+    if (type < 0 || type >= HANDLE_TRACKER_MAX_TYPES) {
+        return static_cast<oboe_handle_t>(OBOE_ERROR_OUT_OF_RANGE);
+    }
+    if (!isInitialized()) {
+        return static_cast<oboe_handle_t>(OBOE_ERROR_NO_MEMORY);
+    }
+
+    // Find an empty slot.
+    handle_tracker_slot_t index = allocateSlot();
+    if (index == SLOT_UNAVAILABLE) {
+        ALOGE("HandleTracker::put() no room for more handles");
+        return static_cast<oboe_handle_t>(OBOE_ERROR_NO_FREE_HANDLES);
+    }
+
+    // Cycle the generation counter so stale handles can be detected.
+    handle_tracker_generation_t generation = nextGeneration(index); // reads header table
+    handle_tracker_header_t inputHeader = buildHeader(type, generation);
+
+    // These two writes may need to be observed by other threads or cores during get().
+    mHandleHeaders[index] = inputHeader;
+    mHandleAddresses[index] = address;
+    // TODO use store release to enforce memory order with get()
+
+    // Generate a handle.
+    oboe_handle_t handle = buildHandle(inputHeader, index);
+
+    ALOGV("HandleTracker::put(%p) returns 0x%08x", address, handle);
+    return handle;
+}
+
+handle_tracker_slot_t HandleTracker::handleToIndex(handle_tracker_type_t type,
+                                                   oboe_handle_t handle) const
+{
+    // Validate the handle.
+    handle_tracker_slot_t index = extractIndex(handle);
+    if (index >= mMaxHandleCount) {
+        ALOGE("HandleTracker::handleToIndex() invalid handle = 0x%08X", handle);
+        return static_cast<oboe_handle_t>(OBOE_ERROR_INVALID_HANDLE);
+    }
+    handle_tracker_generation_t handleGeneration = extractGeneration(handle);
+    handle_tracker_header_t inputHeader = buildHeader(type, handleGeneration);
+    if (inputHeader != mHandleHeaders[index]) {
+        ALOGE("HandleTracker::handleToIndex() inputHeader = 0x%08x != mHandleHeaders[%d] = 0x%08x",
+             inputHeader, index, mHandleHeaders[index]);
+        return static_cast<oboe_handle_t>(OBOE_ERROR_INVALID_HANDLE);
+    }
+    return index;
+}
+
+handle_tracker_address_t HandleTracker::get(handle_tracker_type_t type, oboe_handle_t handle) const
+{
+    if (!isInitialized()) {
+        return nullptr;
+    }
+    handle_tracker_slot_t index = handleToIndex(type, handle);
+    if (index >= 0) {
+        return mHandleAddresses[index];
+    } else {
+        return nullptr;
+    }
+}
+
+handle_tracker_address_t HandleTracker::remove(handle_tracker_type_t type, oboe_handle_t handle) {
+    if (!isInitialized()) {
+        return nullptr;
+    }
+    handle_tracker_slot_t index = handleToIndex(type, handle);
+    if (index >= 0) {
+        handle_tracker_address_t address = mHandleAddresses[index];
+
+        // Invalidate the header type but preserve the generation count.
+        handle_tracker_generation_t generation = mHandleHeaders[index] & GENERATION_MASK;
+        handle_tracker_header_t inputHeader = buildHeader(
+                (handle_tracker_type_t) HANDLE_INVALID_TYPE, generation);
+        mHandleHeaders[index] = inputHeader;
+
+        // Add this slot to the head of the linked list.
+        mHandleAddresses[index] = mNextFreeAddress;
+        mNextFreeAddress = (handle_tracker_address_t *) &mHandleAddresses[index];
+        return address;
+    } else {
+        return nullptr;
+    }
+}
+
+oboe_handle_t HandleTracker::buildHandle(handle_tracker_header_t typeGeneration,
+                                         handle_tracker_slot_t index) {
+    return (oboe_handle_t)((typeGeneration << GENERATION_SHIFT) | (index & INDEX_MASK));
+}
+
+handle_tracker_header_t HandleTracker::buildHeader(handle_tracker_type_t type,
+                                    handle_tracker_generation_t generation)
+{
+    return (handle_tracker_header_t) (((type & TYPE_MASK) << GENERATION_SIZE)
+        | (generation & GENERATION_MASK));
+}
+
+handle_tracker_slot_t HandleTracker::extractIndex(oboe_handle_t handle)
+{
+    return handle & INDEX_MASK;
+}
+
+handle_tracker_generation_t HandleTracker::extractGeneration(oboe_handle_t handle)
+{
+    return (handle >> GENERATION_SHIFT) & GENERATION_MASK;
+}
diff --git a/media/liboboe/src/utility/HandleTracker.h b/media/liboboe/src/utility/HandleTracker.h
new file mode 100644
index 0000000..4c08321
--- /dev/null
+++ b/media/liboboe/src/utility/HandleTracker.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef UTILITY_HANDLETRACKER_H
+#define UTILITY_HANDLETRACKER_H
+
+#include <stdint.h>
+
+typedef int32_t  handle_tracker_type_t;       // what kind of handle
+typedef int32_t  handle_tracker_slot_t;       // index in allocation table
+typedef int32_t  handle_tracker_generation_t; // incremented when slot used
+typedef uint16_t handle_tracker_header_t;     // combines type and generation
+typedef void    *handle_tracker_address_t;    // address of something that is stored here
+
+#define HANDLE_TRACKER_MAX_TYPES    (1 << 3)
+#define HANDLE_TRACKER_MAX_HANDLES  (1 << 16)
+
+/**
+ * Represent Objects using an integer handle that can be used with Java.
+ * This also makes the 'C' ABI more robust.
+ *
+ * Note that this should only be called from a single thread.
+ * If you call it from more than one thread then you need to use your own mutex.
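+ *
+ * A typical usage sketch (someType and someObjectAddress are placeholders):
+ *
+ *     HandleTracker tracker(256);
+ *     oboe_handle_t handle = tracker.put(someType, someObjectAddress);
+ *     if (handle > 0) {
+ *         void *address = tracker.get(someType, handle);
+ *         // use the object at address, then release the slot
+ *         tracker.remove(someType, handle);
+ *     }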
+ */
+class HandleTracker {
+
+public:
+    /**
+     * @param maxHandles cannot exceed HANDLE_TRACKER_MAX_HANDLES
+     */
+    HandleTracker(uint32_t maxHandles = 256);
+    virtual ~HandleTracker();
+
+    /**
+     * Do not use the tracker if this returns false.
+     * @return true if the internal allocation succeeded
+     */
+    bool isInitialized() const;
+
+    /**
+     * Store a pointer and return a handle that can be used to retrieve the pointer.
+     *
+     * @param expectedType the type of the object to be tracked
+     * @param address pointer to be converted to a handle
+     * @return a valid handle or a negative error
+     */
+    oboe_handle_t put(handle_tracker_type_t expectedType, handle_tracker_address_t address);
+
+    /**
+     * Get the original pointer associated with the handle.
+     * The handle will be validated to prevent stale handles from being reused.
+     * Note that the validation is designed to prevent common coding errors and not
+     * to prevent deliberate hacking.
+     *
+     * @param expectedType should match the type we passed to put()
+     * @param handle to be converted to a pointer
+     * @return address associated with handle or nullptr
+     */
+    handle_tracker_address_t get(handle_tracker_type_t expectedType, oboe_handle_t handle) const;
+
+    /**
+     * Free up the storage associated with the handle.
+     * Subsequent attempts to use the handle will fail.
+     *
+     * @param expectedType should match the type we passed to put()
+     * @param handle to be removed from tracking
+     * @return address associated with handle or nullptr if not found
+     */
+    handle_tracker_address_t remove(handle_tracker_type_t expectedType, oboe_handle_t handle);
+
+private:
+    const int32_t               mMaxHandleCount;   // size of array
+    // This is const after initialization.
+    handle_tracker_address_t  * mHandleAddresses;  // address of objects or a free linked list node
+    // This is const after initialization.
+    handle_tracker_header_t   * mHandleHeaders;    // combination of type and generation
+    handle_tracker_address_t  * mNextFreeAddress; // head of the linked list of free nodes in mHandleAddresses
+
+    /**
+     * Pull slot off of a list of empty slots.
+     * @return index or a negative error
+     */
+    handle_tracker_slot_t allocateSlot();
+
+    /**
+     * Validate the handle and return the corresponding index.
+     * @return slot index or a negative error
+     */
+    handle_tracker_slot_t handleToIndex(handle_tracker_type_t type, oboe_handle_t handle) const;
+
+    /**
+     * Construct a handle from a header and an index.
+     * @param header combination of a type and a generation
+     * @param index slot index returned from allocateSlot
+     * @return handle or a negative error
+     */
+    oboe_handle_t buildHandle(handle_tracker_header_t header, handle_tracker_slot_t index);
+
+    /**
+     * Combine a type and a generation field into a header.
+     */
+    static handle_tracker_header_t buildHeader(handle_tracker_type_t type,
+                handle_tracker_generation_t generation);
+
+    /**
+     * Extract the index from a handle.
+     * Does not validate the handle.
+     * @return index associated with a handle
+     */
+    static handle_tracker_slot_t extractIndex(oboe_handle_t handle);
+
+    /**
+     * Extract the generation from a handle.
+     * Does not validate the handle.
+     * @return generation associated with a handle
+     */
+    static handle_tracker_generation_t extractGeneration(oboe_handle_t handle);
+
+    /**
+     * Increment the generation for the slot, avoiding zero.
+     */
+    handle_tracker_generation_t nextGeneration(handle_tracker_slot_t index);
+
+};
+
+#endif //UTILITY_HANDLETRACKER_H
diff --git a/media/liboboe/src/utility/MonotonicCounter.h b/media/liboboe/src/utility/MonotonicCounter.h
new file mode 100644
index 0000000..befad21
--- /dev/null
+++ b/media/liboboe/src/utility/MonotonicCounter.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef UTILITY_MONOTONICCOUNTER_H
+#define UTILITY_MONOTONICCOUNTER_H
+
+#include <stdint.h>
+
+/**
+ * Maintain a 64-bit monotonic counter.
+ * Can be used to track a 32-bit counter that wraps or gets reset.
+ *
+ * Note that this is not atomic and has no interior locks.
+ * A caller will need to provide their own exterior locking
+ * if they need to use it from multiple threads.
+ */
+class MonotonicCounter {
+
+public:
+    MonotonicCounter() {}
+    virtual ~MonotonicCounter() {}
+
+    /**
+     * @return current value of the counter
+     */
+    int64_t get() const {
+        return mCounter64;
+    }
+
+    /**
+     * Advance the counter if delta is positive.
+     * @return current value of the counter
+     */
+    int64_t increment(int64_t delta) {
+        if (delta > 0) {
+            mCounter64 += delta;
+        }
+        return mCounter64;
+    }
+
+    /**
+     * Advance the 64-bit counter if (current32 - previousCurrent32) > 0.
+     * This can be used to convert a 32-bit counter that may be wrapping into
+     * a monotonic 64-bit counter.
+     *
+     * This counter32 should NOT be allowed to advance by more than 0x7FFFFFFF between calls.
+     * Think of the wrapping counter like a sine wave. If the frequency of the signal
+     * is more than half the sampling rate (Nyquist rate) then you cannot measure it properly.
+     * If the counter wraps around every 24 hours then we should measure it with a period
+     * of less than 12 hours.
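+     *
+     * For example, if the previous counter32 was 0x7FFFFFF0 and the new value
+     * has wrapped around to 0x80000010 (a negative int32_t), the int32_t
+     * subtraction still yields a delta of 0x20, so the 64-bit counter
+     * advances by 32.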
+     *
+     * @return current value of the 64-bit counter
+     */
+    int64_t update32(int32_t counter32) {
+        int32_t delta = counter32 - mCounter32;
+        // protect against the mCounter64 going backwards
+        if (delta > 0) {
+            mCounter64 += delta;
+            mCounter32 = counter32;
+        }
+        return mCounter64;
+    }
+
+    /**
+     * Reset the stored value of the 32-bit counter.
+     * This is used if your counter32 has been reset to zero.
+     */
+    void reset32() {
+        mCounter32 = 0;
+    }
+
+private:
+    int64_t mCounter64 = 0;
+    int32_t mCounter32 = 0;
+};
+
+
+#endif //UTILITY_MONOTONICCOUNTER_H
diff --git a/media/liboboe/src/utility/OboeUtilities.cpp b/media/liboboe/src/utility/OboeUtilities.cpp
new file mode 100644
index 0000000..d9d2e88
--- /dev/null
+++ b/media/liboboe/src/utility/OboeUtilities.cpp
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "OboeAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <utils/Errors.h>
+
+#include "oboe/OboeDefinitions.h"
+#include "OboeUtilities.h"
+
+using namespace android;
+
+oboe_size_bytes_t OboeConvert_formatToSizeInBytes(oboe_audio_format_t format) {
+    oboe_size_bytes_t size = OBOE_ERROR_ILLEGAL_ARGUMENT;
+    switch (format) {
+        case OBOE_AUDIO_FORMAT_PCM16:
+            size = sizeof(int16_t);
+            break;
+        case OBOE_AUDIO_FORMAT_PCM32:
+        case OBOE_AUDIO_FORMAT_PCM824:
+            size = sizeof(int32_t);
+            break;
+        case OBOE_AUDIO_FORMAT_PCM_FLOAT:
+            size = sizeof(float);
+            break;
+        default:
+            break;
+    }
+    return size;
+}
+
+// TODO This is similar to a function in audio_utils. Consider using that instead.
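+// For example, an input of 0.0f maps to 0, -1.0f maps to -32768, and values at
+// or above +1.0f clip to +32767.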
+void OboeConvert_floatToPcm16(const float *source, int32_t numSamples, int16_t *destination) {
+    for (int i = 0; i < numSamples; i++) {
+        float fval = source[i];
+        fval += 1.0; // to avoid discontinuity at 0.0 caused by truncation
+        fval *= 32768.0f;
+        int32_t sample = (int32_t) fval;
+        // clip to 16-bit range
+        if (sample < 0) sample = 0;
+        else if (sample > 0x0FFFF) sample = 0x0FFFF;
+        sample -= 32768; // center at zero
+        destination[i] = (int16_t) sample;
+    }
+}
+
+void OboeConvert_pcm16ToFloat(const int16_t *source, int32_t numSamples, float *destination) {
+    for (int i = 0; i < numSamples; i++) {
+        destination[i] = source[i] * (1.0f / 32768.0f);
+    }
+}
+
+oboe_result_t OboeConvert_androidToOboeError(status_t error) {
+    if (error >= 0) {
+        return error;
+    }
+    oboe_result_t result;
+    switch (error) {
+    case OK:
+        result = OBOE_OK;
+        break;
+    case INVALID_OPERATION:
+        result = OBOE_ERROR_INVALID_STATE;
+        break;
+    case BAD_VALUE:
+        result = OBOE_ERROR_UNEXPECTED_VALUE;
+        break;
+    case WOULD_BLOCK:
+        result = OBOE_ERROR_WOULD_BLOCK;
+        break;
+    // TODO add more error codes
+    default:
+        result = OBOE_ERROR_INTERNAL;
+        break;
+    }
+    return result;
+}
+
+audio_format_t OboeConvert_oboeToAndroidDataFormat(oboe_audio_format_t oboeFormat) {
+    audio_format_t androidFormat;
+    switch (oboeFormat) {
+    case OBOE_AUDIO_FORMAT_PCM16:
+        androidFormat = AUDIO_FORMAT_PCM_16_BIT;
+        break;
+    case OBOE_AUDIO_FORMAT_PCM_FLOAT:
+        androidFormat = AUDIO_FORMAT_PCM_FLOAT;
+        break;
+    case OBOE_AUDIO_FORMAT_PCM824:
+        androidFormat = AUDIO_FORMAT_PCM_8_24_BIT;
+        break;
+    case OBOE_AUDIO_FORMAT_PCM32:
+        androidFormat = AUDIO_FORMAT_PCM_32_BIT;
+        break;
+    default:
+        androidFormat = AUDIO_FORMAT_DEFAULT;
+        ALOGE("OboeConvert_oboeToAndroidDataFormat 0x%08X unrecognized", oboeFormat);
+        break;
+    }
+    return androidFormat;
+}
+
+oboe_audio_format_t OboeConvert_androidToOboeDataFormat(audio_format_t androidFormat) {
+    oboe_audio_format_t oboeFormat = OBOE_AUDIO_FORMAT_INVALID;
+    switch (androidFormat) {
+    case AUDIO_FORMAT_PCM_16_BIT:
+        oboeFormat = OBOE_AUDIO_FORMAT_PCM16;
+        break;
+    case AUDIO_FORMAT_PCM_FLOAT:
+        oboeFormat = OBOE_AUDIO_FORMAT_PCM_FLOAT;
+        break;
+    case AUDIO_FORMAT_PCM_32_BIT:
+        oboeFormat = OBOE_AUDIO_FORMAT_PCM32;
+        break;
+    case AUDIO_FORMAT_PCM_8_24_BIT:
+        oboeFormat = OBOE_AUDIO_FORMAT_PCM824;
+        break;
+    default:
+        oboeFormat = OBOE_AUDIO_FORMAT_INVALID;
+        ALOGE("OboeConvert_androidToOboeDataFormat 0x%08X unrecognized", androidFormat);
+        break;
+    }
+    return oboeFormat;
+}
+
+oboe_size_bytes_t OboeConvert_framesToBytes(oboe_size_frames_t numFrames,
+                                            oboe_size_bytes_t bytesPerFrame,
+                                            oboe_size_bytes_t *sizeInBytes) {
+    // TODO implement more elegantly
+    const int32_t maxChannels = 256; // ridiculously large
+    const oboe_size_frames_t maxBytesPerFrame = maxChannels * sizeof(float);
+    // Prevent overflow by limiting multiplicands.
+    if (bytesPerFrame > maxBytesPerFrame || numFrames > (0x3FFFFFFF / maxBytesPerFrame)) {
+        ALOGE("size overflow, numFrames = %d, frameSize = %zd", numFrames, bytesPerFrame);
+        return OBOE_ERROR_OUT_OF_RANGE;
+    }
+    *sizeInBytes = numFrames * bytesPerFrame;
+    return OBOE_OK;
+}
diff --git a/media/liboboe/src/utility/OboeUtilities.h b/media/liboboe/src/utility/OboeUtilities.h
new file mode 100644
index 0000000..974ccf6
--- /dev/null
+++ b/media/liboboe/src/utility/OboeUtilities.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef UTILITY_OBOEUTILITIES_H
+#define UTILITY_OBOEUTILITIES_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <utils/Errors.h>
+#include <hardware/audio.h>
+
+#include "oboe/OboeDefinitions.h"
+
+oboe_result_t OboeConvert_androidToOboeError(android::status_t error);
+
+void OboeConvert_floatToPcm16(const float *source, int32_t numSamples, int16_t *destination);
+
+void OboeConvert_pcm16ToFloat(const int16_t *source, int32_t numSamples, float *destination);
+
+/**
+ * Calculate the number of bytes and prevent numeric overflow.
+ * @param numFrames frame count
+ * @param bytesPerFrame size of a frame in bytes
+ * @param sizeInBytes receives the total size in bytes
+ * @return OBOE_OK or a negative error, e.g. OBOE_ERROR_OUT_OF_RANGE
+ */
+oboe_size_bytes_t OboeConvert_framesToBytes(oboe_size_frames_t numFrames,
+                                            oboe_size_bytes_t bytesPerFrame,
+                                            oboe_size_bytes_t *sizeInBytes);
+
+audio_format_t OboeConvert_oboeToAndroidDataFormat(oboe_audio_format_t oboe_format);
+
+oboe_audio_format_t OboeConvert_androidToOboeDataFormat(audio_format_t format);
+
+/**
+ * @return the size of a sample of the given format in bytes or OBOE_ERROR_ILLEGAL_ARGUMENT
+ */
+oboe_size_bytes_t OboeConvert_formatToSizeInBytes(oboe_audio_format_t format);
+
+#endif //UTILITY_OBOEUTILITIES_H
diff --git a/media/liboboe/src/utility/README.md b/media/liboboe/src/utility/README.md
new file mode 100644
index 0000000..9db926a
--- /dev/null
+++ b/media/liboboe/src/utility/README.md
@@ -0,0 +1,3 @@
+The utility folder contains classes and functions that may be shared between the
+Oboe client and server. They might also be useful outside Oboe.
+They generally do not depend on Oboe functionality.
diff --git a/media/liboboe/tests/Android.mk b/media/liboboe/tests/Android.mk
new file mode 100644
index 0000000..165669b
--- /dev/null
+++ b/media/liboboe/tests/Android.mk
@@ -0,0 +1,42 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/liboboe/include \
+    frameworks/av/media/liboboe/src/core \
+    frameworks/av/media/liboboe/src/utility
+LOCAL_SRC_FILES := test_oboe_api.cpp
+LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
+                          libcutils liblog libmedia libutils
+LOCAL_STATIC_LIBRARIES := liboboe
+LOCAL_MODULE := test_oboe_api
+include $(BUILD_NATIVE_TEST)
+
+include $(CLEAR_VARS)
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/liboboe/include \
+    frameworks/av/media/liboboe/src/core \
+    frameworks/av/media/liboboe/src/utility
+LOCAL_SRC_FILES:= test_handle_tracker.cpp
+LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
+                          libcutils liblog libmedia libutils
+LOCAL_STATIC_LIBRARIES := liboboe
+LOCAL_MODULE := test_handle_tracker
+include $(BUILD_NATIVE_TEST)
+
+include $(CLEAR_VARS)
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/liboboe/include \
+    frameworks/av/media/liboboe/src \
+    frameworks/av/media/liboboe/src/core \
+    frameworks/av/media/liboboe/src/fifo \
+    frameworks/av/media/liboboe/src/utility
+LOCAL_SRC_FILES:= test_marshalling.cpp
+LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
+                          libcutils liblog libmedia libutils
+LOCAL_STATIC_LIBRARIES := liboboe
+LOCAL_MODULE := test_marshalling
+include $(BUILD_NATIVE_TEST)
diff --git a/media/liboboe/tests/test_handle_tracker.cpp b/media/liboboe/tests/test_handle_tracker.cpp
new file mode 100644
index 0000000..a146e76
--- /dev/null
+++ b/media/liboboe/tests/test_handle_tracker.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit tests for Oboe Handle Tracker
+
+#include <stdlib.h>
+#include <math.h>
+
+#include <gtest/gtest.h>
+
+#include <oboe/OboeDefinitions.h>
+#include "HandleTracker.h"
+
+// Test adding one address.
+TEST(test_handle_tracker, oboe_handle_tracker) {
+    const int MAX_HANDLES = 4;
+    HandleTracker tracker(MAX_HANDLES);
+    handle_tracker_type_t type = 3; // arbitrary generic type
+    int data; // something that has an address we can use
+    handle_tracker_address_t found;
+
+    // repeat the test several times to see if it breaks
+    const int SEVERAL = 5; // arbitrary
+    for (int i = 0; i < SEVERAL; i++) {
+        // should fail to find a bogus handle
+        found = tracker.get(type, 0);  // bad handle
+        EXPECT_EQ(nullptr, found);
+
+        // create a valid handle and use it to lookup the object again
+        oboe_handle_t dataHandle = tracker.put(type, &data);
+        ASSERT_TRUE(dataHandle > 0);
+        found = tracker.get(type, dataHandle);
+        EXPECT_EQ(&data, found);
+        found = tracker.get(type, 0); // bad handle
+        EXPECT_EQ(nullptr, found);
+
+        // wrong type
+        found = tracker.get(type+1, dataHandle);
+        EXPECT_EQ(nullptr, found);
+
+        // remove from storage
+        found = tracker.remove(type, dataHandle);
+        EXPECT_EQ(&data, found);
+        // should fail the second time
+        found = tracker.remove(type, dataHandle);
+        EXPECT_EQ(nullptr, found);
+    }
+}
+
+// Test filling the tracker.
+TEST(test_handle_tracker, oboe_full_up) {
+    const int MAX_HANDLES = 5;
+    HandleTracker tracker(MAX_HANDLES);
+    handle_tracker_type_t type = 4; // arbitrary generic type
+    int data[MAX_HANDLES];
+    oboe_handle_t handles[MAX_HANDLES];
+    handle_tracker_address_t found;
+
+    // repeat the test several times to see if it breaks
+    const int SEVERAL = 5; // arbitrary
+    for (int i = 0; i < SEVERAL; i++) {
+        for (int i = 0; i < MAX_HANDLES; i++) {
+            // add a handle
+            handles[i] = tracker.put(type, &data[i]);
+            ASSERT_TRUE(handles[i] > 0);
+            found = tracker.get(type, handles[i]);
+            EXPECT_EQ(&data[i], found);
+        }
+
+        // Now that it is full, try to add one more.
+        oboe_handle_t handle = tracker.put(type, &data[0]);
+        EXPECT_TRUE(handle < 0);
+
+        for (int i = 0; i < MAX_HANDLES; i++) {
+            // look up each handle
+            found = tracker.get(type, handles[i]);
+            EXPECT_EQ(&data[i], found);
+        }
+
+        // remove one from storage
+        found = tracker.remove(type, handles[2]);
+        EXPECT_EQ(&data[2], found);
+        // now try to look up the same handle and fail
+        found = tracker.get(type, handles[2]);
+        EXPECT_EQ(nullptr, found);
+
+        // add that same one back
+        handle = tracker.put(type, &data[2]);
+        ASSERT_TRUE(handle > 0);
+        found = tracker.get(type, handle);
+        EXPECT_EQ(&data[2], found);
+        // now use a stale handle again with a valid index and fail
+        found = tracker.get(type, handles[2]);
+        EXPECT_EQ(nullptr, found);
+
+        // remove them all
+        handles[2] = handle;
+        for (int i = 0; i < MAX_HANDLES; i++) {
+            // look up each handle
+            found = tracker.remove(type, handles[i]);
+            EXPECT_EQ(&data[i], found);
+        }
+    }
+}
diff --git a/media/liboboe/tests/test_marshalling.cpp b/media/liboboe/tests/test_marshalling.cpp
new file mode 100644
index 0000000..8f4cc2c
--- /dev/null
+++ b/media/liboboe/tests/test_marshalling.cpp
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit tests for Oboe Marshalling of RingBuffer information.
+
+#include <stdlib.h>
+#include <math.h>
+
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+#include <cutils/ashmem.h>
+#include <gtest/gtest.h>
+#include <sys/mman.h>
+
+#include <oboe/OboeDefinitions.h>
+#include <binding/AudioEndpointParcelable.h>
+
+using namespace android;
+using namespace oboe;
+
+// Test adding one value.
+TEST(test_marshalling, oboe_one_read_write) {
+    Parcel parcel;
+    size_t pos = parcel.dataPosition();
+    const int arbitraryValue = 235;
+    parcel.writeInt32(arbitraryValue);
+    parcel.setDataPosition(pos);
+    int32_t y;
+    parcel.readInt32(&y);
+    EXPECT_EQ(arbitraryValue, y);
+}
+
+// Test SharedMemoryParcel.
+TEST(test_marshalling, oboe_shared_memory) {
+    SharedMemoryParcelable sharedMemoryA;
+    SharedMemoryParcelable sharedMemoryB;
+    const size_t memSizeBytes = 840;
+    int fd = ashmem_create_region("TestMarshalling", memSizeBytes);
+    ASSERT_LE(0, fd);
+    sharedMemoryA.setup(fd, memSizeBytes);
+    void *region1;
+    EXPECT_EQ(OBOE_OK, sharedMemoryA.resolve(0, 16, &region1)); // fits in region
+    EXPECT_NE(OBOE_OK, sharedMemoryA.resolve(-2, 16, &region1)); // offset is negative
+    EXPECT_NE(OBOE_OK, sharedMemoryA.resolve(0, memSizeBytes + 8, &region1)); // size too big
+    EXPECT_NE(OBOE_OK, sharedMemoryA.resolve(memSizeBytes - 8, 16, &region1)); // goes past the end
+    int32_t *buffer1 = (int32_t *)region1;
+    buffer1[0] = 98735; // arbitrary value
+
+    Parcel parcel;
+    size_t pos = parcel.dataPosition();
+    sharedMemoryA.writeToParcel(&parcel);
+
+    parcel.setDataPosition(pos);
+    sharedMemoryB.readFromParcel(&parcel);
+    EXPECT_EQ(sharedMemoryA.getSizeInBytes(), sharedMemoryB.getSizeInBytes());
+
+    // should see same value at two different addresses
+    void *region2;
+    EXPECT_EQ(OBOE_OK, sharedMemoryB.resolve(0, 16, &region2));
+    int32_t *buffer2 = (int32_t *)region2;
+    EXPECT_NE(buffer1, buffer2);
+    EXPECT_EQ(buffer1[0], buffer2[0]);
+}
+
+// Test SharedRegionParcel.
+TEST(test_marshalling, oboe_shared_region) {
+    SharedMemoryParcelable sharedMemories[2];
+    SharedRegionParcelable sharedRegionA;
+    SharedRegionParcelable sharedRegionB;
+    const size_t memSizeBytes = 840;
+    int fd = ashmem_create_region("TestMarshalling", memSizeBytes);
+    ASSERT_LE(0, fd);
+    sharedMemories[0].setup(fd, memSizeBytes);
+    int32_t regionOffset1 = 32;
+    int32_t regionSize1 = 16;
+    sharedRegionA.setup(0, regionOffset1, regionSize1);
+
+    void *region1;
+    EXPECT_EQ(OBOE_OK, sharedRegionA.resolve(sharedMemories, &region1));
+    int32_t *buffer1 = (int32_t *)region1;
+    buffer1[0] = 336677; // arbitrary value
+
+    Parcel parcel;
+    size_t pos = parcel.dataPosition();
+    sharedRegionA.writeToParcel(&parcel);
+
+    parcel.setDataPosition(pos);
+    sharedRegionB.readFromParcel(&parcel);
+
+    // should see same value
+    void *region2;
+    EXPECT_EQ(OBOE_OK, sharedRegionB.resolve(sharedMemories, &region2));
+    int32_t *buffer2 = (int32_t *)region2;
+    EXPECT_EQ(buffer1[0], buffer2[0]);
+}
+
+// Test RingBufferParcelable.
+TEST(test_marshalling, oboe_ring_buffer_parcelable) {
+    SharedMemoryParcelable sharedMemories[2];
+    RingBufferParcelable ringBufferA;
+    RingBufferParcelable ringBufferB;
+
+    const size_t bytesPerFrame = 8;
+    const size_t framesPerBurst = 32;
+    const size_t dataSizeBytes = 2048;
+    const int32_t counterSizeBytes = sizeof(int64_t);
+    const size_t memSizeBytes = dataSizeBytes + (2 * counterSizeBytes);
+
+    int fd = ashmem_create_region("TestMarshalling", memSizeBytes);
+    ASSERT_LE(0, fd);
+    sharedMemories[0].setup(fd, memSizeBytes);
+
+    int32_t sharedMemoryIndex = 0;
+    // arrange indices and data in the shared memory
+    int32_t readOffset = 0;
+    int32_t writeOffset = readOffset + counterSizeBytes;
+    int32_t dataOffset = writeOffset + counterSizeBytes;
+    ringBufferA.setupMemory(sharedMemoryIndex, dataOffset, dataSizeBytes,
+        readOffset, writeOffset, counterSizeBytes);
+    ringBufferA.setFramesPerBurst(framesPerBurst);
+    ringBufferA.setBytesPerFrame(bytesPerFrame);
+    ringBufferA.setCapacityInFrames(dataSizeBytes / bytesPerFrame);
+
+    // setup A
+    RingBufferDescriptor descriptorA;
+    EXPECT_EQ(OBOE_OK, ringBufferA.resolve(sharedMemories, &descriptorA));
+    descriptorA.dataAddress[0] = 95;
+    descriptorA.dataAddress[1] = 57;
+    descriptorA.readCounterAddress[0] = 17;
+    descriptorA.writeCounterAddress[0] = 39;
+
+    // write A to parcel
+    Parcel parcel;
+    size_t pos = parcel.dataPosition();
+    ringBufferA.writeToParcel(&parcel);
+
+    // read B from parcel
+    parcel.setDataPosition(pos);
+    ringBufferB.readFromParcel(&parcel);
+
+    RingBufferDescriptor descriptorB;
+    EXPECT_EQ(OBOE_OK, ringBufferB.resolve(sharedMemories, &descriptorB));
+
+    // A and B should match
+    EXPECT_EQ(descriptorA.dataAddress[0], descriptorB.dataAddress[0]);
+    EXPECT_EQ(descriptorA.dataAddress[1], descriptorB.dataAddress[1]);
+    EXPECT_EQ(descriptorA.readCounterAddress[0], descriptorB.readCounterAddress[0]);
+    EXPECT_EQ(descriptorA.writeCounterAddress[0], descriptorB.writeCounterAddress[0]);
+
+    EXPECT_EQ(ringBufferA.getFramesPerBurst(), ringBufferB.getFramesPerBurst());
+    EXPECT_EQ(ringBufferA.getBytesPerFrame(), ringBufferB.getBytesPerFrame());
+    EXPECT_EQ(ringBufferA.getCapacityInFrames(), ringBufferB.getCapacityInFrames());
+}
diff --git a/media/liboboe/tests/test_oboe_api.cpp b/media/liboboe/tests/test_oboe_api.cpp
new file mode 100644
index 0000000..0bc469f
--- /dev/null
+++ b/media/liboboe/tests/test_oboe_api.cpp
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit tests for Oboe 'C' API.
+
+#include <stdlib.h>
+#include <math.h>
+
+#include <gtest/gtest.h>
+
+#include <oboe/OboeDefinitions.h>
+#include <oboe/OboeAudio.h>
+#include "OboeUtilities.h"
+
+#define DEFAULT_STATE_TIMEOUT  (500 * OBOE_NANOS_PER_MILLISECOND)
+
+// Test OboeStreamBuilder
+TEST(test_oboe_api, oboe_stream_builder) {
+    const oboe_sample_rate_t requestedSampleRate1 = 48000;
+    const oboe_sample_rate_t requestedSampleRate2 = 44100;
+    const int32_t requestedSamplesPerFrame = 2;
+    const oboe_audio_format_t requestedDataFormat = OBOE_AUDIO_FORMAT_PCM16;
+
+    oboe_sample_rate_t sampleRate = 0;
+    int32_t samplesPerFrame = 0;
+    oboe_audio_format_t actualDataFormat;
+    OboeStreamBuilder oboeBuilder1;
+    OboeStreamBuilder oboeBuilder2;
+
+    oboe_result_t result = OBOE_OK;
+
+    // Use an OboeStreamBuilder to define the stream.
+    result = Oboe_createStreamBuilder(&oboeBuilder1);
+    ASSERT_EQ(OBOE_OK, result);
+
+    // Request stream properties.
+    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setSampleRate(oboeBuilder1, requestedSampleRate1));
+    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setSamplesPerFrame(oboeBuilder1, requestedSamplesPerFrame));
+    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setFormat(oboeBuilder1, requestedDataFormat));
+
+    // Check to make sure builder saved the properties.
+    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_getSampleRate(oboeBuilder1, &sampleRate));
+    EXPECT_EQ(requestedSampleRate1, sampleRate);
+
+    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_getSamplesPerFrame(oboeBuilder1, &samplesPerFrame));
+    EXPECT_EQ(requestedSamplesPerFrame, samplesPerFrame);
+
+    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_getFormat(oboeBuilder1, &actualDataFormat));
+    EXPECT_EQ(requestedDataFormat, actualDataFormat);
+
+    result = OboeStreamBuilder_getSampleRate(0x0BADCAFE, &sampleRate); // ridiculous token
+    EXPECT_EQ(OBOE_ERROR_INVALID_HANDLE, result);
+
+    // Create a second builder and make sure they do not collide.
+    ASSERT_EQ(OBOE_OK, Oboe_createStreamBuilder(&oboeBuilder2));
+    ASSERT_NE(oboeBuilder1, oboeBuilder2);
+
+    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setSampleRate(oboeBuilder2, requestedSampleRate2));
+    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_getSampleRate(oboeBuilder1, &sampleRate));
+    EXPECT_EQ(requestedSampleRate1, sampleRate);
+    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_getSampleRate(oboeBuilder2, &sampleRate));
+    EXPECT_EQ(requestedSampleRate2, sampleRate);
+
+    // Delete the builder.
+    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_delete(oboeBuilder1));
+
+    // Now it should no longer be valid.
+    // Note that this test assumes we are using the HandleTracker. If we used plain
+    // pointers instead, it would be difficult to detect this kind of error.
+    result = OboeStreamBuilder_getSampleRate(oboeBuilder1, &sampleRate); // stale token
+    EXPECT_EQ(OBOE_ERROR_INVALID_HANDLE, result);
+
+    // Second builder should still be valid.
+    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_getSampleRate(oboeBuilder2, &sampleRate));
+    EXPECT_EQ(requestedSampleRate2, sampleRate);
+
+    // Delete the second builder.
+    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_delete(oboeBuilder2));
+
+    // Now it should no longer be valid. Assumes the HandleTracker is used.
+    EXPECT_EQ(OBOE_ERROR_INVALID_HANDLE, OboeStreamBuilder_getSampleRate(oboeBuilder2, &sampleRate));
+}
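
The two "stale token" checks above rely on handles going through a tracker rather than being raw pointers, so a deleted builder's handle can be rejected instead of dereferenced. As a minimal sketch of one common design, assuming a slot-index-plus-generation scheme with hypothetical names (not the liboboe HandleTracker), bumping the generation on removal is what makes old handles detectable:

// Minimal sketch (hypothetical, not the liboboe HandleTracker): pack a slot
// index and a generation counter into each handle so that handles to deleted
// objects can be detected instead of dereferencing a dangling pointer.
#include <cstdint>
#include <array>

class FakeHandleTracker {
public:
    static constexpr int kMaxSlots = 64;

    int32_t put(void *object) {
        for (int i = 0; i < kMaxSlots; i++) {
            if (mObjects[i] == nullptr) {
                mObjects[i] = object;
                return (mGenerations[i] << 8) | i;   // generation in high bits
            }
        }
        return -1; // out of slots
    }

    void *get(int32_t handle) const {
        int slot = handle & 0xFF;
        int generation = handle >> 8;
        if (slot < 0 || slot >= kMaxSlots || mObjects[slot] == nullptr
                || generation != mGenerations[slot]) {
            return nullptr;  // stale or invalid handle
        }
        return mObjects[slot];
    }

    void remove(int32_t handle) {
        int slot = handle & 0xFF;
        if (slot >= 0 && slot < kMaxSlots && get(handle) != nullptr) {
            mObjects[slot] = nullptr;
            mGenerations[slot]++;     // old handles for this slot are now stale
        }
    }

private:
    std::array<void *, kMaxSlots> mObjects{};
    std::array<int32_t, kMaxSlots> mGenerations{};
};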
+
+// Test creating a default stream with everything unspecified.
+TEST(test_oboe_api, oboe_stream_unspecified) {
+    OboeStreamBuilder oboeBuilder;
+    OboeStream oboeStream;
+    oboe_result_t result = OBOE_OK;
+
+    // Use an OboeStreamBuilder to define the stream.
+    result = Oboe_createStreamBuilder(&oboeBuilder);
+    ASSERT_EQ(OBOE_OK, result);
+
+    // Create an OboeStream using the Builder.
+    ASSERT_EQ(OBOE_OK, OboeStreamBuilder_openStream(oboeBuilder, &oboeStream));
+
+    // Cleanup
+    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_delete(oboeBuilder));
+    EXPECT_EQ(OBOE_OK, OboeStream_close(oboeStream));
+}
+
+// Test Writing to an OboeStream
+void runtest_oboe_stream(oboe_sharing_mode_t requestedSharingMode) {
+    const oboe_sample_rate_t requestedSampleRate = 48000;
+    const int32_t requestedSamplesPerFrame = 2;
+    const oboe_audio_format_t requestedDataFormat = OBOE_AUDIO_FORMAT_PCM16;
+
+    oboe_sample_rate_t actualSampleRate = -1;
+    int32_t actualSamplesPerFrame = -1;
+    oboe_audio_format_t actualDataFormat = OBOE_AUDIO_FORMAT_INVALID;
+    oboe_sharing_mode_t actualSharingMode;
+    oboe_size_frames_t framesPerBurst = -1;
+    int writeLoops = 0;
+
+    oboe_size_frames_t framesWritten = 0;
+    oboe_size_frames_t framesPrimed = 0;
+    oboe_position_frames_t framesTotal = 0;
+    oboe_position_frames_t oboeFramesRead = 0;
+    oboe_position_frames_t oboeFramesRead1 = 0;
+    oboe_position_frames_t oboeFramesRead2 = 0;
+    oboe_position_frames_t oboeFramesWritten = 0;
+
+    oboe_nanoseconds_t timeoutNanos;
+
+    oboe_stream_state_t state = OBOE_STREAM_STATE_UNINITIALIZED;
+    OboeStreamBuilder oboeBuilder;
+    OboeStream oboeStream;
+
+    oboe_result_t result = OBOE_OK;
+
+    // Use an OboeStreamBuilder to define the stream.
+    result = Oboe_createStreamBuilder(&oboeBuilder);
+    ASSERT_EQ(OBOE_OK, result);
+
+    // Request stream properties.
+    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setSampleRate(oboeBuilder, requestedSampleRate));
+    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setSamplesPerFrame(oboeBuilder, requestedSamplesPerFrame));
+    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setFormat(oboeBuilder, requestedDataFormat));
+    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setSharingMode(oboeBuilder, requestedSharingMode));
+
+    // Create an OboeStream using the Builder.
+    ASSERT_EQ(OBOE_OK, OboeStreamBuilder_openStream(oboeBuilder, &oboeStream));
+    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_delete(oboeBuilder));
+
+    EXPECT_EQ(OBOE_OK, OboeStream_getState(oboeStream, &state));
+    EXPECT_EQ(OBOE_STREAM_STATE_OPEN, state);
+
+    // Check to see what kind of stream we actually got.
+    EXPECT_EQ(OBOE_OK, OboeStream_getSampleRate(oboeStream, &actualSampleRate));
+    ASSERT_TRUE(actualSampleRate >= 44100 && actualSampleRate <= 96000);  // TODO what is range?
+
+    EXPECT_EQ(OBOE_OK, OboeStream_getSamplesPerFrame(oboeStream, &actualSamplesPerFrame));
+    ASSERT_TRUE(actualSamplesPerFrame >= 1 && actualSamplesPerFrame <= 16); // TODO what is max?
+
+    EXPECT_EQ(OBOE_OK, OboeStream_getSharingMode(oboeStream, &actualSharingMode));
+    ASSERT_TRUE(actualSharingMode == OBOE_SHARING_MODE_EXCLUSIVE
+                || actualSharingMode == OBOE_SHARING_MODE_LEGACY);
+
+    EXPECT_EQ(OBOE_OK, OboeStream_getFormat(oboeStream, &actualDataFormat));
+    EXPECT_NE(OBOE_AUDIO_FORMAT_INVALID, actualDataFormat);
+
+    EXPECT_EQ(OBOE_OK, OboeStream_getFramesPerBurst(oboeStream, &framesPerBurst));
+    ASSERT_TRUE(framesPerBurst >= 16 && framesPerBurst <= 1024); // TODO what is min/max?
+
+    // Allocate a buffer for the audio data.
+    // TODO handle possibility of other data formats
+    ASSERT_TRUE(actualDataFormat == OBOE_AUDIO_FORMAT_PCM16);
+    size_t dataSizeSamples = framesPerBurst * actualSamplesPerFrame;
+    int16_t *data = new int16_t[dataSizeSamples];
+    ASSERT_TRUE(nullptr != data);
+    memset(data, 0, sizeof(int16_t) * dataSizeSamples);
+
+    // Prime the buffer.
+    timeoutNanos = 0;
+    do {
+        framesWritten = OboeStream_write(oboeStream, data, framesPerBurst, timeoutNanos);
+        // There should be some room for priming the buffer.
+        framesTotal += framesWritten;
+        ASSERT_GE(framesWritten, 0);
+        ASSERT_LE(framesWritten, framesPerBurst);
+    } while (framesWritten > 0);
+    ASSERT_TRUE(framesTotal > 0);
+
+    // Start/write/pause more than once to see if it fails after the first time.
+    // Write some data and measure the rate to see if the timing is OK.
+    for (int numLoops = 0; numLoops < 2; numLoops++) {
+        // Start and wait for server to respond.
+        ASSERT_EQ(OBOE_OK, OboeStream_requestStart(oboeStream));
+        ASSERT_EQ(OBOE_OK, OboeStream_waitForStateChange(oboeStream,
+                                                         OBOE_STREAM_STATE_STARTING,
+                                                         &state,
+                                                         DEFAULT_STATE_TIMEOUT));
+        EXPECT_EQ(OBOE_STREAM_STATE_STARTED, state);
+
+        // Write some data while we are running. Read counter should be advancing.
+        writeLoops = 1 * actualSampleRate / framesPerBurst; // 1 second
+        ASSERT_LT(2, writeLoops); // detect absurdly high framesPerBurst
+        timeoutNanos = 10 * OBOE_NANOS_PER_SECOND * framesPerBurst / actualSampleRate; // time for 10 bursts
+        framesWritten = 1;
+        ASSERT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead));
+        oboeFramesRead1 = oboeFramesRead;
+        oboe_nanoseconds_t beginTime = Oboe_getNanoseconds(OBOE_CLOCK_MONOTONIC);
+        do {
+            framesWritten = OboeStream_write(oboeStream, data, framesPerBurst, timeoutNanos);
+            ASSERT_GE(framesWritten, 0);
+            ASSERT_LE(framesWritten, framesPerBurst);
+
+            framesTotal += framesWritten;
+            EXPECT_EQ(OBOE_OK, OboeStream_getFramesWritten(oboeStream, &oboeFramesWritten));
+            EXPECT_EQ(framesTotal, oboeFramesWritten);
+
+            // Try to get a more accurate measure of the sample rate.
+            if (beginTime == 0) {
+                EXPECT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead));
+                if (oboeFramesRead > oboeFramesRead1) { // is read pointer advancing
+                    beginTime = Oboe_getNanoseconds(OBOE_CLOCK_MONOTONIC);
+                    oboeFramesRead1 = oboeFramesRead;
+                }
+            }
+        } while (framesWritten > 0 && writeLoops-- > 0);
+
+        EXPECT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead2));
+        oboe_nanoseconds_t endTime = Oboe_getNanoseconds(OBOE_CLOCK_MONOTONIC);
+        ASSERT_GT(oboeFramesRead2, 0);
+        ASSERT_GT(oboeFramesRead2, oboeFramesRead1);
+        ASSERT_LE(oboeFramesRead2, oboeFramesWritten);
+
+        // TODO why is legacy so inaccurate?
+        const double rateTolerance = 200.0; // arbitrary tolerance for sample rate
+        if (requestedSharingMode != OBOE_SHARING_MODE_LEGACY) {
+            // Calculate approximate sample rate and compare with stream rate.
+            double seconds = (endTime - beginTime) / (double) OBOE_NANOS_PER_SECOND;
+            double measuredRate = (oboeFramesRead2 - oboeFramesRead1) / seconds;
+            ASSERT_NEAR(actualSampleRate, measuredRate, rateTolerance);
+        }
+
+        // Request async pause and wait for server to say that it has completed the pause.
+        ASSERT_EQ(OBOE_OK, OboeStream_requestPause(oboeStream));
+        EXPECT_EQ(OBOE_OK, OboeStream_waitForStateChange(oboeStream,
+                                                OBOE_STREAM_STATE_PAUSING,
+                                                &state,
+                                                DEFAULT_STATE_TIMEOUT));
+        EXPECT_EQ(OBOE_STREAM_STATE_PAUSED, state);
+    }
+
+    // Make sure the read counter is not advancing when we are paused.
+    ASSERT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead));
+    ASSERT_GE(oboeFramesRead, oboeFramesRead2); // monotonic increase
+
+    // Use this to sleep by waiting for something that won't happen.
+    OboeStream_waitForStateChange(oboeStream, OBOE_STREAM_STATE_PAUSED, &state, timeoutNanos);
+    ASSERT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead2));
+    EXPECT_EQ(oboeFramesRead, oboeFramesRead2);
+
+    // ------------------- TEST FLUSH -----------------
+    // Prime the buffer.
+    timeoutNanos = 0;
+    writeLoops = 100;
+    do {
+        framesWritten = OboeStream_write(oboeStream, data, framesPerBurst, timeoutNanos);
+        framesTotal += framesWritten;
+    } while (framesWritten > 0 && writeLoops-- > 0);
+    EXPECT_EQ(0, framesWritten);
+
+    // Flush and wait for server to respond.
+    ASSERT_EQ(OBOE_OK, OboeStream_requestFlush(oboeStream));
+    EXPECT_EQ(OBOE_OK, OboeStream_waitForStateChange(oboeStream,
+                                                     OBOE_STREAM_STATE_FLUSHING,
+                                                     &state,
+                                                     DEFAULT_STATE_TIMEOUT));
+    EXPECT_EQ(OBOE_STREAM_STATE_FLUSHED, state);
+
+    // After a flush, the read counter should be caught up with the write counter.
+    EXPECT_EQ(OBOE_OK, OboeStream_getFramesWritten(oboeStream, &oboeFramesWritten));
+    EXPECT_EQ(framesTotal, oboeFramesWritten);
+    EXPECT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead));
+    EXPECT_EQ(oboeFramesRead, oboeFramesWritten);
+
+    // The buffer should be empty after a flush so we should be able to write.
+    framesWritten = OboeStream_write(oboeStream, data, framesPerBurst, timeoutNanos);
+    // There should be some room for priming the buffer.
+    ASSERT_TRUE(framesWritten > 0 && framesWritten <= framesPerBurst);
+
+    EXPECT_EQ(OBOE_OK, OboeStream_close(oboeStream));
+}
+
+// Test Writing to an OboeStream using LEGACY sharing mode.
+TEST(test_oboe_api, oboe_stream_legacy) {
+    runtest_oboe_stream(OBOE_SHARING_MODE_LEGACY);
+}
+
+// Test Writing to an OboeStream using EXCLUSIVE sharing mode.
+TEST(test_oboe_api, oboe_stream_exclusive) {
+    runtest_oboe_stream(OBOE_SHARING_MODE_EXCLUSIVE);
+}
+
+#define OBOE_THREAD_ANSWER          1826375
+#define OBOE_THREAD_DURATION_MSEC       500
+
+static void *TestOboeStreamThreadProc(void *arg) {
+    OboeStream oboeStream = (OboeStream) reinterpret_cast<size_t>(arg);
+    oboe_stream_state_t state;
+
+    // Use this to sleep by waiting for something that won't happen.
+    EXPECT_EQ(OBOE_OK, OboeStream_getState(oboeStream, &state));
+    OboeStream_waitForStateChange(oboeStream, OBOE_STREAM_STATE_PAUSED, &state,
+            OBOE_THREAD_DURATION_MSEC * OBOE_NANOS_PER_MILLISECOND);
+    return reinterpret_cast<void *>(OBOE_THREAD_ANSWER);
+}
+
+// Test creating a stream related thread.
+TEST(test_oboe_api, oboe_stream_thread_basic) {
+    OboeStreamBuilder oboeBuilder;
+    OboeStream oboeStream;
+    oboe_result_t result = OBOE_OK;
+    void *threadResult;
+
+    // Use an OboeStreamBuilder to define the stream.
+    result = Oboe_createStreamBuilder(&oboeBuilder);
+    ASSERT_EQ(OBOE_OK, result);
+
+    // Create an OboeStream using the Builder.
+    ASSERT_EQ(OBOE_OK, OboeStreamBuilder_openStream(oboeBuilder, &oboeStream));
+
+    // Start a thread.
+    ASSERT_EQ(OBOE_OK, OboeStream_createThread(oboeStream,
+            10 * OBOE_NANOS_PER_MILLISECOND,
+            TestOboeStreamThreadProc,
+            reinterpret_cast<void *>(oboeStream)));
+    // Thread already started.
+    ASSERT_NE(OBOE_OK, OboeStream_createThread(oboeStream,   // should fail!
+            10 * OBOE_NANOS_PER_MILLISECOND,
+            TestOboeStreamThreadProc,
+            reinterpret_cast<void *>(oboeStream)));
+
+    // Wait for the thread to finish.
+    ASSERT_EQ(OBOE_OK, OboeStream_joinThread(oboeStream,
+            &threadResult, 2 * OBOE_THREAD_DURATION_MSEC * OBOE_NANOS_PER_MILLISECOND));
+    // The thread returns a special answer.
+    ASSERT_EQ(OBOE_THREAD_ANSWER, (int)reinterpret_cast<size_t>(threadResult));
+
+    // Thread should already be joined.
+    ASSERT_NE(OBOE_OK, OboeStream_joinThread(oboeStream,  // should fail!
+            &threadResult, 2 * OBOE_THREAD_DURATION_MSEC * OBOE_NANOS_PER_MILLISECOND));
+
+    // Cleanup
+    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_delete(oboeBuilder));
+    EXPECT_EQ(OBOE_OK, OboeStream_close(oboeStream));
+}
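
The thread test above passes an integer "answer" back through the void* thread-result channel, using reinterpret_cast in both directions. For readers unfamiliar with that cast dance, here is a minimal sketch using plain pthreads (not the Oboe thread API); the constant and function names are illustrative only:

// Minimal sketch (plain pthreads, not the Oboe thread API): how an integer
// "answer" can be carried through the void* thread-result channel, the same
// pattern the test above uses with OBOE_THREAD_ANSWER.
#include <pthread.h>
#include <cstdint>
#include <cstdio>

static void *answerProc(void *) {
    // Widen the integer to pointer size before casting to void*.
    return reinterpret_cast<void *>(static_cast<intptr_t>(1826375));
}

int main() {
    pthread_t thread;
    pthread_create(&thread, nullptr, answerProc, nullptr);

    void *result = nullptr;
    pthread_join(thread, &result);

    // Cast back through an integer type wide enough to hold a pointer.
    int answer = static_cast<int>(reinterpret_cast<intptr_t>(result));
    printf("thread answered %d\n", answer);
    return answer == 1826375 ? 0 : 1;
}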
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index e7057ce..c63ab47 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -44,6 +44,7 @@
 #include <media/stagefright/PersistentSurface.h>
 #include <media/stagefright/SurfaceUtils.h>
 #include <media/hardware/HardwareAPI.h>
+#include <media/OMXBuffer.h>
 
 #include <OMX_AudioExt.h>
 #include <OMX_VideoExt.h>
@@ -52,11 +53,16 @@
 #include <OMX_AsString.h>
 
 #include "include/avc_utils.h"
+#include "include/ACodecBufferChannel.h"
 #include "include/DataConverter.h"
+#include "include/SecureBuffer.h"
+#include "include/SharedMemoryBuffer.h"
 #include "omx/OMXUtils.h"
 
 namespace android {
 
+using binder::Status;
+
 enum {
     kMaxIndicesToCheck = 32, // used when enumerating supported formats and profiles
 };
@@ -90,6 +96,21 @@
     }
 }
 
+static inline status_t statusFromBinderStatus(const Status &status) {
+    if (status.isOk()) {
+        return OK;
+    }
+    status_t err;
+    if ((err = status.serviceSpecificErrorCode()) != OK) {
+        return err;
+    }
+    if ((err = status.transactionError()) != OK) {
+        return err;
+    }
+    // Other exception
+    return UNKNOWN_ERROR;
+}
+
 // checks and converts status_t to a non-side-effect status_t
 static inline status_t makeNoSideEffectStatus(status_t err) {
     switch (err) {
@@ -136,15 +157,10 @@
         }
 
         sp<AMessage> notify = mNotify->dup();
-        bool first = true;
         sp<MessageList> msgList = new MessageList();
         for (std::list<omx_message>::const_iterator it = messages.cbegin();
               it != messages.cend(); ++it) {
             const omx_message &omx_msg = *it;
-            if (first) {
-                notify->setInt32("node", omx_msg.node);
-                first = false;
-            }
 
             sp<AMessage> msg = new AMessage;
             msg->setInt32("type", omx_msg.type);
@@ -495,8 +511,7 @@
 
 ACodec::ACodec()
     : mSampleRate(0),
-      mQuirks(0),
-      mNode(0),
+      mNodeGeneration(0),
       mUsingNativeWindow(false),
       mNativeWindowUsageBits(0),
       mLastNativeWindowDataSpace(HAL_DATASPACE_UNKNOWN),
@@ -512,9 +527,6 @@
       mChannelMaskPresent(false),
       mChannelMask(0),
       mDequeueCounter(0),
-      mInputMetadataType(kMetadataBufferTypeInvalid),
-      mOutputMetadataType(kMetadataBufferTypeInvalid),
-      mLegacyAdaptiveExperiment(false),
       mMetadataBuffersToSubmit(0),
       mNumUndequeuedBuffers(0),
       mRepeatFrameDelayUs(-1ll),
@@ -542,6 +554,9 @@
     mPortEOS[kPortIndexInput] = mPortEOS[kPortIndexOutput] = false;
     mInputEOSResult = OK;
 
+    mPortMode[kPortIndexInput] = IOMX::kPortModePresetByteBuffer;
+    mPortMode[kPortIndexOutput] = IOMX::kPortModePresetByteBuffer;
+
     memset(&mLastNativeWindowCrop, 0, sizeof(mLastNativeWindowCrop));
 
     changeState(mUninitializedState);
@@ -550,16 +565,21 @@
 ACodec::~ACodec() {
 }
 
-void ACodec::setNotificationMessage(const sp<AMessage> &msg) {
-    mNotify = msg;
-}
-
 void ACodec::initiateSetup(const sp<AMessage> &msg) {
     msg->setWhat(kWhatSetup);
     msg->setTarget(this);
     msg->post();
 }
 
+std::shared_ptr<BufferChannelBase> ACodec::getBufferChannel() {
+    if (!mBufferChannel) {
+        mBufferChannel = std::make_shared<ACodecBufferChannel>(
+                new AMessage(kWhatInputBufferFilled, this),
+                new AMessage(kWhatOutputBufferDrained, this));
+    }
+    return mBufferChannel;
+}
+
 void ACodec::signalSetParameters(const sp<AMessage> &params) {
     sp<AMessage> msg = new AMessage(kWhatSetParameters, this);
     msg->setMessage("params", params);
@@ -677,8 +697,7 @@
     int usageBits = 0;
     // no need to reconnect as we will not dequeue all buffers
     status_t err = setupNativeWindowSizeFormatAndUsage(
-            nativeWindow, &usageBits,
-            !storingMetadataInDecodedBuffers() || mLegacyAdaptiveExperiment /* reconnect */);
+            nativeWindow, &usageBits, !storingMetadataInDecodedBuffers());
     if (err != OK) {
         return err;
     }
@@ -728,7 +747,6 @@
         const BufferInfo &info = buffers[i];
         // skip undequeued buffers for meta data mode
         if (storingMetadataInDecodedBuffers()
-                && !mLegacyAdaptiveExperiment
                 && info.mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW) {
             ALOGV("skipping buffer");
             continue;
@@ -745,7 +763,7 @@
     }
 
     // cancel undequeued buffers to new surface
-    if (!storingMetadataInDecodedBuffers() || mLegacyAdaptiveExperiment) {
+    if (!storingMetadataInDecodedBuffers()) {
         for (size_t i = 0; i < buffers.size(); ++i) {
             BufferInfo &info = buffers.editItemAt(i);
             if (info.mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW) {
@@ -775,6 +793,21 @@
     return OK;
 }
 
+status_t ACodec::setPortMode(int32_t portIndex, IOMX::PortMode mode) {
+    status_t err = mOMXNode->setPortMode(portIndex, mode);
+    if (err != OK) {
+        ALOGE("[%s] setPortMode on %s to %s failed w/ err %d",
+                mComponentName.c_str(),
+                portIndex == kPortIndexInput ? "input" : "output",
+                asString(mode),
+                err);
+        return err;
+    }
+
+    mPortMode[portIndex] = mode;
+    return OK;
+}
+
 status_t ACodec::allocateBuffersOnPort(OMX_U32 portIndex) {
     CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
 
@@ -793,30 +826,21 @@
         InitOMXParams(&def);
         def.nPortIndex = portIndex;
 
-        err = mOMX->getParameter(
-                mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+        err = mOMXNode->getParameter(
+                OMX_IndexParamPortDefinition, &def, sizeof(def));
 
         if (err == OK) {
-            MetadataBufferType type =
-                portIndex == kPortIndexOutput ? mOutputMetadataType : mInputMetadataType;
+            const IOMX::PortMode &mode = mPortMode[portIndex];
             size_t bufSize = def.nBufferSize;
-            if (type == kMetadataBufferTypeANWBuffer) {
+            // Always allocate VideoNativeMetadata if using ANWBuffer.
+            // OMX might use gralloc source internally, but we don't share
+            // the metadata buffer with OMX; OMX has its own headers.
+            if (mode == IOMX::kPortModeDynamicANWBuffer) {
                 bufSize = sizeof(VideoNativeMetadata);
-            } else if (type == kMetadataBufferTypeNativeHandleSource) {
+            } else if (mode == IOMX::kPortModeDynamicNativeHandle) {
                 bufSize = sizeof(VideoNativeHandleMetadata);
             }
 
-            // If using gralloc or native source input metadata buffers, allocate largest
-            // metadata size as we prefer to generate native source metadata, but component
-            // may require gralloc source. For camera source, allocate at least enough
-            // size for native metadata buffers.
-            size_t allottedSize = bufSize;
-            if (portIndex == kPortIndexInput && type == kMetadataBufferTypeANWBuffer) {
-                bufSize = max(sizeof(VideoGrallocMetadata), sizeof(VideoNativeMetadata));
-            } else if (portIndex == kPortIndexInput && type == kMetadataBufferTypeCameraSource) {
-                bufSize = max(bufSize, sizeof(VideoNativeMetadata));
-            }
-
             size_t conversionBufferSize = 0;
 
             sp<DataConverter> converter = mConverter[portIndex];
@@ -831,9 +855,9 @@
 
             size_t alignment = MemoryDealer::getAllocationAlignment();
 
-            ALOGV("[%s] Allocating %u buffers of size %zu/%zu (from %u using %s) on %s port",
+            ALOGV("[%s] Allocating %u buffers of size %zu (from %u using %s) on %s port",
                     mComponentName.c_str(),
-                    def.nBufferCountActual, bufSize, allottedSize, def.nBufferSize, asString(type),
+                    def.nBufferCountActual, bufSize, def.nBufferSize, asString(mode),
                     portIndex == kPortIndexInput ? "input" : "output");
 
             // verify buffer sizes to avoid overflow in align()
@@ -851,63 +875,48 @@
             }
 
             size_t totalSize = def.nBufferCountActual * (alignedSize + alignedConvSize);
-            mDealer[portIndex] = new MemoryDealer(totalSize, "ACodec");
+            if (mode != IOMX::kPortModePresetSecureBuffer) {
+                mDealer[portIndex] = new MemoryDealer(totalSize, "ACodec");
+            }
 
+            const sp<AMessage> &format =
+                    portIndex == kPortIndexInput ? mInputFormat : mOutputFormat;
             for (OMX_U32 i = 0; i < def.nBufferCountActual && err == OK; ++i) {
-                sp<IMemory> mem = mDealer[portIndex]->allocate(bufSize);
-                if (mem == NULL || mem->pointer() == NULL) {
-                    return NO_MEMORY;
-                }
+                sp<IMemory> mem;
 
                 BufferInfo info;
                 info.mStatus = BufferInfo::OWNED_BY_US;
                 info.mFenceFd = -1;
                 info.mRenderInfo = NULL;
-                info.mNativeHandle = NULL;
+                info.mGraphicBuffer = NULL;
+                info.mNewGraphicBuffer = false;
 
-                uint32_t requiresAllocateBufferBit =
-                    (portIndex == kPortIndexInput)
-                        ? kRequiresAllocateBufferOnInputPorts
-                        : kRequiresAllocateBufferOnOutputPorts;
-
-                if (portIndex == kPortIndexInput && (mFlags & kFlagIsSecure)) {
-                    mem.clear();
-
+                if (mode == IOMX::kPortModePresetSecureBuffer) {
                     void *ptr = NULL;
                     sp<NativeHandle> native_handle;
-                    err = mOMX->allocateSecureBuffer(
-                            mNode, portIndex, bufSize, &info.mBufferID,
+                    err = mOMXNode->allocateSecureBuffer(
+                            portIndex, bufSize, &info.mBufferID,
                             &ptr, &native_handle);
 
-                    // TRICKY: this representation is unorthodox, but ACodec requires
-                    // an ABuffer with a proper size to validate range offsets and lengths.
-                    // Since mData is never referenced for secure input, it is used to store
-                    // either the pointer to the secure buffer, or the opaque handle as on
-                    // some devices ptr is actually an opaque handle, not a pointer.
-
-                    // TRICKY2: use native handle as the base of the ABuffer if received one,
-                    // because Widevine source only receives these base addresses.
-                    const native_handle_t *native_handle_ptr =
-                        native_handle == NULL ? NULL : native_handle->handle();
-                    info.mData = new ABuffer(
-                            ptr != NULL ? ptr : (void *)native_handle_ptr, bufSize);
-                    info.mNativeHandle = native_handle;
+                    info.mData = (native_handle == NULL)
+                            ? new SecureBuffer(format, ptr, bufSize)
+                            : new SecureBuffer(format, native_handle, bufSize);
                     info.mCodecData = info.mData;
-                } else if (mQuirks & requiresAllocateBufferBit) {
-                    err = mOMX->allocateBufferWithBackup(
-                            mNode, portIndex, mem, &info.mBufferID, allottedSize);
                 } else {
-                    err = mOMX->useBuffer(mNode, portIndex, mem, &info.mBufferID, allottedSize);
-                }
+                    mem = mDealer[portIndex]->allocate(bufSize);
+                    if (mem == NULL || mem->pointer() == NULL) {
+                        return NO_MEMORY;
+                    }
 
-                if (mem != NULL) {
-                    info.mCodecData = new ABuffer(mem->pointer(), bufSize);
-                    info.mCodecRef = mem;
+                    err = mOMXNode->useBuffer(portIndex, mem, &info.mBufferID);
 
-                    if (type == kMetadataBufferTypeANWBuffer) {
+                    if (mode == IOMX::kPortModeDynamicANWBuffer) {
                         ((VideoNativeMetadata *)mem->pointer())->nFenceFd = -1;
                     }
 
+                    info.mCodecData = new SharedMemoryBuffer(format, mem);
+                    info.mCodecRef = mem;
+
                     // if we require conversion, allocate conversion buffer for client use;
                     // otherwise, reuse codec buffer
                     if (mConverter[portIndex] != NULL) {
@@ -916,7 +925,7 @@
                         if (mem == NULL|| mem->pointer() == NULL) {
                             return NO_MEMORY;
                         }
-                        info.mData = new ABuffer(mem->pointer(), conversionBufferSize);
+                        info.mData = new SharedMemoryBuffer(format, mem);
                         info.mMemRef = mem;
                     } else {
                         info.mData = info.mCodecData;
@@ -933,20 +942,17 @@
         return err;
     }
 
-    sp<AMessage> notify = mNotify->dup();
-    notify->setInt32("what", CodecBase::kWhatBuffersAllocated);
-
-    notify->setInt32("portIndex", portIndex);
-
-    sp<PortDescription> desc = new PortDescription;
-
+    std::vector<ACodecBufferChannel::BufferAndId> array(mBuffers[portIndex].size());
     for (size_t i = 0; i < mBuffers[portIndex].size(); ++i) {
-        const BufferInfo &info = mBuffers[portIndex][i];
-        desc->addBuffer(info.mBufferID, info.mData, info.mNativeHandle, info.mMemRef);
+        array[i] = {mBuffers[portIndex][i].mData, mBuffers[portIndex][i].mBufferID};
     }
-
-    notify->setObject("portDesc", desc);
-    notify->post();
+    if (portIndex == kPortIndexInput) {
+        mBufferChannel->setInputBufferArray(array);
+    } else if (portIndex == kPortIndexOutput) {
+        mBufferChannel->setOutputBufferArray(array);
+    } else {
+        TRESPASS();
+    }
 
     return OK;
 }
@@ -958,15 +964,15 @@
     InitOMXParams(&def);
     def.nPortIndex = kPortIndexOutput;
 
-    status_t err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    status_t err = mOMXNode->getParameter(
+            OMX_IndexParamPortDefinition, &def, sizeof(def));
 
     if (err != OK) {
         return err;
     }
 
     OMX_U32 usage = 0;
-    err = mOMX->getGraphicBufferUsage(mNode, kPortIndexOutput, &usage);
+    err = mOMXNode->getGraphicBufferUsage(kPortIndexOutput, &usage);
     if (err != 0) {
         ALOGW("querying usage flags from OMX IL component failed: %d", err);
         // XXX: Currently this error is logged, but not fatal.
@@ -1003,8 +1009,8 @@
     InitOMXParams(&def);
     def.nPortIndex = kPortIndexOutput;
 
-    status_t err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    status_t err = mOMXNode->getParameter(
+            OMX_IndexParamPortDefinition, &def, sizeof(def));
 
     if (err == OK) {
         err = setupNativeWindowSizeFormatAndUsage(
@@ -1021,8 +1027,8 @@
     if (mTunneled) {
         ALOGV("Tunneled Playback: skipping native window buffer allocation.");
         def.nBufferCountActual = 0;
-        err = mOMX->setParameter(
-                mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+        err = mOMXNode->setParameter(
+                OMX_IndexParamPortDefinition, &def, sizeof(def));
 
         *minUndequeuedBuffers = 0;
         *bufferCount = 0;
@@ -1057,8 +1063,8 @@
         OMX_U32 newBufferCount =
             def.nBufferCountMin + *minUndequeuedBuffers + extraBuffers;
         def.nBufferCountActual = newBufferCount;
-        err = mOMX->setParameter(
-                mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+        err = mOMXNode->setParameter(
+                OMX_IndexParamPortDefinition, &def, sizeof(def));
 
         if (err == OK) {
             *minUndequeuedBuffers += extraBuffers;
@@ -1088,6 +1094,10 @@
 }
 
 status_t ACodec::allocateOutputBuffersFromNativeWindow() {
+    // This method only handles the non-metadata mode (or simulating legacy
+    // mode with metadata, which is transparent to ACodec).
+    CHECK(!storingMetadataInDecodedBuffers());
+
     OMX_U32 bufferCount, bufferSize, minUndequeuedBuffers;
     status_t err = configureOutputBuffersFromNativeWindow(
             &bufferCount, &bufferSize, &minUndequeuedBuffers, true /* preregister */);
@@ -1095,10 +1105,8 @@
         return err;
     mNumUndequeuedBuffers = minUndequeuedBuffers;
 
-    if (!storingMetadataInDecodedBuffers()) {
-        static_cast<Surface*>(mNativeWindow.get())
-                ->getIGraphicBufferProducer()->allowAllocation(true);
-    }
+    static_cast<Surface*>(mNativeWindow.get())
+            ->getIGraphicBufferProducer()->allowAllocation(true);
 
     ALOGV("[%s] Allocating %u buffers from a native window of size %u on "
          "output port",
@@ -1120,14 +1128,19 @@
         info.mFenceFd = fenceFd;
         info.mIsReadFence = false;
         info.mRenderInfo = NULL;
-        info.mData = new ABuffer(NULL /* data */, bufferSize /* capacity */);
-        info.mCodecData = info.mData;
         info.mGraphicBuffer = graphicBuffer;
+        info.mNewGraphicBuffer = false;
+
+        // TODO: We shouldn't need to create a MediaCodecBuffer. In metadata mode
+        //       OMX doesn't use the shared memory buffer, but some code still
+        //       accesses info.mData. Create an ABuffer as a placeholder.
+        info.mData = new MediaCodecBuffer(mOutputFormat, new ABuffer(bufferSize));
+        info.mCodecData = info.mData;
+
         mBuffers[kPortIndexOutput].push(info);
 
         IOMX::buffer_id bufferId;
-        err = mOMX->useGraphicBuffer(mNode, kPortIndexOutput, graphicBuffer,
-                &bufferId);
+        err = mOMXNode->useBuffer(kPortIndexOutput, graphicBuffer, &bufferId);
         if (err != 0) {
             ALOGE("registering GraphicBuffer %u with OMX IL component failed: "
                  "%d", i, err);
@@ -1144,9 +1157,9 @@
     OMX_U32 cancelStart;
     OMX_U32 cancelEnd;
 
-    if (err != 0) {
+    if (err != OK) {
         // If an error occurred while dequeuing we need to cancel any buffers
-        // that were dequeued.
+        // that were dequeued. Also cancel all if we're in legacy metadata mode.
         cancelStart = 0;
         cancelEnd = mBuffers[kPortIndexOutput].size();
     } else {
@@ -1165,102 +1178,47 @@
         }
     }
 
-    if (!storingMetadataInDecodedBuffers()) {
-        static_cast<Surface*>(mNativeWindow.get())
-                ->getIGraphicBufferProducer()->allowAllocation(false);
-    }
+    static_cast<Surface*>(mNativeWindow.get())
+            ->getIGraphicBufferProducer()->allowAllocation(false);
 
     return err;
 }
 
 status_t ACodec::allocateOutputMetadataBuffers() {
+    CHECK(storingMetadataInDecodedBuffers());
+
     OMX_U32 bufferCount, bufferSize, minUndequeuedBuffers;
     status_t err = configureOutputBuffersFromNativeWindow(
             &bufferCount, &bufferSize, &minUndequeuedBuffers,
-            mLegacyAdaptiveExperiment /* preregister */);
-    if (err != 0)
+            false /* preregister */);
+    if (err != OK)
         return err;
     mNumUndequeuedBuffers = minUndequeuedBuffers;
 
     ALOGV("[%s] Allocating %u meta buffers on output port",
          mComponentName.c_str(), bufferCount);
 
-    size_t bufSize = mOutputMetadataType == kMetadataBufferTypeANWBuffer ?
-            sizeof(struct VideoNativeMetadata) : sizeof(struct VideoGrallocMetadata);
-    size_t totalSize = bufferCount * align(bufSize, MemoryDealer::getAllocationAlignment());
-    mDealer[kPortIndexOutput] = new MemoryDealer(totalSize, "ACodec");
-
-    // Dequeue buffers and send them to OMX
     for (OMX_U32 i = 0; i < bufferCount; i++) {
         BufferInfo info;
         info.mStatus = BufferInfo::OWNED_BY_NATIVE_WINDOW;
         info.mFenceFd = -1;
         info.mRenderInfo = NULL;
         info.mGraphicBuffer = NULL;
+        info.mNewGraphicBuffer = false;
         info.mDequeuedAt = mDequeueCounter;
 
-        sp<IMemory> mem = mDealer[kPortIndexOutput]->allocate(bufSize);
-        if (mem == NULL || mem->pointer() == NULL) {
-            return NO_MEMORY;
-        }
-        if (mOutputMetadataType == kMetadataBufferTypeANWBuffer) {
-            ((VideoNativeMetadata *)mem->pointer())->nFenceFd = -1;
-        }
-        info.mData = new ABuffer(mem->pointer(), mem->size());
-        info.mMemRef = mem;
-        info.mCodecData = info.mData;
-        info.mCodecRef = mem;
+        info.mData = new MediaCodecBuffer(mOutputFormat, new ABuffer(bufferSize));
 
-        // we use useBuffer for metadata regardless of quirks
-        err = mOMX->useBuffer(
-                mNode, kPortIndexOutput, mem, &info.mBufferID, mem->size());
+        // Initialize fence fd to -1 to avoid warning in freeBuffer().
+        ((VideoNativeMetadata *)info.mData->base())->nFenceFd = -1;
+
+        info.mCodecData = info.mData;
+
+        err = mOMXNode->useBuffer(kPortIndexOutput, OMXBuffer::sPreset, &info.mBufferID);
         mBuffers[kPortIndexOutput].push(info);
 
-        ALOGV("[%s] allocated meta buffer with ID %u (pointer = %p)",
-             mComponentName.c_str(), info.mBufferID, mem->pointer());
-    }
-
-    if (mLegacyAdaptiveExperiment) {
-        // preallocate and preregister buffers
-        static_cast<Surface *>(mNativeWindow.get())
-                ->getIGraphicBufferProducer()->allowAllocation(true);
-
-        ALOGV("[%s] Allocating %u buffers from a native window of size %u on "
-             "output port",
-             mComponentName.c_str(), bufferCount, bufferSize);
-
-        // Dequeue buffers then cancel them all
-        for (OMX_U32 i = 0; i < bufferCount; i++) {
-            BufferInfo *info = &mBuffers[kPortIndexOutput].editItemAt(i);
-
-            ANativeWindowBuffer *buf;
-            int fenceFd;
-            err = mNativeWindow->dequeueBuffer(mNativeWindow.get(), &buf, &fenceFd);
-            if (err != 0) {
-                ALOGE("dequeueBuffer failed: %s (%d)", strerror(-err), -err);
-                break;
-            }
-
-            sp<GraphicBuffer> graphicBuffer(new GraphicBuffer(buf, false));
-            mOMX->updateGraphicBufferInMeta(
-                    mNode, kPortIndexOutput, graphicBuffer, info->mBufferID);
-            info->mStatus = BufferInfo::OWNED_BY_US;
-            info->setWriteFence(fenceFd, "allocateOutputMetadataBuffers for legacy");
-            info->mGraphicBuffer = graphicBuffer;
-        }
-
-        for (OMX_U32 i = 0; i < mBuffers[kPortIndexOutput].size(); i++) {
-            BufferInfo *info = &mBuffers[kPortIndexOutput].editItemAt(i);
-            if (info->mStatus == BufferInfo::OWNED_BY_US) {
-                status_t error = cancelBufferToNativeWindow(info);
-                if (err == OK) {
-                    err = error;
-                }
-            }
-        }
-
-        static_cast<Surface*>(mNativeWindow.get())
-                ->getIGraphicBufferProducer()->allowAllocation(false);
+        ALOGV("[%s] allocated meta buffer with ID %u",
+                mComponentName.c_str(), info.mBufferID);
     }
 
     mMetadataBuffersToSubmit = bufferCount - minUndequeuedBuffers;
@@ -1278,17 +1236,11 @@
     }
 
     ALOGV("[%s] submitting output meta buffer ID %u for graphic buffer %p",
-          mComponentName.c_str(), info->mBufferID, info->mGraphicBuffer.get());
+          mComponentName.c_str(), info->mBufferID, info->mGraphicBuffer->handle);
 
     --mMetadataBuffersToSubmit;
     info->checkWriteFence("submitOutputMetadataBuffer");
-    status_t err = mOMX->fillBuffer(mNode, info->mBufferID, info->mFenceFd);
-    info->mFenceFd = -1;
-    if (err == OK) {
-        info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
-    }
-
-    return err;
+    return fillBuffer(info);
 }
 
 status_t ACodec::waitForFence(int fd, const char *dbg ) {
@@ -1364,8 +1316,6 @@
 }
 
 void ACodec::notifyOfRenderedFrames(bool dropIncomplete, FrameRenderTracker::Info *until) {
-    sp<AMessage> msg = mNotify->dup();
-    msg->setInt32("what", CodecBase::kWhatOutputFramesRendered);
     std::list<FrameRenderTracker::Info> done =
         mRenderTracker.checkFencesAndGetRenderedFrames(until, dropIncomplete);
 
@@ -1381,9 +1331,7 @@
         }
     }
 
-    if (MediaCodec::CreateFramesRenderedMessage(done, msg)) {
-        msg->post();
-    }
+    mCallback->onOutputFramesRendered(done);
 }
 
 ACodec::BufferInfo *ACodec::dequeueBufferFromNativeWindow() {
@@ -1429,7 +1377,11 @@
                     break;
                 }
 
-                ALOGV("dequeued buffer %p", info->mGraphicBuffer->getNativeBuffer());
+                ALOGV("dequeued buffer #%u with age %u, graphicBuffer %p",
+                        (unsigned)(info - &mBuffers[kPortIndexOutput][0]),
+                        mDequeueCounter - info->mDequeuedAt,
+                        info->mGraphicBuffer->handle);
+
                 info->mStatus = BufferInfo::OWNED_BY_US;
                 info->setWriteFence(fenceFd, "dequeueBufferFromNativeWindow");
                 updateRenderInfoForDequeuedBuffer(buf, fenceFd, info);
@@ -1442,7 +1394,7 @@
         // same is possible in meta mode, in which case, it will be treated
         // as a normal buffer, which is not desirable.
         // TODO: fix this.
-        if (!stale && (!storingMetadataInDecodedBuffers() || mLegacyAdaptiveExperiment)) {
+        if (!stale && !storingMetadataInDecodedBuffers()) {
             ALOGI("dequeued unrecognized (stale) buffer %p. discarding", buf);
             stale = true;
         }
@@ -1475,38 +1427,28 @@
 
     // discard buffer in LRU info and replace with new buffer
     oldest->mGraphicBuffer = new GraphicBuffer(buf, false);
+    oldest->mNewGraphicBuffer = true;
     oldest->mStatus = BufferInfo::OWNED_BY_US;
     oldest->setWriteFence(fenceFd, "dequeueBufferFromNativeWindow for oldest");
     mRenderTracker.untrackFrame(oldest->mRenderInfo);
     oldest->mRenderInfo = NULL;
 
-    mOMX->updateGraphicBufferInMeta(
-            mNode, kPortIndexOutput, oldest->mGraphicBuffer,
-            oldest->mBufferID);
-
-    if (mOutputMetadataType == kMetadataBufferTypeGrallocSource) {
-        VideoGrallocMetadata *grallocMeta =
-            reinterpret_cast<VideoGrallocMetadata *>(oldest->mData->base());
-        ALOGV("replaced oldest buffer #%u with age %u (%p/%p stored in %p)",
-                (unsigned)(oldest - &mBuffers[kPortIndexOutput][0]),
-                mDequeueCounter - oldest->mDequeuedAt,
-                (void *)(uintptr_t)grallocMeta->pHandle,
-                oldest->mGraphicBuffer->handle, oldest->mData->base());
-    } else if (mOutputMetadataType == kMetadataBufferTypeANWBuffer) {
-        VideoNativeMetadata *nativeMeta =
-            reinterpret_cast<VideoNativeMetadata *>(oldest->mData->base());
-        ALOGV("replaced oldest buffer #%u with age %u (%p/%p stored in %p)",
-                (unsigned)(oldest - &mBuffers[kPortIndexOutput][0]),
-                mDequeueCounter - oldest->mDequeuedAt,
-                (void *)(uintptr_t)nativeMeta->pBuffer,
-                oldest->mGraphicBuffer->getNativeBuffer(), oldest->mData->base());
-    }
+    ALOGV("replaced oldest buffer #%u with age %u, graphicBuffer %p",
+            (unsigned)(oldest - &mBuffers[kPortIndexOutput][0]),
+            mDequeueCounter - oldest->mDequeuedAt,
+            oldest->mGraphicBuffer->handle);
 
     updateRenderInfoForDequeuedBuffer(buf, fenceFd, oldest);
     return oldest;
 }
 
 status_t ACodec::freeBuffersOnPort(OMX_U32 portIndex) {
+    if (portIndex == kPortIndexInput) {
+        mBufferChannel->setInputBufferArray({});
+    } else {
+        mBufferChannel->setOutputBufferArray({});
+    }
+
     status_t err = OK;
     for (size_t i = mBuffers[portIndex].size(); i > 0;) {
         i--;
@@ -1547,11 +1489,9 @@
     status_t err = OK;
 
     // there should not be any fences in the metadata
-    MetadataBufferType type =
-        portIndex == kPortIndexOutput ? mOutputMetadataType : mInputMetadataType;
-    if (type == kMetadataBufferTypeANWBuffer && info->mData != NULL
-            && info->mData->size() >= sizeof(VideoNativeMetadata)) {
-        int fenceFd = ((VideoNativeMetadata *)info->mData->data())->nFenceFd;
+    if (mPortMode[portIndex] == IOMX::kPortModeDynamicANWBuffer && info->mCodecData != NULL
+            && info->mCodecData->size() >= sizeof(VideoNativeMetadata)) {
+        int fenceFd = ((VideoNativeMetadata *)info->mCodecData->base())->nFenceFd;
         if (fenceFd >= 0) {
             ALOGW("unreleased fence (%d) in %s metadata buffer %zu",
                     fenceFd, portIndex == kPortIndexInput ? "input" : "output", i);
@@ -1566,7 +1506,7 @@
             // fall through
 
         case BufferInfo::OWNED_BY_NATIVE_WINDOW:
-            err = mOMX->freeBuffer(mNode, portIndex, info->mBufferID);
+            err = mOMXNode->freeBuffer(portIndex, info->mBufferID);
             break;
 
         default:
@@ -1584,7 +1524,7 @@
         info->mRenderInfo = NULL;
     }
 
-    // remove buffer even if mOMX->freeBuffer fails
+    // remove buffer even if mOMXNode->freeBuffer fails
     mBuffers[portIndex].removeAt(i);
     return err;
 }
@@ -1606,13 +1546,37 @@
     return NULL;
 }
 
+status_t ACodec::fillBuffer(BufferInfo *info) {
+    status_t err;
+    // Even in dynamic ANW buffer mode, if the graphic buffer is not changing,
+    // send sPreset instead of the same graphic buffer, so that OMX server
+    // side doesn't update the meta. In theory it should make no difference,
+    // but when the same buffer is parcelled again, a new handle could be
+    // created on the server side, and some decoders don't recognize the handle
+    // even though it's the same buffer.
+    if (!storingMetadataInDecodedBuffers() || !info->mNewGraphicBuffer) {
+        err = mOMXNode->fillBuffer(
+            info->mBufferID, OMXBuffer::sPreset, info->mFenceFd);
+    } else {
+        err = mOMXNode->fillBuffer(
+            info->mBufferID, info->mGraphicBuffer, info->mFenceFd);
+    }
+
+    info->mNewGraphicBuffer = false;
+    info->mFenceFd = -1;
+    if (err == OK) {
+        info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
+    }
+    return err;
+}
+
 status_t ACodec::setComponentRole(
         bool isEncoder, const char *mime) {
-    const char *role = getComponentRole(isEncoder, mime);
+    const char *role = GetComponentRole(isEncoder, mime);
     if (role == NULL) {
         return BAD_VALUE;
     }
-    status_t err = setComponentRole(mOMX, mNode, role);
+    status_t err = SetComponentRole(mOMXNode, role);
     if (err != OK) {
         ALOGW("[%s] Failed to set standard component role '%s'.",
              mComponentName.c_str(), role);
@@ -1620,98 +1584,6 @@
     return err;
 }
 
-//static
-const char *ACodec::getComponentRole(
-        bool isEncoder, const char *mime) {
-    struct MimeToRole {
-        const char *mime;
-        const char *decoderRole;
-        const char *encoderRole;
-    };
-
-    static const MimeToRole kMimeToRole[] = {
-        { MEDIA_MIMETYPE_AUDIO_MPEG,
-            "audio_decoder.mp3", "audio_encoder.mp3" },
-        { MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I,
-            "audio_decoder.mp1", "audio_encoder.mp1" },
-        { MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II,
-            "audio_decoder.mp2", "audio_encoder.mp2" },
-        { MEDIA_MIMETYPE_AUDIO_AMR_NB,
-            "audio_decoder.amrnb", "audio_encoder.amrnb" },
-        { MEDIA_MIMETYPE_AUDIO_AMR_WB,
-            "audio_decoder.amrwb", "audio_encoder.amrwb" },
-        { MEDIA_MIMETYPE_AUDIO_AAC,
-            "audio_decoder.aac", "audio_encoder.aac" },
-        { MEDIA_MIMETYPE_AUDIO_VORBIS,
-            "audio_decoder.vorbis", "audio_encoder.vorbis" },
-        { MEDIA_MIMETYPE_AUDIO_OPUS,
-            "audio_decoder.opus", "audio_encoder.opus" },
-        { MEDIA_MIMETYPE_AUDIO_G711_MLAW,
-            "audio_decoder.g711mlaw", "audio_encoder.g711mlaw" },
-        { MEDIA_MIMETYPE_AUDIO_G711_ALAW,
-            "audio_decoder.g711alaw", "audio_encoder.g711alaw" },
-        { MEDIA_MIMETYPE_VIDEO_AVC,
-            "video_decoder.avc", "video_encoder.avc" },
-        { MEDIA_MIMETYPE_VIDEO_HEVC,
-            "video_decoder.hevc", "video_encoder.hevc" },
-        { MEDIA_MIMETYPE_VIDEO_MPEG4,
-            "video_decoder.mpeg4", "video_encoder.mpeg4" },
-        { MEDIA_MIMETYPE_VIDEO_H263,
-            "video_decoder.h263", "video_encoder.h263" },
-        { MEDIA_MIMETYPE_VIDEO_VP8,
-            "video_decoder.vp8", "video_encoder.vp8" },
-        { MEDIA_MIMETYPE_VIDEO_VP9,
-            "video_decoder.vp9", "video_encoder.vp9" },
-        { MEDIA_MIMETYPE_AUDIO_RAW,
-            "audio_decoder.raw", "audio_encoder.raw" },
-        { MEDIA_MIMETYPE_VIDEO_DOLBY_VISION,
-            "video_decoder.dolby-vision", "video_encoder.dolby-vision" },
-        { MEDIA_MIMETYPE_AUDIO_FLAC,
-            "audio_decoder.flac", "audio_encoder.flac" },
-        { MEDIA_MIMETYPE_AUDIO_MSGSM,
-            "audio_decoder.gsm", "audio_encoder.gsm" },
-        { MEDIA_MIMETYPE_VIDEO_MPEG2,
-            "video_decoder.mpeg2", "video_encoder.mpeg2" },
-        { MEDIA_MIMETYPE_AUDIO_AC3,
-            "audio_decoder.ac3", "audio_encoder.ac3" },
-        { MEDIA_MIMETYPE_AUDIO_EAC3,
-            "audio_decoder.eac3", "audio_encoder.eac3" },
-    };
-
-    static const size_t kNumMimeToRole =
-        sizeof(kMimeToRole) / sizeof(kMimeToRole[0]);
-
-    size_t i;
-    for (i = 0; i < kNumMimeToRole; ++i) {
-        if (!strcasecmp(mime, kMimeToRole[i].mime)) {
-            break;
-        }
-    }
-
-    if (i == kNumMimeToRole) {
-        return NULL;
-    }
-
-    return isEncoder ? kMimeToRole[i].encoderRole
-                  : kMimeToRole[i].decoderRole;
-}
-
-//static
-status_t ACodec::setComponentRole(
-        const sp<IOMX> &omx, IOMX::node_id node, const char *role) {
-    OMX_PARAM_COMPONENTROLETYPE roleParams;
-    InitOMXParams(&roleParams);
-
-    strncpy((char *)roleParams.cRole,
-            role, OMX_MAX_STRINGNAME_SIZE - 1);
-
-    roleParams.cRole[OMX_MAX_STRINGNAME_SIZE - 1] = '\0';
-
-    return omx->setParameter(
-            node, OMX_IndexParamStandardComponentRole,
-            &roleParams, sizeof(roleParams));
-}
-
 status_t ACodec::configureCodec(
         const char *mime, const sp<AMessage> &msg) {
     int32_t encoder;
@@ -1725,8 +1597,8 @@
 
     mIsEncoder = encoder;
 
-    mInputMetadataType = kMetadataBufferTypeInvalid;
-    mOutputMetadataType = kMetadataBufferTypeInvalid;
+    mPortMode[kPortIndexInput] = IOMX::kPortModePresetByteBuffer;
+    mPortMode[kPortIndexOutput] = IOMX::kPortModePresetByteBuffer;
 
     status_t err = setComponentRole(encoder /* isEncoder */, mime);
 
@@ -1753,23 +1625,23 @@
     if (encoder
             && msg->findInt32("android._input-metadata-buffer-type", &storeMeta)
             && storeMeta != kMetadataBufferTypeInvalid) {
-        mInputMetadataType = (MetadataBufferType)storeMeta;
-        err = mOMX->storeMetaDataInBuffers(
-                mNode, kPortIndexInput, OMX_TRUE, &mInputMetadataType);
+        IOMX::PortMode mode;
+        if (storeMeta == kMetadataBufferTypeNativeHandleSource) {
+            mode = IOMX::kPortModeDynamicNativeHandle;
+        } else if (storeMeta == kMetadataBufferTypeANWBuffer ||
+                storeMeta == kMetadataBufferTypeGrallocSource) {
+            mode = IOMX::kPortModeDynamicANWBuffer;
+        } else {
+            return BAD_VALUE;
+        }
+        err = setPortMode(kPortIndexInput, mode);
         if (err != OK) {
-            ALOGE("[%s] storeMetaDataInBuffers (input) failed w/ err %d",
-                    mComponentName.c_str(), err);
-
             return err;
-        } else if (storeMeta == kMetadataBufferTypeANWBuffer
-                && mInputMetadataType == kMetadataBufferTypeGrallocSource) {
-            // IOMX translates ANWBuffers to gralloc source already.
-            mInputMetadataType = (MetadataBufferType)storeMeta;
         }
 
         uint32_t usageBits;
-        if (mOMX->getParameter(
-                mNode, (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits,
+        if (mOMXNode->getParameter(
+                (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits,
                 &usageBits, sizeof(usageBits)) == OK) {
             inputFormat->setInt32(
                     "using-sw-read-often", !!(usageBits & GRALLOC_USAGE_SW_READ_OFTEN));
@@ -1781,18 +1653,15 @@
             && msg->findInt32("prepend-sps-pps-to-idr-frames", &prependSPSPPS)
             && prependSPSPPS != 0) {
         OMX_INDEXTYPE index;
-        err = mOMX->getExtensionIndex(
-                mNode,
-                "OMX.google.android.index.prependSPSPPSToIDRFrames",
-                &index);
+        err = mOMXNode->getExtensionIndex(
+                "OMX.google.android.index.prependSPSPPSToIDRFrames", &index);
 
         if (err == OK) {
             PrependSPSPPSToIDRFramesParams params;
             InitOMXParams(&params);
             params.bEnable = OMX_TRUE;
 
-            err = mOMX->setParameter(
-                    mNode, index, &params, sizeof(params));
+            err = mOMXNode->setParameter(index, &params, sizeof(params));
         }
 
         if (err != OK) {
@@ -1812,12 +1681,14 @@
         OMX_BOOL enable = (OMX_BOOL) (prependSPSPPS
             && msg->findInt32("android._store-metadata-in-buffers-output", &storeMeta)
             && storeMeta != 0);
+        if (mFlags & kFlagIsSecure) {
+            enable = OMX_TRUE;
+        }
 
-        mOutputMetadataType = kMetadataBufferTypeNativeHandleSource;
-        err = mOMX->storeMetaDataInBuffers(mNode, kPortIndexOutput, enable, &mOutputMetadataType);
+        err = setPortMode(kPortIndexOutput, enable ?
+                IOMX::kPortModePresetSecureBuffer : IOMX::kPortModePresetByteBuffer);
         if (err != OK) {
-            ALOGE("[%s] storeMetaDataInBuffers (output) failed w/ err %d",
-                mComponentName.c_str(), err);
+            return err;
         }
 
         if (!msg->findInt64(
@@ -1826,8 +1697,12 @@
             mRepeatFrameDelayUs = -1ll;
         }
 
+        // only allow 32-bit value, since we pass it as U32 to OMX.
         if (!msg->findInt64("max-pts-gap-to-encoder", &mMaxPtsGapUs)) {
             mMaxPtsGapUs = -1ll;
+        } else if (mMaxPtsGapUs > INT32_MAX || mMaxPtsGapUs < 0) {
+            ALOGW("Unsupported value for max pts gap %lld", (long long) mMaxPtsGapUs);
+            mMaxPtsGapUs = -1ll;
         }
 
         if (!msg->findFloat("max-fps-to-encoder", &mMaxFps)) {
@@ -1850,7 +1725,6 @@
     bool haveNativeWindow = msg->findObject("native-window", &obj)
             && obj != NULL && video && !encoder;
     mUsingNativeWindow = haveNativeWindow;
-    mLegacyAdaptiveExperiment = false;
     if (video && !encoder) {
         inputFormat->setInt32("adaptive-playback", false);
 
@@ -1866,10 +1740,13 @@
 
         if (mFlags & kFlagIsSecure) {
             // use native_handles for secure input buffers
-            err = mOMX->enableNativeBuffers(
-                    mNode, kPortIndexInput, OMX_FALSE /* graphic */, OMX_TRUE);
-            ALOGI_IF(err != OK, "falling back to non-native_handles");
-            err = OK; // ignore error for now
+            err = setPortMode(kPortIndexInput, IOMX::kPortModePresetSecureBuffer);
+
+            if (err != OK) {
+                ALOGI("falling back to non-native_handles");
+                setPortMode(kPortIndexInput, IOMX::kPortModePresetByteBuffer);
+                err = OK; // ignore error for now
+            }
         }
     }
     if (haveNativeWindow) {
@@ -1883,8 +1760,8 @@
             OMX_CONFIG_BOOLEANTYPE config;
             InitOMXParams(&config);
             config.bEnabled = (OMX_BOOL)enabled;
-            status_t temp = mOMX->setConfig(
-                    mNode, (OMX_INDEXTYPE)OMX_IndexConfigAutoFramerateConversion,
+            status_t temp = mOMXNode->setConfig(
+                    (OMX_INDEXTYPE)OMX_IndexConfigAutoFramerateConversion,
                     &config, sizeof(config));
             if (temp == OK) {
                 outputFormat->setInt32("auto-frc", enabled);
@@ -1915,8 +1792,8 @@
             if (msg->findInt32("max-width", &maxWidth) &&
                     msg->findInt32("max-height", &maxHeight)) {
 
-                err = mOMX->prepareForAdaptivePlayback(
-                        mNode, kPortIndexOutput, OMX_TRUE, maxWidth, maxHeight);
+                err = mOMXNode->prepareForAdaptivePlayback(
+                        kPortIndexOutput, OMX_TRUE, maxWidth, maxHeight);
                 if (err != OK) {
                     ALOGW("[%s] prepareForAdaptivePlayback failed w/ err %d",
                             mComponentName.c_str(), err);
@@ -1941,14 +1818,8 @@
                 return err;
             }
 
-            // Always try to enable dynamic output buffers on native surface
-            mOutputMetadataType = kMetadataBufferTypeANWBuffer;
-            err = mOMX->storeMetaDataInBuffers(
-                    mNode, kPortIndexOutput, OMX_TRUE, &mOutputMetadataType);
+            err = setPortMode(kPortIndexOutput, IOMX::kPortModeDynamicANWBuffer);
             if (err != OK) {
-                ALOGE("[%s] storeMetaDataInBuffers failed w/ err %d",
-                        mComponentName.c_str(), err);
-
                 // if adaptive playback has been requested, try JB fallback
                 // NOTE: THIS FALLBACK MECHANISM WILL BE REMOVED DUE TO ITS
                 // LARGE MEMORY REQUIREMENT
@@ -1978,9 +1849,8 @@
                     ALOGV("[%s] prepareForAdaptivePlayback(%dx%d)",
                             mComponentName.c_str(), maxWidth, maxHeight);
 
-                    err = mOMX->prepareForAdaptivePlayback(
-                            mNode, kPortIndexOutput, OMX_TRUE, maxWidth,
-                            maxHeight);
+                    err = mOMXNode->prepareForAdaptivePlayback(
+                            kPortIndexOutput, OMX_TRUE, maxWidth, maxHeight);
                     ALOGW_IF(err != OK,
                             "[%s] prepareForAdaptivePlayback failed w/ err %d",
                             mComponentName.c_str(), err);
@@ -1994,12 +1864,9 @@
                 // allow failure
                 err = OK;
             } else {
-                ALOGV("[%s] storeMetaDataInBuffers succeeded",
-                        mComponentName.c_str());
+                ALOGV("[%s] setPortMode on output to %s succeeded",
+                        mComponentName.c_str(), asString(IOMX::kPortModeDynamicANWBuffer));
                 CHECK(storingMetadataInDecodedBuffers());
-                mLegacyAdaptiveExperiment = ADebug::isExperimentEnabled(
-                        "legacy-adaptive", !msg->contains("no-experiments"));
-
                 inputFormat->setInt32("adaptive-playback", true);
             }
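
This hunk replaces the explicit storeMetaDataInBuffers call with setPortMode(kPortModeDynamicANWBuffer) and keeps prepareForAdaptivePlayback only as the legacy fallback. A compressed sketch of that negotiation order; the two functions below are stand-ins for the component calls, not the IOMX interface:

    #include <cstdio>

    using status_t = int;
    constexpr status_t OK = 0;
    constexpr status_t ERROR_UNSUPPORTED = -1;

    // Stand-ins for the two component calls used in this hunk.
    static status_t setDynamicOutputBuffers() { return ERROR_UNSUPPORTED; }  // pretend unsupported
    static status_t prepareForAdaptivePlayback(int maxW, int maxH) {
        (void)maxW; (void)maxH;
        return OK;
    }

    int main() {
        bool adaptivePlayback = false;
        const int maxWidth = 1920, maxHeight = 1080;  // from "max-width"/"max-height"

        if (setDynamicOutputBuffers() == OK) {
            // dynamic ANW buffers: metadata mode gives adaptive playback directly
            adaptivePlayback = true;
        } else if (prepareForAdaptivePlayback(maxWidth, maxHeight) == OK) {
            // legacy fallback: preallocate buffers for the largest expected size
            adaptivePlayback = true;
        }
        printf("adaptive-playback=%d\n", adaptivePlayback);
        return 0;
    }
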
 
@@ -2028,6 +1895,12 @@
         if (haveNativeWindow && mComponentName.startsWith("OMX.google.")) {
             usingSwRenderer = true;
             haveNativeWindow = false;
+            (void)setPortMode(kPortIndexOutput, IOMX::kPortModePresetByteBuffer);
+        } else if (haveNativeWindow && !storingMetadataInDecodedBuffers()) {
+            err = setPortMode(kPortIndexOutput, IOMX::kPortModePresetANWBuffer);
+            if (err != OK) {
+                return err;
+            }
         }
 
         if (encoder) {
@@ -2042,17 +1915,8 @@
 
         if (haveNativeWindow) {
             mNativeWindow = static_cast<Surface *>(obj.get());
-        }
 
-        // initialize native window now to get actual output format
-        // TODO: this is needed for some encoders even though they don't use native window
-        err = initNativeWindow();
-        if (err != OK) {
-            return err;
-        }
-
-        // fallback for devices that do not handle flex-YUV for native buffers
-        if (haveNativeWindow) {
+            // fallback for devices that do not handle flex-YUV for native buffers
             int32_t requestedColorFormat = OMX_COLOR_FormatUnused;
             if (msg->findInt32("color-format", &requestedColorFormat) &&
                     requestedColorFormat == OMX_COLOR_FormatYUV420Flexible) {
@@ -2068,8 +1932,8 @@
                 }
                 ALOGD("[%s] Requested output format %#x and got %#x.",
                         mComponentName.c_str(), requestedColorFormat, colorFormat);
-                if (!isFlexibleColorFormat(
-                                mOMX, mNode, colorFormat, haveNativeWindow, &flexibleEquivalent)
+                if (!IsFlexibleColorFormat(
+                        mOMXNode, colorFormat, haveNativeWindow, &flexibleEquivalent)
                         || flexibleEquivalent != (OMX_U32)requestedColorFormat) {
                     // device did not handle flex-YUV request for native window, fall back
                     // to SW renderer
@@ -2078,18 +1942,10 @@
                     mNativeWindowUsageBits = 0;
                     haveNativeWindow = false;
                     usingSwRenderer = true;
-                    if (storingMetadataInDecodedBuffers()) {
-                        err = mOMX->storeMetaDataInBuffers(
-                                mNode, kPortIndexOutput, OMX_FALSE, &mOutputMetadataType);
-                        mOutputMetadataType = kMetadataBufferTypeInvalid; // just in case
-                        // TODO: implement adaptive-playback support for bytebuffer mode.
-                        // This is done by SW codecs, but most HW codecs don't support it.
-                        inputFormat->setInt32("adaptive-playback", false);
-                    }
-                    if (err == OK) {
-                        err = mOMX->enableNativeBuffers(
-                                mNode, kPortIndexOutput, OMX_TRUE /* graphic */, OMX_FALSE);
-                    }
+                    // TODO: implement adaptive-playback support for bytebuffer mode.
+                    // This is done by SW codecs, but most HW codecs don't support it.
+                    err = setPortMode(kPortIndexOutput, IOMX::kPortModePresetByteBuffer);
+                    inputFormat->setInt32("adaptive-playback", false);
                     if (mFlags & kFlagIsGrallocUsageProtected) {
                         // fallback is not supported for protected playback
                         err = PERMISSION_DENIED;
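
When the device cannot offer a flexible-YUV equivalent on the native window, the rewritten branch switches the output port to byte buffers and disables adaptive playback in one step instead of undoing metadata mode by hand; protected playback still refuses the software fallback. A small sketch of that decision, using hypothetical helpers in place of the OMX and surface calls:

    #include <cstdio>

    using status_t = int;
    constexpr status_t OK = 0;
    constexpr status_t PERMISSION_DENIED = -1;

    // Stand-ins; the real checks go through IsFlexibleColorFormat and setPortMode.
    static bool surfaceGetsFlexYuvEquivalent() { return false; }
    static status_t switchOutputToByteBuffers() { return OK; }

    static status_t pickRenderPath(bool protectedPlayback, bool *usingSwRenderer) {
        *usingSwRenderer = false;
        if (surfaceGetsFlexYuvEquivalent()) {
            return OK;  // keep rendering through the native window
        }
        // device did not honor the flex-YUV request: render in software,
        // move the output port to byte buffers, give up adaptive playback
        *usingSwRenderer = true;
        status_t err = switchOutputToByteBuffers();
        if (protectedPlayback) {
            err = PERMISSION_DENIED;  // no software fallback for protected content
        }
        return err;
    }

    int main() {
        bool sw = false;
        status_t err = pickRenderPath(false /* protectedPlayback */, &sw);
        printf("swRenderer=%d err=%d\n", sw, err);
        return 0;
    }
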
@@ -2290,7 +2146,6 @@
 
     // NOTE: both mBaseOutputFormat and mOutputFormat are outputFormat to signal first frame.
     mBaseOutputFormat = outputFormat;
-    // trigger a kWhatOutputFormatChanged msg on first buffer
     mLastOutputFormat.clear();
 
     err = getPortFormat(kPortIndexInput, inputFormat);
@@ -2330,8 +2185,8 @@
     OMX_PARAM_U32TYPE config;
     InitOMXParams(&config);
     config.nU32 = (OMX_U32)priority;
-    status_t temp = mOMX->setConfig(
-            mNode, (OMX_INDEXTYPE)OMX_IndexConfigPriority,
+    status_t temp = mOMXNode->setConfig(
+            (OMX_INDEXTYPE)OMX_IndexConfigPriority,
             &config, sizeof(config));
     if (temp != OK) {
         ALOGI("codec does not support config priority (err %d)", temp);
@@ -2358,8 +2213,8 @@
     OMX_PARAM_U32TYPE config;
     InitOMXParams(&config);
     config.nU32 = rate;
-    status_t err = mOMX->setConfig(
-            mNode, (OMX_INDEXTYPE)OMX_IndexConfigOperatingRate,
+    status_t err = mOMXNode->setConfig(
+            (OMX_INDEXTYPE)OMX_IndexConfigOperatingRate,
             &config, sizeof(config));
     if (err != OK) {
         ALOGI("codec does not support config operating rate (err %d)", err);
@@ -2371,8 +2226,8 @@
     OMX_VIDEO_CONFIG_ANDROID_INTRAREFRESHTYPE params;
     InitOMXParams(&params);
     params.nPortIndex = kPortIndexOutput;
-    status_t err = mOMX->getConfig(
-            mNode, (OMX_INDEXTYPE)OMX_IndexConfigAndroidIntraRefresh, &params, sizeof(params));
+    status_t err = mOMXNode->getConfig(
+            (OMX_INDEXTYPE)OMX_IndexConfigAndroidIntraRefresh, &params, sizeof(params));
     if (err == OK) {
         *intraRefreshPeriod = params.nRefreshPeriod;
         return OK;
@@ -2383,8 +2238,8 @@
     InitOMXParams(&refreshParams);
     refreshParams.nPortIndex = kPortIndexOutput;
     refreshParams.eRefreshMode = OMX_VIDEO_IntraRefreshCyclic;
-    err = mOMX->getParameter(
-            mNode, OMX_IndexParamVideoIntraRefresh, &refreshParams, sizeof(refreshParams));
+    err = mOMXNode->getParameter(
+            OMX_IndexParamVideoIntraRefresh, &refreshParams, sizeof(refreshParams));
     if (err != OK || refreshParams.nCirMBs == 0) {
         *intraRefreshPeriod = 0;
         return OK;
@@ -2396,8 +2251,8 @@
     InitOMXParams(&def);
     OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &def.format.video;
     def.nPortIndex = kPortIndexOutput;
-    err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    err = mOMXNode->getParameter(
+            OMX_IndexParamPortDefinition, &def, sizeof(def));
     if (err != OK) {
         *intraRefreshPeriod = 0;
         return err;
@@ -2415,8 +2270,8 @@
     InitOMXParams(&params);
     params.nPortIndex = kPortIndexOutput;
     params.nRefreshPeriod = intraRefreshPeriod;
-    status_t err = mOMX->setConfig(
-            mNode, (OMX_INDEXTYPE)OMX_IndexConfigAndroidIntraRefresh, &params, sizeof(params));
+    status_t err = mOMXNode->setConfig(
+            (OMX_INDEXTYPE)OMX_IndexConfigAndroidIntraRefresh, &params, sizeof(params));
     if (err == OK) {
         return OK;
     }
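
Nearly every hunk in this file makes the same mechanical change: the explicit mNode argument disappears because mOMXNode already identifies the component, while the surrounding InitOMXParams, getParameter/getConfig, modify, setParameter/setConfig shape stays intact. A reduced sketch of that shape against a hypothetical node interface; INode, Params and the index value are stand-ins, not the IOMX types:

    #include <cstring>
    #include <cstdio>

    using status_t = int;
    constexpr status_t OK = 0;

    // Stand-in for an OMX-style parameter struct with a size/version header.
    struct Params {
        unsigned nSize;
        unsigned nVersion;
        unsigned nPortIndex;
        unsigned nU32;
    };

    template <typename T>
    static void InitOMXParams(T *p) {           // zero the struct, fill the header
        memset(p, 0, sizeof(T));
        p->nSize = sizeof(T);
        p->nVersion = 1;
    }

    // Hypothetical node object: the component is implicit in the object,
    // which is why the refactored calls no longer pass mNode.
    struct INode {
        Params stored{};
        status_t getParameter(int /*index*/, Params *p, size_t) { *p = stored; return OK; }
        status_t setParameter(int /*index*/, const Params *p, size_t) { stored = *p; return OK; }
    };

    static status_t setSomeU32(INode *node, unsigned portIndex, unsigned value) {
        Params params;
        InitOMXParams(&params);                                          // 1. init header
        params.nPortIndex = portIndex;
        status_t err = node->getParameter(0, &params, sizeof(params));   // 2. read current
        if (err != OK) {
            return err;
        }
        params.nU32 = value;                                             // 3. modify one field
        return node->setParameter(0, &params, sizeof(params));           // 4. write back
    }

    int main() {
        INode node;
        printf("err=%d\n", setSomeU32(&node, 1 /* output */, 42));
        return 0;
    }
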
@@ -2443,8 +2298,8 @@
         InitOMXParams(&def);
         OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &def.format.video;
         def.nPortIndex = kPortIndexOutput;
-        err = mOMX->getParameter(
-                mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+        err = mOMXNode->getParameter(
+                OMX_IndexParamPortDefinition, &def, sizeof(def));
         if (err != OK) {
             return err;
         }
@@ -2454,8 +2309,9 @@
         refreshParams.nCirMBs = divUp((divUp(width, 16u) * divUp(height, 16u)), intraRefreshPeriod);
     }
 
-    err = mOMX->setParameter(mNode, OMX_IndexParamVideoIntraRefresh,
-                             &refreshParams, sizeof(refreshParams));
+    err = mOMXNode->setParameter(
+            OMX_IndexParamVideoIntraRefresh,
+            &refreshParams, sizeof(refreshParams));
     if (err != OK) {
         return err;
     }
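
The cyclic intra-refresh fallback above splits the frame into 16x16 macroblocks and spreads them over the requested period, so nCirMBs ends up as ceil(macroblock count / period). A worked example of that arithmetic; divUp here is ordinary ceiling division, assumed to match the helper used above:

    #include <cstdio>
    #include <cstdint>

    // Ceiling division, as assumed for the divUp helper in the hunk above.
    static uint32_t divUp(uint32_t num, uint32_t den) {
        return (num + den - 1) / den;
    }

    int main() {
        uint32_t width = 1920, height = 1080;
        uint32_t intraRefreshPeriod = 30;  // refresh the whole frame every 30 frames

        uint32_t mbWide = divUp(width, 16u);    // 120
        uint32_t mbHigh = divUp(height, 16u);   // 68 (1080/16 rounded up)
        uint32_t nCirMBs = divUp(mbWide * mbHigh, intraRefreshPeriod);  // 272

        printf("macroblocks=%u, refreshed per frame=%u\n", mbWide * mbHigh, nCirMBs);
        return 0;
    }
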
@@ -2498,9 +2354,9 @@
     InitOMXParams(&layerParams);
     layerParams.nPortIndex = kPortIndexOutput;
 
-    status_t err = mOMX->getParameter(
-        mNode, (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
-        &layerParams, sizeof(layerParams));
+    status_t err = mOMXNode->getParameter(
+            (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
+            &layerParams, sizeof(layerParams));
 
     if (err != OK) {
         return err;
@@ -2520,8 +2376,8 @@
         layerConfig.nBLayerCountActual = numBLayers;
         layerConfig.bBitrateRatiosSpecified = OMX_FALSE;
 
-        err = mOMX->setConfig(
-                mNode, (OMX_INDEXTYPE)OMX_IndexConfigAndroidVideoTemporalLayering,
+        err = mOMXNode->setConfig(
+                (OMX_INDEXTYPE)OMX_IndexConfigAndroidVideoTemporalLayering,
                 &layerConfig, sizeof(layerConfig));
     } else {
         layerParams.ePattern = pattern;
@@ -2529,8 +2385,8 @@
         layerParams.nBLayerCountActual = numBLayers;
         layerParams.bBitrateRatiosSpecified = OMX_FALSE;
 
-        err = mOMX->setParameter(
-                mNode, (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
+        err = mOMXNode->setParameter(
+                (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
                 &layerParams, sizeof(layerParams));
     }
 
@@ -2547,8 +2403,8 @@
         return err;
     }
 
-    err = mOMX->getParameter(
-            mNode, (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
+    err = mOMXNode->getParameter(
+            (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
             &layerParams, sizeof(layerParams));
 
     if (err == OK) {
@@ -2571,8 +2427,8 @@
     InitOMXParams(&def);
     def.nPortIndex = portIndex;
 
-    status_t err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    status_t err = mOMXNode->getParameter(
+            OMX_IndexParamPortDefinition, &def, sizeof(def));
 
     if (err != OK) {
         return err;
@@ -2584,15 +2440,15 @@
 
     def.nBufferSize = size;
 
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    err = mOMXNode->setParameter(
+            OMX_IndexParamPortDefinition, &def, sizeof(def));
 
     if (err != OK) {
         return err;
     }
 
-    err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    err = mOMXNode->getParameter(
+            OMX_IndexParamPortDefinition, &def, sizeof(def));
 
     if (err != OK) {
         return err;
@@ -2614,9 +2470,8 @@
     format.nPortIndex = portIndex;
     for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
         format.nIndex = index;
-        status_t err = mOMX->getParameter(
-                mNode, OMX_IndexParamAudioPortFormat,
-                &format, sizeof(format));
+        status_t err = mOMXNode->getParameter(
+                OMX_IndexParamAudioPortFormat, &format, sizeof(format));
 
         if (err != OK) {
             return err;
@@ -2634,8 +2489,8 @@
         }
     }
 
-    return mOMX->setParameter(
-            mNode, OMX_IndexParamAudioPortFormat, &format, sizeof(format));
+    return mOMXNode->setParameter(
+            OMX_IndexParamAudioPortFormat, &format, sizeof(format));
 }
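
The port-format selection above walks the component's advertised formats one nIndex at a time until the desired eEncoding shows up, then commits it with setParameter. A standalone sketch of that bounded enumeration; the vector-backed component model and the value of kMaxIndicesToCheck are assumptions made for illustration:

    #include <cstdio>
    #include <vector>

    using status_t = int;
    constexpr status_t OK = 0;
    constexpr status_t ERROR_UNSUPPORTED = -1;

    enum Encoding { CodingPCM, CodingAAC, CodingFLAC };

    // Stand-in for a component's advertised port formats, queried one index
    // at a time like OMX_IndexParamAudioPortFormat.
    static const std::vector<Encoding> kAdvertised = { CodingPCM, CodingAAC };

    static status_t getPortFormat(unsigned index, Encoding *out) {
        if (index >= kAdvertised.size()) {
            return ERROR_UNSUPPORTED;  // no more formats to enumerate
        }
        *out = kAdvertised[index];
        return OK;
    }

    // Bounded loop mirroring the nIndex enumeration above; the cap guards
    // against components that never stop advertising formats.
    static status_t selectAudioPortFormat(Encoding desired) {
        constexpr unsigned kMaxIndicesToCheck = 32;  // assumed bound, for illustration
        for (unsigned index = 0; index <= kMaxIndicesToCheck; ++index) {
            Encoding found;
            status_t err = getPortFormat(index, &found);
            if (err != OK) {
                return err;
            }
            if (found == desired) {
                return OK;  // the real code follows up with setParameter()
            }
        }
        return ERROR_UNSUPPORTED;
    }

    int main() {
        printf("AAC supported: %s\n", selectAudioPortFormat(CodingAAC) == OK ? "yes" : "no");
        printf("FLAC supported: %s\n", selectAudioPortFormat(CodingFLAC) == OK ? "yes" : "no");
        return 0;
    }
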
 
 status_t ACodec::setupAACCodec(
@@ -2667,8 +2522,8 @@
         InitOMXParams(&def);
         def.nPortIndex = kPortIndexOutput;
 
-        err = mOMX->getParameter(
-                mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+        err = mOMXNode->getParameter(
+                OMX_IndexParamPortDefinition, &def, sizeof(def));
 
         if (err != OK) {
             return err;
@@ -2677,8 +2532,8 @@
         def.format.audio.bFlagErrorConcealment = OMX_TRUE;
         def.format.audio.eEncoding = OMX_AUDIO_CodingAAC;
 
-        err = mOMX->setParameter(
-                mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+        err = mOMXNode->setParameter(
+                OMX_IndexParamPortDefinition, &def, sizeof(def));
 
         if (err != OK) {
             return err;
@@ -2688,8 +2543,8 @@
         InitOMXParams(&profile);
         profile.nPortIndex = kPortIndexOutput;
 
-        err = mOMX->getParameter(
-                mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile));
+        err = mOMXNode->getParameter(
+                OMX_IndexParamAudioAac, &profile, sizeof(profile));
 
         if (err != OK) {
             return err;
@@ -2736,8 +2591,8 @@
         }
 
 
-        err = mOMX->setParameter(
-                mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile));
+        err = mOMXNode->setParameter(
+                OMX_IndexParamAudioAac, &profile, sizeof(profile));
 
         if (err != OK) {
             return err;
@@ -2750,8 +2605,8 @@
     InitOMXParams(&profile);
     profile.nPortIndex = kPortIndexInput;
 
-    err = mOMX->getParameter(
-            mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile));
+    err = mOMXNode->getParameter(
+            OMX_IndexParamAudioAac, &profile, sizeof(profile));
 
     if (err != OK) {
         return err;
@@ -2775,10 +2630,12 @@
     presentation.nEncodedTargetLevel = drc.encodedTargetLevel;
     presentation.nPCMLimiterEnable = pcmLimiterEnable;
 
-    status_t res = mOMX->setParameter(mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile));
+    status_t res = mOMXNode->setParameter(
+            OMX_IndexParamAudioAac, &profile, sizeof(profile));
     if (res == OK) {
         // optional parameters, will not cause configuration failure
-        mOMX->setParameter(mNode, (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAacPresentation,
+        mOMXNode->setParameter(
+                (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAacPresentation,
                 &presentation, sizeof(presentation));
     } else {
         ALOGW("did not set AudioAndroidAacPresentation due to error %d when setting AudioAac", res);
@@ -2805,11 +2662,8 @@
     InitOMXParams(&def);
     def.nPortIndex = kPortIndexInput;
 
-    err = mOMX->getParameter(
-            mNode,
-            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3,
-            &def,
-            sizeof(def));
+    err = mOMXNode->getParameter(
+            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3, &def, sizeof(def));
 
     if (err != OK) {
         return err;
@@ -2818,11 +2672,8 @@
     def.nChannels = numChannels;
     def.nSampleRate = sampleRate;
 
-    return mOMX->setParameter(
-            mNode,
-            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3,
-            &def,
-            sizeof(def));
+    return mOMXNode->setParameter(
+            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3, &def, sizeof(def));
 }
 
 status_t ACodec::setupEAC3Codec(
@@ -2843,11 +2694,8 @@
     InitOMXParams(&def);
     def.nPortIndex = kPortIndexInput;
 
-    err = mOMX->getParameter(
-            mNode,
-            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidEac3,
-            &def,
-            sizeof(def));
+    err = mOMXNode->getParameter(
+            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidEac3, &def, sizeof(def));
 
     if (err != OK) {
         return err;
@@ -2856,11 +2704,8 @@
     def.nChannels = numChannels;
     def.nSampleRate = sampleRate;
 
-    return mOMX->setParameter(
-            mNode,
-            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidEac3,
-            &def,
-            sizeof(def));
+    return mOMXNode->setParameter(
+            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidEac3, &def, sizeof(def));
 }
 
 static OMX_AUDIO_AMRBANDMODETYPE pickModeFromBitRate(
@@ -2913,8 +2758,8 @@
     InitOMXParams(&def);
     def.nPortIndex = encoder ? kPortIndexOutput : kPortIndexInput;
 
-    status_t err =
-        mOMX->getParameter(mNode, OMX_IndexParamAudioAmr, &def, sizeof(def));
+    status_t err = mOMXNode->getParameter(
+            OMX_IndexParamAudioAmr, &def, sizeof(def));
 
     if (err != OK) {
         return err;
@@ -2923,8 +2768,8 @@
     def.eAMRFrameFormat = OMX_AUDIO_AMRFrameFormatFSF;
     def.eAMRBandMode = pickModeFromBitRate(isWAMR, bitrate);
 
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamAudioAmr, &def, sizeof(def));
+    err = mOMXNode->setParameter(
+            OMX_IndexParamAudioAmr, &def, sizeof(def));
 
     if (err != OK) {
         return err;
@@ -2954,13 +2799,13 @@
         def.nPortIndex = kPortIndexOutput;
 
         // configure compression level
-        status_t err = mOMX->getParameter(mNode, OMX_IndexParamAudioFlac, &def, sizeof(def));
+        status_t err = mOMXNode->getParameter(OMX_IndexParamAudioFlac, &def, sizeof(def));
         if (err != OK) {
             ALOGE("setupFlacCodec(): Error %d getting OMX_IndexParamAudioFlac parameter", err);
             return err;
         }
         def.nCompressionLevel = compressionLevel;
-        err = mOMX->setParameter(mNode, OMX_IndexParamAudioFlac, &def, sizeof(def));
+        err = mOMXNode->setParameter(OMX_IndexParamAudioFlac, &def, sizeof(def));
         if (err != OK) {
             ALOGE("setupFlacCodec(): Error %d setting OMX_IndexParamAudioFlac parameter", err);
             return err;
@@ -2979,8 +2824,8 @@
     InitOMXParams(&def);
     def.nPortIndex = portIndex;
 
-    status_t err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    status_t err = mOMXNode->getParameter(
+            OMX_IndexParamPortDefinition, &def, sizeof(def));
 
     if (err != OK) {
         return err;
@@ -2988,8 +2833,8 @@
 
     def.format.audio.eEncoding = OMX_AUDIO_CodingPCM;
 
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    err = mOMXNode->setParameter(
+            OMX_IndexParamPortDefinition, &def, sizeof(def));
 
     if (err != OK) {
         return err;
@@ -2999,8 +2844,8 @@
     InitOMXParams(&pcmParams);
     pcmParams.nPortIndex = portIndex;
 
-    err = mOMX->getParameter(
-            mNode, OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
+    err = mOMXNode->getParameter(
+            OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
 
     if (err != OK) {
         return err;
@@ -3031,15 +2876,15 @@
         return OMX_ErrorNone;
     }
 
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
+    err = mOMXNode->setParameter(
+            OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
     // if we could not set up raw format to non-16-bit, try with 16-bit
     // NOTE: we will also verify this via readback, in case codec ignores these fields
     if (err != OK && encoding != kAudioEncodingPcm16bit) {
         pcmParams.eNumData = OMX_NumericalDataSigned;
         pcmParams.nBitPerSample = 16;
-        err = mOMX->setParameter(
-                mNode, OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
+        err = mOMXNode->setParameter(
+                OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
     }
     return err;
 }
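
setupRawAudioFormat first asks for the requested PCM depth and, if the component refuses anything other than 16-bit, retries with signed 16-bit samples; the real code then verifies the result by reading the parameter back. A compact sketch of the retry; the one-function component model is a stand-in:

    #include <cstdio>

    using status_t = int;
    constexpr status_t OK = 0;
    constexpr status_t BAD_VALUE = -1;

    enum PcmEncoding { kAudioEncodingPcm16bit = 16, kAudioEncodingPcmFloat = 32 };

    // Stand-in component that only accepts 16-bit PCM.
    static status_t setPcmBits(int bitsPerSample) {
        return bitsPerSample == 16 ? OK : BAD_VALUE;
    }

    static status_t setupRawAudio(PcmEncoding encoding) {
        int bits = (encoding == kAudioEncodingPcmFloat) ? 32 : 16;
        status_t err = setPcmBits(bits);
        // could not set up the requested depth: retry with 16-bit, which the
        // real code double-checks afterwards via OMX_IndexParamAudioPcm readback
        if (err != OK && encoding != kAudioEncodingPcm16bit) {
            err = setPcmBits(16);
        }
        return err;
    }

    int main() {
        printf("float request -> err=%d (fell back to 16-bit)\n",
                setupRawAudio(kAudioEncodingPcmFloat));
        return 0;
    }
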
@@ -3048,8 +2893,8 @@
         int32_t audioHwSync, const sp<ANativeWindow> &nativeWindow) {
     native_handle_t* sidebandHandle;
 
-    status_t err = mOMX->configureVideoTunnelMode(
-            mNode, kPortIndexOutput, OMX_TRUE, audioHwSync, &sidebandHandle);
+    status_t err = mOMXNode->configureVideoTunnelMode(
+            kPortIndexOutput, OMX_TRUE, audioHwSync, &sidebandHandle);
     if (err != OK) {
         ALOGE("configureVideoTunnelMode failed! (err %d).", err);
         return err;
@@ -3078,8 +2923,8 @@
 
     for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
         format.nIndex = index;
-        status_t err = mOMX->getParameter(
-                mNode, OMX_IndexParamVideoPortFormat,
+        status_t err = mOMXNode->getParameter(
+                OMX_IndexParamVideoPortFormat,
                 &format, sizeof(format));
 
         if (err != OK) {
@@ -3089,8 +2934,8 @@
         // substitute back flexible color format to codec supported format
         OMX_U32 flexibleEquivalent;
         if (compressionFormat == OMX_VIDEO_CodingUnused
-                && isFlexibleColorFormat(
-                        mOMX, mNode, format.eColorFormat, usingNativeBuffers, &flexibleEquivalent)
+                && IsFlexibleColorFormat(
+                        mOMXNode, format.eColorFormat, usingNativeBuffers, &flexibleEquivalent)
                 && colorFormat == flexibleEquivalent) {
             ALOGI("[%s] using color format %#x in place of %#x",
                     mComponentName.c_str(), format.eColorFormat, colorFormat);
@@ -3133,9 +2978,8 @@
         return UNKNOWN_ERROR;
     }
 
-    status_t err = mOMX->setParameter(
-            mNode, OMX_IndexParamVideoPortFormat,
-            &format, sizeof(format));
+    status_t err = mOMXNode->setParameter(
+            OMX_IndexParamVideoPortFormat, &format, sizeof(format));
 
     return err;
 }
@@ -3165,9 +3009,8 @@
 
     for (OMX_U32 index = 0; ; ++index) {
         format.nIndex = index;
-        status_t err = mOMX->getParameter(
-                mNode, OMX_IndexParamVideoPortFormat,
-                &format, sizeof(format));
+        status_t err = mOMXNode->getParameter(
+                OMX_IndexParamVideoPortFormat, &format, sizeof(format));
         if (err != OK) {
             // no more formats, pick legacy format if found
             if (legacyFormat.eColorFormat != OMX_COLOR_FormatUnused) {
@@ -3193,16 +3036,15 @@
         // find best legacy non-standard format
         OMX_U32 flexibleEquivalent;
         if (legacyFormat.eColorFormat == OMX_COLOR_FormatUnused
-                && isFlexibleColorFormat(
-                        mOMX, mNode, format.eColorFormat, false /* usingNativeBuffers */,
+                && IsFlexibleColorFormat(
+                        mOMXNode, format.eColorFormat, false /* usingNativeBuffers */,
                         &flexibleEquivalent)
                 && flexibleEquivalent == OMX_COLOR_FormatYUV420Flexible) {
             memcpy(&legacyFormat, &format, sizeof(format));
         }
     }
-    return mOMX->setParameter(
-            mNode, OMX_IndexParamVideoPortFormat,
-            &format, sizeof(format));
+    return mOMXNode->setParameter(
+            OMX_IndexParamVideoPortFormat, &format, sizeof(format));
 }
 
 static const struct VideoCodingMapEntry {
@@ -3258,14 +3100,14 @@
     status_t err;
     ALOGD("Setting [%s] %s port buffer number: %d", mComponentName.c_str(),
             portIndex == kPortIndexInput ? "input" : "output", bufferNum);
-    err = mOMX->getParameter(
-        mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    err = mOMXNode->getParameter(
+        OMX_IndexParamPortDefinition, &def, sizeof(def));
     if (err != OK) {
         return err;
     }
     def.nBufferCountActual = bufferNum;
-    err = mOMX->setParameter(
-        mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    err = mOMXNode->setParameter(
+        OMX_IndexParamPortDefinition, &def, sizeof(def));
     if (err != OK) {
         // Component could reject this request.
         ALOGW("Fail to set [%s] %s port buffer number: %d", mComponentName.c_str(),
@@ -3296,11 +3138,9 @@
         params.nPortIndex = kPortIndexInput;
         // Check if VP9 decoder advertises supported profiles.
         params.nProfileIndex = 0;
-        status_t err = mOMX->getParameter(
-                mNode,
+        status_t err = mOMXNode->getParameter(
                 OMX_IndexParamVideoProfileLevelQuerySupported,
-                &params,
-                sizeof(params));
+                &params, sizeof(params));
         mIsLegacyVP9Decoder = err != OK;
     }
 
@@ -3389,8 +3229,8 @@
 }
 
 status_t ACodec::initDescribeColorAspectsIndex() {
-    status_t err = mOMX->getExtensionIndex(
-            mNode, "OMX.google.android.index.describeColorAspects", &mDescribeColorAspectsIndex);
+    status_t err = mOMXNode->getExtensionIndex(
+            "OMX.google.android.index.describeColorAspects", &mDescribeColorAspectsIndex);
     if (err != OK) {
         mDescribeColorAspectsIndex = (OMX_INDEXTYPE)0;
     }
@@ -3400,7 +3240,7 @@
 status_t ACodec::setCodecColorAspects(DescribeColorAspectsParams &params, bool verify) {
     status_t err = ERROR_UNSUPPORTED;
     if (mDescribeColorAspectsIndex) {
-        err = mOMX->setConfig(mNode, mDescribeColorAspectsIndex, &params, sizeof(params));
+        err = mOMXNode->setConfig(mDescribeColorAspectsIndex, &params, sizeof(params));
     }
     ALOGV("[%s] setting color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
             mComponentName.c_str(),
@@ -3445,7 +3285,7 @@
 status_t ACodec::getCodecColorAspects(DescribeColorAspectsParams &params) {
     status_t err = ERROR_UNSUPPORTED;
     if (mDescribeColorAspectsIndex) {
-        err = mOMX->getConfig(mNode, mDescribeColorAspectsIndex, &params, sizeof(params));
+        err = mOMXNode->getConfig(mDescribeColorAspectsIndex, &params, sizeof(params));
     }
     ALOGV("[%s] got color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
             mComponentName.c_str(),
@@ -3667,8 +3507,8 @@
 }
 
 status_t ACodec::initDescribeHDRStaticInfoIndex() {
-    status_t err = mOMX->getExtensionIndex(
-            mNode, "OMX.google.android.index.describeHDRStaticInfo", &mDescribeHDRStaticInfoIndex);
+    status_t err = mOMXNode->getExtensionIndex(
+            "OMX.google.android.index.describeHDRStaticInfo", &mDescribeHDRStaticInfoIndex);
     if (err != OK) {
         mDescribeHDRStaticInfoIndex = (OMX_INDEXTYPE)0;
     }
@@ -3678,7 +3518,7 @@
 status_t ACodec::setHDRStaticInfo(const DescribeHDRStaticInfoParams &params) {
     status_t err = ERROR_UNSUPPORTED;
     if (mDescribeHDRStaticInfoIndex) {
-        err = mOMX->setConfig(mNode, mDescribeHDRStaticInfoIndex, &params, sizeof(params));
+        err = mOMXNode->setConfig(mDescribeHDRStaticInfoIndex, &params, sizeof(params));
     }
 
     const HDRStaticInfo *info = &params.sInfo;
@@ -3699,7 +3539,7 @@
 status_t ACodec::getHDRStaticInfo(DescribeHDRStaticInfoParams &params) {
     status_t err = ERROR_UNSUPPORTED;
     if (mDescribeHDRStaticInfoIndex) {
-        err = mOMX->getConfig(mNode, mDescribeHDRStaticInfoIndex, &params, sizeof(params));
+        err = mOMXNode->getConfig(mDescribeHDRStaticInfoIndex, &params, sizeof(params));
     }
 
     ALOGW_IF(err == ERROR_UNSUPPORTED && mDescribeHDRStaticInfoIndex,
@@ -3738,8 +3578,8 @@
 
     def.nPortIndex = kPortIndexInput;
 
-    err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    err = mOMXNode->getParameter(
+            OMX_IndexParamPortDefinition, &def, sizeof(def));
 
     if (err != OK) {
         return err;
@@ -3789,8 +3629,8 @@
         video_def->eColorFormat = colorFormat;
     }
 
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    err = mOMXNode->setParameter(
+            OMX_IndexParamPortDefinition, &def, sizeof(def));
 
     if (err != OK) {
         ALOGE("[%s] failed to set input port definition parameters.",
@@ -3820,8 +3660,8 @@
 
     def.nPortIndex = kPortIndexOutput;
 
-    err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    err = mOMXNode->getParameter(
+            OMX_IndexParamPortDefinition, &def, sizeof(def));
 
     if (err != OK) {
         return err;
@@ -3834,8 +3674,8 @@
     video_def->eCompressionFormat = compressionFormat;
     video_def->eColorFormat = OMX_COLOR_FormatUnused;
 
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    err = mOMXNode->setParameter(
+            OMX_IndexParamPortDefinition, &def, sizeof(def));
 
     if (err != OK) {
         ALOGE("[%s] failed to set output port definition parameters.",
@@ -3964,9 +3804,8 @@
         params.nAirRef = ref;
     }
 
-    status_t err = mOMX->setParameter(
-            mNode, OMX_IndexParamVideoIntraRefresh,
-            &params, sizeof(params));
+    status_t err = mOMXNode->setParameter(
+            OMX_IndexParamVideoIntraRefresh, &params, sizeof(params));
     return err;
 }
 
@@ -4029,8 +3868,8 @@
     InitOMXParams(&mpeg4type);
     mpeg4type.nPortIndex = kPortIndexOutput;
 
-    status_t err = mOMX->getParameter(
-            mNode, OMX_IndexParamVideoMpeg4, &mpeg4type, sizeof(mpeg4type));
+    status_t err = mOMXNode->getParameter(
+            OMX_IndexParamVideoMpeg4, &mpeg4type, sizeof(mpeg4type));
 
     if (err != OK) {
         return err;
@@ -4072,8 +3911,8 @@
         mpeg4type.eLevel = static_cast<OMX_VIDEO_MPEG4LEVELTYPE>(level);
     }
 
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamVideoMpeg4, &mpeg4type, sizeof(mpeg4type));
+    err = mOMXNode->setParameter(
+            OMX_IndexParamVideoMpeg4, &mpeg4type, sizeof(mpeg4type));
 
     if (err != OK) {
         return err;
@@ -4111,8 +3950,8 @@
     InitOMXParams(&h263type);
     h263type.nPortIndex = kPortIndexOutput;
 
-    status_t err = mOMX->getParameter(
-            mNode, OMX_IndexParamVideoH263, &h263type, sizeof(h263type));
+    status_t err = mOMXNode->getParameter(
+            OMX_IndexParamVideoH263, &h263type, sizeof(h263type));
 
     if (err != OK) {
         return err;
@@ -4149,8 +3988,8 @@
     h263type.nPictureHeaderRepetition = 0;
     h263type.nGOBHeaderInterval = 0;
 
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamVideoH263, &h263type, sizeof(h263type));
+    err = mOMXNode->setParameter(
+            OMX_IndexParamVideoH263, &h263type, sizeof(h263type));
 
     if (err != OK) {
         return err;
@@ -4251,8 +4090,8 @@
     InitOMXParams(&h264type);
     h264type.nPortIndex = kPortIndexOutput;
 
-    err = mOMX->getParameter(
-            mNode, OMX_IndexParamVideoAvc, &h264type, sizeof(h264type));
+    err = mOMXNode->getParameter(
+            OMX_IndexParamVideoAvc, &h264type, sizeof(h264type));
 
     if (err != OK) {
         return err;
@@ -4341,8 +4180,8 @@
     h264type.bMBAFF = OMX_FALSE;
     h264type.eLoopFilterMode = OMX_VIDEO_AVCLoopFilterEnable;
 
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamVideoAvc, &h264type, sizeof(h264type));
+    err = mOMXNode->setParameter(
+            OMX_IndexParamVideoAvc, &h264type, sizeof(h264type));
 
     if (err != OK) {
         return err;
@@ -4358,8 +4197,8 @@
         OMX_VIDEO_PARAM_ANDROID_TEMPORALLAYERINGTYPE layering;
         InitOMXParams(&layering);
         layering.nPortIndex = kPortIndexOutput;
-        if (mOMX->getParameter(
-                        mNode, (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
+        if (mOMXNode->getParameter(
+                        (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
                         &layering, sizeof(layering)) == OK
                 && layering.eSupportedPatterns
                 && layering.nBLayerCountMax == 0) {
@@ -4367,8 +4206,8 @@
             h264type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate, h264type.nBFrames);
             h264type.nAllowedPictureTypes &= ~OMX_VIDEO_PictureTypeB;
             ALOGI("disabling B-frames");
-            err = mOMX->setParameter(
-                    mNode, OMX_IndexParamVideoAvc, &h264type, sizeof(h264type));
+            err = mOMXNode->setParameter(
+                    OMX_IndexParamVideoAvc, &h264type, sizeof(h264type));
 
             if (err != OK) {
                 return err;
@@ -4403,8 +4242,8 @@
     hevcType.nPortIndex = kPortIndexOutput;
 
     status_t err = OK;
-    err = mOMX->getParameter(
-            mNode, (OMX_INDEXTYPE)OMX_IndexParamVideoHevc, &hevcType, sizeof(hevcType));
+    err = mOMXNode->getParameter(
+            (OMX_INDEXTYPE)OMX_IndexParamVideoHevc, &hevcType, sizeof(hevcType));
     if (err != OK) {
         return err;
     }
@@ -4427,8 +4266,8 @@
     // TODO: finer control?
     hevcType.nKeyFrameInterval = setPFramesSpacing(iFrameInterval, frameRate) + 1;
 
-    err = mOMX->setParameter(
-            mNode, (OMX_INDEXTYPE)OMX_IndexParamVideoHevc, &hevcType, sizeof(hevcType));
+    err = mOMXNode->setParameter(
+            (OMX_INDEXTYPE)OMX_IndexParamVideoHevc, &hevcType, sizeof(hevcType));
     if (err != OK) {
         return err;
     }
@@ -4496,8 +4335,8 @@
     OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE vp8type;
     InitOMXParams(&vp8type);
     vp8type.nPortIndex = kPortIndexOutput;
-    status_t err = mOMX->getParameter(
-            mNode, (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidVp8Encoder,
+    status_t err = mOMXNode->getParameter(
+            (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidVp8Encoder,
             &vp8type, sizeof(vp8type));
 
     if (err == OK) {
@@ -4517,8 +4356,8 @@
             vp8type.nMaxQuantizer = 63;
         }
 
-        err = mOMX->setParameter(
-                mNode, (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidVp8Encoder,
+        err = mOMXNode->setParameter(
+                (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidVp8Encoder,
                 &vp8type, sizeof(vp8type));
         if (err != OK) {
             ALOGW("Extended VP8 parameters set failed: %d", err);
@@ -4542,11 +4381,9 @@
 
     for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
         params.nProfileIndex = index;
-        status_t err = mOMX->getParameter(
-                mNode,
+        status_t err = mOMXNode->getParameter(
                 OMX_IndexParamVideoProfileLevelQuerySupported,
-                &params,
-                sizeof(params));
+                &params, sizeof(params));
 
         if (err != OK) {
             return err;
@@ -4574,9 +4411,8 @@
     InitOMXParams(&bitrateType);
     bitrateType.nPortIndex = kPortIndexOutput;
 
-    status_t err = mOMX->getParameter(
-            mNode, OMX_IndexParamVideoBitrate,
-            &bitrateType, sizeof(bitrateType));
+    status_t err = mOMXNode->getParameter(
+            OMX_IndexParamVideoBitrate, &bitrateType, sizeof(bitrateType));
 
     if (err != OK) {
         return err;
@@ -4585,9 +4421,8 @@
     bitrateType.eControlRate = bitrateMode;
     bitrateType.nTargetBitrate = bitrate;
 
-    return mOMX->setParameter(
-            mNode, OMX_IndexParamVideoBitrate,
-            &bitrateType, sizeof(bitrateType));
+    return mOMXNode->setParameter(
+            OMX_IndexParamVideoBitrate, &bitrateType, sizeof(bitrateType));
 }
 
 status_t ACodec::setupErrorCorrectionParameters() {
@@ -4595,8 +4430,8 @@
     InitOMXParams(&errorCorrectionType);
     errorCorrectionType.nPortIndex = kPortIndexOutput;
 
-    status_t err = mOMX->getParameter(
-            mNode, OMX_IndexParamVideoErrorCorrection,
+    status_t err = mOMXNode->getParameter(
+            OMX_IndexParamVideoErrorCorrection,
             &errorCorrectionType, sizeof(errorCorrectionType));
 
     if (err != OK) {
@@ -4609,8 +4444,8 @@
     errorCorrectionType.bEnableDataPartitioning = OMX_FALSE;
     errorCorrectionType.bEnableRVLC = OMX_FALSE;
 
-    return mOMX->setParameter(
-            mNode, OMX_IndexParamVideoErrorCorrection,
+    return mOMXNode->setParameter(
+            OMX_IndexParamVideoErrorCorrection,
             &errorCorrectionType, sizeof(errorCorrectionType));
 }
 
@@ -4624,8 +4459,8 @@
 
     OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &def.format.video;
 
-    status_t err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    status_t err = mOMXNode->getParameter(
+            OMX_IndexParamPortDefinition, &def, sizeof(def));
     if (err != OK) {
         return err;
     }
@@ -4654,21 +4489,12 @@
         }
     }
 
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    err = mOMXNode->setParameter(
+            OMX_IndexParamPortDefinition, &def, sizeof(def));
 
     return err;
 }
 
-status_t ACodec::initNativeWindow() {
-    if (mNativeWindow != NULL) {
-        return mOMX->enableNativeBuffers(mNode, kPortIndexOutput, OMX_TRUE /* graphic */, OMX_TRUE);
-    }
-
-    mOMX->enableNativeBuffers(mNode, kPortIndexOutput, OMX_TRUE /* graphic */, OMX_FALSE);
-    return OK;
-}
-
 size_t ACodec::countBuffersOwnedByComponent(OMX_U32 portIndex) const {
     size_t n = 0;
 
@@ -4747,195 +4573,13 @@
     }
 }
 
-// static
-bool ACodec::describeDefaultColorFormat(DescribeColorFormat2Params &params) {
-    MediaImage2 &image = params.sMediaImage;
-    memset(&image, 0, sizeof(image));
-
-    image.mType = MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
-    image.mNumPlanes = 0;
-
-    const OMX_COLOR_FORMATTYPE fmt = params.eColorFormat;
-    image.mWidth = params.nFrameWidth;
-    image.mHeight = params.nFrameHeight;
-
-    // only supporting YUV420
-    if (fmt != OMX_COLOR_FormatYUV420Planar &&
-        fmt != OMX_COLOR_FormatYUV420PackedPlanar &&
-        fmt != OMX_COLOR_FormatYUV420SemiPlanar &&
-        fmt != OMX_COLOR_FormatYUV420PackedSemiPlanar &&
-        fmt != (OMX_COLOR_FORMATTYPE)HAL_PIXEL_FORMAT_YV12) {
-        ALOGW("do not know color format 0x%x = %d", fmt, fmt);
-        return false;
-    }
-
-    // TEMPORARY FIX for some vendors that advertise sliceHeight as 0
-    if (params.nStride != 0 && params.nSliceHeight == 0) {
-        ALOGW("using sliceHeight=%u instead of what codec advertised (=0)",
-                params.nFrameHeight);
-        params.nSliceHeight = params.nFrameHeight;
-    }
-
-    // we need stride and slice-height to be non-zero and sensible. These values were chosen to
-    // prevent integer overflows further down the line, and do not indicate support for
-    // 32kx32k video.
-    if (params.nStride == 0 || params.nSliceHeight == 0
-            || params.nStride > 32768 || params.nSliceHeight > 32768) {
-        ALOGW("cannot describe color format 0x%x = %d with stride=%u and sliceHeight=%u",
-                fmt, fmt, params.nStride, params.nSliceHeight);
-        return false;
-    }
-
-    // set-up YUV format
-    image.mType = MediaImage2::MEDIA_IMAGE_TYPE_YUV;
-    image.mNumPlanes = 3;
-    image.mBitDepth = 8;
-    image.mBitDepthAllocated = 8;
-    image.mPlane[image.Y].mOffset = 0;
-    image.mPlane[image.Y].mColInc = 1;
-    image.mPlane[image.Y].mRowInc = params.nStride;
-    image.mPlane[image.Y].mHorizSubsampling = 1;
-    image.mPlane[image.Y].mVertSubsampling = 1;
-
-    switch ((int)fmt) {
-        case HAL_PIXEL_FORMAT_YV12:
-            if (params.bUsingNativeBuffers) {
-                size_t ystride = align(params.nStride, 16);
-                size_t cstride = align(params.nStride / 2, 16);
-                image.mPlane[image.Y].mRowInc = ystride;
-
-                image.mPlane[image.V].mOffset = ystride * params.nSliceHeight;
-                image.mPlane[image.V].mColInc = 1;
-                image.mPlane[image.V].mRowInc = cstride;
-                image.mPlane[image.V].mHorizSubsampling = 2;
-                image.mPlane[image.V].mVertSubsampling = 2;
-
-                image.mPlane[image.U].mOffset = image.mPlane[image.V].mOffset
-                        + (cstride * params.nSliceHeight / 2);
-                image.mPlane[image.U].mColInc = 1;
-                image.mPlane[image.U].mRowInc = cstride;
-                image.mPlane[image.U].mHorizSubsampling = 2;
-                image.mPlane[image.U].mVertSubsampling = 2;
-                break;
-            } else {
-                // fall through as YV12 is used for YUV420Planar by some codecs
-            }
-
-        case OMX_COLOR_FormatYUV420Planar:
-        case OMX_COLOR_FormatYUV420PackedPlanar:
-            image.mPlane[image.U].mOffset = params.nStride * params.nSliceHeight;
-            image.mPlane[image.U].mColInc = 1;
-            image.mPlane[image.U].mRowInc = params.nStride / 2;
-            image.mPlane[image.U].mHorizSubsampling = 2;
-            image.mPlane[image.U].mVertSubsampling = 2;
-
-            image.mPlane[image.V].mOffset = image.mPlane[image.U].mOffset
-                    + (params.nStride * params.nSliceHeight / 4);
-            image.mPlane[image.V].mColInc = 1;
-            image.mPlane[image.V].mRowInc = params.nStride / 2;
-            image.mPlane[image.V].mHorizSubsampling = 2;
-            image.mPlane[image.V].mVertSubsampling = 2;
-            break;
-
-        case OMX_COLOR_FormatYUV420SemiPlanar:
-            // FIXME: NV21 for sw-encoder, NV12 for decoder and hw-encoder
-        case OMX_COLOR_FormatYUV420PackedSemiPlanar:
-            // NV12
-            image.mPlane[image.U].mOffset = params.nStride * params.nSliceHeight;
-            image.mPlane[image.U].mColInc = 2;
-            image.mPlane[image.U].mRowInc = params.nStride;
-            image.mPlane[image.U].mHorizSubsampling = 2;
-            image.mPlane[image.U].mVertSubsampling = 2;
-
-            image.mPlane[image.V].mOffset = image.mPlane[image.U].mOffset + 1;
-            image.mPlane[image.V].mColInc = 2;
-            image.mPlane[image.V].mRowInc = params.nStride;
-            image.mPlane[image.V].mHorizSubsampling = 2;
-            image.mPlane[image.V].mVertSubsampling = 2;
-            break;
-
-        default:
-            TRESPASS();
-    }
-    return true;
-}
-
-// static
-bool ACodec::describeColorFormat(
-        const sp<IOMX> &omx, IOMX::node_id node,
-        DescribeColorFormat2Params &describeParams)
-{
-    OMX_INDEXTYPE describeColorFormatIndex;
-    if (omx->getExtensionIndex(
-            node, "OMX.google.android.index.describeColorFormat",
-            &describeColorFormatIndex) == OK) {
-        DescribeColorFormatParams describeParamsV1(describeParams);
-        if (omx->getParameter(
-                node, describeColorFormatIndex,
-                &describeParamsV1, sizeof(describeParamsV1)) == OK) {
-            describeParams.initFromV1(describeParamsV1);
-            return describeParams.sMediaImage.mType != MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
-        }
-    } else if (omx->getExtensionIndex(
-            node, "OMX.google.android.index.describeColorFormat2", &describeColorFormatIndex) == OK
-               && omx->getParameter(
-            node, describeColorFormatIndex, &describeParams, sizeof(describeParams)) == OK) {
-        return describeParams.sMediaImage.mType != MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
-    }
-
-    return describeDefaultColorFormat(describeParams);
-}
-
-// static
-bool ACodec::isFlexibleColorFormat(
-         const sp<IOMX> &omx, IOMX::node_id node,
-         uint32_t colorFormat, bool usingNativeBuffers, OMX_U32 *flexibleEquivalent) {
-    DescribeColorFormat2Params describeParams;
-    InitOMXParams(&describeParams);
-    describeParams.eColorFormat = (OMX_COLOR_FORMATTYPE)colorFormat;
-    // reasonable dummy values
-    describeParams.nFrameWidth = 128;
-    describeParams.nFrameHeight = 128;
-    describeParams.nStride = 128;
-    describeParams.nSliceHeight = 128;
-    describeParams.bUsingNativeBuffers = (OMX_BOOL)usingNativeBuffers;
-
-    CHECK(flexibleEquivalent != NULL);
-
-    if (!describeColorFormat(omx, node, describeParams)) {
-        return false;
-    }
-
-    const MediaImage2 &img = describeParams.sMediaImage;
-    if (img.mType == MediaImage2::MEDIA_IMAGE_TYPE_YUV) {
-        if (img.mNumPlanes != 3
-                || img.mPlane[img.Y].mHorizSubsampling != 1
-                || img.mPlane[img.Y].mVertSubsampling != 1) {
-            return false;
-        }
-
-        // YUV 420
-        if (img.mPlane[img.U].mHorizSubsampling == 2
-                && img.mPlane[img.U].mVertSubsampling == 2
-                && img.mPlane[img.V].mHorizSubsampling == 2
-                && img.mPlane[img.V].mVertSubsampling == 2) {
-            // possible flexible YUV420 format
-            if (img.mBitDepth <= 8) {
-               *flexibleEquivalent = OMX_COLOR_FormatYUV420Flexible;
-               return true;
-            }
-        }
-    }
-    return false;
-}
-
 status_t ACodec::getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify) {
     const char *niceIndex = portIndex == kPortIndexInput ? "input" : "output";
     OMX_PARAM_PORTDEFINITIONTYPE def;
     InitOMXParams(&def);
     def.nPortIndex = portIndex;
 
-    status_t err = mOMX->getParameter(mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    status_t err = mOMXNode->getParameter(OMX_IndexParamPortDefinition, &def, sizeof(def));
     if (err != OK) {
         return err;
     }
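
The isFlexibleColorFormat body removed above (its callers now use the static helper IsFlexibleColorFormat, which takes the node directly) boils down to a predicate on the MediaImage2 plane layout: exactly three planes, full-resolution luma, 2x2 subsampled chroma, at most 8 bits per sample. A standalone restatement of that predicate; the Image struct is a simplified stand-in for MediaImage2:

    #include <cstdio>
    #include <cstdint>

    // Simplified stand-in for MediaImage2: only the fields the check needs.
    struct Plane { uint32_t horizSubsampling; uint32_t vertSubsampling; };
    struct Image {
        enum { Y = 0, U = 1, V = 2 };
        bool isYuv;
        uint32_t numPlanes;
        uint32_t bitDepth;
        Plane plane[3];
    };

    // Mirrors the removed isFlexibleColorFormat check: a format is a flexible
    // YUV420 equivalent when luma is full resolution and both chroma planes
    // are 2x2 subsampled with at most 8 bits per sample.
    static bool isFlexibleYuv420(const Image &img) {
        if (!img.isYuv || img.numPlanes != 3) return false;
        if (img.plane[Image::Y].horizSubsampling != 1 ||
            img.plane[Image::Y].vertSubsampling != 1) return false;
        if (img.plane[Image::U].horizSubsampling != 2 ||
            img.plane[Image::U].vertSubsampling != 2 ||
            img.plane[Image::V].horizSubsampling != 2 ||
            img.plane[Image::V].vertSubsampling != 2) return false;
        return img.bitDepth <= 8;
    }

    int main() {
        Image nv12 = { true, 3, 8, { {1, 1}, {2, 2}, {2, 2} } };
        printf("flexible YUV420 equivalent: %d\n", isFlexibleYuv420(nv12));
        return 0;
    }
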
@@ -4969,7 +4613,7 @@
                         describeParams.nSliceHeight = videoDef->nSliceHeight;
                         describeParams.bUsingNativeBuffers = OMX_FALSE;
 
-                        if (describeColorFormat(mOMX, mNode, describeParams)) {
+                        if (DescribeColorFormat(mOMXNode, describeParams)) {
                             notify->setBuffer(
                                     "image-data",
                                     ABuffer::CreateAsCopy(
@@ -4994,8 +4638,7 @@
                         InitOMXParams(&rect);
                         rect.nPortIndex = portIndex;
 
-                        if (mOMX->getConfig(
-                                    mNode,
+                        if (mOMXNode->getConfig(
                                     (portIndex == kPortIndexOutput ?
                                             OMX_IndexConfigCommonOutputCrop :
                                             OMX_IndexConfigCommonInputCrop),
@@ -5051,8 +4694,7 @@
                     OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE vp8type;
                     InitOMXParams(&vp8type);
                     vp8type.nPortIndex = kPortIndexOutput;
-                    status_t err = mOMX->getParameter(
-                            mNode,
+                    status_t err = mOMXNode->getParameter(
                             (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidVp8Encoder,
                             &vp8type,
                             sizeof(vp8type));
@@ -5121,8 +4763,8 @@
                     InitOMXParams(&params);
                     params.nPortIndex = portIndex;
 
-                    err = mOMX->getParameter(
-                            mNode, OMX_IndexParamAudioPcm, &params, sizeof(params));
+                    err = mOMXNode->getParameter(
+                            OMX_IndexParamAudioPcm, &params, sizeof(params));
                     if (err != OK) {
                         return err;
                     }
@@ -5169,8 +4811,8 @@
                     InitOMXParams(&params);
                     params.nPortIndex = portIndex;
 
-                    err = mOMX->getParameter(
-                            mNode, OMX_IndexParamAudioAac, &params, sizeof(params));
+                    err = mOMXNode->getParameter(
+                            OMX_IndexParamAudioAac, &params, sizeof(params));
                     if (err != OK) {
                         return err;
                     }
@@ -5187,8 +4829,8 @@
                     InitOMXParams(&params);
                     params.nPortIndex = portIndex;
 
-                    err = mOMX->getParameter(
-                            mNode, OMX_IndexParamAudioAmr, &params, sizeof(params));
+                    err = mOMXNode->getParameter(
+                            OMX_IndexParamAudioAmr, &params, sizeof(params));
                     if (err != OK) {
                         return err;
                     }
@@ -5210,8 +4852,8 @@
                     InitOMXParams(&params);
                     params.nPortIndex = portIndex;
 
-                    err = mOMX->getParameter(
-                            mNode, OMX_IndexParamAudioFlac, &params, sizeof(params));
+                    err = mOMXNode->getParameter(
+                            OMX_IndexParamAudioFlac, &params, sizeof(params));
                     if (err != OK) {
                         return err;
                     }
@@ -5228,8 +4870,8 @@
                     InitOMXParams(&params);
                     params.nPortIndex = portIndex;
 
-                    err = mOMX->getParameter(
-                            mNode, OMX_IndexParamAudioMp3, &params, sizeof(params));
+                    err = mOMXNode->getParameter(
+                            OMX_IndexParamAudioMp3, &params, sizeof(params));
                     if (err != OK) {
                         return err;
                     }
@@ -5246,8 +4888,8 @@
                     InitOMXParams(&params);
                     params.nPortIndex = portIndex;
 
-                    err = mOMX->getParameter(
-                            mNode, OMX_IndexParamAudioVorbis, &params, sizeof(params));
+                    err = mOMXNode->getParameter(
+                            OMX_IndexParamAudioVorbis, &params, sizeof(params));
                     if (err != OK) {
                         return err;
                     }
@@ -5264,8 +4906,8 @@
                     InitOMXParams(&params);
                     params.nPortIndex = portIndex;
 
-                    err = mOMX->getParameter(
-                            mNode, (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3,
+                    err = mOMXNode->getParameter(
+                            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3,
                             &params, sizeof(params));
                     if (err != OK) {
                         return err;
@@ -5283,8 +4925,8 @@
                     InitOMXParams(&params);
                     params.nPortIndex = portIndex;
 
-                    err = mOMX->getParameter(
-                            mNode, (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidEac3,
+                    err = mOMXNode->getParameter(
+                            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidEac3,
                             &params, sizeof(params));
                     if (err != OK) {
                         return err;
@@ -5302,8 +4944,8 @@
                     InitOMXParams(&params);
                     params.nPortIndex = portIndex;
 
-                    err = mOMX->getParameter(
-                            mNode, (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidOpus,
+                    err = mOMXNode->getParameter(
+                            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidOpus,
                             &params, sizeof(params));
                     if (err != OK) {
                         return err;
@@ -5321,8 +4963,8 @@
                     InitOMXParams(&params);
                     params.nPortIndex = portIndex;
 
-                    err = mOMX->getParameter(
-                            mNode, (OMX_INDEXTYPE)OMX_IndexParamAudioPcm, &params, sizeof(params));
+                    err = mOMXNode->getParameter(
+                            (OMX_INDEXTYPE)OMX_IndexParamAudioPcm, &params, sizeof(params));
                     if (err != OK) {
                         return err;
                     }
@@ -5348,8 +4990,8 @@
                     InitOMXParams(&params);
                     params.nPortIndex = portIndex;
 
-                    err = mOMX->getParameter(
-                                mNode, OMX_IndexParamAudioPcm, &params, sizeof(params));
+                    err = mOMXNode->getParameter(
+                                OMX_IndexParamAudioPcm, &params, sizeof(params));
                     if (err != OK) {
                         return err;
                     }
@@ -5451,25 +5093,6 @@
     }
 }
 
-void ACodec::addKeyFormatChangesToRenderBufferNotification(sp<AMessage> &notify) {
-    AString mime;
-    CHECK(mOutputFormat->findString("mime", &mime));
-
-    if (mime == MEDIA_MIMETYPE_VIDEO_RAW && mNativeWindow != NULL) {
-        // notify renderer of the crop change and dataspace change
-        // NOTE: native window uses extended right-bottom coordinate
-        int32_t left, top, right, bottom;
-        if (mOutputFormat->findRect("crop", &left, &top, &right, &bottom)) {
-            notify->setRect("crop", left, top, right + 1, bottom + 1);
-        }
-
-        int32_t dataSpace;
-        if (mOutputFormat->findInt32("android._dataspace", &dataSpace)) {
-            notify->setInt32("dataspace", dataSpace);
-        }
-    }
-}
-
 void ACodec::sendFormatChange() {
     AString mime;
     CHECK(mOutputFormat->findString("mime", &mime));
@@ -5492,18 +5115,11 @@
         mSkipCutBuffer = new SkipCutBuffer(mEncoderDelay, mEncoderPadding, channelCount);
     }
 
-    sp<AMessage> notify = mNotify->dup();
-    notify->setInt32("what", kWhatOutputFormatChanged);
-    notify->setMessage("format", mOutputFormat);
-    notify->post();
-
     // mLastOutputFormat is not used when tunneled; doing this just to stay consistent
     mLastOutputFormat = mOutputFormat;
 }
 
 void ACodec::signalError(OMX_ERRORTYPE error, status_t internalError) {
-    sp<AMessage> notify = mNotify->dup();
-    notify->setInt32("what", CodecBase::kWhatError);
     ALOGE("signalError(omxError %#x, internalError %d)", error, internalError);
 
     if (internalError == UNKNOWN_ERROR) { // find better error code
@@ -5516,15 +5132,7 @@
     }
 
     mFatalError = true;
-
-    notify->setInt32("err", internalError);
-    notify->setInt32("actionCode", ACTION_CODE_FATAL); // could translate from OMX error.
-    notify->post();
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-ACodec::PortDescription::PortDescription() {
+    mCallback->onError(internalError, ACTION_CODE_FATAL);
 }
 
 status_t ACodec::requestIDRFrame() {
@@ -5538,42 +5146,12 @@
     params.nPortIndex = kPortIndexOutput;
     params.IntraRefreshVOP = OMX_TRUE;
 
-    return mOMX->setConfig(
-            mNode,
+    return mOMXNode->setConfig(
             OMX_IndexConfigVideoIntraVOPRefresh,
             &params,
             sizeof(params));
 }
 
-void ACodec::PortDescription::addBuffer(
-        IOMX::buffer_id id, const sp<ABuffer> &buffer,
-        const sp<NativeHandle> &handle, const sp<RefBase> &memRef) {
-    mBufferIDs.push_back(id);
-    mBuffers.push_back(buffer);
-    mHandles.push_back(handle);
-    mMemRefs.push_back(memRef);
-}
-
-size_t ACodec::PortDescription::countBuffers() {
-    return mBufferIDs.size();
-}
-
-IOMX::buffer_id ACodec::PortDescription::bufferIDAt(size_t index) const {
-    return mBufferIDs.itemAt(index);
-}
-
-sp<ABuffer> ACodec::PortDescription::bufferAt(size_t index) const {
-    return mBuffers.itemAt(index);
-}
-
-sp<NativeHandle> ACodec::PortDescription::handleAt(size_t index) const {
-    return mHandles.itemAt(index);
-}
-
-sp<RefBase> ACodec::PortDescription::memRefAt(size_t index) const {
-    return mMemRefs.itemAt(index);
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 
 ACodec::BaseState::BaseState(ACodec *codec, const sp<AState> &parentState)
@@ -5646,6 +5224,7 @@
         {
             // This will result in kFlagSawMediaServerDie handling in MediaCodec.
             ALOGE("OMX/mediaserver died, signalling error!");
+            mCodec->mGraphicBufferSource.clear();
             mCodec->signalError(OMX_ErrorResourcesLost, DEAD_OBJECT);
             break;
         }
@@ -5654,12 +5233,10 @@
         {
             ALOGI("[%s] forcing the release of codec",
                     mCodec->mComponentName.c_str());
-            status_t err = mCodec->mOMX->freeNode(mCodec->mNode);
+            status_t err = mCodec->mOMXNode->freeNode();
             ALOGE_IF("[%s] failed to release codec instance: err=%d",
                        mCodec->mComponentName.c_str(), err);
-            sp<AMessage> notify = mCodec->mNotify->dup();
-            notify->setInt32("what", CodecBase::kWhatShutdownCompleted);
-            notify->post();
+            mCodec->mCallback->onReleaseCompleted();
             break;
         }
 
@@ -5673,16 +5250,17 @@
 bool ACodec::BaseState::checkOMXMessage(const sp<AMessage> &msg) {
     // there is a possibility that this is an outstanding message for a
     // codec that we have already destroyed
-    if (mCodec->mNode == 0) {
+    if (mCodec->mOMXNode == NULL) {
         ALOGI("ignoring message as already freed component: %s",
                 msg->debugString().c_str());
         return false;
     }
 
-    IOMX::node_id nodeID;
-    CHECK(msg->findInt32("node", (int32_t*)&nodeID));
-    if (nodeID != mCodec->mNode) {
-        ALOGE("Unexpected message for nodeID: %u, should have been %u", nodeID, mCodec->mNode);
+    int32_t generation;
+    CHECK(msg->findInt32("generation", (int32_t*)&generation));
+    if (generation != mCodec->mNodeGeneration) {
+        ALOGW("Unexpected message for component: %s, gen %u, cur %u",
+                msg->debugString().c_str(), generation, mCodec->mNodeGeneration);
         return false;
     }
     return true;
@@ -5800,11 +5378,7 @@
 bool ACodec::BaseState::onOMXEvent(
         OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2) {
     if (event == OMX_EventDataSpaceChanged) {
-        ColorAspects aspects;
-        aspects.mRange = (ColorAspects::Range)((data2 >> 24) & 0xFF);
-        aspects.mPrimaries = (ColorAspects::Primaries)((data2 >> 16) & 0xFF);
-        aspects.mMatrixCoeffs = (ColorAspects::MatrixCoeffs)((data2 >> 8) & 0xFF);
-        aspects.mTransfer = (ColorAspects::Transfer)(data2 & 0xFF);
+        ColorAspects aspects = ColorUtils::unpackToColorAspects(data2);
 
         mCodec->onDataSpaceChanged((android_dataspace)data1, aspects);
         return true;
@@ -5886,46 +5460,28 @@
 
     CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_US);
 
-    sp<AMessage> notify = mCodec->mNotify->dup();
-    notify->setInt32("what", CodecBase::kWhatFillThisBuffer);
-    notify->setInt32("buffer-id", info->mBufferID);
-
-    info->mData->meta()->clear();
-    notify->setBuffer("buffer", info->mData);
-
-    sp<AMessage> reply = new AMessage(kWhatInputBufferFilled, mCodec);
-    reply->setInt32("buffer-id", info->mBufferID);
-
-    notify->setMessage("reply", reply);
-
-    notify->post();
-
+    info->mData->setFormat(mCodec->mInputFormat);
+    mCodec->mBufferChannel->fillThisBuffer(info->mBufferID);
+    info->mData.clear();
     info->mStatus = BufferInfo::OWNED_BY_UPSTREAM;
 }
 
 void ACodec::BaseState::onInputBufferFilled(const sp<AMessage> &msg) {
     IOMX::buffer_id bufferID;
     CHECK(msg->findInt32("buffer-id", (int32_t*)&bufferID));
-    sp<ABuffer> buffer;
+    sp<MediaCodecBuffer> buffer;
     int32_t err = OK;
     bool eos = false;
     PortMode mode = getPortMode(kPortIndexInput);
-
-    if (!msg->findBuffer("buffer", &buffer)) {
-        /* these are unfilled buffers returned by client */
-        CHECK(msg->findInt32("err", &err));
-
-        if (err == OK) {
-            /* buffers with no errors are returned on MediaCodec.flush */
-            mode = KEEP_BUFFERS;
-        } else {
-            ALOGV("[%s] saw error %d instead of an input buffer",
-                 mCodec->mComponentName.c_str(), err);
-            eos = true;
-        }
-
-        buffer.clear();
+    int32_t discarded = 0;
+    if (msg->findInt32("discarded", &discarded) && discarded) {
+        // these are unfilled buffers returned by client
+        // buffers are returned on MediaCodec.flush
+        mode = KEEP_BUFFERS;
     }
+    sp<RefBase> obj;
+    CHECK(msg->findObject("buffer", &obj));
+    buffer = static_cast<MediaCodecBuffer *>(obj.get());
 
     int32_t tmp;
     if (buffer != NULL && buffer->meta()->findInt32("eos", &tmp) && tmp) {
@@ -5943,6 +5499,7 @@
     }
 
     info->mStatus = BufferInfo::OWNED_BY_US;
+    info->mData = buffer;
 
     switch (mode) {
         case KEEP_BUFFERS:
@@ -5970,7 +5527,6 @@
 
                 OMX_U32 flags = OMX_BUFFERFLAG_ENDOFFRAME;
 
-                MetadataBufferType metaType = mCodec->mInputMetadataType;
                 int32_t isCSD = 0;
                 if (buffer->meta()->findInt32("csd", &isCSD) && isCSD != 0) {
                     if (mCodec->mIsLegacyVP9Decoder) {
@@ -5980,18 +5536,19 @@
                         break;
                     }
                     flags |= OMX_BUFFERFLAG_CODECCONFIG;
-                    metaType = kMetadataBufferTypeInvalid;
                 }
 
                 if (eos) {
                     flags |= OMX_BUFFERFLAG_EOS;
                 }
 
-                if (buffer != info->mCodecData) {
+                size_t size = buffer->size();
+                size_t offset = buffer->offset();
+                if (buffer->base() != info->mCodecData->base()) {
                     ALOGV("[%s] Needs to copy input data for buffer %u. (%p != %p)",
                          mCodec->mComponentName.c_str(),
                          bufferID,
-                         buffer.get(), info->mCodecData.get());
+                         buffer->base(), info->mCodecData->base());
 
                     sp<DataConverter> converter = mCodec->mConverter[kPortIndexInput];
                     if (converter == NULL || isCSD) {
@@ -6002,6 +5559,9 @@
                         mCodec->signalError(OMX_ErrorUndefined, err);
                         return;
                     }
+                    size = info->mCodecData->size();
+                } else {
+                    info->mCodecData->setRange(offset, size);
                 }
 
                 if (flags & OMX_BUFFERFLAG_CODECCONFIG) {
@@ -6042,54 +5602,53 @@
                 info->checkReadFence("onInputBufferFilled");
 
                 status_t err2 = OK;
-                switch (metaType) {
-                case kMetadataBufferTypeInvalid:
+                switch (mCodec->mPortMode[kPortIndexInput]) {
+                case IOMX::kPortModePresetByteBuffer:
+                case IOMX::kPortModePresetANWBuffer:
+                case IOMX::kPortModePresetSecureBuffer:
+                    {
+                        err2 = mCodec->mOMXNode->emptyBuffer(
+                            bufferID, info->mCodecData, flags, timeUs, info->mFenceFd);
+                    }
                     break;
 #ifndef OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
-                case kMetadataBufferTypeNativeHandleSource:
+                case IOMX::kPortModeDynamicNativeHandle:
                     if (info->mCodecData->size() >= sizeof(VideoNativeHandleMetadata)) {
                         VideoNativeHandleMetadata *vnhmd =
                             (VideoNativeHandleMetadata*)info->mCodecData->base();
-                        err2 = mCodec->mOMX->updateNativeHandleInMeta(
-                                mCodec->mNode, kPortIndexInput,
-                                NativeHandle::create(vnhmd->pHandle, false /* ownsHandle */),
-                                bufferID);
+                        sp<NativeHandle> handle = NativeHandle::create(
+                                vnhmd->pHandle, false /* ownsHandle */);
+                        err2 = mCodec->mOMXNode->emptyBuffer(
+                            bufferID, handle, flags, timeUs, info->mFenceFd);
                     }
                     break;
-                case kMetadataBufferTypeANWBuffer:
+                case IOMX::kPortModeDynamicANWBuffer:
                     if (info->mCodecData->size() >= sizeof(VideoNativeMetadata)) {
                         VideoNativeMetadata *vnmd = (VideoNativeMetadata*)info->mCodecData->base();
-                        err2 = mCodec->mOMX->updateGraphicBufferInMeta(
-                                mCodec->mNode, kPortIndexInput,
-                                new GraphicBuffer(vnmd->pBuffer, false /* keepOwnership */),
-                                bufferID);
+                        sp<GraphicBuffer> graphicBuffer = new GraphicBuffer(
+                                vnmd->pBuffer, false /* keepOwnership */);
+                        err2 = mCodec->mOMXNode->emptyBuffer(
+                            bufferID, graphicBuffer, flags, timeUs, info->mFenceFd);
                     }
                     break;
 #endif
                 default:
                     ALOGW("Can't marshall %s data in %zu sized buffers in %zu-bit mode",
-                            asString(metaType), info->mCodecData->size(),
+                            asString(mCodec->mPortMode[kPortIndexInput]),
+                            info->mCodecData->size(),
                             sizeof(buffer_handle_t) * 8);
                     err2 = ERROR_UNSUPPORTED;
                     break;
                 }
 
-                if (err2 == OK) {
-                    err2 = mCodec->mOMX->emptyBuffer(
-                        mCodec->mNode,
-                        bufferID,
-                        0,
-                        info->mCodecData->size(),
-                        flags,
-                        timeUs,
-                        info->mFenceFd);
-                }
                 info->mFenceFd = -1;
                 if (err2 != OK) {
                     mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err2));
                     return;
                 }
                 info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
+                // Hold the reference while component is using the buffer.
+                info->mData = buffer;
 
                 if (!eos && err == OK) {
                     getMoreInputDataIfPossible();
@@ -6113,14 +5672,8 @@
                      mCodec->mComponentName.c_str(), bufferID);
 
                 info->checkReadFence("onInputBufferFilled");
-                status_t err2 = mCodec->mOMX->emptyBuffer(
-                        mCodec->mNode,
-                        bufferID,
-                        0,
-                        0,
-                        OMX_BUFFERFLAG_EOS,
-                        0,
-                        info->mFenceFd);
+                status_t err2 = mCodec->mOMXNode->emptyBuffer(
+                        bufferID, OMXBuffer::sPreset, OMX_BUFFERFLAG_EOS, 0, info->mFenceFd);
                 info->mFenceFd = -1;
                 if (err2 != OK) {
                     mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err2));
@@ -6244,56 +5797,46 @@
                 ALOGV("[%s] calling fillBuffer %u",
                      mCodec->mComponentName.c_str(), info->mBufferID);
 
-                err = mCodec->mOMX->fillBuffer(mCodec->mNode, info->mBufferID, info->mFenceFd);
-                info->mFenceFd = -1;
+                err = mCodec->fillBuffer(info);
                 if (err != OK) {
                     mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
                     return true;
                 }
-
-                info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
                 break;
             }
 
-            sp<AMessage> reply =
-                new AMessage(kWhatOutputBufferDrained, mCodec);
+            sp<MediaCodecBuffer> buffer = info->mData;
 
             if (mCodec->mOutputFormat != mCodec->mLastOutputFormat && rangeLength > 0) {
                 // pretend that output format has changed on the first frame (we used to do this)
                 if (mCodec->mBaseOutputFormat == mCodec->mOutputFormat) {
                     mCodec->onOutputFormatChanged(mCodec->mOutputFormat);
                 }
-                mCodec->addKeyFormatChangesToRenderBufferNotification(reply);
                 mCodec->sendFormatChange();
-            } else if (rangeLength > 0 && mCodec->mNativeWindow != NULL) {
-                // If potentially rendering onto a surface, always save key format data (crop &
-                // data space) so that we can set it if and once the buffer is rendered.
-                mCodec->addKeyFormatChangesToRenderBufferNotification(reply);
             }
+            buffer->setFormat(mCodec->mOutputFormat);
 
-            if (mCodec->usingMetadataOnEncoderOutput()) {
+            if (mCodec->usingSecureBufferOnEncoderOutput()) {
                 native_handle_t *handle = NULL;
-                VideoNativeHandleMetadata &nativeMeta =
-                    *(VideoNativeHandleMetadata *)info->mData->data();
-                if (info->mData->size() >= sizeof(nativeMeta)
-                        && nativeMeta.eType == kMetadataBufferTypeNativeHandleSource) {
+                sp<SecureBuffer> secureBuffer = static_cast<SecureBuffer *>(buffer.get());
+                if (secureBuffer != NULL) {
 #ifdef OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
                     // handle is only valid on 32-bit/mediaserver process
                     handle = NULL;
 #else
-                    handle = (native_handle_t *)nativeMeta.pHandle;
+                    handle = (native_handle_t *)secureBuffer->getDestinationPointer();
 #endif
                 }
-                info->mData->meta()->setPointer("handle", handle);
-                info->mData->meta()->setInt32("rangeOffset", rangeOffset);
-                info->mData->meta()->setInt32("rangeLength", rangeLength);
-            } else if (info->mData == info->mCodecData) {
-                info->mData->setRange(rangeOffset, rangeLength);
+                buffer->meta()->setPointer("handle", handle);
+                buffer->meta()->setInt32("rangeOffset", rangeOffset);
+                buffer->meta()->setInt32("rangeLength", rangeLength);
+            } else if (buffer->base() == info->mCodecData->base()) {
+                buffer->setRange(rangeOffset, rangeLength);
             } else {
                 info->mCodecData->setRange(rangeOffset, rangeLength);
                 // in this case we know that mConverter is not null
                 status_t err = mCodec->mConverter[kPortIndexOutput]->convert(
-                        info->mCodecData, info->mData);
+                        info->mCodecData, buffer);
                 if (err != OK) {
                     mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
                     return true;
@@ -6308,32 +5851,20 @@
 #endif
 
             if (mCodec->mSkipCutBuffer != NULL) {
-                mCodec->mSkipCutBuffer->submit(info->mData);
+                mCodec->mSkipCutBuffer->submit(buffer);
             }
-            info->mData->meta()->setInt64("timeUs", timeUs);
+            buffer->meta()->setInt64("timeUs", timeUs);
 
-            sp<AMessage> notify = mCodec->mNotify->dup();
-            notify->setInt32("what", CodecBase::kWhatDrainThisBuffer);
-            notify->setInt32("buffer-id", info->mBufferID);
-            notify->setBuffer("buffer", info->mData);
-            notify->setInt32("flags", flags);
+            info->mData.clear();
 
-            reply->setInt32("buffer-id", info->mBufferID);
-
-            notify->setMessage("reply", reply);
-
-            notify->post();
+            mCodec->mBufferChannel->drainThisBuffer(info->mBufferID, flags);
 
             info->mStatus = BufferInfo::OWNED_BY_DOWNSTREAM;
 
             if (flags & OMX_BUFFERFLAG_EOS) {
                 ALOGV("[%s] saw output EOS", mCodec->mComponentName.c_str());
 
-                sp<AMessage> notify = mCodec->mNotify->dup();
-                notify->setInt32("what", CodecBase::kWhatEOS);
-                notify->setInt32("err", mCodec->mInputEOSResult);
-                notify->post();
-
+                mCodec->mCallback->onEos(mCodec->mInputEOSResult);
                 mCodec->mPortEOS[kPortIndexOutput] = true;
             }
             break;
@@ -6358,6 +5889,12 @@
 void ACodec::BaseState::onOutputBufferDrained(const sp<AMessage> &msg) {
     IOMX::buffer_id bufferID;
     CHECK(msg->findInt32("buffer-id", (int32_t*)&bufferID));
+    sp<RefBase> obj;
+    CHECK(msg->findObject("buffer", &obj));
+    sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
+    int32_t discarded = 0;
+    msg->findInt32("discarded", &discarded);
+
     ssize_t index;
     BufferInfo *info = mCodec->findBufferByID(kPortIndexOutput, bufferID, &index);
     BufferInfo::Status status = BufferInfo::getSafeStatus(info);
@@ -6367,34 +5904,38 @@
         mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
         return;
     }
-
-    android_native_rect_t crop;
-    if (msg->findRect("crop", &crop.left, &crop.top, &crop.right, &crop.bottom)
-            && memcmp(&crop, &mCodec->mLastNativeWindowCrop, sizeof(crop)) != 0) {
-        mCodec->mLastNativeWindowCrop = crop;
-        status_t err = native_window_set_crop(mCodec->mNativeWindow.get(), &crop);
-        ALOGW_IF(err != NO_ERROR, "failed to set crop: %d", err);
-    }
-
-    int32_t dataSpace;
-    if (msg->findInt32("dataspace", &dataSpace)
-            && dataSpace != mCodec->mLastNativeWindowDataSpace) {
-        status_t err = native_window_set_buffers_data_space(
-                mCodec->mNativeWindow.get(), (android_dataspace)dataSpace);
-        mCodec->mLastNativeWindowDataSpace = dataSpace;
-        ALOGW_IF(err != NO_ERROR, "failed to set dataspace: %d", err);
-    }
-
+    info->mData = buffer;
     int32_t render;
     if (mCodec->mNativeWindow != NULL
             && msg->findInt32("render", &render) && render != 0
-            && info->mData != NULL && info->mData->size() != 0) {
+            && !discarded && buffer->size() != 0) {
         ATRACE_NAME("render");
         // The client wants this buffer to be rendered.
 
+        android_native_rect_t crop;
+        if (buffer->format()->findRect("crop", &crop.left, &crop.top, &crop.right, &crop.bottom)) {
+            // NOTE: native window uses extended right-bottom coordinate
+            ++crop.right;
+            ++crop.bottom;
+            if (memcmp(&crop, &mCodec->mLastNativeWindowCrop, sizeof(crop)) != 0) {
+                mCodec->mLastNativeWindowCrop = crop;
+                status_t err = native_window_set_crop(mCodec->mNativeWindow.get(), &crop);
+                ALOGW_IF(err != NO_ERROR, "failed to set crop: %d", err);
+            }
+        }
+
+        int32_t dataSpace;
+        if (buffer->format()->findInt32("android._dataspace", &dataSpace)
+                && dataSpace != mCodec->mLastNativeWindowDataSpace) {
+            status_t err = native_window_set_buffers_data_space(
+                    mCodec->mNativeWindow.get(), (android_dataspace)dataSpace);
+            mCodec->mLastNativeWindowDataSpace = dataSpace;
+            ALOGW_IF(err != NO_ERROR, "failed to set dataspace: %d", err);
+        }
+
         // save buffers sent to the surface so we can get render time when they return
         int64_t mediaTimeUs = -1;
-        info->mData->meta()->findInt64("timeUs", &mediaTimeUs);
+        buffer->meta()->findInt64("timeUs", &mediaTimeUs);
         if (mediaTimeUs >= 0) {
             mCodec->mRenderTracker.onFrameQueued(
                     mediaTimeUs, info->mGraphicBuffer, new Fence(::dup(info->mFenceFd)));
@@ -6403,7 +5944,7 @@
         int64_t timestampNs = 0;
         if (!msg->findInt64("timestampNs", &timestampNs)) {
             // use media timestamp if client did not request a specific render timestamp
-            if (info->mData->meta()->findInt64("timeUs", &timestampNs)) {
+            if (buffer->meta()->findInt64("timeUs", &timestampNs)) {
                 ALOGV("using buffer PTS of %lld", (long long)timestampNs);
                 timestampNs *= 1000;
             }
@@ -6427,8 +5968,7 @@
             info->mIsReadFence = false;
         }
     } else {
-        if (mCodec->mNativeWindow != NULL &&
-            (info->mData == NULL || info->mData->size() != 0)) {
+        if (mCodec->mNativeWindow != NULL && (discarded || buffer->size() != 0)) {
             // move read fence into write fence to avoid clobbering
             info->mIsReadFence = false;
             ATRACE_NAME("frame-drop");
@@ -6466,12 +6006,8 @@
                     ALOGV("[%s] calling fillBuffer %u",
                          mCodec->mComponentName.c_str(), info->mBufferID);
                     info->checkWriteFence("onOutputBufferDrained::RESUBMIT_BUFFERS");
-                    status_t err = mCodec->mOMX->fillBuffer(
-                            mCodec->mNode, info->mBufferID, info->mFenceFd);
-                    info->mFenceFd = -1;
-                    if (err == OK) {
-                        info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
-                    } else {
+                    status_t err = mCodec->fillBuffer(info);
+                    if (err != OK) {
                         mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
                     }
                 }
@@ -6504,19 +6040,21 @@
     ALOGV("Now uninitialized");
 
     if (mDeathNotifier != NULL) {
-        mCodec->mNodeBinder->unlinkToDeath(mDeathNotifier);
+        if (mCodec->mOMXNode != NULL) {
+            sp<IBinder> binder = IInterface::asBinder(mCodec->mOMXNode);
+            binder->unlinkToDeath(mDeathNotifier);
+        }
         mDeathNotifier.clear();
     }
 
     mCodec->mUsingNativeWindow = false;
     mCodec->mNativeWindow.clear();
     mCodec->mNativeWindowUsageBits = 0;
-    mCodec->mNode = 0;
     mCodec->mOMX.clear();
-    mCodec->mQuirks = 0;
+    mCodec->mOMXNode.clear();
     mCodec->mFlags = 0;
-    mCodec->mInputMetadataType = kMetadataBufferTypeInvalid;
-    mCodec->mOutputMetadataType = kMetadataBufferTypeInvalid;
+    mCodec->mPortMode[kPortIndexInput] = IOMX::kPortModePresetByteBuffer;
+    mCodec->mPortMode[kPortIndexOutput] = IOMX::kPortModePresetByteBuffer;
     mCodec->mConverter[0].clear();
     mCodec->mConverter[1].clear();
     mCodec->mComponentName.clear();
@@ -6548,21 +6086,18 @@
                         "keepComponentAllocated", &keepComponentAllocated));
             ALOGW_IF(keepComponentAllocated,
                      "cannot keep component allocated on shutdown in Uninitialized state");
-
-            sp<AMessage> notify = mCodec->mNotify->dup();
-            notify->setInt32("what", CodecBase::kWhatShutdownCompleted);
-            notify->post();
-
+            if (keepComponentAllocated) {
+                mCodec->mCallback->onStopCompleted();
+            } else {
+                mCodec->mCallback->onReleaseCompleted();
+            }
             handled = true;
             break;
         }
 
         case ACodec::kWhatFlush:
         {
-            sp<AMessage> notify = mCodec->mNotify->dup();
-            notify->setInt32("what", CodecBase::kWhatFlushCompleted);
-            notify->post();
-
+            mCodec->mCallback->onFlushCompleted();
             handled = true;
             break;
         }
@@ -6592,7 +6127,7 @@
 bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) {
     ALOGV("onAllocateComponent");
 
-    CHECK(mCodec->mNode == 0);
+    CHECK(mCodec->mOMXNode == NULL);
 
     OMXClient client;
     if (client.connect() != OK) {
@@ -6631,7 +6166,7 @@
     }
 
     sp<CodecObserver> observer = new CodecObserver;
-    IOMX::node_id node = 0;
+    sp<IOMXNode> omxNode;
 
     status_t err = NAME_NOT_FOUND;
     for (size_t matchIndex = 0; matchIndex < matchingCodecs.size();
@@ -6642,7 +6177,7 @@
         pid_t tid = gettid();
         int prevPriority = androidGetThreadPriority(tid);
         androidSetThreadPriority(tid, ANDROID_PRIORITY_FOREGROUND);
-        err = omx->allocateNode(componentName.c_str(), observer, &mCodec->mNodeBinder, &node);
+        err = omx->allocateNode(componentName.c_str(), observer, &omxNode);
         androidSetThreadPriority(tid, prevPriority);
 
         if (err == OK) {
@@ -6651,10 +6186,10 @@
             ALOGW("Allocating component '%s' failed, try next one.", componentName.c_str());
         }
 
-        node = 0;
+        omxNode = NULL;
     }
 
-    if (node == 0) {
+    if (omxNode == NULL) {
         if (!mime.empty()) {
             ALOGE("Unable to instantiate a %scoder for type '%s' with err %#x.",
                     encoder ? "en" : "de", mime.c_str(), err);
@@ -6667,14 +6202,14 @@
     }
 
     mDeathNotifier = new DeathNotifier(notify);
-    if (mCodec->mNodeBinder == NULL ||
-            mCodec->mNodeBinder->linkToDeath(mDeathNotifier) != OK) {
+    if (IInterface::asBinder(omxNode)->linkToDeath(mDeathNotifier) != OK) {
         // This was a local binder, if it dies so do we, we won't care
         // about any notifications in the afterlife.
         mDeathNotifier.clear();
     }
 
     notify = new AMessage(kWhatOMXMessageList, mCodec);
+    notify->setInt32("generation", ++mCodec->mNodeGeneration);
     observer->setNotificationMessage(notify);
 
     mCodec->mComponentName = componentName;
@@ -6687,17 +6222,10 @@
         mCodec->mFlags |= kFlagPushBlankBuffersToNativeWindowOnShutdown;
     }
 
-    mCodec->mQuirks = quirks;
+    omxNode->setQuirks(quirks);
     mCodec->mOMX = omx;
-    mCodec->mNode = node;
-
-    {
-        sp<AMessage> notify = mCodec->mNotify->dup();
-        notify->setInt32("what", CodecBase::kWhatComponentAllocated);
-        notify->setString("componentName", mCodec->mComponentName.c_str());
-        notify->post();
-    }
-
+    mCodec->mOMXNode = omxNode;
+    mCodec->mCallback->onComponentAllocated(mCodec->mComponentName.c_str());
     mCodec->changeState(mCodec->mLoadedState);
 
     return true;
@@ -6723,6 +6251,7 @@
     mCodec->mInputFormat.clear();
     mCodec->mOutputFormat.clear();
     mCodec->mBaseOutputFormat.clear();
+    mCodec->mGraphicBufferSource.clear();
 
     if (mCodec->mShutdownInProgress) {
         bool keepComponentAllocated = mCodec->mKeepComponentAllocated;
@@ -6739,15 +6268,17 @@
 
 void ACodec::LoadedState::onShutdown(bool keepComponentAllocated) {
     if (!keepComponentAllocated) {
-        (void)mCodec->mOMX->freeNode(mCodec->mNode);
+        (void)mCodec->mOMXNode->freeNode();
 
         mCodec->changeState(mCodec->mUninitializedState);
     }
 
     if (mCodec->mExplicitShutdown) {
-        sp<AMessage> notify = mCodec->mNotify->dup();
-        notify->setInt32("what", CodecBase::kWhatShutdownCompleted);
-        notify->post();
+        if (keepComponentAllocated) {
+            mCodec->mCallback->onStopCompleted();
+        } else {
+            mCodec->mCallback->onReleaseCompleted();
+        }
         mCodec->mExplicitShutdown = false;
     }
 }
@@ -6799,10 +6330,7 @@
 
         case ACodec::kWhatFlush:
         {
-            sp<AMessage> notify = mCodec->mNotify->dup();
-            notify->setInt32("what", CodecBase::kWhatFlushCompleted);
-            notify->post();
-
+            mCodec->mCallback->onFlushCompleted();
             handled = true;
             break;
         }
@@ -6818,7 +6346,7 @@
         const sp<AMessage> &msg) {
     ALOGV("onConfigureComponent");
 
-    CHECK(mCodec->mNode != 0);
+    CHECK(mCodec->mOMXNode != NULL);
 
     status_t err = OK;
     AString mime;
@@ -6835,109 +6363,100 @@
         return false;
     }
 
-    {
-        sp<AMessage> notify = mCodec->mNotify->dup();
-        notify->setInt32("what", CodecBase::kWhatComponentConfigured);
-        notify->setMessage("input-format", mCodec->mInputFormat);
-        notify->setMessage("output-format", mCodec->mOutputFormat);
-        notify->post();
-    }
+    mCodec->mCallback->onComponentConfigured(mCodec->mInputFormat, mCodec->mOutputFormat);
 
     return true;
 }
 
 status_t ACodec::LoadedState::setupInputSurface() {
-    status_t err = OK;
+    if (mCodec->mGraphicBufferSource == NULL) {
+        return BAD_VALUE;
+    }
+
+    android_dataspace dataSpace;
+    status_t err =
+        mCodec->setInitialColorAspectsForVideoEncoderSurfaceAndGetDataSpace(&dataSpace);
+    if (err != OK) {
+        ALOGE("Failed to get default data space");
+        return err;
+    }
+
+    err = statusFromBinderStatus(
+            mCodec->mGraphicBufferSource->configure(mCodec->mOMXNode, dataSpace));
+    if (err != OK) {
+        ALOGE("[%s] Unable to configure for node (err %d)",
+              mCodec->mComponentName.c_str(), err);
+        return err;
+    }
 
     if (mCodec->mRepeatFrameDelayUs > 0ll) {
-        err = mCodec->mOMX->setInternalOption(
-                mCodec->mNode,
-                kPortIndexInput,
-                IOMX::INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY,
-                &mCodec->mRepeatFrameDelayUs,
-                sizeof(mCodec->mRepeatFrameDelayUs));
+        err = statusFromBinderStatus(
+                mCodec->mGraphicBufferSource->setRepeatPreviousFrameDelayUs(
+                        mCodec->mRepeatFrameDelayUs));
 
         if (err != OK) {
             ALOGE("[%s] Unable to configure option to repeat previous "
                   "frames (err %d)",
-                  mCodec->mComponentName.c_str(),
-                  err);
+                  mCodec->mComponentName.c_str(), err);
             return err;
         }
     }
 
     if (mCodec->mMaxPtsGapUs > 0ll) {
-        err = mCodec->mOMX->setInternalOption(
-                mCodec->mNode,
-                kPortIndexInput,
-                IOMX::INTERNAL_OPTION_MAX_TIMESTAMP_GAP,
-                &mCodec->mMaxPtsGapUs,
-                sizeof(mCodec->mMaxPtsGapUs));
+        OMX_PARAM_U32TYPE maxPtsGapParams;
+        InitOMXParams(&maxPtsGapParams);
+        maxPtsGapParams.nPortIndex = kPortIndexInput;
+        maxPtsGapParams.nU32 = (uint32_t) mCodec->mMaxPtsGapUs;
+
+        err = mCodec->mOMXNode->setParameter(
+                (OMX_INDEXTYPE)OMX_IndexParamMaxFrameDurationForBitrateControl,
+                &maxPtsGapParams, sizeof(maxPtsGapParams));
 
         if (err != OK) {
             ALOGE("[%s] Unable to configure max timestamp gap (err %d)",
-                    mCodec->mComponentName.c_str(),
-                    err);
+                    mCodec->mComponentName.c_str(), err);
             return err;
         }
     }
 
     if (mCodec->mMaxFps > 0) {
-        err = mCodec->mOMX->setInternalOption(
-                mCodec->mNode,
-                kPortIndexInput,
-                IOMX::INTERNAL_OPTION_MAX_FPS,
-                &mCodec->mMaxFps,
-                sizeof(mCodec->mMaxFps));
+        err = statusFromBinderStatus(
+                mCodec->mGraphicBufferSource->setMaxFps(mCodec->mMaxFps));
 
         if (err != OK) {
             ALOGE("[%s] Unable to configure max fps (err %d)",
-                    mCodec->mComponentName.c_str(),
-                    err);
+                    mCodec->mComponentName.c_str(), err);
             return err;
         }
     }
 
     if (mCodec->mTimePerCaptureUs > 0ll
             && mCodec->mTimePerFrameUs > 0ll) {
-        int64_t timeLapse[2];
-        timeLapse[0] = mCodec->mTimePerFrameUs;
-        timeLapse[1] = mCodec->mTimePerCaptureUs;
-        err = mCodec->mOMX->setInternalOption(
-                mCodec->mNode,
-                kPortIndexInput,
-                IOMX::INTERNAL_OPTION_TIME_LAPSE,
-                &timeLapse[0],
-                sizeof(timeLapse));
+        err = statusFromBinderStatus(
+                mCodec->mGraphicBufferSource->setTimeLapseConfig(
+                        mCodec->mTimePerFrameUs, mCodec->mTimePerCaptureUs));
 
         if (err != OK) {
             ALOGE("[%s] Unable to configure time lapse (err %d)",
-                    mCodec->mComponentName.c_str(),
-                    err);
+                    mCodec->mComponentName.c_str(), err);
             return err;
         }
     }
 
     if (mCodec->mCreateInputBuffersSuspended) {
-        bool suspend = true;
-        err = mCodec->mOMX->setInternalOption(
-                mCodec->mNode,
-                kPortIndexInput,
-                IOMX::INTERNAL_OPTION_SUSPEND,
-                &suspend,
-                sizeof(suspend));
+        err = statusFromBinderStatus(
+                mCodec->mGraphicBufferSource->setSuspend(true));
 
         if (err != OK) {
             ALOGE("[%s] Unable to configure option to suspend (err %d)",
-                  mCodec->mComponentName.c_str(),
-                  err);
+                  mCodec->mComponentName.c_str(), err);
             return err;
         }
     }
 
     uint32_t usageBits;
-    if (mCodec->mOMX->getParameter(
-            mCodec->mNode, (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits,
+    if (mCodec->mOMXNode->getParameter(
+            (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits,
             &usageBits, sizeof(usageBits)) == OK) {
         mCodec->mInputFormat->setInt32(
                 "using-sw-read-often", !!(usageBits & GRALLOC_USAGE_SW_READ_OFTEN));
@@ -6945,9 +6464,14 @@
 
     sp<ABuffer> colorAspectsBuffer;
     if (mCodec->mInputFormat->findBuffer("android._color-aspects", &colorAspectsBuffer)) {
-        err = mCodec->mOMX->setInternalOption(
-                mCodec->mNode, kPortIndexInput, IOMX::INTERNAL_OPTION_COLOR_ASPECTS,
-                colorAspectsBuffer->base(), colorAspectsBuffer->capacity());
+        if (colorAspectsBuffer->size() != sizeof(ColorAspects)) {
+            return INVALID_OPERATION;
+        }
+
+        err = statusFromBinderStatus(
+                mCodec->mGraphicBufferSource->setColorAspects(ColorUtils::packToU32(
+                        *(ColorAspects *)colorAspectsBuffer->base())));
+
         if (err != OK) {
             ALOGE("[%s] Unable to configure color aspects (err %d)",
                   mCodec->mComponentName.c_str(), err);
@@ -6961,33 +6485,18 @@
         const sp<AMessage> & /* msg */) {
     ALOGV("onCreateInputSurface");
 
-    sp<AMessage> notify = mCodec->mNotify->dup();
-    notify->setInt32("what", CodecBase::kWhatInputSurfaceCreated);
-
-    android_dataspace dataSpace;
-    status_t err =
-        mCodec->setInitialColorAspectsForVideoEncoderSurfaceAndGetDataSpace(&dataSpace);
-    notify->setMessage("input-format", mCodec->mInputFormat);
-    notify->setMessage("output-format", mCodec->mOutputFormat);
-
     sp<IGraphicBufferProducer> bufferProducer;
-    if (err == OK) {
-        mCodec->mInputMetadataType = kMetadataBufferTypeANWBuffer;
-        err = mCodec->mOMX->createInputSurface(
-                mCodec->mNode, kPortIndexInput, dataSpace, &bufferProducer,
-                &mCodec->mInputMetadataType);
-        // framework uses ANW buffers internally instead of gralloc handles
-        if (mCodec->mInputMetadataType == kMetadataBufferTypeGrallocSource) {
-            mCodec->mInputMetadataType = kMetadataBufferTypeANWBuffer;
-        }
-    }
+    status_t err = mCodec->mOMX->createInputSurface(
+            &bufferProducer, &mCodec->mGraphicBufferSource);
 
     if (err == OK) {
         err = setupInputSurface();
     }
 
     if (err == OK) {
-        notify->setObject("input-surface",
+        mCodec->mCallback->onInputSurfaceCreated(
+                mCodec->mInputFormat,
+                mCodec->mOutputFormat,
                 new BufferProducerWrapper(bufferProducer));
     } else {
         // Can't use mCodec->signalError() here -- MediaCodec won't forward
@@ -6995,59 +6504,37 @@
         // send a kWhatInputSurfaceCreated with an error value instead.
         ALOGE("[%s] onCreateInputSurface returning error %d",
                 mCodec->mComponentName.c_str(), err);
-        notify->setInt32("err", err);
+        mCodec->mCallback->onInputSurfaceCreationFailed(err);
     }
-    notify->post();
 }
 
-void ACodec::LoadedState::onSetInputSurface(
-        const sp<AMessage> &msg) {
+void ACodec::LoadedState::onSetInputSurface(const sp<AMessage> &msg) {
     ALOGV("onSetInputSurface");
 
-    sp<AMessage> notify = mCodec->mNotify->dup();
-    notify->setInt32("what", CodecBase::kWhatInputSurfaceAccepted);
-
     sp<RefBase> obj;
     CHECK(msg->findObject("input-surface", &obj));
     sp<PersistentSurface> surface = static_cast<PersistentSurface *>(obj.get());
+    mCodec->mGraphicBufferSource = surface->getBufferSource();
 
-    android_dataspace dataSpace;
-    status_t err =
-        mCodec->setInitialColorAspectsForVideoEncoderSurfaceAndGetDataSpace(&dataSpace);
-    notify->setMessage("input-format", mCodec->mInputFormat);
-    notify->setMessage("output-format", mCodec->mOutputFormat);
+    status_t err = setupInputSurface();
 
     if (err == OK) {
-        mCodec->mInputMetadataType = kMetadataBufferTypeANWBuffer;
-        err = mCodec->mOMX->setInputSurface(
-                mCodec->mNode, kPortIndexInput, surface->getBufferConsumer(),
-                &mCodec->mInputMetadataType);
-        // framework uses ANW buffers internally instead of gralloc handles
-        if (mCodec->mInputMetadataType == kMetadataBufferTypeGrallocSource) {
-            mCodec->mInputMetadataType = kMetadataBufferTypeANWBuffer;
-        }
-    }
-
-    if (err == OK) {
-        surface->getBufferConsumer()->setDefaultBufferDataSpace(dataSpace);
-        err = setupInputSurface();
-    }
-
-    if (err != OK) {
+        mCodec->mCallback->onInputSurfaceAccepted(
+                mCodec->mInputFormat, mCodec->mOutputFormat);
+    } else {
         // Can't use mCodec->signalError() here -- MediaCodec won't forward
         // the error through because it's in the "configured" state.  We
         // send a kWhatInputSurfaceAccepted with an error value instead.
         ALOGE("[%s] onSetInputSurface returning error %d",
                 mCodec->mComponentName.c_str(), err);
-        notify->setInt32("err", err);
+        mCodec->mCallback->onInputSurfaceDeclined(err);
     }
-    notify->post();
 }
 
 void ACodec::LoadedState::onStart() {
     ALOGV("onStart");
 
-    status_t err = mCodec->mOMX->sendCommand(mCodec->mNode, OMX_CommandStateSet, OMX_StateIdle);
+    status_t err = mCodec->mOMXNode->sendCommand(OMX_CommandStateSet, OMX_StateIdle);
     if (err != OK) {
         mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
     } else {
@@ -7072,8 +6559,8 @@
 
         mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
 
-        mCodec->mOMX->sendCommand(
-                mCodec->mNode, OMX_CommandStateSet, OMX_StateLoaded);
+        mCodec->mOMXNode->sendCommand(
+                OMX_CommandStateSet, OMX_StateLoaded);
         if (mCodec->allYourBuffersAreBelongToUs(kPortIndexInput)) {
             mCodec->freeBuffersOnPort(kPortIndexInput);
         }
@@ -7087,12 +6574,18 @@
 
 status_t ACodec::LoadedToIdleState::allocateBuffers() {
     status_t err = mCodec->allocateBuffersOnPort(kPortIndexInput);
-
     if (err != OK) {
         return err;
     }
 
-    return mCodec->allocateBuffersOnPort(kPortIndexOutput);
+    err = mCodec->allocateBuffersOnPort(kPortIndexOutput);
+    if (err != OK) {
+        return err;
+    }
+
+    mCodec->mCallback->onStartCompleted();
+
+    return OK;
 }
 
 bool ACodec::LoadedToIdleState::onMessageReceived(const sp<AMessage> &msg) {
@@ -7119,9 +6612,7 @@
         case kWhatFlush:
         {
             // We haven't even started yet, so we're flushed alright...
-            sp<AMessage> notify = mCodec->mNotify->dup();
-            notify->setInt32("what", CodecBase::kWhatFlushCompleted);
-            notify->post();
+            mCodec->mCallback->onFlushCompleted();
             return true;
         }
 
@@ -7145,8 +6636,8 @@
             }
 
             if (err == OK) {
-                err = mCodec->mOMX->sendCommand(
-                    mCodec->mNode, OMX_CommandStateSet, OMX_StateExecuting);
+                err = mCodec->mOMXNode->sendCommand(
+                    OMX_CommandStateSet, OMX_StateExecuting);
             }
 
             if (err != OK) {
@@ -7191,10 +6682,7 @@
         case kWhatFlush:
         {
             // We haven't even started yet, so we're flushed alright...
-            sp<AMessage> notify = mCodec->mNotify->dup();
-            notify->setInt32("what", CodecBase::kWhatFlushCompleted);
-            notify->post();
-
+            mCodec->mCallback->onFlushCompleted();
             return true;
         }
 
@@ -7289,14 +6777,11 @@
         ALOGV("[%s] calling fillBuffer %u", mCodec->mComponentName.c_str(), info->mBufferID);
 
         info->checkWriteFence("submitRegularOutputBuffers");
-        status_t err = mCodec->mOMX->fillBuffer(mCodec->mNode, info->mBufferID, info->mFenceFd);
-        info->mFenceFd = -1;
+        status_t err = mCodec->fillBuffer(info);
         if (err != OK) {
             failed = true;
             break;
         }
-
-        info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
     }
 
     if (failed) {
@@ -7357,8 +6842,8 @@
 
             mActive = false;
 
-            status_t err = mCodec->mOMX->sendCommand(
-                    mCodec->mNode, OMX_CommandStateSet, OMX_StateIdle);
+            status_t err = mCodec->mOMXNode->sendCommand(
+                    OMX_CommandStateSet, OMX_StateIdle);
             if (err != OK) {
                 if (keepComponentAllocated) {
                     mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
@@ -7384,7 +6869,7 @@
 
             mActive = false;
 
-            status_t err = mCodec->mOMX->sendCommand(mCodec->mNode, OMX_CommandFlush, OMX_ALL);
+            status_t err = mCodec->mOMXNode->sendCommand(OMX_CommandFlush, OMX_ALL);
             if (err != OK) {
                 mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
             } else {
@@ -7467,8 +6952,7 @@
         configParams.nPortIndex = kPortIndexOutput;
         configParams.nEncodeBitrate = videoBitrate;
 
-        status_t err = mOMX->setConfig(
-                mNode,
+        status_t err = mOMXNode->setConfig(
                 OMX_IndexConfigVideoBitrate,
                 &configParams,
                 sizeof(configParams));
@@ -7483,12 +6967,14 @@
 
     int64_t timeOffsetUs;
     if (params->findInt64("time-offset-us", &timeOffsetUs)) {
-        status_t err = mOMX->setInternalOption(
-            mNode,
-            kPortIndexInput,
-            IOMX::INTERNAL_OPTION_TIME_OFFSET,
-            &timeOffsetUs,
-            sizeof(timeOffsetUs));
+        if (mGraphicBufferSource == NULL) {
+            ALOGE("[%s] Invalid to set input buffer time offset without surface",
+                    mComponentName.c_str());
+            return INVALID_OPERATION;
+        }
+
+        status_t err = statusFromBinderStatus(
+                mGraphicBufferSource->setTimeOffsetUs(timeOffsetUs));
 
         if (err != OK) {
             ALOGE("[%s] Unable to set input buffer time offset (err %d)",
@@ -7500,13 +6986,14 @@
 
     int64_t skipFramesBeforeUs;
     if (params->findInt64("skip-frames-before", &skipFramesBeforeUs)) {
-        status_t err =
-            mOMX->setInternalOption(
-                     mNode,
-                     kPortIndexInput,
-                     IOMX::INTERNAL_OPTION_START_TIME,
-                     &skipFramesBeforeUs,
-                     sizeof(skipFramesBeforeUs));
+        if (mGraphicBufferSource == NULL) {
+            ALOGE("[%s] Invalid to set start time without surface",
+                    mComponentName.c_str());
+            return INVALID_OPERATION;
+        }
+
+        status_t err = statusFromBinderStatus(
+                mGraphicBufferSource->setStartTimeUs(skipFramesBeforeUs));
 
         if (err != OK) {
             ALOGE("Failed to set parameter 'skip-frames-before' (err %d)", err);
@@ -7516,15 +7003,14 @@
 
     int32_t dropInputFrames;
     if (params->findInt32("drop-input-frames", &dropInputFrames)) {
-        bool suspend = dropInputFrames != 0;
+        if (mGraphicBufferSource == NULL) {
+            ALOGE("[%s] Invalid to set suspend without surface",
+                    mComponentName.c_str());
+            return INVALID_OPERATION;
+        }
 
-        status_t err =
-            mOMX->setInternalOption(
-                     mNode,
-                     kPortIndexInput,
-                     IOMX::INTERNAL_OPTION_SUSPEND,
-                     &suspend,
-                     sizeof(suspend));
+        status_t err = statusFromBinderStatus(
+                mGraphicBufferSource->setSuspend(dropInputFrames != 0));
 
         if (err != OK) {
             ALOGE("Failed to set parameter 'drop-input-frames' (err %d)", err);
@@ -7571,14 +7057,11 @@
 }
 
 void ACodec::onSignalEndOfInputStream() {
-    sp<AMessage> notify = mNotify->dup();
-    notify->setInt32("what", CodecBase::kWhatSignaledInputEOS);
-
-    status_t err = mOMX->signalEndOfInputStream(mNode);
-    if (err != OK) {
-        notify->setInt32("err", err);
+    status_t err = INVALID_OPERATION;
+    if (mGraphicBufferSource != NULL) {
+        err = statusFromBinderStatus(mGraphicBufferSource->signalEndOfInputStream());
     }
-    notify->post();
+    mCallback->onSignaledInputEOS(err);
 }
 
 bool ACodec::ExecutingState::onOMXFrameRendered(int64_t mediaTimeUs, nsecs_t systemNano) {
@@ -7597,8 +7080,7 @@
 
             if (data2 == 0 || data2 == OMX_IndexParamPortDefinition) {
                 mCodec->mMetadataBuffersToSubmit = 0;
-                CHECK_EQ(mCodec->mOMX->sendCommand(
-                            mCodec->mNode,
+                CHECK_EQ(mCodec->mOMXNode->sendCommand(
                             OMX_CommandPortDisable, kPortIndexOutput),
                          (status_t)OK);
 
@@ -7703,14 +7185,15 @@
                 }
 
                 if (err == OK) {
-                    err = mCodec->mOMX->sendCommand(
-                            mCodec->mNode, OMX_CommandPortEnable, kPortIndexOutput);
+                    err = mCodec->mOMXNode->sendCommand(
+                            OMX_CommandPortEnable, kPortIndexOutput);
                 }
 
                 if (err == OK) {
                     err = mCodec->allocateBuffersOnPort(kPortIndexOutput);
                     ALOGE_IF(err != OK, "Failed to allocate output port buffers after port "
                             "reconfiguration: (%d)", err);
+                    mCodec->mCallback->onOutputBuffersChanged();
                 }
 
                 if (err != OK) {
@@ -7773,8 +7256,7 @@
 
         case kWhatShutdown:
         {
-            // We're already doing that...
-
+            mCodec->deferMessage(msg);
             handled = true;
             break;
         }
@@ -7829,8 +7311,8 @@
 
 void ACodec::ExecutingToIdleState::changeStateIfWeOwnAllBuffers() {
     if (mComponentNowIdle && mCodec->allYourBuffersAreBelongToUs()) {
-        status_t err = mCodec->mOMX->sendCommand(
-                mCodec->mNode, OMX_CommandStateSet, OMX_StateLoaded);
+        status_t err = mCodec->mOMXNode->sendCommand(
+                OMX_CommandStateSet, OMX_StateLoaded);
         if (err == OK) {
             err = mCodec->freeBuffersOnPort(kPortIndexInput);
             status_t err2 = mCodec->freeBuffersOnPort(kPortIndexOutput);
@@ -7883,8 +7365,7 @@
     switch (msg->what()) {
         case kWhatShutdown:
         {
-            // We're already doing that...
-
+            mCodec->deferMessage(msg);
             handled = true;
             break;
         }
@@ -8016,7 +7497,7 @@
         {
             sp<AMessage> msg = new AMessage(kWhatOMXMessage, mCodec);
             msg->setInt32("type", omx_message::EVENT);
-            msg->setInt32("node", mCodec->mNode);
+            msg->setInt32("generation", mCodec->mNodeGeneration);
             msg->setInt32("event", event);
             msg->setInt32("data1", data1);
             msg->setInt32("data2", data2);
@@ -8058,9 +7539,7 @@
 
         mCodec->mRenderTracker.clear(systemTime(CLOCK_MONOTONIC));
 
-        sp<AMessage> notify = mCodec->mNotify->dup();
-        notify->setInt32("what", CodecBase::kWhatFlushCompleted);
-        notify->post();
+        mCodec->mCallback->onFlushCompleted();
 
         mCodec->mPortEOS[kPortIndexInput] =
             mCodec->mPortEOS[kPortIndexOutput] = false;
@@ -8079,7 +7558,7 @@
         const AString &name, const AString &mime, bool isEncoder,
         sp<MediaCodecInfo::Capabilities> *caps) {
     (*caps).clear();
-    const char *role = getComponentRole(isEncoder, mime.c_str());
+    const char *role = GetComponentRole(isEncoder, mime.c_str());
     if (role == NULL) {
         return BAD_VALUE;
     }
@@ -8092,17 +7571,17 @@
 
     sp<IOMX> omx = client.interface();
     sp<CodecObserver> observer = new CodecObserver;
-    IOMX::node_id node = 0;
+    sp<IOMXNode> omxNode;
 
-    err = omx->allocateNode(name.c_str(), observer, NULL, &node);
+    err = omx->allocateNode(name.c_str(), observer, &omxNode);
     if (err != OK) {
         client.disconnect();
         return err;
     }
 
-    err = setComponentRole(omx, node, role);
+    err = SetComponentRole(omxNode, role);
     if (err != OK) {
-        omx->freeNode(node);
+        omxNode->freeNode();
         client.disconnect();
         return err;
     }
@@ -8117,8 +7596,8 @@
 
         for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
             param.nProfileIndex = index;
-            status_t err = omx->getParameter(
-                    node, OMX_IndexParamVideoProfileLevelQuerySupported,
+            status_t err = omxNode->getParameter(
+                    OMX_IndexParamVideoProfileLevelQuerySupported,
                     &param, sizeof(param));
             if (err != OK) {
                 break;
@@ -8141,16 +7620,16 @@
         Vector<uint32_t> supportedColors; // shadow copy to check for duplicates
         for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
             portFormat.nIndex = index;
-            status_t err = omx->getParameter(
-                    node, OMX_IndexParamVideoPortFormat,
+            status_t err = omxNode->getParameter(
+                    OMX_IndexParamVideoPortFormat,
                     &portFormat, sizeof(portFormat));
             if (err != OK) {
                 break;
             }
 
             OMX_U32 flexibleEquivalent;
-            if (isFlexibleColorFormat(
-                    omx, node, portFormat.eColorFormat, false /* usingNativeWindow */,
+            if (IsFlexibleColorFormat(
+                    omxNode, portFormat.eColorFormat, false /* usingNativeWindow */,
                     &flexibleEquivalent)) {
                 bool marked = false;
                 for (size_t i = 0; i < supportedColors.size(); ++i) {
@@ -8180,8 +7659,8 @@
         param.nPortIndex = isEncoder ? kPortIndexOutput : kPortIndexInput;
         for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
             param.nProfileIndex = index;
-            status_t err = omx->getParameter(
-                    node, (OMX_INDEXTYPE)OMX_IndexParamAudioProfileQuerySupported,
+            status_t err = omxNode->getParameter(
+                    (OMX_INDEXTYPE)OMX_IndexParamAudioProfileQuerySupported,
                     &param, sizeof(param));
             if (err != OK) {
                 break;
@@ -8205,15 +7684,15 @@
 
     if (isVideo && !isEncoder) {
         native_handle_t *sidebandHandle = NULL;
-        if (omx->configureVideoTunnelMode(
-                node, kPortIndexOutput, OMX_TRUE, 0, &sidebandHandle) == OK) {
+        if (omxNode->configureVideoTunnelMode(
+                kPortIndexOutput, OMX_TRUE, 0, &sidebandHandle) == OK) {
             // tunneled playback includes adaptive playback
             builder->addFlags(MediaCodecInfo::Capabilities::kFlagSupportsAdaptivePlayback
                     | MediaCodecInfo::Capabilities::kFlagSupportsTunneledPlayback);
-        } else if (omx->storeMetaDataInBuffers(
-                node, kPortIndexOutput, OMX_TRUE) == OK ||
-            omx->prepareForAdaptivePlayback(
-                node, kPortIndexOutput, OMX_TRUE,
+        } else if (omxNode->setPortMode(
+                kPortIndexOutput, IOMX::kPortModeDynamicANWBuffer) == OK ||
+                omxNode->prepareForAdaptivePlayback(
+                kPortIndexOutput, OMX_TRUE,
                 1280 /* width */, 720 /* height */) == OK) {
             builder->addFlags(MediaCodecInfo::Capabilities::kFlagSupportsAdaptivePlayback);
         }
@@ -8224,15 +7703,15 @@
         InitOMXParams(&params);
         params.nPortIndex = kPortIndexOutput;
         // TODO: should we verify if fallback is supported?
-        if (omx->getConfig(
-                node, (OMX_INDEXTYPE)OMX_IndexConfigAndroidIntraRefresh,
+        if (omxNode->getConfig(
+                (OMX_INDEXTYPE)OMX_IndexConfigAndroidIntraRefresh,
                 &params, sizeof(params)) == OK) {
             builder->addFlags(MediaCodecInfo::Capabilities::kFlagSupportsIntraRefresh);
         }
     }
 
     *caps = builder;
-    omx->freeNode(node);
+    omxNode->freeNode();
     client.disconnect();
     return OK;
 }
diff --git a/media/libstagefright/ACodecBufferChannel.cpp b/media/libstagefright/ACodecBufferChannel.cpp
new file mode 100644
index 0000000..1db7ab0
--- /dev/null
+++ b/media/libstagefright/ACodecBufferChannel.cpp
@@ -0,0 +1,308 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ACodecBufferChannel"
+#include <utils/Log.h>
+
+#include <numeric>
+
+#include <binder/MemoryDealer.h>
+#include <media/openmax/OMX_Core.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/MediaCodecBuffer.h>
+#include <system/window.h>
+
+#include "include/ACodecBufferChannel.h"
+#include "include/SecureBuffer.h"
+#include "include/SharedMemoryBuffer.h"
+
+namespace android {
+
+using BufferInfo = ACodecBufferChannel::BufferInfo;
+using BufferInfoIterator = std::vector<const BufferInfo>::const_iterator;
+
+static BufferInfoIterator findClientBuffer(
+        const std::shared_ptr<const std::vector<const BufferInfo>> &array,
+        const sp<MediaCodecBuffer> &buffer) {
+    return std::find_if(
+            array->begin(), array->end(),
+            [buffer](const BufferInfo &info) { return info.mClientBuffer == buffer; });
+}
+
+static BufferInfoIterator findBufferId(
+        const std::shared_ptr<const std::vector<const BufferInfo>> &array,
+        IOMX::buffer_id bufferId) {
+    return std::find_if(
+            array->begin(), array->end(),
+            [bufferId](const BufferInfo &info) { return bufferId == info.mBufferId; });
+}
+
+ACodecBufferChannel::BufferInfo::BufferInfo(
+        const sp<MediaCodecBuffer> &buffer,
+        IOMX::buffer_id bufferId,
+        const sp<IMemory> &sharedEncryptedBuffer)
+    : mClientBuffer(
+          (sharedEncryptedBuffer == nullptr)
+          ? buffer
+          : new SharedMemoryBuffer(buffer->format(), sharedEncryptedBuffer)),
+      mCodecBuffer(buffer),
+      mBufferId(bufferId),
+      mSharedEncryptedBuffer(sharedEncryptedBuffer) {
+}
+
+ACodecBufferChannel::ACodecBufferChannel(
+        const sp<AMessage> &inputBufferFilled, const sp<AMessage> &outputBufferDrained)
+    : mInputBufferFilled(inputBufferFilled),
+      mOutputBufferDrained(outputBufferDrained) {
+}
+
+status_t ACodecBufferChannel::queueInputBuffer(const sp<MediaCodecBuffer> &buffer) {
+    std::shared_ptr<const std::vector<const BufferInfo>> array(
+            std::atomic_load(&mInputBuffers));
+    BufferInfoIterator it = findClientBuffer(array, buffer);
+    if (it == array->end()) {
+        return -ENOENT;
+    }
+    ALOGV("queueInputBuffer #%d", it->mBufferId);
+    sp<AMessage> msg = mInputBufferFilled->dup();
+    msg->setObject("buffer", it->mCodecBuffer);
+    msg->setInt32("buffer-id", it->mBufferId);
+    msg->post();
+    return OK;
+}
+
+status_t ACodecBufferChannel::queueSecureInputBuffer(
+        const sp<MediaCodecBuffer> &buffer,
+        bool secure,
+        const uint8_t *key,
+        const uint8_t *iv,
+        CryptoPlugin::Mode mode,
+        CryptoPlugin::Pattern pattern,
+        const CryptoPlugin::SubSample *subSamples,
+        size_t numSubSamples,
+        AString *errorDetailMsg) {
+    if (mCrypto == nullptr) {
+        return -ENOSYS;
+    }
+    std::shared_ptr<const std::vector<const BufferInfo>> array(
+            std::atomic_load(&mInputBuffers));
+    BufferInfoIterator it = findClientBuffer(array, buffer);
+    if (it == array->end()) {
+        return -ENOENT;
+    }
+
+    void *dst_pointer = nullptr;
+    ICrypto::DestinationType dst_type = ICrypto::kDestinationTypeOpaqueHandle;
+
+    if (secure) {
+        sp<SecureBuffer> secureData = static_cast<SecureBuffer *>(it->mCodecBuffer.get());
+        dst_pointer = secureData->getDestinationPointer();
+        dst_type = secureData->getDestinationType();
+    } else {
+        dst_pointer = it->mCodecBuffer->base();
+        dst_type = ICrypto::kDestinationTypeVmPointer;
+    }
+
+    ssize_t result = mCrypto->decrypt(
+            dst_type,
+            key,
+            iv,
+            mode,
+            pattern,
+            it->mSharedEncryptedBuffer,
+            it->mClientBuffer->offset(),
+            subSamples,
+            numSubSamples,
+            dst_pointer,
+            errorDetailMsg);
+
+    if (result < 0) {
+        return result;
+    }
+
+    it->mCodecBuffer->setRange(0, result);
+
+    // Copy metadata from client to codec buffer.
+    it->mCodecBuffer->meta()->clear();
+    int64_t timeUs;
+    CHECK(it->mClientBuffer->meta()->findInt64("timeUs", &timeUs));
+    it->mCodecBuffer->meta()->setInt64("timeUs", timeUs);
+    int32_t eos;
+    if (it->mClientBuffer->meta()->findInt32("eos", &eos)) {
+        it->mCodecBuffer->meta()->setInt32("eos", eos);
+    }
+    int32_t csd;
+    if (it->mClientBuffer->meta()->findInt32("csd", &csd)) {
+        it->mCodecBuffer->meta()->setInt32("csd", csd);
+    }
+
+    ALOGV("queueSecureInputBuffer #%d", it->mBufferId);
+    sp<AMessage> msg = mInputBufferFilled->dup();
+    msg->setObject("buffer", it->mCodecBuffer);
+    msg->setInt32("buffer-id", it->mBufferId);
+    msg->post();
+    return OK;
+}
+
+status_t ACodecBufferChannel::renderOutputBuffer(
+        const sp<MediaCodecBuffer> &buffer, int64_t timestampNs) {
+    std::shared_ptr<const std::vector<const BufferInfo>> array(
+            std::atomic_load(&mOutputBuffers));
+    BufferInfoIterator it = findClientBuffer(array, buffer);
+    if (it == array->end()) {
+        return -ENOENT;
+    }
+
+    ALOGV("renderOutputBuffer #%d", it->mBufferId);
+    sp<AMessage> msg = mOutputBufferDrained->dup();
+    msg->setObject("buffer", buffer);
+    msg->setInt32("buffer-id", it->mBufferId);
+    msg->setInt32("render", true);
+    msg->setInt64("timestampNs", timestampNs);
+    msg->post();
+    return OK;
+}
+
+status_t ACodecBufferChannel::discardBuffer(const sp<MediaCodecBuffer> &buffer) {
+    std::shared_ptr<const std::vector<const BufferInfo>> array(
+            std::atomic_load(&mInputBuffers));
+    bool input = true;
+    BufferInfoIterator it = findClientBuffer(array, buffer);
+    if (it == array->end()) {
+        array = std::atomic_load(&mOutputBuffers);
+        input = false;
+        it = findClientBuffer(array, buffer);
+        if (it == array->end()) {
+            return -ENOENT;
+        }
+    }
+    ALOGV("discardBuffer #%d", it->mBufferId);
+    sp<AMessage> msg = input ? mInputBufferFilled->dup() : mOutputBufferDrained->dup();
+    msg->setObject("buffer", it->mCodecBuffer);
+    msg->setInt32("buffer-id", it->mBufferId);
+    msg->setInt32("discarded", true);
+    msg->post();
+    return OK;
+}
+
+void ACodecBufferChannel::getInputBufferArray(Vector<sp<MediaCodecBuffer>> *array) {
+    std::shared_ptr<const std::vector<const BufferInfo>> inputBuffers(
+            std::atomic_load(&mInputBuffers));
+    array->clear();
+    for (const BufferInfo &elem : *inputBuffers) {
+        array->push_back(elem.mClientBuffer);
+    }
+}
+
+void ACodecBufferChannel::getOutputBufferArray(Vector<sp<MediaCodecBuffer>> *array) {
+    std::shared_ptr<const std::vector<const BufferInfo>> outputBuffers(
+            std::atomic_load(&mOutputBuffers));
+    array->clear();
+    for (const BufferInfo &elem : *outputBuffers) {
+        array->push_back(elem.mClientBuffer);
+    }
+}
+
+void ACodecBufferChannel::setInputBufferArray(const std::vector<BufferAndId> &array) {
+    bool secure = (mCrypto != nullptr);
+    if (secure) {
+        size_t totalSize = std::accumulate(
+                array.begin(), array.end(), 0u,
+                [alignment = MemoryDealer::getAllocationAlignment()]
+                (size_t sum, const BufferAndId& elem) {
+                    return sum + align(elem.mBuffer->capacity(), alignment);
+                });
+        mDealer = new MemoryDealer(totalSize, "ACodecBufferChannel");
+    }
+    std::vector<const BufferInfo> inputBuffers;
+    for (const BufferAndId &elem : array) {
+        sp<IMemory> sharedEncryptedBuffer;
+        if (secure) {
+            sharedEncryptedBuffer = mDealer->allocate(elem.mBuffer->capacity());
+        }
+        inputBuffers.emplace_back(elem.mBuffer, elem.mBufferId, sharedEncryptedBuffer);
+    }
+    std::atomic_store(
+            &mInputBuffers,
+            std::make_shared<const std::vector<const BufferInfo>>(inputBuffers));
+}
+
+void ACodecBufferChannel::setOutputBufferArray(const std::vector<BufferAndId> &array) {
+    std::vector<const BufferInfo> outputBuffers;
+    for (const BufferAndId &elem : array) {
+        outputBuffers.emplace_back(elem.mBuffer, elem.mBufferId, nullptr);
+    }
+    std::atomic_store(
+            &mOutputBuffers,
+            std::make_shared<const std::vector<const BufferInfo>>(outputBuffers));
+}
+
+void ACodecBufferChannel::fillThisBuffer(IOMX::buffer_id bufferId) {
+    ALOGV("fillThisBuffer #%d", bufferId);
+    std::shared_ptr<const std::vector<const BufferInfo>> array(
+            std::atomic_load(&mInputBuffers));
+    BufferInfoIterator it = findBufferId(array, bufferId);
+
+    if (it == array->end()) {
+        ALOGE("fillThisBuffer: unrecognized buffer #%d", bufferId);
+        return;
+    }
+    if (it->mClientBuffer != it->mCodecBuffer) {
+        it->mClientBuffer->setFormat(it->mCodecBuffer->format());
+    }
+
+    mCallback->onInputBufferAvailable(
+            std::distance(array->begin(), it),
+            it->mClientBuffer);
+}
+
+void ACodecBufferChannel::drainThisBuffer(
+        IOMX::buffer_id bufferId,
+        OMX_U32 omxFlags) {
+    ALOGV("drainThisBuffer #%d", bufferId);
+    std::shared_ptr<const std::vector<const BufferInfo>> array(
+            std::atomic_load(&mOutputBuffers));
+    BufferInfoIterator it = findBufferId(array, bufferId);
+
+    if (it == array->end()) {
+        ALOGE("drainThisBuffer: unrecognized buffer #%d", bufferId);
+        return;
+    }
+    if (it->mClientBuffer != it->mCodecBuffer) {
+        it->mClientBuffer->setFormat(it->mCodecBuffer->format());
+    }
+
+    uint32_t flags = 0;
+    if (omxFlags & OMX_BUFFERFLAG_SYNCFRAME) {
+        flags |= MediaCodec::BUFFER_FLAG_SYNCFRAME;
+    }
+    if (omxFlags & OMX_BUFFERFLAG_CODECCONFIG) {
+        flags |= MediaCodec::BUFFER_FLAG_CODECCONFIG;
+    }
+    if (omxFlags & OMX_BUFFERFLAG_EOS) {
+        flags |= MediaCodec::BUFFER_FLAG_EOS;
+    }
+    it->mClientBuffer->meta()->setInt32("flags", flags);
+
+    mCallback->onOutputBufferAvailable(
+            std::distance(array->begin(), it),
+            it->mClientBuffer);
+}
+
+}  // namespace android
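
The new buffer channel keeps its input and output buffer lists as shared_ptr<const vector<BufferInfo>> and swaps them with std::atomic_load/std::atomic_store, so the codec side can publish a fresh array while binder-thread callers such as queueInputBuffer() keep iterating over their own snapshot. Below is a minimal, self-contained sketch of that copy-on-write pattern; the names (Entry, Registry) are illustrative only and not part of this patch.

    #include <atomic>
    #include <memory>
    #include <vector>

    // Hypothetical stand-in for BufferInfo.
    struct Entry { int id; };

    class Registry {
    public:
        // Writer: publish a brand-new immutable array. Readers holding the old
        // snapshot keep using it until their shared_ptr goes out of scope.
        void set(std::vector<Entry> entries) {
            auto fresh = std::make_shared<const std::vector<Entry>>(std::move(entries));
            std::atomic_store(&mEntries, std::shared_ptr<const std::vector<Entry>>(fresh));
        }

        // Reader: take a consistent snapshot without holding any lock.
        std::shared_ptr<const std::vector<Entry>> snapshot() const {
            return std::atomic_load(&mEntries);
        }

    private:
        std::shared_ptr<const std::vector<Entry>> mEntries =
                std::make_shared<const std::vector<Entry>>();
    };

Because each published vector is immutable, an iterator obtained from a snapshot (as findClientBuffer() does) cannot be invalidated by a concurrent setInputBufferArray(); the worst case is acting on a slightly stale snapshot.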
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 604ad7c..25dd6b1 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -4,20 +4,20 @@
 
 LOCAL_SRC_FILES:=                         \
         ACodec.cpp                        \
+        ACodecBufferChannel.cpp           \
         AACExtractor.cpp                  \
         AACWriter.cpp                     \
         AMRExtractor.cpp                  \
         AMRWriter.cpp                     \
         AudioPlayer.cpp                   \
         AudioSource.cpp                   \
+        BufferImpl.cpp                    \
         CallbackDataSource.cpp            \
         CameraSource.cpp                  \
         CameraSourceTimeLapse.cpp         \
-        CodecBase.cpp                     \
         DataConverter.cpp                 \
         DataSource.cpp                    \
         DataURISource.cpp                 \
-        DRMExtractor.cpp                  \
         ESDS.cpp                          \
         FileSource.cpp                    \
         FLACExtractor.cpp                 \
@@ -35,7 +35,6 @@
         MediaCodecList.cpp                \
         MediaCodecListOverrides.cpp       \
         MediaCodecSource.cpp              \
-        MediaDefs.cpp                     \
         MediaExtractor.cpp                \
         MediaSync.cpp                     \
         MidiExtractor.cpp                 \
@@ -46,7 +45,6 @@
         NuMediaExtractor.cpp              \
         OMXClient.cpp                     \
         OggExtractor.cpp                  \
-        ProcessInfo.cpp                   \
         SampleIterator.cpp                \
         SampleTable.cpp                   \
         SimpleDecodingSource.cpp          \
@@ -60,7 +58,6 @@
         VBRISeeker.cpp                    \
         VideoFrameScheduler.cpp           \
         WAVExtractor.cpp                  \
-        WVMExtractor.cpp                  \
         XINGSeeker.cpp                    \
         avc_utils.cpp                     \
 
@@ -110,9 +107,9 @@
         libstagefright_mpeg2ts \
         libstagefright_id3 \
         libFLAC \
-        libmedia_helper \
 
 LOCAL_SHARED_LIBRARIES += \
+        libmedia_helper \
         libstagefright_foundation \
         libdl \
         libRScpp \
@@ -126,7 +123,6 @@
 LOCAL_CFLAGS += -DENABLE_STAGEFRIGHT_EXPERIMENTS
 endif
 
-LOCAL_CLANG := true
 LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
 
 LOCAL_MODULE:= libstagefright

diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index efdee77..4ccd2d0 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -62,6 +62,8 @@
       mPrevSampleTimeUs(0),
       mInitialReadTimeUs(0),
       mNumFramesReceived(0),
+      mNumFramesSkipped(0),
+      mNumFramesLost(0),
       mNumClientOwnedBuffers(0) {
     ALOGV("sampleRate: %u, outSampleRate: %u, channelCount: %u",
             sampleRate, outSampleRate, channelCount);
@@ -295,11 +297,27 @@
 }
 
 status_t AudioSource::dataCallback(const AudioRecord::Buffer& audioBuffer) {
-    int64_t timeUs = systemTime() / 1000ll;
-    // Estimate the real sampling time of the 1st sample in this buffer
-    // from AudioRecord's latency. (Apply this adjustment first so that
-    // the start time logic is not affected.)
-    timeUs -= mRecord->latency() * 1000LL;
+    int64_t timeUs, position, timeNs;
+    ExtendedTimestamp ts;
+    ExtendedTimestamp::Location location;
+    const int32_t usPerSec = 1000000;
+
+    if (mRecord->getTimestamp(&ts) == OK &&
+            ts.getBestTimestamp(&position, &timeNs, ExtendedTimestamp::TIMEBASE_MONOTONIC,
+            &location) == OK) {
+        // Use audio timestamp.
+        timeUs = timeNs / 1000 -
+                (position - mNumFramesSkipped -
+                mNumFramesReceived + mNumFramesLost) * usPerSec / mSampleRate;
+    } else {
+        // This should not happen in the normal case.
+        ALOGW("Failed to get audio timestamp; falling back to system clock");
+        timeUs = systemTime() / 1000ll;
+        // Estimate the real sampling time of the 1st sample in this buffer
+        // from AudioRecord's latency. (Apply this adjustment first so that
+        // the start time logic is not affected.)
+        timeUs -= mRecord->latency() * 1000LL;
+    }
 
     ALOGV("dataCallbackTimestamp: %" PRId64 " us", timeUs);
     Mutex::Autolock autoLock(mLock);
@@ -308,10 +326,15 @@
         return OK;
     }
 
+    const size_t bufferSize = audioBuffer.size;
+
     // Drop retrieved and previously lost audio data.
     if (mNumFramesReceived == 0 && timeUs < mStartTimeUs) {
         (void) mRecord->getInputFramesLost();
-        ALOGV("Drop audio data at %" PRId64 "/%" PRId64 " us", timeUs, mStartTimeUs);
+        int64_t receivedFrames = bufferSize / mRecord->frameSize();
+        ALOGV("Drop audio data (%" PRId64 " frames) at %" PRId64 "/%" PRId64 " us",
+                receivedFrames, timeUs, mStartTimeUs);
+        mNumFramesSkipped += receivedFrames;
         return OK;
     }
 
@@ -320,11 +343,7 @@
         // Initial delay
         if (mStartTimeUs > 0) {
             mStartTimeUs = timeUs - mStartTimeUs;
-        } else {
-            // Assume latency is constant.
-            mStartTimeUs += mRecord->latency() * 1000;
         }
-
         mPrevSampleTimeUs = mStartTimeUs;
     }
 
@@ -354,6 +373,7 @@
         MediaBuffer *lostAudioBuffer = new MediaBuffer(bufferSize);
         memset(lostAudioBuffer->data(), 0, bufferSize);
         lostAudioBuffer->set_range(0, bufferSize);
+        mNumFramesLost += bufferSize / mRecord->frameSize();
         queueInputBuffer_l(lostAudioBuffer, timeUs);
     }
 
@@ -362,7 +382,6 @@
         return OK;
     }
 
-    const size_t bufferSize = audioBuffer.size;
     MediaBuffer *buffer = new MediaBuffer(bufferSize);
     memcpy((uint8_t *) buffer->data(),
             audioBuffer.i16, audioBuffer.size);
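
The rewritten AudioSource::dataCallback() prefers AudioRecord::getTimestamp() over the wall clock: it takes the timestamp's frame position, subtracts the frames already accounted for (the patch combines mNumFramesReceived, mNumFramesSkipped and mNumFramesLost for that count), and backs the timestamp up by that many frame periods to get the capture time of the first sample of the incoming buffer. A minimal sketch of that back-dating step follows; the names are hypothetical and the exact frame bookkeeping above is the patch's, not this sketch's.

    #include <cstdint>

    // Back-date a monotonic timestamp (timeNs, taken at frame `position`) to the
    // first frame of the buffer being queued. `framesAccounted` stands for the
    // frames of the capture stream already handled before this buffer.
    static int64_t bufferStartTimeUs(int64_t timeNs, int64_t position,
                                     int64_t framesAccounted, uint32_t sampleRate) {
        const int64_t usPerSec = 1000000;
        int64_t framesAhead = position - framesAccounted;
        return timeNs / 1000 - framesAhead * usPerSec / sampleRate;
    }

    // Example: a timestamp at frame 48480 with 48000 frames already handled at
    // 48 kHz means the new buffer began 480 frames (10 ms) before timeNs.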
diff --git a/media/libstagefright/BufferImpl.cpp b/media/libstagefright/BufferImpl.cpp
new file mode 100644
index 0000000..37a40ec
--- /dev/null
+++ b/media/libstagefright/BufferImpl.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "BufferImpl"
+#include <utils/Log.h>
+
+#include <binder/IMemory.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/ICrypto.h>
+#include <utils/NativeHandle.h>
+
+#include "include/SecureBuffer.h"
+#include "include/SharedMemoryBuffer.h"
+
+namespace android {
+
+SharedMemoryBuffer::SharedMemoryBuffer(const sp<AMessage> &format, const sp<IMemory> &mem)
+    : MediaCodecBuffer(format, new ABuffer(mem->pointer(), mem->size())),
+      mMemory(mem) {
+}
+
+SecureBuffer::SecureBuffer(const sp<AMessage> &format, const void *ptr, size_t size)
+    : MediaCodecBuffer(format, new ABuffer(nullptr, size)),
+      mPointer(ptr) {
+}
+
+SecureBuffer::SecureBuffer(
+        const sp<AMessage> &format, const sp<NativeHandle> &handle, size_t size)
+    : MediaCodecBuffer(format, new ABuffer(nullptr, size)),
+      mPointer(nullptr),
+      mHandle(handle) {
+}
+
+void *SecureBuffer::getDestinationPointer() {
+    return (void *)(mHandle == nullptr ? mPointer : mHandle->handle());
+}
+
+ICrypto::DestinationType SecureBuffer::getDestinationType() {
+    return mHandle == nullptr ? ICrypto::kDestinationTypeOpaqueHandle
+                              : ICrypto::kDestinationTypeNativeHandle;
+}
+
+}  // namespace android
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 408ad7a..0fe44eb 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -111,6 +111,11 @@
 }
 
 static int32_t getColorFormat(const char* colorFormat) {
+    if (!colorFormat) {
+        ALOGE("Invalid color format");
+        return -1;
+    }
+
     if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_YUV420P)) {
        return OMX_COLOR_FormatYUV420Planar;
     }
@@ -765,9 +770,7 @@
         return mInitCheck;
     }
 
-    char value[PROPERTY_VALUE_MAX];
-    if (property_get("media.stagefright.record-stats", value, NULL)
-        && (!strcmp(value, "1") || !strcasecmp(value, "true"))) {
+    if (property_get_bool("media.stagefright.record-stats", false)) {
         mCollectStats = true;
     }
 
diff --git a/media/libstagefright/DRMExtractor.cpp b/media/libstagefright/DRMExtractor.cpp
deleted file mode 100644
index d36ac65..0000000
--- a/media/libstagefright/DRMExtractor.cpp
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "include/DRMExtractor.h"
-
-#include <arpa/inet.h>
-#include <utils/String8.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/Utils.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaBuffer.h>
-
-#include <drm/drm_framework_common.h>
-#include <utils/Errors.h>
-
-
-namespace android {
-
-class DRMSource : public MediaSource {
-public:
-    DRMSource(const sp<IMediaSource> &mediaSource,
-            const sp<DecryptHandle> &decryptHandle,
-            DrmManagerClient *managerClient,
-            int32_t trackId, DrmBuffer *ipmpBox);
-
-    virtual status_t start(MetaData *params = NULL);
-    virtual status_t stop();
-    virtual sp<MetaData> getFormat();
-    virtual status_t read(
-            MediaBuffer **buffer, const ReadOptions *options = NULL);
-
-protected:
-    virtual ~DRMSource();
-
-private:
-    sp<IMediaSource> mOriginalMediaSource;
-    sp<DecryptHandle> mDecryptHandle;
-    DrmManagerClient* mDrmManagerClient;
-    size_t mTrackId;
-    mutable Mutex mDRMLock;
-    size_t mNALLengthSize;
-    bool mWantsNALFragments;
-
-    DRMSource(const DRMSource &);
-    DRMSource &operator=(const DRMSource &);
-};
-
-////////////////////////////////////////////////////////////////////////////////
-
-DRMSource::DRMSource(const sp<IMediaSource> &mediaSource,
-        const sp<DecryptHandle> &decryptHandle,
-        DrmManagerClient *managerClient,
-        int32_t trackId, DrmBuffer *ipmpBox)
-    : mOriginalMediaSource(mediaSource),
-      mDecryptHandle(decryptHandle),
-      mDrmManagerClient(managerClient),
-      mTrackId(trackId),
-      mNALLengthSize(0),
-      mWantsNALFragments(false) {
-    CHECK(mDrmManagerClient);
-    mDrmManagerClient->initializeDecryptUnit(
-            mDecryptHandle, trackId, ipmpBox);
-
-    const char *mime;
-    bool success = getFormat()->findCString(kKeyMIMEType, &mime);
-    CHECK(success);
-
-    if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
-        uint32_t type;
-        const void *data;
-        size_t size;
-        CHECK(getFormat()->findData(kKeyAVCC, &type, &data, &size));
-
-        const uint8_t *ptr = (const uint8_t *)data;
-
-        CHECK(size >= 7);
-        CHECK_EQ(ptr[0], 1);  // configurationVersion == 1
-
-        // The number of bytes used to encode the length of a NAL unit.
-        mNALLengthSize = 1 + (ptr[4] & 3);
-    }
-}
-
-DRMSource::~DRMSource() {
-    Mutex::Autolock autoLock(mDRMLock);
-    mDrmManagerClient->finalizeDecryptUnit(mDecryptHandle, mTrackId);
-}
-
-status_t DRMSource::start(MetaData *params) {
-    int32_t val;
-    if (params && params->findInt32(kKeyWantsNALFragments, &val)
-        && val != 0) {
-        mWantsNALFragments = true;
-    } else {
-        mWantsNALFragments = false;
-    }
-
-   return mOriginalMediaSource->start(params);
-}
-
-status_t DRMSource::stop() {
-    return mOriginalMediaSource->stop();
-}
-
-sp<MetaData> DRMSource::getFormat() {
-    return mOriginalMediaSource->getFormat();
-}
-
-status_t DRMSource::read(MediaBuffer **buffer, const ReadOptions *options) {
-    Mutex::Autolock autoLock(mDRMLock);
-    status_t err;
-    if ((err = mOriginalMediaSource->read(buffer, options)) != OK) {
-        return err;
-    }
-
-    size_t len = (*buffer)->range_length();
-
-    char *src = (char *)(*buffer)->data() + (*buffer)->range_offset();
-
-    DrmBuffer encryptedDrmBuffer(src, len);
-    DrmBuffer decryptedDrmBuffer;
-    decryptedDrmBuffer.length = len;
-    decryptedDrmBuffer.data = new char[len];
-    DrmBuffer *pDecryptedDrmBuffer = &decryptedDrmBuffer;
-
-    if ((err = mDrmManagerClient->decrypt(mDecryptHandle, mTrackId,
-            &encryptedDrmBuffer, &pDecryptedDrmBuffer)) != NO_ERROR) {
-
-        if (decryptedDrmBuffer.data) {
-            delete [] decryptedDrmBuffer.data;
-            decryptedDrmBuffer.data = NULL;
-        }
-
-        return err;
-    }
-    CHECK(pDecryptedDrmBuffer == &decryptedDrmBuffer);
-
-    const char *mime;
-    CHECK(getFormat()->findCString(kKeyMIMEType, &mime));
-
-    if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC) && !mWantsNALFragments) {
-        uint8_t *dstData = (uint8_t*)src;
-        size_t srcOffset = 0;
-        size_t dstOffset = 0;
-
-        len = decryptedDrmBuffer.length;
-        while (srcOffset < len) {
-            CHECK(srcOffset + mNALLengthSize <= len);
-            size_t nalLength = 0;
-            const uint8_t* data = (const uint8_t*)(&decryptedDrmBuffer.data[srcOffset]);
-
-            switch (mNALLengthSize) {
-                case 1:
-                    nalLength = *data;
-                    break;
-                case 2:
-                    nalLength = U16_AT(data);
-                    break;
-                case 3:
-                    nalLength = ((size_t)data[0] << 16) | U16_AT(&data[1]);
-                    break;
-                case 4:
-                    nalLength = U32_AT(data);
-                    break;
-                default:
-                    CHECK(!"Should not be here.");
-                    break;
-            }
-
-            srcOffset += mNALLengthSize;
-
-            size_t end = srcOffset + nalLength;
-            if (end > len || end < srcOffset) {
-                if (decryptedDrmBuffer.data) {
-                    delete [] decryptedDrmBuffer.data;
-                    decryptedDrmBuffer.data = NULL;
-                }
-
-                return ERROR_MALFORMED;
-            }
-
-            if (nalLength == 0) {
-                continue;
-            }
-
-            if (dstOffset > SIZE_MAX - 4 ||
-                dstOffset + 4 > SIZE_MAX - nalLength ||
-                dstOffset + 4 + nalLength > (*buffer)->size()) {
-                (*buffer)->release();
-                (*buffer) = NULL;
-                if (decryptedDrmBuffer.data) {
-                    delete [] decryptedDrmBuffer.data;
-                    decryptedDrmBuffer.data = NULL;
-                }
-                return ERROR_MALFORMED;
-            }
-
-            dstData[dstOffset++] = 0;
-            dstData[dstOffset++] = 0;
-            dstData[dstOffset++] = 0;
-            dstData[dstOffset++] = 1;
-            memcpy(&dstData[dstOffset], &decryptedDrmBuffer.data[srcOffset], nalLength);
-            srcOffset += nalLength;
-            dstOffset += nalLength;
-        }
-
-        CHECK_EQ(srcOffset, len);
-        (*buffer)->set_range((*buffer)->range_offset(), dstOffset);
-
-    } else {
-        memcpy(src, decryptedDrmBuffer.data, decryptedDrmBuffer.length);
-        (*buffer)->set_range((*buffer)->range_offset(), decryptedDrmBuffer.length);
-    }
-
-    if (decryptedDrmBuffer.data) {
-        delete [] decryptedDrmBuffer.data;
-        decryptedDrmBuffer.data = NULL;
-    }
-
-    return OK;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-DRMExtractor::DRMExtractor(const sp<DataSource> &source, const char* mime)
-    : mDataSource(source),
-      mDecryptHandle(NULL),
-      mDrmManagerClient(NULL) {
-    mOriginalExtractor = MediaExtractor::Create(source, mime);
-    mOriginalExtractor->setDrmFlag(true);
-    mOriginalExtractor->getMetaData()->setInt32(kKeyIsDRM, 1);
-
-    source->getDrmInfo(mDecryptHandle, &mDrmManagerClient);
-}
-
-DRMExtractor::~DRMExtractor() {
-}
-
-size_t DRMExtractor::countTracks() {
-    return mOriginalExtractor->countTracks();
-}
-
-sp<IMediaSource> DRMExtractor::getTrack(size_t index) {
-    sp<IMediaSource> originalMediaSource = mOriginalExtractor->getTrack(index);
-    originalMediaSource->getFormat()->setInt32(kKeyIsDRM, 1);
-
-    int32_t trackID;
-    CHECK(getTrackMetaData(index, 0)->findInt32(kKeyTrackID, &trackID));
-
-    DrmBuffer ipmpBox;
-    ipmpBox.data = mOriginalExtractor->getDrmTrackInfo(trackID, &(ipmpBox.length));
-    CHECK(ipmpBox.length > 0);
-
-    return interface_cast<IMediaSource>(
-            new DRMSource(originalMediaSource, mDecryptHandle, mDrmManagerClient,
-            trackID, &ipmpBox));
-}
-
-sp<MetaData> DRMExtractor::getTrackMetaData(size_t index, uint32_t flags) {
-    return mOriginalExtractor->getTrackMetaData(index, flags);
-}
-
-sp<MetaData> DRMExtractor::getMetaData() {
-    return mOriginalExtractor->getMetaData();
-}
-
-bool SniffDRM(
-    const sp<DataSource> &source, String8 *mimeType, float *confidence,
-        sp<AMessage> *) {
-    sp<DecryptHandle> decryptHandle = source->DrmInitialization();
-
-    if (decryptHandle != NULL) {
-        if (decryptHandle->decryptApiType == DecryptApiType::CONTAINER_BASED) {
-            *mimeType = String8("drm+container_based+") + decryptHandle->mimeType;
-            *confidence = 10.0f;
-        } else if (decryptHandle->decryptApiType == DecryptApiType::ELEMENTARY_STREAM_BASED) {
-            *mimeType = String8("drm+es_based+") + decryptHandle->mimeType;
-            *confidence = 10.0f;
-        } else {
-            return false;
-        }
-
-        return true;
-    }
-
-    return false;
-}
-} //namespace android
-
diff --git a/media/libstagefright/DataConverter.cpp b/media/libstagefright/DataConverter.cpp
index aea47f3..52be054 100644
--- a/media/libstagefright/DataConverter.cpp
+++ b/media/libstagefright/DataConverter.cpp
@@ -21,13 +21,13 @@
 
 #include <audio_utils/primitives.h>
 
-#include <media/stagefright/foundation/ABuffer.h>
+#include <media/MediaCodecBuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AUtils.h>
 
 namespace android {
 
-status_t DataConverter::convert(const sp<ABuffer> &source, sp<ABuffer> &target) {
+status_t DataConverter::convert(const sp<MediaCodecBuffer> &source, sp<MediaCodecBuffer> &target) {
     CHECK(source->base() != target->base());
     size_t size = targetSize(source->size());
     status_t err = OK;
@@ -43,7 +43,8 @@
     return err;
 }
 
-status_t DataConverter::safeConvert(const sp<ABuffer> &source, sp<ABuffer> &target) {
+status_t DataConverter::safeConvert(
+        const sp<MediaCodecBuffer> &source, sp<MediaCodecBuffer> &target) {
     memcpy(target->base(), source->data(), source->size());
     return OK;
 }
@@ -101,7 +102,7 @@
     return NULL;
 }
 
-status_t AudioConverter::safeConvert(const sp<ABuffer> &src, sp<ABuffer> &tgt) {
+status_t AudioConverter::safeConvert(const sp<MediaCodecBuffer> &src, sp<MediaCodecBuffer> &tgt) {
     if (mTo == kAudioEncodingPcm8bit && mFrom == kAudioEncodingPcm16bit) {
         memcpy_to_u8_from_i16((uint8_t*)tgt->base(), (const int16_t*)src->data(), src->size() / 2);
     } else if (mTo == kAudioEncodingPcm8bit && mFrom == kAudioEncodingPcmFloat) {
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index 163a527..4a965ba 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -16,24 +16,9 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "DataSource"
 
-#include "include/AMRExtractor.h"
-
-#include "include/AACExtractor.h"
 #include "include/CallbackDataSource.h"
-#include "include/DRMExtractor.h"
-#include "include/FLACExtractor.h"
 #include "include/HTTPBase.h"
-#include "include/MidiExtractor.h"
-#include "include/MP3Extractor.h"
-#include "include/MPEG2PSExtractor.h"
-#include "include/MPEG2TSExtractor.h"
-#include "include/MPEG4Extractor.h"
 #include "include/NuCachedSource2.h"
-#include "include/OggExtractor.h"
-#include "include/WAVExtractor.h"
-#include "include/WVMExtractor.h"
-
-#include "matroska/MatroskaExtractor.h"
 
 #include <media/IMediaHTTPConnection.h>
 #include <media/IMediaHTTPService.h>
@@ -44,12 +29,15 @@
 #include <media/stagefright/FileSource.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaHTTP.h>
+#include <media/stagefright/Utils.h>
 #include <utils/String8.h>
 
 #include <cutils/properties.h>
 
 #include <private/android_filesystem_config.h>
 
+#include <arpa/inet.h>
+
 namespace android {
 
 bool DataSource::getUInt16(off64_t offset, uint16_t *x) {
@@ -112,83 +100,6 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-Mutex DataSource::gSnifferMutex;
-List<DataSource::SnifferFunc> DataSource::gSniffers;
-bool DataSource::gSniffersRegistered = false;
-
-bool DataSource::sniff(
-        String8 *mimeType, float *confidence, sp<AMessage> *meta) {
-    *mimeType = "";
-    *confidence = 0.0f;
-    meta->clear();
-
-    {
-        Mutex::Autolock autoLock(gSnifferMutex);
-        if (!gSniffersRegistered) {
-            return false;
-        }
-    }
-
-    for (List<SnifferFunc>::iterator it = gSniffers.begin();
-         it != gSniffers.end(); ++it) {
-        String8 newMimeType;
-        float newConfidence;
-        sp<AMessage> newMeta;
-        if ((*it)(this, &newMimeType, &newConfidence, &newMeta)) {
-            if (newConfidence > *confidence) {
-                *mimeType = newMimeType;
-                *confidence = newConfidence;
-                *meta = newMeta;
-            }
-        }
-    }
-
-    return *confidence > 0.0;
-}
-
-// static
-void DataSource::RegisterSniffer_l(SnifferFunc func) {
-    for (List<SnifferFunc>::iterator it = gSniffers.begin();
-         it != gSniffers.end(); ++it) {
-        if (*it == func) {
-            return;
-        }
-    }
-
-    gSniffers.push_back(func);
-}
-
-// static
-void DataSource::RegisterDefaultSniffers() {
-    Mutex::Autolock autoLock(gSnifferMutex);
-    if (gSniffersRegistered) {
-        return;
-    }
-
-    RegisterSniffer_l(SniffMPEG4);
-    RegisterSniffer_l(SniffMatroska);
-    RegisterSniffer_l(SniffOgg);
-    RegisterSniffer_l(SniffWAV);
-    RegisterSniffer_l(SniffFLAC);
-    RegisterSniffer_l(SniffAMR);
-    RegisterSniffer_l(SniffMPEG2TS);
-    RegisterSniffer_l(SniffMP3);
-    RegisterSniffer_l(SniffAAC);
-    RegisterSniffer_l(SniffMPEG2PS);
-    if (getuid() == AID_MEDIA) {
-        // WVM only in the media server process
-        RegisterSniffer_l(SniffWVM);
-    }
-    RegisterSniffer_l(SniffMidi);
-
-    char value[PROPERTY_VALUE_MAX];
-    if (property_get("drm.service.enabled", value, NULL)
-            && (!strcmp(value, "1") || !strcasecmp(value, "true"))) {
-        RegisterSniffer_l(SniffDRM);
-    }
-    gSniffersRegistered = true;
-}
-
 // static
 sp<DataSource> DataSource::CreateFromURI(
         const sp<IMediaHTTPService> &httpService,
@@ -200,14 +111,10 @@
         *contentType = "";
     }
 
-    bool isWidevine = !strncasecmp("widevine://", uri, 11);
-
     sp<DataSource> source;
     if (!strncasecmp("file://", uri, 7)) {
         source = new FileSource(uri + 7);
-    } else if (!strncasecmp("http://", uri, 7)
-            || !strncasecmp("https://", uri, 8)
-            || isWidevine) {
+    } else if (!strncasecmp("http://", uri, 7) || !strncasecmp("https://", uri, 8)) {
         if (httpService == NULL) {
             ALOGE("Invalid http service!");
             return NULL;
@@ -222,14 +129,6 @@
             httpSource = new MediaHTTP(conn);
         }
 
-        String8 tmp;
-        if (isWidevine) {
-            tmp = String8("http://");
-            tmp.append(uri + 11);
-
-            uri = tmp.string();
-        }
-
         String8 cacheConfig;
         bool disconnectAtHighwatermark;
         KeyedVector<String8, String8> nonCacheSpecificHeaders;
@@ -246,20 +145,14 @@
             return NULL;
         }
 
-        if (!isWidevine) {
-            if (contentType != NULL) {
-                *contentType = httpSource->getMIMEType();
-            }
-
-            source = NuCachedSource2::Create(
-                    httpSource,
-                    cacheConfig.isEmpty() ? NULL : cacheConfig.string(),
-                    disconnectAtHighwatermark);
-        } else {
-            // We do not want that prefetching, caching, datasource wrapper
-            // in the widevine:// case.
-            source = httpSource;
+        if (contentType != NULL) {
+            *contentType = httpSource->getMIMEType();
         }
+
+        source = NuCachedSource2::Create(
+                httpSource,
+                cacheConfig.isEmpty() ? NULL : cacheConfig.string(),
+                disconnectAtHighwatermark);
     } else if (!strncasecmp("data:", uri, 5)) {
         source = DataURISource::Create(uri);
     } else {
diff --git a/media/libstagefright/MPEG2TSWriter.cpp b/media/libstagefright/MPEG2TSWriter.cpp
index 931b280..b83b0a0 100644
--- a/media/libstagefright/MPEG2TSWriter.cpp
+++ b/media/libstagefright/MPEG2TSWriter.cpp
@@ -37,7 +37,7 @@
 struct MPEG2TSWriter::SourceInfo : public AHandler {
     explicit SourceInfo(const sp<IMediaSource> &source);
 
-    void start(const sp<AMessage> &notify);
+    void start(const sp<AMessage> &notify, const sp<MetaData> &params);
     void stop();
 
     unsigned streamType() const;
@@ -75,7 +75,7 @@
 
     sp<ABuffer> mAACCodecSpecificData;
 
-    sp<ABuffer> mAACBuffer;
+    sp<ABuffer> mBuffer;
 
     sp<ABuffer> mLastAccessUnit;
     bool mEOSReceived;
@@ -85,10 +85,8 @@
 
     void extractCodecSpecificData();
 
-    bool appendAACFrames(MediaBuffer *buffer);
-    bool flushAACFrames();
-
-    void postAVCFrame(MediaBuffer *buffer);
+    void appendAACFrames(MediaBuffer *buffer);
+    void appendAVCFrame(MediaBuffer *buffer);
 
     DISALLOW_EVIL_CONSTRUCTORS(SourceInfo);
 };
@@ -129,13 +127,14 @@
     return mContinuityCounter;
 }
 
-void MPEG2TSWriter::SourceInfo::start(const sp<AMessage> &notify) {
+void MPEG2TSWriter::SourceInfo::start(const sp<AMessage> &notify, const sp<MetaData> &params) {
     mLooper->registerHandler(this);
     mLooper->start();
-
     mNotify = notify;
 
-    (new AMessage(kWhatStart, this))->post();
+    sp<AMessage> msg = new AMessage(kWhatStart, this);
+    msg->setObject("meta", params);
+    msg->post();
 }
 
 void MPEG2TSWriter::SourceInfo::stop() {
@@ -250,56 +249,51 @@
     notify->post();
 }
 
-void MPEG2TSWriter::SourceInfo::postAVCFrame(MediaBuffer *buffer) {
+void MPEG2TSWriter::SourceInfo::appendAVCFrame(MediaBuffer *buffer) {
     sp<AMessage> notify = mNotify->dup();
     notify->setInt32("what", kNotifyBuffer);
 
-    sp<ABuffer> copy =
-        new ABuffer(buffer->range_length());
-    memcpy(copy->data(),
+    if (mBuffer == NULL || buffer->range_length() > mBuffer->capacity()) {
+        mBuffer = new ABuffer(buffer->range_length());
+    }
+    mBuffer->setRange(0, 0);
+
+    memcpy(mBuffer->data(),
            (const uint8_t *)buffer->data()
             + buffer->range_offset(),
            buffer->range_length());
 
     int64_t timeUs;
     CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
-    copy->meta()->setInt64("timeUs", timeUs);
+    mBuffer->meta()->setInt64("timeUs", timeUs);
 
     int32_t isSync;
     if (buffer->meta_data()->findInt32(kKeyIsSyncFrame, &isSync)
             && isSync != 0) {
-        copy->meta()->setInt32("isSync", true);
+        mBuffer->meta()->setInt32("isSync", true);
     }
 
-    notify->setBuffer("buffer", copy);
+    mBuffer->setRange(0, buffer->range_length());
+
+    notify->setBuffer("buffer", mBuffer);
     notify->post();
 }
 
-bool MPEG2TSWriter::SourceInfo::appendAACFrames(MediaBuffer *buffer) {
-    bool accessUnitPosted = false;
+void MPEG2TSWriter::SourceInfo::appendAACFrames(MediaBuffer *buffer) {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kNotifyBuffer);
 
-    if (mAACBuffer != NULL
-            && mAACBuffer->size() + 7 + buffer->range_length()
-                    > mAACBuffer->capacity()) {
-        accessUnitPosted = flushAACFrames();
+    if (mBuffer == NULL || 7 + buffer->range_length() > mBuffer->capacity()) {
+        mBuffer = new ABuffer(7 + buffer->range_length());
     }
 
-    if (mAACBuffer == NULL) {
-        size_t alloc = 4096;
-        if (buffer->range_length() + 7 > alloc) {
-            alloc = 7 + buffer->range_length();
-        }
+    int64_t timeUs;
+    CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
 
-        mAACBuffer = new ABuffer(alloc);
+    mBuffer->meta()->setInt64("timeUs", timeUs);
+    mBuffer->meta()->setInt32("isSync", true);
 
-        int64_t timeUs;
-        CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
-
-        mAACBuffer->meta()->setInt64("timeUs", timeUs);
-        mAACBuffer->meta()->setInt32("isSync", true);
-
-        mAACBuffer->setRange(0, 0);
-    }
+    mBuffer->setRange(0, 0);
 
     const uint8_t *codec_specific_data = mAACCodecSpecificData->data();
 
@@ -312,7 +306,7 @@
     unsigned channel_configuration =
         (codec_specific_data[1] >> 3) & 0x0f;
 
-    uint8_t *ptr = mAACBuffer->data() + mAACBuffer->size();
+    uint8_t *ptr = mBuffer->data() + mBuffer->size();
 
     const uint32_t aac_frame_length = buffer->range_length() + 7;
 
@@ -340,24 +334,10 @@
 
     ptr += buffer->range_length();
 
-    mAACBuffer->setRange(0, ptr - mAACBuffer->data());
+    mBuffer->setRange(0, ptr - mBuffer->data());
 
-    return accessUnitPosted;
-}
-
-bool MPEG2TSWriter::SourceInfo::flushAACFrames() {
-    if (mAACBuffer == NULL) {
-        return false;
-    }
-
-    sp<AMessage> notify = mNotify->dup();
-    notify->setInt32("what", kNotifyBuffer);
-    notify->setBuffer("buffer", mAACBuffer);
+    notify->setBuffer("buffer", mBuffer);
     notify->post();
-
-    mAACBuffer.clear();
-
-    return true;
 }
 
 void MPEG2TSWriter::SourceInfo::readMore() {
@@ -368,7 +348,10 @@
     switch (msg->what()) {
         case kWhatStart:
         {
-            status_t err = mSource->start();
+            sp<RefBase> obj;
+            CHECK(msg->findObject("meta", &obj));
+            MetaData *params = static_cast<MetaData *>(obj.get());
+            status_t err = mSource->start(params);
             if (err != OK) {
                 sp<AMessage> notify = mNotify->dup();
                 notify->setInt32("what", kNotifyStartFailed);
@@ -376,6 +359,7 @@
                 break;
             }
 
+            // Extract CSD from config format.
             extractCodecSpecificData();
 
             readMore();
@@ -388,10 +372,6 @@
             status_t err = mSource->read(&buffer);
 
             if (err != OK && err != INFO_FORMAT_CHANGED) {
-                if (mStreamType == 0x0f) {
-                    flushAACFrames();
-                }
-
                 sp<AMessage> notify = mNotify->dup();
                 notify->setInt32("what", kNotifyReachedEOS);
                 notify->setInt32("status", err);
@@ -401,23 +381,20 @@
 
             if (err == OK) {
                 if (mStreamType == 0x0f && mAACCodecSpecificData == NULL) {
-                    // The first buffer contains codec specific data.
-
+                    // The first audio buffer must contain CSD if not received yet.
                     CHECK_GE(buffer->range_length(), 2u);
-
                     mAACCodecSpecificData = new ABuffer(buffer->range_length());
 
                     memcpy(mAACCodecSpecificData->data(),
                            (const uint8_t *)buffer->data()
                             + buffer->range_offset(),
                            buffer->range_length());
+                    readMore();
                 } else if (buffer->range_length() > 0) {
                     if (mStreamType == 0x0f) {
-                        if (!appendAACFrames(buffer)) {
-                            msg->post();
-                        }
+                        appendAACFrames(buffer);
                     } else {
-                        postAVCFrame(buffer);
+                        appendAVCFrame(buffer);
                     }
                 } else {
                     readMore();
@@ -452,7 +429,6 @@
 
     int64_t timeUs;
     CHECK(mLastAccessUnit->meta()->findInt64("timeUs", &timeUs));
-
     return timeUs;
 }
 
@@ -542,7 +518,7 @@
     return OK;
 }
 
-status_t MPEG2TSWriter::start(MetaData * /* param */) {
+status_t MPEG2TSWriter::start(MetaData *param) {
     CHECK(!mStarted);
 
     mStarted = true;
@@ -556,7 +532,7 @@
 
         notify->setInt32("source-index", i);
 
-        mSources.editItemAt(i)->start(notify);
+        mSources.editItemAt(i)->start(notify, param);
     }
 
     return OK;
@@ -594,13 +570,13 @@
         {
             int32_t sourceIndex;
             CHECK(msg->findInt32("source-index", &sourceIndex));
+            sp<SourceInfo> source = mSources.editItemAt(sourceIndex);
 
             int32_t what;
             CHECK(msg->findInt32("what", &what));
 
             if (what == SourceInfo::kNotifyReachedEOS
                     || what == SourceInfo::kNotifyStartFailed) {
-                sp<SourceInfo> source = mSources.editItemAt(sourceIndex);
                 source->setEOSReceived();
 
                 sp<ABuffer> buffer = source->lastAccessUnit();
@@ -615,6 +591,7 @@
             } else if (what == SourceInfo::kNotifyBuffer) {
                 sp<ABuffer> buffer;
                 CHECK(msg->findBuffer("buffer", &buffer));
+                CHECK(source->lastAccessUnit() == NULL);
 
                 int32_t oob;
                 if (msg->findInt32("oob", &oob) && oob) {
@@ -635,15 +612,10 @@
                 // Rinse, repeat.
                 // If we don't have data on any track we don't write
                 // anything just yet.
-
-                sp<SourceInfo> source = mSources.editItemAt(sourceIndex);
-
-                CHECK(source->lastAccessUnit() == NULL);
                 source->setLastAccessUnit(buffer);
 
                 ALOGV("lastAccessUnitTimeUs[%d] = %.2f secs",
-                     sourceIndex, source->lastAccessUnitTimeUs() / 1E6);
-
+                    sourceIndex, source->lastAccessUnitTimeUs() / 1E6);
                 int64_t minTimeUs = -1;
                 size_t minIndex = 0;
 
@@ -665,15 +637,14 @@
                 }
 
                 if (minTimeUs < 0) {
-                    ALOGV("not a all tracks have valid data.");
+                    ALOGV("not all tracks have valid data.");
                     break;
                 }
 
                 ALOGV("writing access unit at time %.2f secs (index %zu)",
-                     minTimeUs / 1E6, minIndex);
+                    minTimeUs / 1E6, minIndex);
 
                 source = mSources.editItemAt(minIndex);
-
                 buffer = source->lastAccessUnit();
                 source->setLastAccessUnit(NULL);
 
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 9392e7d..87d7d3c 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -815,6 +815,10 @@
         ALOGE("b/23540914");
         return ERROR_MALFORMED;
     }
+    if (depth > 100) {
+        ALOGE("b/27456299");
+        return ERROR_MALFORMED;
+    }
     uint32_t hdr[2];
     if (mDataSource->readAt(*offset, hdr, 8) < 8) {
         return ERROR_IO;
@@ -2313,6 +2317,12 @@
             return UNKNOWN_ERROR; // stop parsing after sidx
         }
 
+        case FOURCC('a', 'c', '-', '3'):
+        {
+            *offset += chunk_size;
+            return parseAC3SampleEntry(data_offset);
+        }
+
         case FOURCC('f', 't', 'y', 'p'):
         {
             if (chunk_data_size < 8 || depth != 0) {
@@ -2361,6 +2371,99 @@
     return OK;
 }
 
+status_t MPEG4Extractor::parseAC3SampleEntry(off64_t offset) {
+    // skip 16 bytes:
+    //  + 6-byte reserved,
+    //  + 2-byte data reference index,
+    //  + 8-byte reserved
+    offset += 16;
+    uint16_t channelCount;
+    if (!mDataSource->getUInt16(offset, &channelCount)) {
+        return ERROR_MALFORMED;
+    }
+    // skip 8 bytes:
+    //  + 2-byte channelCount,
+    //  + 2-byte sample size,
+    //  + 4-byte reserved
+    offset += 8;
+    uint16_t sampleRate;
+    if (!mDataSource->getUInt16(offset, &sampleRate)) {
+        ALOGE("MPEG4Extractor: error while reading ac-3 block: cannot read sample rate");
+        return ERROR_MALFORMED;
+    }
+
+    // skip 4 bytes:
+    //  + 2-byte sampleRate,
+    //  + 2-byte reserved
+    offset += 4;
+    return parseAC3SpecificBox(offset, sampleRate);
+}
+
+status_t MPEG4Extractor::parseAC3SpecificBox(
+        off64_t offset, uint16_t sampleRate) {
+    uint32_t size;
+    // + 4-byte size
+    // + 4-byte type
+    // + 3-byte payload
+    const uint32_t kAC3SpecificBoxSize = 11;
+    if (!mDataSource->getUInt32(offset, &size) || size < kAC3SpecificBoxSize) {
+        ALOGE("MPEG4Extractor: error while reading ac-3 block: cannot read specific box size");
+        return ERROR_MALFORMED;
+    }
+
+    offset += 4;
+    uint32_t type;
+    if (!mDataSource->getUInt32(offset, &type) || type != FOURCC('d', 'a', 'c', '3')) {
+        ALOGE("MPEG4Extractor: error while reading ac-3 specific block: header not dac3");
+        return ERROR_MALFORMED;
+    }
+
+    offset += 4;
+    const uint32_t kAC3SpecificBoxPayloadSize = 3;
+    uint8_t chunk[kAC3SpecificBoxPayloadSize];
+    if (mDataSource->readAt(offset, chunk, sizeof(chunk)) != sizeof(chunk)) {
+        ALOGE("MPEG4Extractor: error while reading ac-3 specific block: bitstream fields");
+        return ERROR_MALFORMED;
+    }
+
+    ABitReader br(chunk, sizeof(chunk));
+    static const unsigned channelCountTable[] = {2, 1, 2, 3, 3, 4, 4, 5};
+    static const unsigned sampleRateTable[] = {48000, 44100, 32000};
+
+    unsigned fscod = br.getBits(2);
+    if (fscod == 3) {
+        ALOGE("Incorrect fscod (3) in AC3 header");
+        return ERROR_MALFORMED;
+    }
+    unsigned boxSampleRate = sampleRateTable[fscod];
+    if (boxSampleRate != sampleRate) {
+        ALOGE("sample rate mismatch: boxSampleRate = %d, sampleRate = %d",
+            boxSampleRate, sampleRate);
+        return ERROR_MALFORMED;
+    }
+
+    unsigned bsid = br.getBits(5);
+    if (bsid > 8) {
+        ALOGW("Incorrect bsid in AC3 header. Possibly E-AC-3?");
+        return ERROR_MALFORMED;
+    }
+
+    // skip
+    unsigned bsmod __unused = br.getBits(3);
+
+    unsigned acmod = br.getBits(3);
+    unsigned lfeon = br.getBits(1);
+    unsigned channelCount = channelCountTable[acmod] + lfeon;
+
+    if (mLastTrack == NULL) {
+        return ERROR_MALFORMED;
+    }
+    mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC3);
+    mLastTrack->meta->setInt32(kKeyChannelCount, channelCount);
+    mLastTrack->meta->setInt32(kKeySampleRate, sampleRate);
+    return OK;
+}
+
 status_t MPEG4Extractor::parseSegmentIndex(off64_t offset, size_t size) {
   ALOGV("MPEG4Extractor::parseSegmentIndex");
 
@@ -2905,7 +3008,7 @@
 
     int32_t type = U32_AT(&buffer[0]);
     if ((type == FOURCC('n', 'c', 'l', 'x') && size >= 11)
-            || (type == FOURCC('n', 'c', 'l', 'c' && size >= 10))) {
+            || (type == FOURCC('n', 'c', 'l', 'c') && size >= 10)) {
         int32_t primaries = U16_AT(&buffer[4]);
         int32_t transfer = U16_AT(&buffer[6]);
         int32_t coeffs = U16_AT(&buffer[8]);
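
The new parseAC3SpecificBox() reads the three-byte 'dac3' payload with an ABitReader: fscod (2 bits) selects the sample rate, bsid (5 bits) must be at most 8 for plain AC-3, and after the 3-bit bsmod, acmod (3 bits) plus lfeon (1 bit) give the channel count. A self-contained sketch of the same field layout, decoding a hypothetical payload with plain shifts instead of ABitReader:

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Hypothetical 'dac3' payload: fscod=0 (48 kHz), bsid=8, bsmod=0,
        // acmod=7 (3/2 layout), lfeon=1 -> 6 channels.
        const uint8_t payload[3] = {0x10, 0x3D, 0x40};
        static const unsigned kChannels[] = {2, 1, 2, 3, 3, 4, 4, 5};
        static const unsigned kRates[] = {48000, 44100, 32000};

        uint32_t bits = (payload[0] << 16) | (payload[1] << 8) | payload[2];
        unsigned fscod = (bits >> 22) & 0x3;
        unsigned bsid  = (bits >> 17) & 0x1f;
        unsigned acmod = (bits >> 11) & 0x7;   // the 3-bit bsmod sits between bsid and acmod
        unsigned lfeon = (bits >> 10) & 0x1;

        if (fscod == 3 || bsid > 8) {
            printf("not a plain AC-3 stream\n");
            return 1;
        }
        printf("AC-3: %u Hz, %u channels\n", kRates[fscod], kChannels[acmod] + lfeon);
        return 0;
    }

Running the sketch on the sample payload prints "AC-3: 48000 Hz, 6 channels", mirroring what the extractor stores into kKeySampleRate and kKeyChannelCount.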
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 9de5c26..02a1239 100755
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -69,6 +69,7 @@
 static const uint8_t kNalUnitTypeSeqParamSet = 0x07;
 static const uint8_t kNalUnitTypePicParamSet = 0x08;
 static const int64_t kInitialDelayTimeUs     = 700000LL;
+static const int64_t kMaxMetadataSize = 0x4000000LL;   // 64MB max per-frame metadata size
 
 static const char kMetaKey_Version[]    = "com.android.version";
 #ifdef SHOW_MODEL_BUILD
@@ -116,6 +117,7 @@
     int32_t getTrackId() const { return mTrackId; }
     status_t dump(int fd, const Vector<String16>& args) const;
     static const char *getFourCCForMime(const char *mime);
+    const char *getTrackType() const;
 
 private:
     enum {
@@ -271,6 +273,7 @@
     bool mIsAvc;
     bool mIsHevc;
     bool mIsAudio;
+    bool mIsVideo;
     bool mIsMPEG4;
     bool mIsMalformed;
     int32_t mTrackId;
@@ -393,6 +396,7 @@
     void writeMdhdBox(uint32_t now);
     void writeSmhdBox();
     void writeVmhdBox();
+    void writeNmhdBox();
     void writeHdlrBox();
     void writeTkhdBox(uint32_t now);
     void writeColrBox();
@@ -400,6 +404,7 @@
     void writeMp4vEsdsBox();
     void writeAudioFourCCBox();
     void writeVideoFourCCBox();
+    void writeMetadataFourCCBox();
     void writeStblBox(bool use32BitOffset);
 
     Track(const Track &);
@@ -477,7 +482,7 @@
     const size_t SIZE = 256;
     char buffer[SIZE];
     String8 result;
-    snprintf(buffer, SIZE, "     %s track\n", mIsAudio? "Audio": "Video");
+    snprintf(buffer, SIZE, "     %s track\n", getTrackType());
     result.append(buffer);
     snprintf(buffer, SIZE, "       reached EOS: %s\n",
             mReachedEOS? "true": "false");
@@ -513,8 +518,10 @@
         } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) {
             return "hvc1";
         }
+    } else if (!strncasecmp(mime, "application/", 12)) {
+        return "mett";
     } else {
-        ALOGE("Track (%s) other than video or audio is not supported", mime);
+        ALOGE("Track (%s) other than video/audio/metadata is not supported", mime);
     }
     return NULL;
 }
@@ -526,37 +533,17 @@
         return UNKNOWN_ERROR;
     }
 
-    // At most 2 tracks can be supported.
-    if (mTracks.size() >= 2) {
-        ALOGE("Too many tracks (%zu) to add", mTracks.size());
-        return ERROR_UNSUPPORTED;
-    }
-
     CHECK(source.get() != NULL);
 
     const char *mime;
     source->getFormat()->findCString(kKeyMIMEType, &mime);
-    bool isAudio = !strncasecmp(mime, "audio/", 6);
+
     if (Track::getFourCCForMime(mime) == NULL) {
         ALOGE("Unsupported mime '%s'", mime);
         return ERROR_UNSUPPORTED;
     }
 
-    // At this point, we know the track to be added is either
-    // video or audio. Thus, we only need to check whether it
-    // is an audio track or not (if it is not, then it must be
-    // a video track).
-
-    // No more than one video or one audio track is supported.
-    for (List<Track*>::iterator it = mTracks.begin();
-         it != mTracks.end(); ++it) {
-        if ((*it)->isAudio() == isAudio) {
-            ALOGE("%s track already exists", isAudio? "Audio": "Video");
-            return ERROR_UNSUPPORTED;
-        }
-    }
-
-    // This is the first track of either audio or video.
+    // This is a metadata track or the first track of either audio or video
     // Go ahead to add the track.
     Track *track = new Track(this, source, 1 + mTracks.size());
     mTracks.push_back(track);
@@ -1123,9 +1110,7 @@
 
     // Test mode is enabled only if rw.media.record.test system
     // property is enabled.
-    char value[PROPERTY_VALUE_MAX];
-    if (property_get("rw.media.record.test", value, NULL) &&
-        (!strcasecmp(value, "true") || !strcasecmp(value, "1"))) {
+    if (property_get_bool("rw.media.record.test", false)) {
         return true;
     }
     return false;
@@ -1561,11 +1546,12 @@
     mIsAvc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
     mIsHevc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
     mIsAudio = !strncasecmp(mime, "audio/", 6);
+    mIsVideo = !strncasecmp(mime, "video/", 6);
     mIsMPEG4 = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4) ||
                !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC);
 
     // store temporal layer count
-    if (!mIsAudio) {
+    if (mIsVideo) {
         int32_t count;
         if (mMeta->findInt32(kKeyTemporalLayerCount, &count) && count > 1) {
             mOwner->setTemporalLayerCount(count);
@@ -1621,7 +1607,7 @@
 void MPEG4Writer::Track::addOneCttsTableEntry(
         size_t sampleCount, int32_t duration) {
 
-    if (mIsAudio) {
+    if (!mIsVideo) {
         return;
     }
     mCttsTableEntries->add(htonl(sampleCount));
@@ -1753,7 +1739,7 @@
 
 void MPEG4Writer::writeChunkToFile(Chunk* chunk) {
     ALOGV("writeChunkToFile: %" PRId64 " from %s track",
-        chunk->mTimeStampUs, chunk->mTrack->isAudio()? "audio": "video");
+        chunk->mTimeStampUs, chunk->mTrack->getTrackType());
 
     int32_t isFirstSample = true;
     while (!chunk->mSamples.empty()) {
@@ -1906,7 +1892,7 @@
     mStartTimeRealUs = startTimeUs;
 
     int32_t rotationDegrees;
-    if (!mIsAudio && params && params->findInt32(kKeyRotation, &rotationDegrees)) {
+    if (mIsVideo && params && params->findInt32(kKeyRotation, &rotationDegrees)) {
         mRotation = rotationDegrees;
     }
 
@@ -1964,7 +1950,7 @@
 }
 
 status_t MPEG4Writer::Track::stop() {
-    ALOGD("%s track stopping", mIsAudio? "Audio": "Video");
+    ALOGD("%s track stopping", getTrackType());
     if (!mStarted) {
         ALOGE("Stop() called but track is not started");
         return ERROR_END_OF_STREAM;
@@ -1975,15 +1961,15 @@
     }
     mDone = true;
 
-    ALOGD("%s track source stopping", mIsAudio? "Audio": "Video");
+    ALOGD("%s track source stopping", getTrackType());
     mSource->stop();
-    ALOGD("%s track source stopped", mIsAudio? "Audio": "Video");
+    ALOGD("%s track source stopped", getTrackType());
 
     void *dummy;
     pthread_join(mThread, &dummy);
     status_t err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
 
-    ALOGD("%s track stopped", mIsAudio? "Audio": "Video");
+    ALOGD("%s track stopped", getTrackType());
     return err;
 }
 
@@ -2381,8 +2367,10 @@
 
     if (mIsAudio) {
         prctl(PR_SET_NAME, (unsigned long)"AudioTrackEncoding", 0, 0, 0);
-    } else {
+    } else if (mIsVideo) {
         prctl(PR_SET_NAME, (unsigned long)"VideoTrackEncoding", 0, 0, 0);
+    } else {
+        prctl(PR_SET_NAME, (unsigned long)"MetadataTrackEncoding", 0, 0, 0);
     }
 
     if (mOwner->isRealTimeRecording()) {
@@ -2393,7 +2381,7 @@
 
     status_t err = OK;
     MediaBuffer *buffer;
-    const char *trackName = mIsAudio ? "Audio" : "Video";
+    const char *trackName = getTrackType();
     while (!mDone && (err = mSource->read(&buffer)) == OK) {
         if (buffer->range_length() == 0) {
             buffer->release();
@@ -2450,6 +2438,16 @@
             continue;
         }
 
+        // Per-frame metadata sample's size must be smaller than max allowed.
+        if (!mIsVideo && !mIsAudio && buffer->range_length() >= kMaxMetadataSize) {
+            ALOGW("Buffer size is %zu. Maximum metadata buffer size is %lld for %s track",
+                    buffer->range_length(), (long long)kMaxMetadataSize, trackName);
+            buffer->release();
+            mSource->stop();
+            mIsMalformed = true;
+            break;
+        }
+
         ++nActualFrames;
 
         // Make a deep copy of the MediaBuffer and Metadata and release
@@ -2536,7 +2534,7 @@
             break;
         }
 
-        if (!mIsAudio) {
+        if (mIsVideo) {
             /*
              * Composition time: timestampUs
              * Decoding time: decodingTimeUs
@@ -2661,7 +2659,6 @@
                 timestampUs += deltaUs;
             }
         }
-
         mStszTableEntries->add(htonl(sampleSize));
         if (mStszTableEntries->count() > 2) {
 
@@ -2808,7 +2805,7 @@
         return true;
     }
 
-    if (!mIsAudio && mStssTableEntries->count() == 0) {  // no sync frames for video
+    if (mIsVideo && mStssTableEntries->count() == 0) {  // no sync frames for video
         ALOGE("There are no sync frames for video track");
         return true;
     }
@@ -2831,7 +2828,7 @@
 
     mOwner->notify(MEDIA_RECORDER_TRACK_EVENT_INFO,
                     trackNum | MEDIA_RECORDER_TRACK_INFO_TYPE,
-                    mIsAudio? 0: 1);
+                    mIsAudio ? 0: 1);
 
     mOwner->notify(MEDIA_RECORDER_TRACK_EVENT_INFO,
                     trackNum | MEDIA_RECORDER_TRACK_INFO_DURATION_MS,
@@ -2971,11 +2968,11 @@
     return OK;
 }
 
+const char *MPEG4Writer::Track::getTrackType() const {
+    return mIsAudio ? "Audio" : (mIsVideo ? "Video" : "Metadata");
+}
+
 void MPEG4Writer::Track::writeTrackHeader(bool use32BitOffset) {
-
-    ALOGV("%s track time scale: %d",
-        mIsAudio? "Audio": "Video", mTimeScale);
-
     uint32_t now = getMpeg4Time();
     mOwner->beginBox("trak");
         writeTkhdBox(now);
@@ -2985,8 +2982,10 @@
             mOwner->beginBox("minf");
                 if (mIsAudio) {
                     writeSmhdBox();
-                } else {
+                } else if (mIsVideo) {
                     writeVmhdBox();
+                } else {
+                    writeNmhdBox();
                 }
                 writeDinfBox();
                 writeStblBox(use32BitOffset);
@@ -3002,13 +3001,15 @@
     mOwner->writeInt32(1);               // entry count
     if (mIsAudio) {
         writeAudioFourCCBox();
-    } else {
+    } else if (mIsVideo) {
         writeVideoFourCCBox();
+    } else {
+        writeMetadataFourCCBox();
     }
     mOwner->endBox();  // stsd
     writeSttsBox();
-    writeCttsBox();
-    if (!mIsAudio) {
+    if (mIsVideo) {
+        writeCttsBox();
         writeStssBox();
     }
     writeStszBox();
@@ -3017,6 +3018,20 @@
     mOwner->endBox();  // stbl
 }
 
+void MPEG4Writer::Track::writeMetadataFourCCBox() {
+    const char *mime;
+    bool success = mMeta->findCString(kKeyMIMEType, &mime);
+    CHECK(success);
+    const char *fourcc = getFourCCForMime(mime);
+    if (fourcc == NULL) {
+        ALOGE("Unknown mime type '%s'.", mime);
+        TRESPASS();
+    }
+    mOwner->beginBox(fourcc);    // TextMetaDataSampleEntry
+    mOwner->writeCString(mime);  // metadata mime_format
+    mOwner->endBox(); // mett
+}
+
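For orientation, a standalone sketch of the bytes the helper above produces, assuming MPEG4Writer's beginBox() writes a 32-bit size placeholder plus the fourcc and endBox() backpatches the size once the box is closed; the fourcc comes from getFourCCForMime() and the mime string is written with its terminating NUL. The function name and use of std::vector below are purely illustrative, not AOSP APIs.

    #include <arpa/inet.h>
    #include <cstdint>
    #include <cstring>
    #include <string>
    #include <vector>

    // Illustrative only: serialize a metadata sample entry the same way the
    // writer does -- [size][fourcc][mime + NUL] -- with the size backpatched.
    std::vector<uint8_t> makeMetadataSampleEntry(const std::string &fourcc,
                                                 const std::string &mime) {
        std::vector<uint8_t> box(4, 0);                   // size placeholder
        box.insert(box.end(), fourcc.begin(), fourcc.end());
        box.insert(box.end(), mime.begin(), mime.end());
        box.push_back('\0');                              // writeCString() includes the NUL
        uint32_t size = htonl(static_cast<uint32_t>(box.size()));
        std::memcpy(box.data(), &size, sizeof(size));
        return box;
    }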
 void MPEG4Writer::Track::writeVideoFourCCBox() {
     const char *mime;
     bool success = mMeta->findCString(kKeyMIMEType, &mime);
@@ -3024,7 +3039,7 @@
     const char *fourcc = getFourCCForMime(mime);
     if (fourcc == NULL) {
         ALOGE("Unknown mime type '%s'.", mime);
-        CHECK(!"should not be here, unknown mime type.");
+        TRESPASS();
     }
 
     mOwner->beginBox(fourcc);        // video format
@@ -3097,7 +3112,7 @@
     const char *fourcc = getFourCCForMime(mime);
     if (fourcc == NULL) {
         ALOGE("Unknown mime type '%s'.", mime);
-        CHECK(!"should not be here, unknown mime type.");
+        TRESPASS();
     }
 
     mOwner->beginBox(fourcc);        // audio format
@@ -3240,13 +3255,19 @@
 
     mOwner->writeCompositionMatrix(mRotation);       // matrix
 
-    if (mIsAudio) {
+    if (!mIsVideo) {
         mOwner->writeInt32(0);
         mOwner->writeInt32(0);
     } else {
         int32_t width, height;
-        bool success = mMeta->findInt32(kKeyWidth, &width);
-        success = success && mMeta->findInt32(kKeyHeight, &height);
+        bool success = mMeta->findInt32(kKeyDisplayWidth, &width);
+        success = success && mMeta->findInt32(kKeyDisplayHeight, &height);
+
+        // Use width/height if display width/height are not present.
+        if (!success) {
+            success = mMeta->findInt32(kKeyWidth, &width);
+            success = success && mMeta->findInt32(kKeyHeight, &height);
+        }
         CHECK(success);
 
         mOwner->writeInt32(width << 16);   // 32-bit fixed-point value
@@ -3273,16 +3294,22 @@
     mOwner->endBox();
 }
 
+void MPEG4Writer::Track::writeNmhdBox() {
+    mOwner->beginBox("nmhd");
+    mOwner->writeInt32(0);           // version=0, flags=0
+    mOwner->endBox();
+}
+
 void MPEG4Writer::Track::writeHdlrBox() {
     mOwner->beginBox("hdlr");
     mOwner->writeInt32(0);             // version=0, flags=0
     mOwner->writeInt32(0);             // component type: should be mhlr
-    mOwner->writeFourcc(mIsAudio ? "soun" : "vide");  // component subtype
+    mOwner->writeFourcc(mIsAudio ? "soun" : (mIsVideo ? "vide" : "meta"));  // component subtype
     mOwner->writeInt32(0);             // reserved
     mOwner->writeInt32(0);             // reserved
     mOwner->writeInt32(0);             // reserved
     // Removing "r" for the name string just makes the string 4 byte aligned
-    mOwner->writeCString(mIsAudio ? "SoundHandle": "VideoHandle");  // name
+    mOwner->writeCString(mIsAudio ? "SoundHandle": (mIsVideo ? "VideoHandle" : "MetadHandle"));
     mOwner->endBox();
 }
 
@@ -3309,7 +3336,12 @@
     // Each character is packed as the difference between its ASCII value and 0x60.
     // For "English", these are 00101, 01110, 00111.
     // XXX: Where is the padding bit located: 0x15C7?
-    mOwner->writeInt16(0);             // language code
+    const char *lang = NULL;
+    int16_t langCode = 0;
+    if (mMeta->findCString(kKeyMediaLanguage, &lang) && lang && strnlen(lang, 3) > 2) {
+        langCode = ((lang[0] & 0x1f) << 10) | ((lang[1] & 0x1f) << 5) | (lang[2] & 0x1f);
+    }
+    mOwner->writeInt16(langCode);      // language code
     mOwner->writeInt16(0);             // predefined
     mOwner->endBox();
 }
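As a quick check of the packing in the lines above (each character contributes a 5-bit value; for lowercase ASCII, `& 0x1f` equals the "ASCII minus 0x60" described in the existing comment), "eng" yields exactly the 0x15C7 that comment mentions, with the top bit left as the pad bit. A minimal standalone sketch, with a hypothetical helper name:

    #include <cstdint>

    // 'e' & 0x1f = 5, 'n' & 0x1f = 14, 'g' & 0x1f = 7
    // (5 << 10) | (14 << 5) | 7 = 0x15C7
    int16_t packLanguage(const char lang[3]) {
        return static_cast<int16_t>(
                ((lang[0] & 0x1f) << 10) | ((lang[1] & 0x1f) << 5) | (lang[2] & 0x1f));
    }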
@@ -3413,10 +3445,6 @@
 }
 
 void MPEG4Writer::Track::writeCttsBox() {
-    if (mIsAudio) {  // ctts is not for audio
-        return;
-    }
-
     // There is no B frame at all
     if (mMinCttsOffsetTimeUs == mMaxCttsOffsetTimeUs) {
         return;
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index b088775..9eca982 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -19,6 +19,8 @@
 #include <inttypes.h>
 
 #include "include/avc_utils.h"
+#include "include/SecureBuffer.h"
+#include "include/SharedMemoryBuffer.h"
 #include "include/SoftwareRenderer.h"
 
 #include <binder/IMemory.h>
@@ -30,10 +32,12 @@
 #include <media/ICrypto.h>
 #include <media/IOMX.h>
 #include <media/IResourceManagerService.h>
+#include <media/MediaCodecBuffer.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/AUtils.h>
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/ACodec.h>
 #include <media/stagefright/BufferProducerWrapper.h>
@@ -63,6 +67,9 @@
 
 static const int kMaxRetry = 2;
 static const int kMaxReclaimWaitTimeInUs = 500000;  // 0.5s
+static const int kNumBuffersAlign = 16;
+
+////////////////////////////////////////////////////////////////////////////////
 
 struct ResourceManagerClient : public BnResourceManagerClient {
     explicit ResourceManagerClient(MediaCodec* codec) : mMediaCodec(codec) {}
@@ -168,10 +175,221 @@
     return mService->reclaimResource(mPid, resources);
 }
 
+////////////////////////////////////////////////////////////////////////////////
+
+MediaCodec::BufferInfo::BufferInfo() : mOwnedByClient(false) {}
+
+////////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
+enum {
+    kWhatFillThisBuffer      = 'fill',
+    kWhatDrainThisBuffer     = 'drai',
+    kWhatEOS                 = 'eos ',
+    kWhatStartCompleted      = 'Scom',
+    kWhatStopCompleted       = 'scom',
+    kWhatReleaseCompleted    = 'rcom',
+    kWhatFlushCompleted      = 'fcom',
+    kWhatError               = 'erro',
+    kWhatComponentAllocated  = 'cAll',
+    kWhatComponentConfigured = 'cCon',
+    kWhatInputSurfaceCreated = 'isfc',
+    kWhatInputSurfaceAccepted = 'isfa',
+    kWhatSignaledInputEOS    = 'seos',
+    kWhatOutputFramesRendered = 'outR',
+    kWhatOutputBuffersChanged = 'outC',
+};
+
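These message IDs are multi-character literals, so names that differ only in case, such as 'Scom' and 'scom', are distinct integers. How the characters pack into the int is implementation-defined; the sketch below assumes the GCC/Clang behavior (first character in the most significant byte), which is the convention this file already relies on for readable FourCC-style IDs.

    // Assumption: GCC/Clang packing of multi-character literals.
    static_assert('fill' == (('f' << 24) | ('i' << 16) | ('l' << 8) | 'l'),
                  "unexpected multi-character literal packing");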
+class BufferCallback : public CodecBase::BufferCallback {
+public:
+    explicit BufferCallback(const sp<AMessage> &notify);
+    virtual ~BufferCallback() = default;
+
+    virtual void onInputBufferAvailable(
+            size_t index, const sp<MediaCodecBuffer> &buffer) override;
+    virtual void onOutputBufferAvailable(
+            size_t index, const sp<MediaCodecBuffer> &buffer) override;
+private:
+    const sp<AMessage> mNotify;
+};
+
+BufferCallback::BufferCallback(const sp<AMessage> &notify)
+    : mNotify(notify) {}
+
+void BufferCallback::onInputBufferAvailable(
+        size_t index, const sp<MediaCodecBuffer> &buffer) {
+    sp<AMessage> notify(mNotify->dup());
+    notify->setInt32("what", kWhatFillThisBuffer);
+    notify->setSize("index", index);
+    notify->setObject("buffer", buffer);
+    notify->post();
+}
+
+void BufferCallback::onOutputBufferAvailable(
+        size_t index, const sp<MediaCodecBuffer> &buffer) {
+    sp<AMessage> notify(mNotify->dup());
+    notify->setInt32("what", kWhatDrainThisBuffer);
+    notify->setSize("index", index);
+    notify->setObject("buffer", buffer);
+    notify->post();
+}
+
+class CodecCallback : public CodecBase::CodecCallback {
+public:
+    explicit CodecCallback(const sp<AMessage> &notify);
+    virtual ~CodecCallback() = default;
+
+    virtual void onEos(status_t err) override;
+    virtual void onStartCompleted() override;
+    virtual void onStopCompleted() override;
+    virtual void onReleaseCompleted() override;
+    virtual void onFlushCompleted() override;
+    virtual void onError(status_t err, enum ActionCode actionCode) override;
+    virtual void onComponentAllocated(const char *componentName) override;
+    virtual void onComponentConfigured(
+            const sp<AMessage> &inputFormat, const sp<AMessage> &outputFormat) override;
+    virtual void onInputSurfaceCreated(
+            const sp<AMessage> &inputFormat,
+            const sp<AMessage> &outputFormat,
+            const sp<BufferProducerWrapper> &inputSurface) override;
+    virtual void onInputSurfaceCreationFailed(status_t err) override;
+    virtual void onInputSurfaceAccepted(
+            const sp<AMessage> &inputFormat,
+            const sp<AMessage> &outputFormat) override;
+    virtual void onInputSurfaceDeclined(status_t err) override;
+    virtual void onSignaledInputEOS(status_t err) override;
+    virtual void onOutputFramesRendered(const std::list<FrameRenderTracker::Info> &done) override;
+    virtual void onOutputBuffersChanged() override;
+private:
+    const sp<AMessage> mNotify;
+};
+
+CodecCallback::CodecCallback(const sp<AMessage> &notify) : mNotify(notify) {}
+
+void CodecCallback::onEos(status_t err) {
+    sp<AMessage> notify(mNotify->dup());
+    notify->setInt32("what", kWhatEOS);
+    notify->setInt32("err", err);
+    notify->post();
+}
+
+void CodecCallback::onStartCompleted() {
+    sp<AMessage> notify(mNotify->dup());
+    notify->setInt32("what", kWhatStartCompleted);
+    notify->post();
+}
+
+void CodecCallback::onStopCompleted() {
+    sp<AMessage> notify(mNotify->dup());
+    notify->setInt32("what", kWhatStopCompleted);
+    notify->post();
+}
+
+void CodecCallback::onReleaseCompleted() {
+    sp<AMessage> notify(mNotify->dup());
+    notify->setInt32("what", kWhatReleaseCompleted);
+    notify->post();
+}
+
+void CodecCallback::onFlushCompleted() {
+    sp<AMessage> notify(mNotify->dup());
+    notify->setInt32("what", kWhatFlushCompleted);
+    notify->post();
+}
+
+void CodecCallback::onError(status_t err, enum ActionCode actionCode) {
+    sp<AMessage> notify(mNotify->dup());
+    notify->setInt32("what", kWhatError);
+    notify->setInt32("err", err);
+    notify->setInt32("actionCode", actionCode);
+    notify->post();
+}
+
+void CodecCallback::onComponentAllocated(const char *componentName) {
+    sp<AMessage> notify(mNotify->dup());
+    notify->setInt32("what", kWhatComponentAllocated);
+    notify->setString("componentName", componentName);
+    notify->post();
+}
+
+void CodecCallback::onComponentConfigured(
+        const sp<AMessage> &inputFormat, const sp<AMessage> &outputFormat) {
+    sp<AMessage> notify(mNotify->dup());
+    notify->setInt32("what", kWhatComponentConfigured);
+    notify->setMessage("input-format", inputFormat);
+    notify->setMessage("output-format", outputFormat);
+    notify->post();
+}
+
+void CodecCallback::onInputSurfaceCreated(
+        const sp<AMessage> &inputFormat,
+        const sp<AMessage> &outputFormat,
+        const sp<BufferProducerWrapper> &inputSurface) {
+    sp<AMessage> notify(mNotify->dup());
+    notify->setInt32("what", kWhatInputSurfaceCreated);
+    notify->setMessage("input-format", inputFormat);
+    notify->setMessage("output-format", outputFormat);
+    notify->setObject("input-surface", inputSurface);
+    notify->post();
+}
+
+void CodecCallback::onInputSurfaceCreationFailed(status_t err) {
+    sp<AMessage> notify(mNotify->dup());
+    notify->setInt32("what", kWhatInputSurfaceCreated);
+    notify->setInt32("err", err);
+    notify->post();
+}
+
+void CodecCallback::onInputSurfaceAccepted(
+        const sp<AMessage> &inputFormat,
+        const sp<AMessage> &outputFormat) {
+    sp<AMessage> notify(mNotify->dup());
+    notify->setInt32("what", kWhatInputSurfaceAccepted);
+    notify->setMessage("input-format", inputFormat);
+    notify->setMessage("output-format", outputFormat);
+    notify->post();
+}
+
+void CodecCallback::onInputSurfaceDeclined(status_t err) {
+    sp<AMessage> notify(mNotify->dup());
+    notify->setInt32("what", kWhatInputSurfaceAccepted);
+    notify->setInt32("err", err);
+    notify->post();
+}
+
+void CodecCallback::onSignaledInputEOS(status_t err) {
+    sp<AMessage> notify(mNotify->dup());
+    notify->setInt32("what", kWhatSignaledInputEOS);
+    if (err != OK) {
+        notify->setInt32("err", err);
+    }
+    notify->post();
+}
+
+void CodecCallback::onOutputFramesRendered(const std::list<FrameRenderTracker::Info> &done) {
+    sp<AMessage> notify(mNotify->dup());
+    notify->setInt32("what", kWhatOutputFramesRendered);
+    if (MediaCodec::CreateFramesRenderedMessage(done, notify)) {
+        notify->post();
+    }
+}
+
+void CodecCallback::onOutputBuffersChanged() {
+    sp<AMessage> notify(mNotify->dup());
+    notify->setInt32("what", kWhatOutputBuffersChanged);
+    notify->post();
+}
+
+}  // namespace
+
+////////////////////////////////////////////////////////////////////////////////
+
 // static
 sp<MediaCodec> MediaCodec::CreateByType(
-        const sp<ALooper> &looper, const AString &mime, bool encoder, status_t *err, pid_t pid) {
-    sp<MediaCodec> codec = new MediaCodec(looper, pid);
+        const sp<ALooper> &looper, const AString &mime, bool encoder, status_t *err, pid_t pid,
+        uid_t uid) {
+    sp<MediaCodec> codec = new MediaCodec(looper, pid, uid);
 
     const status_t ret = codec->init(mime, true /* nameIsType */, encoder);
     if (err != NULL) {
@@ -182,8 +400,8 @@
 
 // static
 sp<MediaCodec> MediaCodec::CreateByComponentName(
-        const sp<ALooper> &looper, const AString &name, status_t *err, pid_t pid) {
-    sp<MediaCodec> codec = new MediaCodec(looper, pid);
+        const sp<ALooper> &looper, const AString &name, status_t *err, pid_t pid, uid_t uid) {
+    sp<MediaCodec> codec = new MediaCodec(looper, pid, uid);
 
     const status_t ret = codec->init(name, false /* nameIsType */, false /* encoder */);
     if (err != NULL) {
@@ -211,53 +429,27 @@
 // static
 sp<PersistentSurface> MediaCodec::CreatePersistentInputSurface() {
     OMXClient client;
-    CHECK_EQ(client.connect(), (status_t)OK);
+    if (client.connect() != OK) {
+        ALOGE("Failed to connect to OMX to create persistent input surface.");
+        return NULL;
+    }
+
     sp<IOMX> omx = client.interface();
 
-    const sp<IMediaCodecList> mediaCodecList = MediaCodecList::getInstance();
-    if (mediaCodecList == NULL) {
-        ALOGE("Failed to obtain MediaCodecList!");
-        return NULL; // if called from Java should raise IOException
-    }
-
-    AString tmp;
-    sp<AMessage> globalSettings = mediaCodecList->getGlobalSettings();
-    if (globalSettings == NULL || !globalSettings->findString(
-            kMaxEncoderInputBuffers, &tmp)) {
-        ALOGE("Failed to get encoder input buffer count!");
-        return NULL;
-    }
-
-    int32_t bufferCount = strtol(tmp.c_str(), NULL, 10);
-    if (bufferCount <= 0
-            || bufferCount > BufferQueue::MAX_MAX_ACQUIRED_BUFFERS) {
-        ALOGE("Encoder input buffer count is invalid!");
-        return NULL;
-    }
-
     sp<IGraphicBufferProducer> bufferProducer;
-    sp<IGraphicBufferConsumer> bufferConsumer;
+    sp<IGraphicBufferSource> bufferSource;
 
-    status_t err = omx->createPersistentInputSurface(
-            &bufferProducer, &bufferConsumer);
+    status_t err = omx->createInputSurface(&bufferProducer, &bufferSource);
 
     if (err != OK) {
         ALOGE("Failed to create persistent input surface.");
         return NULL;
     }
 
-    err = bufferConsumer->setMaxAcquiredBufferCount(bufferCount);
-
-    if (err != NO_ERROR) {
-        ALOGE("Unable to set BQ max acquired buffer count to %u: %d",
-                bufferCount, err);
-        return NULL;
-    }
-
-    return new PersistentSurface(bufferProducer, bufferConsumer);
+    return new PersistentSurface(bufferProducer, bufferSource);
 }
 
-MediaCodec::MediaCodec(const sp<ALooper> &looper, pid_t pid)
+MediaCodec::MediaCodec(const sp<ALooper> &looper, pid_t pid, uid_t uid)
     : mState(UNINITIALIZED),
       mReleasedByResourceManager(false),
       mLooper(looper),
@@ -279,6 +471,11 @@
       mDequeueOutputReplyID(0),
       mHaveInputSurface(false),
       mHavePendingInputBuffers(false) {
+    if (uid == kNoUid) {
+        mUid = IPCThreadState::self()->getCallingUid();
+    } else {
+        mUid = uid;
+    }
 }
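A hypothetical caller-side sketch for the new uid parameter: with kNoUid (the default), MediaCodec falls back to the binder calling UID, which is what the noteStartVideo()/noteStopVideo() calls later in this file are attributed to; a process creating a codec on behalf of another app can pass that app's uid explicitly. looper, pid, and appUid are assumed to exist in the caller.

    status_t err = OK;
    sp<MediaCodec> codec = MediaCodec::CreateByType(
            looper, "video/avc", true /* encoder */, &err, pid, appUid);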
 
 MediaCodec::~MediaCodec() {
@@ -387,7 +584,13 @@
 
     mLooper->registerHandler(this);
 
-    mCodec->setNotificationMessage(new AMessage(kWhatCodecNotify, this));
+    mCodec->setCallback(
+            std::unique_ptr<CodecBase::CodecCallback>(
+                    new CodecCallback(new AMessage(kWhatCodecNotify, this))));
+    mBufferChannel = mCodec->getBufferChannel();
+    mBufferChannel->setCallback(
+            std::unique_ptr<CodecBase::BufferCallback>(
+                    new BufferCallback(new AMessage(kWhatCodecNotify, this))));
 
     sp<AMessage> msg = new AMessage(kWhatInit, this);
     msg->setString("name", name);
@@ -614,14 +817,9 @@
 }
 
 bool MediaCodec::hasPendingBuffer(int portIndex) {
-    const Vector<BufferInfo> &buffers = mPortBuffers[portIndex];
-    for (size_t i = 0; i < buffers.size(); ++i) {
-        const BufferInfo &info = buffers.itemAt(i);
-        if (info.mOwnedByClient) {
-            return true;
-        }
-    }
-    return false;
+    return std::any_of(
+            mPortBuffers[portIndex].begin(), mPortBuffers[portIndex].end(),
+            [](const BufferInfo &info) { return info.mOwnedByClient; });
 }
 
 bool MediaCodec::hasPendingBuffer() {
@@ -860,17 +1058,7 @@
     return OK;
 }
 
-status_t MediaCodec::getWidevineLegacyBuffers(Vector<sp<ABuffer> > *buffers) const {
-    sp<AMessage> msg = new AMessage(kWhatGetBuffers, this);
-    msg->setInt32("portIndex", kPortIndexInput);
-    msg->setPointer("buffers", buffers);
-    msg->setInt32("widevine", true);
-
-    sp<AMessage> response;
-    return PostAndAwaitResponse(msg, &response);
-}
-
-status_t MediaCodec::getInputBuffers(Vector<sp<ABuffer> > *buffers) const {
+status_t MediaCodec::getInputBuffers(Vector<sp<MediaCodecBuffer> > *buffers) const {
     sp<AMessage> msg = new AMessage(kWhatGetBuffers, this);
     msg->setInt32("portIndex", kPortIndexInput);
     msg->setPointer("buffers", buffers);
@@ -879,7 +1067,7 @@
     return PostAndAwaitResponse(msg, &response);
 }
 
-status_t MediaCodec::getOutputBuffers(Vector<sp<ABuffer> > *buffers) const {
+status_t MediaCodec::getOutputBuffers(Vector<sp<MediaCodecBuffer> > *buffers) const {
     sp<AMessage> msg = new AMessage(kWhatGetBuffers, this);
     msg->setInt32("portIndex", kPortIndexOutput);
     msg->setPointer("buffers", buffers);
@@ -888,17 +1076,17 @@
     return PostAndAwaitResponse(msg, &response);
 }
 
-status_t MediaCodec::getOutputBuffer(size_t index, sp<ABuffer> *buffer) {
+status_t MediaCodec::getOutputBuffer(size_t index, sp<MediaCodecBuffer> *buffer) {
     sp<AMessage> format;
     return getBufferAndFormat(kPortIndexOutput, index, buffer, &format);
 }
 
 status_t MediaCodec::getOutputFormat(size_t index, sp<AMessage> *format) {
-    sp<ABuffer> buffer;
+    sp<MediaCodecBuffer> buffer;
     return getBufferAndFormat(kPortIndexOutput, index, &buffer, format);
 }
 
-status_t MediaCodec::getInputBuffer(size_t index, sp<ABuffer> *buffer) {
+status_t MediaCodec::getInputBuffer(size_t index, sp<MediaCodecBuffer> *buffer) {
     sp<AMessage> format;
     return getBufferAndFormat(kPortIndexInput, index, buffer, &format);
 }
@@ -909,7 +1097,7 @@
 
 status_t MediaCodec::getBufferAndFormat(
         size_t portIndex, size_t index,
-        sp<ABuffer> *buffer, sp<AMessage> *format) {
+        sp<MediaCodecBuffer> *buffer, sp<AMessage> *format) {
     // use mutex instead of a context switch
     if (mReleasedByResourceManager) {
         ALOGE("getBufferAndFormat - resource already released");
@@ -917,7 +1105,7 @@
     }
 
     if (buffer == NULL) {
-        ALOGE("getBufferAndFormat - null ABuffer");
+        ALOGE("getBufferAndFormat - null MediaCodecBuffer");
         return INVALID_OPERATION;
     }
 
@@ -938,26 +1126,22 @@
     // we also don't want mOwnedByClient to change during this
     Mutex::Autolock al(mBufferLock);
 
-    Vector<BufferInfo> *buffers = &mPortBuffers[portIndex];
-    if (index >= buffers->size()) {
+    std::vector<BufferInfo> &buffers = mPortBuffers[portIndex];
+    if (index >= buffers.size()) {
         ALOGE("getBufferAndFormat - trying to get buffer with "
-              "bad index (index=%zu buffer_size=%zu)", index, buffers->size());
+              "bad index (index=%zu buffer_size=%zu)", index, buffers.size());
         return INVALID_OPERATION;
     }
 
-    const BufferInfo &info = buffers->itemAt(index);
+    const BufferInfo &info = buffers[index];
     if (!info.mOwnedByClient) {
         ALOGE("getBufferAndFormat - invalid operation "
               "(the index %zu is not owned by client)", index);
         return INVALID_OPERATION;
     }
 
-    // by the time buffers array is initialized, crypto is set
-    *buffer = (portIndex == kPortIndexInput && mCrypto != NULL) ?
-                  info.mEncryptedData :
-                  info.mData;
-
-    *format = info.mFormat;
+    *buffer = info.mData;
+    *format = info.mData->format();
 
     return OK;
 }
@@ -1046,8 +1230,8 @@
             return false;
         }
 
-        const sp<ABuffer> &buffer =
-            mPortBuffers[kPortIndexOutput].itemAt(index).mData;
+        const sp<MediaCodecBuffer> &buffer =
+            mPortBuffers[kPortIndexOutput][index].mData;
 
         response->setSize("index", index);
         response->setSize("offset", buffer->offset());
@@ -1058,19 +1242,8 @@
 
         response->setInt64("timeUs", timeUs);
 
-        int32_t omxFlags;
-        CHECK(buffer->meta()->findInt32("omxFlags", &omxFlags));
-
-        uint32_t flags = 0;
-        if (omxFlags & OMX_BUFFERFLAG_SYNCFRAME) {
-            flags |= BUFFER_FLAG_SYNCFRAME;
-        }
-        if (omxFlags & OMX_BUFFERFLAG_CODECCONFIG) {
-            flags |= BUFFER_FLAG_CODECCONFIG;
-        }
-        if (omxFlags & OMX_BUFFERFLAG_EOS) {
-            flags |= BUFFER_FLAG_EOS;
-        }
+        int32_t flags;
+        CHECK(buffer->meta()->findInt32("flags", &flags));
 
         response->setInt32("flags", flags);
         response->postReply(replyID);
@@ -1087,7 +1260,7 @@
             CHECK(msg->findInt32("what", &what));
 
             switch (what) {
-                case CodecBase::kWhatError:
+                case kWhatError:
                 {
                     int32_t err, actionCode;
                     CHECK(msg->findInt32("err", &err));
@@ -1123,14 +1296,16 @@
                             break;
                         }
 
-                        case STOPPING:
                         case RELEASING:
                         {
                             // Ignore the error, assuming we'll still get
-                            // the shutdown complete notification.
-
+                            // the shutdown complete notification. If we
+                            // don't, we'll timeout and force release.
                             sendErrorResponse = false;
-
+                        }
+                        // fall-thru
+                        case STOPPING:
+                        {
                             if (mFlags & kFlagSawMediaServerDie) {
                                 // MediaServer died, there definitely won't
                                 // be a shutdown complete notification after
@@ -1144,6 +1319,7 @@
                                     mComponentName.clear();
                                 }
                                 (new AMessage)->postReply(mReplyID);
+                                sendErrorResponse = false;
                             }
                             break;
                         }
@@ -1219,7 +1395,7 @@
                     break;
                 }
 
-                case CodecBase::kWhatComponentAllocated:
+                case kWhatComponentAllocated:
                 {
                     CHECK_EQ(mState, INITIALIZING);
                     setState(INITIALIZED);
@@ -1251,7 +1427,7 @@
                     break;
                 }
 
-                case CodecBase::kWhatComponentConfigured:
+                case kWhatComponentConfigured:
                 {
                     if (mState == UNINITIALIZED || mState == INITIALIZED) {
                         // In case a kWhatError message came in and replied with error,
@@ -1280,7 +1456,7 @@
                     break;
                 }
 
-                case CodecBase::kWhatInputSurfaceCreated:
+                case kWhatInputSurfaceCreated:
                 {
                     // response to initiateCreateInputSurface()
                     status_t err = NO_ERROR;
@@ -1304,12 +1480,14 @@
                     break;
                 }
 
-                case CodecBase::kWhatInputSurfaceAccepted:
+                case kWhatInputSurfaceAccepted:
                 {
                     // response to initiateSetInputSurface()
                     status_t err = NO_ERROR;
                     sp<AMessage> response = new AMessage();
                     if (!msg->findInt32("err", &err)) {
+                        CHECK(msg->findMessage("input-format", &mInputFormat));
+                        CHECK(msg->findMessage("output-format", &mOutputFormat));
                         mHaveInputSurface = true;
                     } else {
                         response->setInt32("err", err);
@@ -1318,7 +1496,7 @@
                     break;
                 }
 
-                case CodecBase::kWhatSignaledInputEOS:
+                case kWhatSignaledInputEOS:
                 {
                     // response to signalEndOfInputStream()
                     sp<AMessage> response = new AMessage;
@@ -1330,138 +1508,28 @@
                     break;
                 }
 
-
-                case CodecBase::kWhatBuffersAllocated:
+                case kWhatStartCompleted:
                 {
-                    Mutex::Autolock al(mBufferLock);
-                    int32_t portIndex;
-                    CHECK(msg->findInt32("portIndex", &portIndex));
-
-                    ALOGV("%s buffers allocated",
-                          portIndex == kPortIndexInput ? "input" : "output");
-
-                    CHECK(portIndex == kPortIndexInput
-                            || portIndex == kPortIndexOutput);
-
-                    mPortBuffers[portIndex].clear();
-
-                    Vector<BufferInfo> *buffers = &mPortBuffers[portIndex];
-
-                    sp<RefBase> obj;
-                    CHECK(msg->findObject("portDesc", &obj));
-
-                    sp<CodecBase::PortDescription> portDesc =
-                        static_cast<CodecBase::PortDescription *>(obj.get());
-
-                    size_t numBuffers = portDesc->countBuffers();
-
-                    size_t totalSize = 0;
-                    for (size_t i = 0; i < numBuffers; ++i) {
-                        if (portIndex == kPortIndexInput && mCrypto != NULL) {
-                            totalSize += portDesc->bufferAt(i)->capacity();
-                        }
+                    CHECK_EQ(mState, STARTING);
+                    if (mIsVideo) {
+                        addResource(
+                                MediaResource::kGraphicMemory,
+                                MediaResource::kUnspecifiedSubType,
+                                getGraphicBufferSize());
                     }
-
-                    if (totalSize) {
-                        mDealer = new MemoryDealer(totalSize, "MediaCodec");
-                    }
-
-                    for (size_t i = 0; i < numBuffers; ++i) {
-                        BufferInfo info;
-                        info.mBufferID = portDesc->bufferIDAt(i);
-                        info.mOwnedByClient = false;
-                        info.mData = portDesc->bufferAt(i);
-                        info.mNativeHandle = portDesc->handleAt(i);
-                        info.mMemRef = portDesc->memRefAt(i);
-
-                        if (portIndex == kPortIndexInput && mCrypto != NULL) {
-                            sp<IMemory> mem = mDealer->allocate(info.mData->capacity());
-                            info.mEncryptedData =
-                                new ABuffer(mem->pointer(), info.mData->capacity());
-                            info.mSharedEncryptedBuffer = mem;
-                        }
-
-                        buffers->push_back(info);
-                    }
-
-                    if (portIndex == kPortIndexOutput) {
-                        if (mState == STARTING) {
-                            // We're always allocating output buffers after
-                            // allocating input buffers, so this is a good
-                            // indication that now all buffers are allocated.
-                            if (mIsVideo) {
-                                addResource(
-                                        MediaResource::kGraphicMemory,
-                                        MediaResource::kUnspecifiedSubType,
-                                        getGraphicBufferSize());
-                            }
-                            setState(STARTED);
-                            (new AMessage)->postReply(mReplyID);
-                        } else {
-                            mFlags |= kFlagOutputBuffersChanged;
-                            postActivityNotificationIfPossible();
-                        }
-                    }
+                    setState(STARTED);
+                    (new AMessage)->postReply(mReplyID);
                     break;
                 }
 
-                case CodecBase::kWhatOutputFormatChanged:
+                case kWhatOutputBuffersChanged:
                 {
-                    CHECK(msg->findMessage("format", &mOutputFormat));
-
-                    ALOGV("[%s] output format changed to: %s",
-                            mComponentName.c_str(), mOutputFormat->debugString(4).c_str());
-
-                    if (mSoftRenderer == NULL &&
-                            mSurface != NULL &&
-                            (mFlags & kFlagUsesSoftwareRenderer)) {
-                        AString mime;
-                        CHECK(mOutputFormat->findString("mime", &mime));
-
-                        // TODO: propagate color aspects to software renderer to allow better
-                        // color conversion to RGB. For now, just mark dataspace for YUV
-                        // rendering.
-                        int32_t dataSpace;
-                        if (mOutputFormat->findInt32("android._dataspace", &dataSpace)) {
-                            ALOGD("[%s] setting dataspace on output surface to #%x",
-                                    mComponentName.c_str(), dataSpace);
-                            int err = native_window_set_buffers_data_space(
-                                    mSurface.get(), (android_dataspace)dataSpace);
-                            ALOGW_IF(err != 0, "failed to set dataspace on surface (%d)", err);
-                        }
-
-                        if (mime.startsWithIgnoreCase("video/")) {
-                            mSoftRenderer = new SoftwareRenderer(mSurface, mRotationDegrees);
-                        }
-                    }
-
-                    if (mFlags & kFlagIsEncoder) {
-                        // Before we announce the format change we should
-                        // collect codec specific data and amend the output
-                        // format as necessary.
-                        mFlags |= kFlagGatherCodecSpecificData;
-                    } else if (mFlags & kFlagIsAsync) {
-                        onOutputFormatChanged();
-                    } else {
-                        mFlags |= kFlagOutputFormatChanged;
-                        postActivityNotificationIfPossible();
-                    }
-
-                    // Notify mCrypto of video resolution changes
-                    if (mCrypto != NULL) {
-                        int32_t left, top, right, bottom, width, height;
-                        if (mOutputFormat->findRect("crop", &left, &top, &right, &bottom)) {
-                            mCrypto->notifyResolution(right - left + 1, bottom - top + 1);
-                        } else if (mOutputFormat->findInt32("width", &width)
-                                && mOutputFormat->findInt32("height", &height)) {
-                            mCrypto->notifyResolution(width, height);
-                        }
-                    }
-
+                    mFlags |= kFlagOutputBuffersChanged;
+                    postActivityNotificationIfPossible();
                     break;
                 }
 
-                case CodecBase::kWhatOutputFramesRendered:
+                case kWhatOutputFramesRendered:
                 {
                     // ignore these in all states except running, and check that we have a
                     // notification set
@@ -1473,7 +1541,7 @@
                     break;
                 }
 
-                case CodecBase::kWhatFillThisBuffer:
+                case kWhatFillThisBuffer:
                 {
                     /* size_t index = */updateBuffers(kPortIndexInput, msg);
 
@@ -1528,7 +1596,7 @@
                     break;
                 }
 
-                case CodecBase::kWhatDrainThisBuffer:
+                case kWhatDrainThisBuffer:
                 {
                     /* size_t index = */updateBuffers(kPortIndexOutput, msg);
 
@@ -1539,34 +1607,71 @@
                         break;
                     }
 
-                    sp<ABuffer> buffer;
-                    CHECK(msg->findBuffer("buffer", &buffer));
+                    sp<RefBase> obj;
+                    CHECK(msg->findObject("buffer", &obj));
+                    sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
 
-                    int32_t omxFlags;
-                    CHECK(msg->findInt32("flags", &omxFlags));
+                    if (mOutputFormat != buffer->format()) {
+                        mOutputFormat = buffer->format();
+                        ALOGV("[%s] output format changed to: %s",
+                                mComponentName.c_str(), mOutputFormat->debugString(4).c_str());
 
-                    buffer->meta()->setInt32("omxFlags", omxFlags);
+                        if (mSoftRenderer == NULL &&
+                                mSurface != NULL &&
+                                (mFlags & kFlagUsesSoftwareRenderer)) {
+                            AString mime;
+                            CHECK(mOutputFormat->findString("mime", &mime));
 
-                    if (mFlags & kFlagGatherCodecSpecificData) {
-                        // This is the very first output buffer after a
-                        // format change was signalled, it'll either contain
-                        // the one piece of codec specific data we can expect
-                        // or there won't be codec specific data.
-                        if (omxFlags & OMX_BUFFERFLAG_CODECCONFIG) {
-                            status_t err =
-                                amendOutputFormatWithCodecSpecificData(buffer);
+                            // TODO: propagate color aspects to software renderer to allow better
+                            // color conversion to RGB. For now, just mark dataspace for YUV
+                            // rendering.
+                            int32_t dataSpace;
+                            if (mOutputFormat->findInt32("android._dataspace", &dataSpace)) {
+                                ALOGD("[%s] setting dataspace on output surface to #%x",
+                                        mComponentName.c_str(), dataSpace);
+                                int err = native_window_set_buffers_data_space(
+                                        mSurface.get(), (android_dataspace)dataSpace);
+                                ALOGW_IF(err != 0, "failed to set dataspace on surface (%d)", err);
+                            }
 
-                            if (err != OK) {
-                                ALOGE("Codec spit out malformed codec "
-                                      "specific data!");
+                            if (mime.startsWithIgnoreCase("video/")) {
+                                mSoftRenderer = new SoftwareRenderer(mSurface, mRotationDegrees);
                             }
                         }
 
-                        mFlags &= ~kFlagGatherCodecSpecificData;
+                        if (mFlags & kFlagIsEncoder) {
+                            // Before we announce the format change we should
+                            // collect codec specific data and amend the output
+                            // format as necessary.
+                            int32_t flags = 0;
+                            (void) buffer->meta()->findInt32("flags", &flags);
+                            if (flags & BUFFER_FLAG_CODECCONFIG) {
+                                status_t err =
+                                    amendOutputFormatWithCodecSpecificData(buffer);
+
+                                if (err != OK) {
+                                    ALOGE("Codec spit out malformed codec "
+                                          "specific data!");
+                                }
+                            }
+                        }
+
                         if (mFlags & kFlagIsAsync) {
                             onOutputFormatChanged();
                         } else {
                             mFlags |= kFlagOutputFormatChanged;
+                            postActivityNotificationIfPossible();
+                        }
+
+                        // Notify mCrypto of video resolution changes
+                        if (mCrypto != NULL) {
+                            int32_t left, top, right, bottom, width, height;
+                            if (mOutputFormat->findRect("crop", &left, &top, &right, &bottom)) {
+                                mCrypto->notifyResolution(right - left + 1, bottom - top + 1);
+                            } else if (mOutputFormat->findInt32("width", &width)
+                                    && mOutputFormat->findInt32("height", &height)) {
+                                mCrypto->notifyResolution(width, height);
+                            }
                         }
                     }
 
@@ -1585,22 +1690,33 @@
                     break;
                 }
 
-                case CodecBase::kWhatEOS:
+                case kWhatEOS:
                 {
                     // We already notify the client of this by using the
                     // corresponding flag in "onOutputBufferReady".
                     break;
                 }
 
-                case CodecBase::kWhatShutdownCompleted:
+                case kWhatStopCompleted:
                 {
-                    if (mState == STOPPING) {
-                        setState(INITIALIZED);
-                    } else {
-                        CHECK_EQ(mState, RELEASING);
-                        setState(UNINITIALIZED);
-                        mComponentName.clear();
+                    if (mState != STOPPING) {
+                        ALOGW("Received kWhatStopCompleted in state %d", mState);
+                        break;
                     }
+                    setState(INITIALIZED);
+                    (new AMessage)->postReply(mReplyID);
+                    break;
+                }
+
+                case kWhatReleaseCompleted:
+                {
+                    if (mState != RELEASING) {
+                        ALOGW("Received kWhatReleaseCompleted in state %d", mState);
+                        break;
+                    }
+                    setState(UNINITIALIZED);
+                    mComponentName.clear();
+
                     mFlags &= ~kFlagIsComponentAllocated;
 
                     mResourceManagerService->removeResource(getId(mResourceManagerClient));
@@ -1609,7 +1725,7 @@
                     break;
                 }
 
-                case CodecBase::kWhatFlushCompleted:
+                case kWhatFlushCompleted:
                 {
                     if (mState != FLUSHING) {
                         ALOGW("received FlushCompleted message in state %d",
@@ -1752,6 +1868,7 @@
             }
 
             mCrypto = static_cast<ICrypto *>(crypto);
+            mBufferChannel->setCrypto(mCrypto);
 
             uint32_t flags;
             CHECK(msg->findInt32("flags", (int32_t *)&flags));
@@ -1915,7 +2032,9 @@
                 }
             }
 
-            if (!((mFlags & kFlagIsComponentAllocated) && targetState == UNINITIALIZED) // See 1
+            bool isReleasingAllocatedComponent =
+                    (mFlags & kFlagIsComponentAllocated) && targetState == UNINITIALIZED;
+            if (!isReleasingAllocatedComponent // See 1
                     && mState != INITIALIZED
                     && mState != CONFIGURED && !isExecuting()) {
                 // 1) Permit release to shut down the component if allocated.
@@ -1939,6 +2058,14 @@
                 break;
             }
 
+            // If we're flushing, or we're stopping but received a release
+            // request, post the reply for the pending call first, and consider
+            // it done. The reply token will be replaced after this, and we'll
+            // no longer be able to reply.
+            if (mState == FLUSHING || mState == STOPPING) {
+                (new AMessage)->postReply(mReplyID);
+            }
+
             if (mFlags & kFlagSawMediaServerDie) {
                 // It's dead, Jim. Don't expect initiateShutdown to yield
                 // any useful results now...
@@ -1950,6 +2077,15 @@
                 break;
             }
 
+            // If we already have an error, the component may not be able to
+            // complete the shutdown properly. If we're stopping, post the
+            // reply now with an error to unblock the client; the client can
+            // then release after the failure (instead of hitting an ANR).
+            if (msg->what() == kWhatStop && (mFlags & kFlagStickyError)) {
+                PostReplyWithError(replyID, getStickyError());
+                break;
+            }
+
             mReplyID = replyID;
             setState(msg->what() == kWhatStop ? STOPPING : RELEASING);
 
@@ -1961,6 +2097,7 @@
             if (mSoftRenderer != NULL && (mFlags & kFlagPushBlankBuffersOnShutdown)) {
                 pushBlankBuffersToNativeWindow(mSurface.get());
             }
+
             break;
         }
 
@@ -2123,7 +2260,7 @@
             sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
-            if (!isExecuting()) {
+            if (!isExecuting() || !mHaveInputSurface) {
                 PostReplyWithError(replyID, INVALID_OPERATION);
                 break;
             } else if (mFlags & kFlagStickyError) {
@@ -2140,12 +2277,7 @@
         {
             sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
-            // Unfortunately widevine legacy source requires knowing all of the
-            // codec input buffers, so we have to provide them even in async mode.
-            int32_t widevine = 0;
-            msg->findInt32("widevine", &widevine);
-
-            if (!isExecuting() || ((mFlags & kFlagIsAsync) && !widevine)) {
+            if (!isExecuting() || (mFlags & kFlagIsAsync)) {
                 PostReplyWithError(replyID, INVALID_OPERATION);
                 break;
             } else if (mFlags & kFlagStickyError) {
@@ -2156,7 +2288,7 @@
             int32_t portIndex;
             CHECK(msg->findInt32("portIndex", &portIndex));
 
-            Vector<sp<ABuffer> > *dstBuffers;
+            Vector<sp<MediaCodecBuffer> > *dstBuffers;
             CHECK(msg->findPointer("buffers", (void **)&dstBuffers));
 
             dstBuffers->clear();
@@ -2164,14 +2296,10 @@
             // createInputSurface(), or persistent set by setInputSurface()),
             // give the client an empty input buffers array.
             if (portIndex != kPortIndexInput || !mHaveInputSurface) {
-                const Vector<BufferInfo> &srcBuffers = mPortBuffers[portIndex];
-
-                for (size_t i = 0; i < srcBuffers.size(); ++i) {
-                    const BufferInfo &info = srcBuffers.itemAt(i);
-
-                    dstBuffers->push_back(
-                            (portIndex == kPortIndexInput && mCrypto != NULL)
-                                    ? info.mEncryptedData : info.mData);
+                if (portIndex == kPortIndexInput) {
+                    mBufferChannel->getInputBufferArray(dstBuffers);
+                } else {
+                    mBufferChannel->getOutputBufferArray(dstBuffers);
                 }
             }
 
@@ -2300,14 +2428,12 @@
 status_t MediaCodec::queueCSDInputBuffer(size_t bufferIndex) {
     CHECK(!mCSD.empty());
 
-    const BufferInfo *info =
-        &mPortBuffers[kPortIndexInput].itemAt(bufferIndex);
+    const BufferInfo &info = mPortBuffers[kPortIndexInput][bufferIndex];
 
     sp<ABuffer> csd = *mCSD.begin();
     mCSD.erase(mCSD.begin());
 
-    const sp<ABuffer> &codecInputData =
-        (mCrypto != NULL) ? info->mEncryptedData : info->mData;
+    const sp<MediaCodecBuffer> &codecInputData = info.mData;
 
     if (csd->size() > codecInputData->capacity()) {
         return -EINVAL;
@@ -2342,7 +2468,6 @@
         mFlags &= ~kFlagOutputBuffersChanged;
         mFlags &= ~kFlagStickyError;
         mFlags &= ~kFlagIsEncoder;
-        mFlags &= ~kFlagGatherCodecSpecificData;
         mFlags &= ~kFlagIsAsync;
         mStickyError = OK;
 
@@ -2376,27 +2501,19 @@
     CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
     Mutex::Autolock al(mBufferLock);
 
-    Vector<BufferInfo> *buffers = &mPortBuffers[portIndex];
+    for (size_t i = 0; i < mPortBuffers[portIndex].size(); ++i) {
+        BufferInfo *info = &mPortBuffers[portIndex][i];
 
-    for (size_t i = 0; i < buffers->size(); ++i) {
-        BufferInfo *info = &buffers->editItemAt(i);
-
-        if (info->mNotify != NULL) {
-            sp<AMessage> msg = info->mNotify;
-            info->mNotify = NULL;
+        if (info->mData != nullptr) {
+            sp<MediaCodecBuffer> buffer = info->mData;
             if (isReclaim && info->mOwnedByClient) {
                 ALOGD("port %d buffer %zu still owned by client when codec is reclaimed",
                         portIndex, i);
             } else {
-                info->mMemRef = NULL;
                 info->mOwnedByClient = false;
+                info->mData.clear();
             }
-
-            if (portIndex == kPortIndexInput) {
-                /* no error, just returning buffers */
-                msg->setInt32("err", OK);
-            }
-            msg->post();
+            mBufferChannel->discardBuffer(buffer);
         }
     }
 
@@ -2406,30 +2523,22 @@
 size_t MediaCodec::updateBuffers(
         int32_t portIndex, const sp<AMessage> &msg) {
     CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
+    size_t index;
+    CHECK(msg->findSize("index", &index));
+    sp<RefBase> obj;
+    CHECK(msg->findObject("buffer", &obj));
+    sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
 
-    uint32_t bufferID;
-    CHECK(msg->findInt32("buffer-id", (int32_t*)&bufferID));
-
-    Vector<BufferInfo> *buffers = &mPortBuffers[portIndex];
-
-    for (size_t i = 0; i < buffers->size(); ++i) {
-        BufferInfo *info = &buffers->editItemAt(i);
-
-        if (info->mBufferID == bufferID) {
-            CHECK(info->mNotify == NULL);
-            CHECK(msg->findMessage("reply", &info->mNotify));
-
-            info->mFormat =
-                (portIndex == kPortIndexInput) ? mInputFormat : mOutputFormat;
-            mAvailPortBuffers[portIndex].push_back(i);
-
-            return i;
+    {
+        Mutex::Autolock al(mBufferLock);
+        if (mPortBuffers[portIndex].size() <= index) {
+            mPortBuffers[portIndex].resize(align(index + 1, kNumBuffersAlign));
         }
+        mPortBuffers[portIndex][index].mData = buffer;
     }
+    mAvailPortBuffers[portIndex].push_back(index);
 
-    TRESPASS();
-
-    return 0;
+    return index;
 }
 
 status_t MediaCodec::onQueueInputBuffer(const sp<AMessage> &msg) {
@@ -2494,9 +2603,9 @@
         return -ERANGE;
     }
 
-    BufferInfo *info = &mPortBuffers[kPortIndexInput].editItemAt(index);
+    BufferInfo *info = &mPortBuffers[kPortIndexInput][index];
 
-    if (info->mNotify == NULL || !info->mOwnedByClient) {
+    if (info->mData == nullptr || !info->mOwnedByClient) {
         return -EACCES;
     }
 
@@ -2504,10 +2613,8 @@
         return -EINVAL;
     }
 
-    sp<AMessage> reply = info->mNotify;
     info->mData->setRange(offset, size);
     info->mData->meta()->setInt64("timeUs", timeUs);
-
     if (flags & BUFFER_FLAG_EOS) {
         info->mData->meta()->setInt32("eos", true);
     }
@@ -2516,55 +2623,34 @@
         info->mData->meta()->setInt32("csd", true);
     }
 
+    sp<MediaCodecBuffer> buffer = info->mData;
+    status_t err = OK;
     if (mCrypto != NULL) {
-        if (size > info->mEncryptedData->capacity()) {
-            return -ERANGE;
-        }
-
         AString *errorDetailMsg;
         CHECK(msg->findPointer("errorDetailMsg", (void **)&errorDetailMsg));
 
-        void *dst_pointer = info->mData->base();
-        ICrypto::DestinationType dst_type = ICrypto::kDestinationTypeOpaqueHandle;
-
-        if (info->mNativeHandle != NULL) {
-            dst_pointer = (void *)info->mNativeHandle->handle();
-            dst_type = ICrypto::kDestinationTypeNativeHandle;
-        } else if ((mFlags & kFlagIsSecure) == 0) {
-            dst_type = ICrypto::kDestinationTypeVmPointer;
-        }
-
-        ssize_t result = mCrypto->decrypt(
-                dst_type,
+        err = mBufferChannel->queueSecureInputBuffer(
+                buffer,
+                (mFlags & kFlagIsSecure),
                 key,
                 iv,
                 mode,
                 pattern,
-                info->mSharedEncryptedBuffer,
-                offset,
                 subSamples,
                 numSubSamples,
-                dst_pointer,
                 errorDetailMsg);
-
-        if (result < 0) {
-            return result;
-        }
-
-        info->mData->setRange(0, result);
+    } else {
+        err = mBufferChannel->queueInputBuffer(buffer);
     }
 
-    // synchronization boundary for getBufferAndFormat
-    {
+    if (err == OK) {
+        // synchronization boundary for getBufferAndFormat
         Mutex::Autolock al(mBufferLock);
         info->mOwnedByClient = false;
+        info->mData.clear();
     }
-    reply->setBuffer("buffer", info->mData);
-    reply->post();
 
-    info->mNotify = NULL;
-
-    return OK;
+    return err;
 }
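The design choice worth noting in this hunk: decryption no longer happens inline in MediaCodec. Both the clear and the secure paths hand the buffer to the buffer channel, which holds the ICrypto installed earlier via setCrypto(). A condensed restatement of the dispatch above, as a sketch only:

    status_t err = (mCrypto != NULL)
            ? mBufferChannel->queueSecureInputBuffer(
                    buffer, (mFlags & kFlagIsSecure), key, iv, mode, pattern,
                    subSamples, numSubSamples, errorDetailMsg)
            : mBufferChannel->queueInputBuffer(buffer);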
 
 //static
@@ -2601,23 +2687,24 @@
         return -ERANGE;
     }
 
-    BufferInfo *info = &mPortBuffers[kPortIndexOutput].editItemAt(index);
+    BufferInfo *info = &mPortBuffers[kPortIndexOutput][index];
 
-    if (info->mNotify == NULL || !info->mOwnedByClient) {
+    if (info->mData == nullptr || !info->mOwnedByClient) {
         return -EACCES;
     }
 
     // synchronization boundary for getBufferAndFormat
+    sp<MediaCodecBuffer> buffer;
     {
         Mutex::Autolock al(mBufferLock);
         info->mOwnedByClient = false;
+        buffer = info->mData;
+        info->mData.clear();
     }
 
-    if (render && info->mData != NULL && info->mData->size() != 0) {
-        info->mNotify->setInt32("render", true);
-
+    if (render && buffer->size() != 0) {
         int64_t mediaTimeUs = -1;
-        info->mData->meta()->findInt64("timeUs", &mediaTimeUs);
+        buffer->meta()->findInt64("timeUs", &mediaTimeUs);
 
         int64_t renderTimeNs = 0;
         if (!msg->findInt64("timestampNs", &renderTimeNs)) {
@@ -2625,12 +2712,11 @@
             ALOGV("using buffer PTS of %lld", (long long)mediaTimeUs);
             renderTimeNs = mediaTimeUs * 1000;
         }
-        info->mNotify->setInt64("timestampNs", renderTimeNs);
 
         if (mSoftRenderer != NULL) {
             std::list<FrameRenderTracker::Info> doneFrames = mSoftRenderer->render(
-                    info->mData->data(), info->mData->size(),
-                    mediaTimeUs, renderTimeNs, NULL, info->mFormat);
+                    buffer->data(), buffer->size(),
+                    mediaTimeUs, renderTimeNs, NULL, buffer->format());
 
             // if we are running, notify rendered frames
             if (!doneFrames.empty() && mState == STARTED && mOnFrameRenderedNotification != NULL) {
@@ -2642,11 +2728,11 @@
                 }
             }
         }
+        mBufferChannel->renderOutputBuffer(buffer, renderTimeNs);
+    } else {
+        mBufferChannel->discardBuffer(buffer);
     }
 
-    info->mNotify->post();
-    info->mNotify = NULL;
-
     return OK;
 }
 
@@ -2662,20 +2748,20 @@
     size_t index = *availBuffers->begin();
     availBuffers->erase(availBuffers->begin());
 
-    BufferInfo *info = &mPortBuffers[portIndex].editItemAt(index);
+    BufferInfo *info = &mPortBuffers[portIndex][index];
     CHECK(!info->mOwnedByClient);
     {
         Mutex::Autolock al(mBufferLock);
         info->mOwnedByClient = true;
 
         // set image-data
-        if (info->mFormat != NULL) {
+        if (info->mData->format() != NULL) {
             sp<ABuffer> imageData;
-            if (info->mFormat->findBuffer("image-data", &imageData)) {
+            if (info->mData->format()->findBuffer("image-data", &imageData)) {
                 info->mData->meta()->setBuffer("image-data", imageData);
             }
             int32_t left, top, right, bottom;
-            if (info->mFormat->findRect("crop", &left, &top, &right, &bottom)) {
+            if (info->mData->format()->findRect("crop", &left, &top, &right, &bottom)) {
                 info->mData->meta()->setRect("crop-rect", left, top, right, bottom);
             }
         }
@@ -2765,8 +2851,8 @@
 void MediaCodec::onOutputBufferAvailable() {
     int32_t index;
     while ((index = dequeuePortBuffer(kPortIndexOutput)) >= 0) {
-        const sp<ABuffer> &buffer =
-            mPortBuffers[kPortIndexOutput].itemAt(index).mData;
+        const sp<MediaCodecBuffer> &buffer =
+            mPortBuffers[kPortIndexOutput][index].mData;
         sp<AMessage> msg = mCallback->dup();
         msg->setInt32("callbackID", CB_OUTPUT_AVAILABLE);
         msg->setInt32("index", index);
@@ -2778,19 +2864,8 @@
 
         msg->setInt64("timeUs", timeUs);
 
-        int32_t omxFlags;
-        CHECK(buffer->meta()->findInt32("omxFlags", &omxFlags));
-
-        uint32_t flags = 0;
-        if (omxFlags & OMX_BUFFERFLAG_SYNCFRAME) {
-            flags |= BUFFER_FLAG_SYNCFRAME;
-        }
-        if (omxFlags & OMX_BUFFERFLAG_CODECCONFIG) {
-            flags |= BUFFER_FLAG_CODECCONFIG;
-        }
-        if (omxFlags & OMX_BUFFERFLAG_EOS) {
-            flags |= BUFFER_FLAG_EOS;
-        }
+        int32_t flags;
+        CHECK(buffer->meta()->findInt32("flags", &flags));
 
         msg->setInt32("flags", flags);
 
@@ -2822,7 +2897,6 @@
     }
 }
 
-
 void MediaCodec::postActivityNotificationIfPossible() {
     if (mActivityNotify == NULL) {
         return;
@@ -2866,7 +2940,7 @@
 }
 
 status_t MediaCodec::amendOutputFormatWithCodecSpecificData(
-        const sp<ABuffer> &buffer) {
+        const sp<MediaCodecBuffer> &buffer) {
     AString mime;
     CHECK(mOutputFormat->findString("mime", &mime));
 
@@ -2900,7 +2974,10 @@
     } else {
         // For everything else we just stash the codec specific data into
         // the output format as a single piece of csd under "csd-0".
-        mOutputFormat->setBuffer("csd-0", buffer);
+        sp<ABuffer> csd = new ABuffer(buffer->size());
+        memcpy(csd->data(), buffer->data(), buffer->size());
+        csd->setRange(0, buffer->size());
+        mOutputFormat->setBuffer("csd-0", csd);
     }
 
     return OK;
@@ -2912,10 +2989,10 @@
     }
 
     if (mState == CONFIGURED && !mBatteryStatNotified) {
-        BatteryNotifier::getInstance().noteStartVideo();
+        BatteryNotifier::getInstance().noteStartVideo(mUid);
         mBatteryStatNotified = true;
     } else if (mState == UNINITIALIZED && mBatteryStatNotified) {
-        BatteryNotifier::getInstance().noteStopVideo();
+        BatteryNotifier::getInstance().noteStopVideo(mUid);
         mBatteryStatNotified = false;
     }
 }
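
Note on the csd-0 hunk above: the codec-specific data is now deep-copied into a freshly allocated ABuffer before being stashed in the output format, so the MediaCodecBuffer can be returned to the codec while the format keeps an independent copy. A minimal standard-library sketch of that pattern (Buffer/Format here are placeholders, not the AOSP classes):

    // Illustrative sketch: deep-copy codec-specific data before caching it.
    #include <cstdint>
    #include <cstring>
    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>
    #include <vector>

    using Buffer = std::vector<uint8_t>;

    struct Format {
        std::map<std::string, std::shared_ptr<Buffer>> buffers;  // e.g. "csd-0"
    };

    // Copy the payload so the caller may recycle its own buffer immediately.
    void stashCsd(Format& format, const uint8_t* data, size_t size) {
        auto csd = std::make_shared<Buffer>(size);
        std::memcpy(csd->data(), data, size);
        format.buffers["csd-0"] = std::move(csd);
    }

    int main() {
        uint8_t sps[] = {0x67, 0x42, 0x00, 0x1f};
        Format fmt;
        stashCsd(fmt, sps, sizeof(sps));
        std::cout << "csd-0 bytes: " << fmt.buffers["csd-0"]->size() << "\n";
    }

The copy decouples the lifetime of the cached csd from the codec's buffer pool.
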
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index 23d49f0..d3b34b7 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -44,8 +44,6 @@
 
 namespace android {
 
-const char *kMaxEncoderInputBuffers = "max-video-encoder-input-buffers";
-
 static Mutex sInitMutex;
 
 static bool parseBoolean(const char *s) {
@@ -196,9 +194,7 @@
     if (mInitCheck != OK) {
         return;  // this may fail if IMediaPlayerService is not available.
     }
-    mOMX = client.interface();
     parseXMLFile(codecs_xml);
-    mOMX.clear();
 
     if (mInitCheck != OK) {
         if (ignore_errors) {
diff --git a/media/libstagefright/MediaCodecListOverrides.cpp b/media/libstagefright/MediaCodecListOverrides.cpp
index 33795f3..095fc6a 100644
--- a/media/libstagefright/MediaCodecListOverrides.cpp
+++ b/media/libstagefright/MediaCodecListOverrides.cpp
@@ -127,61 +127,6 @@
     return format;
 }
 
-static size_t doProfileEncoderInputBuffers(
-        const AString &name, const AString &mime, const sp<MediaCodecInfo::Capabilities> &caps) {
-    ALOGV("doProfileEncoderInputBuffers: name %s, mime %s", name.c_str(), mime.c_str());
-
-    sp<AMessage> format = getMeasureFormat(true /* isEncoder */, mime, caps);
-    if (format == NULL) {
-        return 0;
-    }
-
-    format->setInt32("color-format", OMX_COLOR_FormatAndroidOpaque);
-    ALOGV("doProfileEncoderInputBuffers: format %s", format->debugString().c_str());
-
-    status_t err = OK;
-    sp<ALooper> looper = new ALooper;
-    looper->setName("MediaCodec_looper");
-    looper->start(
-            false /* runOnCallingThread */, false /* canCallJava */, ANDROID_PRIORITY_AUDIO);
-
-    sp<MediaCodec> codec = MediaCodec::CreateByComponentName(looper, name.c_str(), &err);
-    if (err != OK) {
-        ALOGE("Failed to create codec: %s", name.c_str());
-        return 0;
-    }
-
-    err = codec->configure(format, NULL, NULL, MediaCodec::CONFIGURE_FLAG_ENCODE);
-    if (err != OK) {
-        ALOGE("Failed to configure codec: %s with mime: %s", name.c_str(), mime.c_str());
-        codec->release();
-        return 0;
-    }
-
-    sp<IGraphicBufferProducer> bufferProducer;
-    err = codec->createInputSurface(&bufferProducer);
-    if (err != OK) {
-        ALOGE("Failed to create surface: %s with mime: %s", name.c_str(), mime.c_str());
-        codec->release();
-        return 0;
-    }
-
-    int minUndequeued = 0;
-    err = bufferProducer->query(
-            NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeued);
-    if (err != OK) {
-        ALOGE("Failed to query NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS");
-        minUndequeued = 0;
-    }
-
-    err = codec->release();
-    if (err != OK) {
-        ALOGW("Failed to release codec: %s with mime: %s", name.c_str(), mime.c_str());
-    }
-
-    return minUndequeued;
-}
-
 static size_t doProfileCodecs(
         bool isEncoder, const AString &name, const AString &mime, const sp<MediaCodecInfo::Capabilities> &caps) {
     sp<AMessage> format = getMeasureFormat(isEncoder, mime, caps);
@@ -276,7 +221,6 @@
         bool forceToMeasure) {
     KeyedVector<AString, sp<MediaCodecInfo::Capabilities>> codecsNeedMeasure;
     AString supportMultipleSecureCodecs = "true";
-    size_t maxEncoderInputBuffers = 0;
     for (size_t i = 0; i < infos.size(); ++i) {
         const sp<MediaCodecInfo> info = infos[i];
         AString name = info->getCodecName();
@@ -319,21 +263,9 @@
                         supportMultipleSecureCodecs = "false";
                     }
                 }
-                if (info->isEncoder() && mimes[i].startsWith("video/")) {
-                    size_t encoderInputBuffers =
-                        doProfileEncoderInputBuffers(name, mimes[i], caps);
-                    if (encoderInputBuffers > maxEncoderInputBuffers) {
-                        maxEncoderInputBuffers = encoderInputBuffers;
-                    }
-                }
             }
         }
     }
-    if (maxEncoderInputBuffers > 0) {
-        char tmp[32];
-        sprintf(tmp, "%zu", maxEncoderInputBuffers);
-        global_results->add(kMaxEncoderInputBuffers, tmp);
-    }
     global_results->add(kPolicySupportsMultipleSecureCodecs, supportMultipleSecureCodecs);
 }
 
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 35c07ca..5981b35 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -20,10 +20,10 @@
 
 #include <inttypes.h>
 
-#include <gui/IGraphicBufferConsumer.h>
 #include <gui/IGraphicBufferProducer.h>
 #include <gui/Surface.h>
 #include <media/ICrypto.h>
+#include <media/MediaCodecBuffer.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ALooper.h>
@@ -35,7 +35,6 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
-#include <media/stagefright/PersistentSurface.h>
 #include <media/stagefright/Utils.h>
 
 namespace android {
@@ -325,10 +324,10 @@
         const sp<ALooper> &looper,
         const sp<AMessage> &format,
         const sp<MediaSource> &source,
-        const sp<IGraphicBufferConsumer> &consumer,
+        const sp<PersistentSurface> &persistentSurface,
         uint32_t flags) {
-    sp<MediaCodecSource> mediaSource =
-            new MediaCodecSource(looper, format, source, consumer, flags);
+    sp<MediaCodecSource> mediaSource = new MediaCodecSource(
+            looper, format, source, persistentSurface, flags);
 
     if (mediaSource->init() == OK) {
         return mediaSource;
@@ -404,7 +403,7 @@
         const sp<ALooper> &looper,
         const sp<AMessage> &outputFormat,
         const sp<MediaSource> &source,
-        const sp<IGraphicBufferConsumer> &consumer,
+        const sp<PersistentSurface> &persistentSurface,
         uint32_t flags)
     : mLooper(looper),
       mOutputFormat(outputFormat),
@@ -417,7 +416,7 @@
       mSetEncoderFormat(false),
       mEncoderFormat(0),
       mEncoderDataSpace(0),
-      mGraphicBufferConsumer(consumer),
+      mPersistentSurface(persistentSurface),
       mInputBufferTimeOffsetUs(0),
       mFirstSampleSystemTimeUs(-1ll),
       mPausePending(false),
@@ -514,12 +513,11 @@
     if (mFlags & FLAG_USE_SURFACE_INPUT) {
         CHECK(mIsVideo);
 
-        if (mGraphicBufferConsumer != NULL) {
+        if (mPersistentSurface != NULL) {
             // When using persistent surface, we are only interested in the
             // consumer, but have to use PersistentSurface as a wrapper to
             // pass consumer over messages (similar to BufferProducerWrapper)
-            err = mEncoder->setInputSurface(
-                    new PersistentSurface(NULL, mGraphicBufferConsumer));
+            err = mEncoder->setInputSurface(mPersistentSurface);
         } else {
             err = mEncoder->createInputSurface(&mGraphicBufferProducer);
         }
@@ -689,9 +687,11 @@
 #endif // DEBUG_DRIFT_TIME
             }
 
-            sp<ABuffer> inbuf;
+            sp<MediaCodecBuffer> inbuf;
             status_t err = mEncoder->getInputBuffer(bufferIndex, &inbuf);
-            if (err != OK || inbuf == NULL) {
+
+            if (err != OK || inbuf == NULL || inbuf->data() == NULL
+                    || mbuf->data() == NULL || mbuf->size() == 0) {
                 mbuf->release();
                 signalEOS();
                 break;
@@ -851,9 +851,10 @@
                 break;
             }
 
-            sp<ABuffer> outbuf;
+            sp<MediaCodecBuffer> outbuf;
             status_t err = mEncoder->getOutputBuffer(index, &outbuf);
-            if (err != OK || outbuf == NULL) {
+            if (err != OK || outbuf == NULL || outbuf->data() == NULL
+                || outbuf->size() == 0) {
                 signalEOS();
                 break;
             }
@@ -906,6 +907,7 @@
                 }
                 mbuf->meta_data()->setInt64(kKeyTime, timeUs);
             } else {
+                mbuf->meta_data()->setInt64(kKeyTime, 0ll);
                 mbuf->meta_data()->setInt32(kKeyIsCodecConfig, true);
             }
             if (flags & MediaCodec::BUFFER_FLAG_SYNCFRAME) {
@@ -1017,7 +1019,7 @@
         CHECK(msg->findInt64("time-offset-us", &mInputBufferTimeOffsetUs));
 
         // Propagate the timestamp offset to GraphicBufferSource.
-        if (mIsVideo) {
+        if (mFlags & FLAG_USE_SURFACE_INPUT) {
             sp<AMessage> params = new AMessage;
             params->setInt64("time-offset-us", mInputBufferTimeOffsetUs);
             err = mEncoder->setParameters(params);
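
The feed/drain hunks above tighten the buffer checks: a codec buffer is rejected not only when the getter fails but also when it is null, has a null data pointer, or is empty. A small helper expressing that validity check (placeholder Buffer type, not the AOSP MediaCodecBuffer):

    #include <cstdint>
    #include <cstdio>
    #include <memory>
    #include <vector>

    struct Buffer {
        std::vector<uint8_t> storage;
        const uint8_t* data() const { return storage.empty() ? nullptr : storage.data(); }
        size_t size() const { return storage.size(); }
    };

    // Status and buffer must all be usable before the payload is touched.
    bool isUsable(int status, const std::shared_ptr<Buffer>& buf) {
        return status == 0 && buf != nullptr && buf->data() != nullptr && buf->size() > 0;
    }

    int main() {
        auto empty = std::make_shared<Buffer>();
        auto full = std::make_shared<Buffer>();
        full->storage = {1, 2, 3};
        std::printf("%d %d %d\n", isUsable(0, nullptr), isUsable(0, empty), isUsable(0, full));
        // prints: 0 0 1
    }
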
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index 92ce88c..df4d9bf 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -27,8 +27,6 @@
 #include "include/OggExtractor.h"
 #include "include/MPEG2PSExtractor.h"
 #include "include/MPEG2TSExtractor.h"
-#include "include/DRMExtractor.h"
-#include "include/WVMExtractor.h"
 #include "include/FLACExtractor.h"
 #include "include/AACExtractor.h"
 #include "include/MidiExtractor.h"
@@ -52,8 +50,7 @@
 
 namespace android {
 
-MediaExtractor::MediaExtractor():
-    mIsDrm(false) {
+MediaExtractor::MediaExtractor() {
     if (!LOG_NDEBUG) {
         uid_t uid = getuid();
         struct passwd *pw = getpwuid(uid);
@@ -94,7 +91,7 @@
     sp<IMemory> mMemory;
     sp<DataSource> mSource;
     String8 mName;
-    RemoteDataSource(const sp<DataSource> &source);
+    explicit RemoteDataSource(const sp<DataSource> &source);
     DISALLOW_EVIL_CONSTRUCTORS(RemoteDataSource);
 };
 
@@ -144,34 +141,11 @@
         const sp<DataSource> &source, const char *mime) {
     ALOGV("MediaExtractor::Create %s", mime);
 
-    char value[PROPERTY_VALUE_MAX];
-    if (property_get("media.stagefright.extractremote", value, NULL)
-            && (!strcmp("0", value) || !strcasecmp("false", value))) {
+    if (!property_get_bool("media.stagefright.extractremote", true)) {
         // local extractor
         ALOGW("creating media extractor in calling process");
         return CreateFromService(source, mime);
     } else {
-        // Check if it's WVM, since WVMExtractor needs to be created in the media server process,
-        // not the extractor process.
-        String8 mime8;
-        float confidence;
-        sp<AMessage> meta;
-        if (SniffWVM(source, &mime8, &confidence, &meta) &&
-                !strcasecmp(mime8, MEDIA_MIMETYPE_CONTAINER_WVM)) {
-            return new WVMExtractor(source);
-        }
-
-        // Check if it's es-based DRM, since DRMExtractor needs to be created in the media server
-        // process, not the extractor process.
-        if (SniffDRM(source, &mime8, &confidence, &meta)) {
-            const char *drmMime = mime8.string();
-            ALOGV("Detected media content as '%s' with confidence %.2f", drmMime, confidence);
-            if (!strncmp(drmMime, "drm+es_based+", 13)) {
-                // DRMExtractor sets container metadata kKeyIsDRM to 1
-                return new DRMExtractor(source, drmMime + 14);
-            }
-        }
-
         // remote extractor
         ALOGV("get service manager");
         sp<IBinder> binder = defaultServiceManager()->getService(String16("media.extractor"));
@@ -192,14 +166,17 @@
         const sp<DataSource> &source, const char *mime) {
 
     ALOGV("MediaExtractor::CreateFromService %s", mime);
-    DataSource::RegisterDefaultSniffers();
+    RegisterDefaultSniffers();
+
+    // initialize source decryption if needed
+    source->DrmInitialization();
 
     sp<AMessage> meta;
 
     String8 tmp;
     if (mime == NULL) {
         float confidence;
-        if (!source->sniff(&tmp, &confidence, &meta)) {
+        if (!sniff(source, &tmp, &confidence, &meta)) {
             ALOGV("FAILED to autodetect media content.");
 
             return NULL;
@@ -210,28 +187,6 @@
              mime, confidence);
     }
 
-    bool isDrm = false;
-    // DRM MIME type syntax is "drm+type+original" where
-    // type is "es_based" or "container_based" and
-    // original is the content's cleartext MIME type
-    if (!strncmp(mime, "drm+", 4)) {
-        const char *originalMime = strchr(mime+4, '+');
-        if (originalMime == NULL) {
-            // second + not found
-            return NULL;
-        }
-        ++originalMime;
-        if (!strncmp(mime, "drm+es_based+", 13)) {
-            // DRMExtractor sets container metadata kKeyIsDRM to 1
-            return new DRMExtractor(source, originalMime);
-        } else if (!strncmp(mime, "drm+container_based+", 20)) {
-            mime = originalMime;
-            isDrm = true;
-        } else {
-            return NULL;
-        }
-    }
-
     MediaExtractor *ret = NULL;
     if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG4)
             || !strcasecmp(mime, "audio/mp4")) {
@@ -251,9 +206,6 @@
         ret = new MatroskaExtractor(source);
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2TS)) {
         ret = new MPEG2TSExtractor(source);
-    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_WVM) && getuid() == AID_MEDIA) {
-        // Return now.  WVExtractor should not have the DrmFlag set in the block below.
-        return new WVMExtractor(source);
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC_ADTS)) {
         ret = new AACExtractor(source, meta);
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2PS)) {
@@ -262,15 +214,77 @@
         ret = new MidiExtractor(source);
     }
 
-    if (ret != NULL) {
-       if (isDrm) {
-           ret->setDrmFlag(true);
-       } else {
-           ret->setDrmFlag(false);
-       }
-    }
-
     return ret;
 }
 
+Mutex MediaExtractor::gSnifferMutex;
+List<MediaExtractor::SnifferFunc> MediaExtractor::gSniffers;
+bool MediaExtractor::gSniffersRegistered = false;
+
+// static
+bool MediaExtractor::sniff(
+        const sp<DataSource> &source, String8 *mimeType, float *confidence, sp<AMessage> *meta) {
+    *mimeType = "";
+    *confidence = 0.0f;
+    meta->clear();
+
+    {
+        Mutex::Autolock autoLock(gSnifferMutex);
+        if (!gSniffersRegistered) {
+            return false;
+        }
+    }
+
+    for (List<SnifferFunc>::iterator it = gSniffers.begin();
+         it != gSniffers.end(); ++it) {
+        String8 newMimeType;
+        float newConfidence;
+        sp<AMessage> newMeta;
+        if ((*it)(source, &newMimeType, &newConfidence, &newMeta)) {
+            if (newConfidence > *confidence) {
+                *mimeType = newMimeType;
+                *confidence = newConfidence;
+                *meta = newMeta;
+            }
+        }
+    }
+
+    return *confidence > 0.0;
+}
+
+// static
+void MediaExtractor::RegisterSniffer_l(SnifferFunc func) {
+    for (List<SnifferFunc>::iterator it = gSniffers.begin();
+         it != gSniffers.end(); ++it) {
+        if (*it == func) {
+            return;
+        }
+    }
+
+    gSniffers.push_back(func);
+}
+
+// static
+void MediaExtractor::RegisterDefaultSniffers() {
+    Mutex::Autolock autoLock(gSnifferMutex);
+    if (gSniffersRegistered) {
+        return;
+    }
+
+    RegisterSniffer_l(SniffMPEG4);
+    RegisterSniffer_l(SniffMatroska);
+    RegisterSniffer_l(SniffOgg);
+    RegisterSniffer_l(SniffWAV);
+    RegisterSniffer_l(SniffFLAC);
+    RegisterSniffer_l(SniffAMR);
+    RegisterSniffer_l(SniffMPEG2TS);
+    RegisterSniffer_l(SniffMP3);
+    RegisterSniffer_l(SniffAAC);
+    RegisterSniffer_l(SniffMPEG2PS);
+    RegisterSniffer_l(SniffMidi);
+
+    gSniffersRegistered = true;
+}
+
+
 }  // namespace android
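
The new MediaExtractor::sniff / RegisterDefaultSniffers code above moves the sniffer registry out of DataSource: registration is mutex-guarded and de-duplicated, and sniff() keeps the highest-confidence match. A self-contained sketch of the same pattern (std::function registry instead of the AOSP SnifferFunc type; locking simplified to hold the mutex during iteration):

    #include <functional>
    #include <iostream>
    #include <mutex>
    #include <string>
    #include <vector>

    // A sniffer inspects a source and reports a mime type plus a confidence score.
    using Sniffer = std::function<bool(const std::string& src, std::string* mime, float* conf)>;

    static std::mutex gSnifferMutex;
    static std::vector<Sniffer> gSniffers;

    void registerSniffer(Sniffer f) {
        std::lock_guard<std::mutex> lock(gSnifferMutex);
        gSniffers.push_back(std::move(f));   // the real code also skips duplicates here
    }

    // Returns true if any sniffer recognized the source; keeps the best match.
    bool sniff(const std::string& src, std::string* mime, float* confidence) {
        *mime = "";
        *confidence = 0.0f;
        std::lock_guard<std::mutex> lock(gSnifferMutex);
        for (const auto& s : gSniffers) {
            std::string m;
            float c = 0.0f;
            if (s(src, &m, &c) && c > *confidence) {
                *mime = m;
                *confidence = c;
            }
        }
        return *confidence > 0.0f;
    }

    int main() {
        registerSniffer([](const std::string& src, std::string* m, float* c) {
            if (src.rfind("ID3", 0) == 0) { *m = "audio/mpeg"; *c = 0.3f; return true; }
            return false;
        });
        std::string mime; float conf;
        if (sniff("ID3...", &mime, &conf)) std::cout << mime << " " << conf << "\n";
    }
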
diff --git a/media/libstagefright/MediaSync.cpp b/media/libstagefright/MediaSync.cpp
index 6f2d868..0cf6fbf 100644
--- a/media/libstagefright/MediaSync.cpp
+++ b/media/libstagefright/MediaSync.cpp
@@ -478,59 +478,43 @@
     CHECK(mAudioTrack != NULL);
 
     uint32_t numFramesPlayed;
-    int64_t numFramesPlayedAt;
+    int64_t numFramesPlayedAtUs;
     AudioTimestamp ts;
-    static const int64_t kStaleTimestamp100ms = 100000;
 
     status_t res = mAudioTrack->getTimestamp(ts);
     if (res == OK) {
         // case 1: mixing audio tracks.
         numFramesPlayed = ts.mPosition;
-        numFramesPlayedAt =
-            ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
-        const int64_t timestampAge = nowUs - numFramesPlayedAt;
-        if (timestampAge > kStaleTimestamp100ms) {
-            // This is an audio FIXME.
-            // getTimestamp returns a timestamp which may come from audio
-            // mixing threads. After pausing, the MixerThread may go idle,
-            // thus the mTime estimate may become stale. Assuming that the
-            // MixerThread runs 20ms, with FastMixer at 5ms, the max latency
-            // should be about 25ms with an average around 12ms (to be
-            // verified). For safety we use 100ms.
-            ALOGV("getTimestamp: returned stale timestamp nowUs(%lld) "
-                  "numFramesPlayedAt(%lld)",
-                  (long long)nowUs, (long long)numFramesPlayedAt);
-            numFramesPlayedAt = nowUs - kStaleTimestamp100ms;
-        }
+        numFramesPlayedAtUs = ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
         //ALOGD("getTimestamp: OK %d %lld",
-        //      numFramesPlayed, (long long)numFramesPlayedAt);
+        //      numFramesPlayed, (long long)numFramesPlayedAtUs);
     } else if (res == WOULD_BLOCK) {
         // case 2: transitory state on start of a new track
         numFramesPlayed = 0;
-        numFramesPlayedAt = nowUs;
+        numFramesPlayedAtUs = nowUs;
         //ALOGD("getTimestamp: WOULD_BLOCK %d %lld",
-        //      numFramesPlayed, (long long)numFramesPlayedAt);
+        //      numFramesPlayed, (long long)numFramesPlayedAtUs);
     } else {
         // case 3: transitory at new track or audio fast tracks.
         res = mAudioTrack->getPosition(&numFramesPlayed);
         CHECK_EQ(res, (status_t)OK);
-        numFramesPlayedAt = nowUs;
-        numFramesPlayedAt += 1000LL * mAudioTrack->latency() / 2; /* XXX */
-        //ALOGD("getPosition: %d %lld", numFramesPlayed, (long long)numFramesPlayedAt);
+        numFramesPlayedAtUs = nowUs;
+        numFramesPlayedAtUs += 1000LL * mAudioTrack->latency() / 2; /* XXX */
+        //ALOGD("getPosition: %d %lld", numFramesPlayed, (long long)numFramesPlayedAtUs);
     }
 
     //can't be negative until 12.4 hrs, test.
     //CHECK_EQ(numFramesPlayed & (1 << 31), 0);
     int64_t durationUs =
         getDurationIfPlayedAtNativeSampleRate_l(numFramesPlayed)
-            + nowUs - numFramesPlayedAt;
+            + nowUs - numFramesPlayedAtUs;
     if (durationUs < 0) {
         // Occurs when numFramesPlayed position is very small and the following:
         // (1) In case 1, the time nowUs is computed before getTimestamp() is
-        //     called and numFramesPlayedAt is greater than nowUs by time more
+        //     called and numFramesPlayedAtUs is greater than nowUs by time more
         //     than numFramesPlayed.
         // (2) In case 3, using getPosition and adding mAudioTrack->latency()
-        //     to numFramesPlayedAt, by a time amount greater than
+        //     to numFramesPlayedAtUs, by a time amount greater than
         //     numFramesPlayed.
         //
         // Both of these are transitory conditions.
@@ -541,7 +525,7 @@
     ALOGV("getPlayedOutAudioDurationMedia_l(%lld) nowUs(%lld) frames(%u) "
           "framesAt(%lld)",
           (long long)durationUs, (long long)nowUs, numFramesPlayed,
-          (long long)numFramesPlayedAt);
+          (long long)numFramesPlayedAtUs);
     return durationUs;
 }
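
The rewritten getPlayedOutAudioDurationMedia_l above drops the stale-timestamp clamp and computes the played-out duration as frames-converted-to-time plus the age of the timestamp, clamping transient negative results. A minimal sketch of that arithmetic (plain integers instead of AudioTrack/AudioTimestamp):

    #include <cstdint>
    #include <iostream>

    // Convert frames to microseconds at the track's native sample rate.
    int64_t framesToUs(uint32_t frames, uint32_t sampleRate) {
        return static_cast<int64_t>(frames) * 1000000LL / sampleRate;
    }

    // Duration of audio actually played out by 'nowUs', given that
    // 'framesPlayed' frames had been rendered at time 'framesPlayedAtUs'.
    int64_t playedOutDurationUs(int64_t nowUs, uint32_t framesPlayed,
                                int64_t framesPlayedAtUs, uint32_t sampleRate) {
        int64_t durationUs = framesToUs(framesPlayed, sampleRate)
                + nowUs - framesPlayedAtUs;
        if (durationUs < 0) {
            // Transitory condition around start/position queries; clamp to zero.
            durationUs = 0;
        }
        return durationUs;
    }

    int main() {
        // 48000 frames played, reported 10 ms ago, at 48 kHz -> ~1.01 s played out.
        std::cout << playedOutDurationUs(1010000, 48000, 1000000, 48000) << "\n";
    }
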
 
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index 276d731..d25ce6c 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -22,7 +22,6 @@
 
 #include "include/ESDS.h"
 #include "include/NuCachedSource2.h"
-#include "include/WVMExtractor.h"
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -40,8 +39,7 @@
 namespace android {
 
 NuMediaExtractor::NuMediaExtractor()
-    : mIsWidevineExtractor(false),
-      mTotalBitrate(-1ll),
+    : mTotalBitrate(-1ll),
       mDurationUs(-1ll) {
 }
 
@@ -51,7 +49,8 @@
     for (size_t i = 0; i < mSelectedTracks.size(); ++i) {
         TrackInfo *info = &mSelectedTracks.editItemAt(i);
 
-        CHECK_EQ((status_t)OK, info->mSource->stop());
+        status_t err = info->mSource->stop();
+        ALOGE_IF(err != OK, "error %d stopping track %zu", err, i);
     }
 
     mSelectedTracks.clear();
@@ -66,7 +65,7 @@
         const KeyedVector<String8, String8> *headers) {
     Mutex::Autolock autoLock(mLock);
 
-    if (mImpl != NULL) {
+    if (mImpl != NULL || path == NULL) {
         return -EINVAL;
     }
 
@@ -77,53 +76,12 @@
         return -ENOENT;
     }
 
-    mIsWidevineExtractor = false;
-    if (!strncasecmp("widevine://", path, 11)) {
-        String8 mimeType;
-        float confidence;
-        sp<AMessage> dummy;
-        bool success = SniffWVM(dataSource, &mimeType, &confidence, &dummy);
-
-        if (!success
-                || strcasecmp(
-                    mimeType.string(), MEDIA_MIMETYPE_CONTAINER_WVM)) {
-            return ERROR_UNSUPPORTED;
-        }
-
-        sp<WVMExtractor> extractor = new WVMExtractor(dataSource);
-        extractor->setAdaptiveStreamingMode(true);
-
-        mImpl = extractor;
-        mIsWidevineExtractor = true;
-    } else {
-        mImpl = MediaExtractor::Create(dataSource);
-    }
+    mImpl = MediaExtractor::Create(dataSource);
 
     if (mImpl == NULL) {
         return ERROR_UNSUPPORTED;
     }
 
-    sp<MetaData> fileMeta = mImpl->getMetaData();
-    const char *containerMime;
-    if (fileMeta != NULL
-            && fileMeta->findCString(kKeyMIMEType, &containerMime)
-            && !strcasecmp(containerMime, "video/wvm")) {
-        // We always want to use "cryptoPluginMode" when using the wvm
-        // extractor. We can tell that it is this extractor by looking
-        // at the container mime type.
-        // The cryptoPluginMode ensures that the extractor will actually
-        // give us data in a call to MediaSource::read(), unlike its
-        // default mode that we used in AwesomePlayer.
-        // TODO: change default mode
-        static_cast<WVMExtractor *>(mImpl.get())->setCryptoPluginMode(true);
-    } else if (mImpl->getDrmFlag()) {
-        // For all other drm content, we don't want to expose decrypted
-        // content to Java application.
-        mImpl.clear();
-        mImpl = NULL;
-        return ERROR_UNSUPPORTED;
-    }
-
     status_t err = updateDurationAndBitrate();
     if (err == OK) {
         mDataSource = dataSource;
@@ -632,15 +590,7 @@
     Mutex::Autolock autoLock(mLock);
 
     int64_t bitrate;
-    if (mIsWidevineExtractor) {
-        sp<WVMExtractor> wvmExtractor =
-            static_cast<WVMExtractor *>(mImpl.get());
-
-        status_t finalStatus;
-        *durationUs = wvmExtractor->getCachedDurationUs(&finalStatus);
-        *eos = (finalStatus != OK);
-        return true;
-    } else if ((mDataSource->flags() & DataSource::kIsCachingDataSource)
+    if ((mDataSource->flags() & DataSource::kIsCachingDataSource)
             && getTotalBitrate(&bitrate)) {
         sp<NuCachedSource2> cachedSource =
             static_cast<NuCachedSource2 *>(mDataSource.get());
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index e40dbcf..a29aff0 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -24,506 +24,18 @@
 #include <utils/Log.h>
 
 #include <binder/IServiceManager.h>
-#include <media/IMediaPlayerService.h>
 #include <media/IMediaCodecService.h>
-#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/OMXClient.h>
-#include <cutils/properties.h>
-#include <utils/KeyedVector.h>
 
 #include "include/OMX.h"
 
 namespace android {
 
-static bool sCodecProcessEnabled = true;
-
-struct MuxOMX : public IOMX {
-    MuxOMX(const sp<IOMX> &mediaServerOMX, const sp<IOMX> &mediaCodecOMX);
-    virtual ~MuxOMX();
-
-    // Nobody should be calling this. In case someone does anyway, just
-    // return the media server IOMX.
-    // TODO: return NULL
-    virtual IBinder *onAsBinder() {
-        ALOGE("MuxOMX::onAsBinder should not be called");
-        return IInterface::asBinder(mMediaServerOMX).get();
-    }
-
-    virtual bool livesLocally(node_id node, pid_t pid);
-
-    virtual status_t listNodes(List<ComponentInfo> *list);
-
-    virtual status_t allocateNode(
-            const char *name, const sp<IOMXObserver> &observer,
-            sp<IBinder> *nodeBinder,
-            node_id *node);
-
-    virtual status_t freeNode(node_id node);
-
-    virtual status_t sendCommand(
-            node_id node, OMX_COMMANDTYPE cmd, OMX_S32 param);
-
-    virtual status_t getParameter(
-            node_id node, OMX_INDEXTYPE index,
-            void *params, size_t size);
-
-    virtual status_t setParameter(
-            node_id node, OMX_INDEXTYPE index,
-            const void *params, size_t size);
-
-    virtual status_t getConfig(
-            node_id node, OMX_INDEXTYPE index,
-            void *params, size_t size);
-
-    virtual status_t setConfig(
-            node_id node, OMX_INDEXTYPE index,
-            const void *params, size_t size);
-
-    virtual status_t getState(
-            node_id node, OMX_STATETYPE* state);
-
-    virtual status_t storeMetaDataInBuffers(
-            node_id node, OMX_U32 port_index, OMX_BOOL enable, MetadataBufferType *type);
-
-    virtual status_t prepareForAdaptivePlayback(
-            node_id node, OMX_U32 port_index, OMX_BOOL enable,
-            OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight);
-
-    virtual status_t configureVideoTunnelMode(
-            node_id node, OMX_U32 portIndex, OMX_BOOL tunneled,
-            OMX_U32 audioHwSync, native_handle_t **sidebandHandle);
-
-    virtual status_t enableNativeBuffers(
-            node_id node, OMX_U32 port_index, OMX_BOOL graphic, OMX_BOOL enable);
-
-    virtual status_t getGraphicBufferUsage(
-            node_id node, OMX_U32 port_index, OMX_U32* usage);
-
-    virtual status_t useBuffer(
-            node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-            buffer_id *buffer, OMX_U32 allottedSize);
-
-    virtual status_t useGraphicBuffer(
-            node_id node, OMX_U32 port_index,
-            const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer);
-
-    virtual status_t updateGraphicBufferInMeta(
-            node_id node, OMX_U32 port_index,
-            const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer);
-
-    virtual status_t updateNativeHandleInMeta(
-            node_id node, OMX_U32 port_index,
-            const sp<NativeHandle> &nativeHandle, buffer_id buffer);
-
-    virtual status_t createInputSurface(
-            node_id node, OMX_U32 port_index, android_dataspace dataSpace,
-            sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type);
-
-    virtual status_t createPersistentInputSurface(
-            sp<IGraphicBufferProducer> *bufferProducer,
-            sp<IGraphicBufferConsumer> *bufferConsumer);
-
-    virtual status_t setInputSurface(
-            node_id node, OMX_U32 port_index,
-            const sp<IGraphicBufferConsumer> &bufferConsumer, MetadataBufferType *type);
-
-    virtual status_t signalEndOfInputStream(node_id node);
-
-    virtual status_t allocateSecureBuffer(
-            node_id node, OMX_U32 port_index, size_t size,
-            buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle);
-
-    virtual status_t allocateBufferWithBackup(
-            node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-            buffer_id *buffer, OMX_U32 allottedSize);
-
-    virtual status_t freeBuffer(
-            node_id node, OMX_U32 port_index, buffer_id buffer);
-
-    virtual status_t fillBuffer(node_id node, buffer_id buffer, int fenceFd);
-
-    virtual status_t emptyBuffer(
-            node_id node,
-            buffer_id buffer,
-            OMX_U32 range_offset, OMX_U32 range_length,
-            OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
-
-    virtual status_t getExtensionIndex(
-            node_id node,
-            const char *parameter_name,
-            OMX_INDEXTYPE *index);
-
-    virtual status_t setInternalOption(
-            node_id node,
-            OMX_U32 port_index,
-            InternalOptionType type,
-            const void *data,
-            size_t size);
-
-private:
-    mutable Mutex mLock;
-
-    sp<IOMX> mMediaServerOMX;
-    sp<IOMX> mMediaCodecOMX;
-    sp<IOMX> mLocalOMX;
-
-    typedef enum {
-        LOCAL,
-        MEDIAPROCESS,
-        CODECPROCESS
-    } node_location;
-
-    KeyedVector<node_id, node_location> mNodeLocation;
-
-    bool isLocalNode(node_id node) const;
-    bool isLocalNode_l(node_id node) const;
-    const sp<IOMX> &getOMX(node_id node) const;
-    const sp<IOMX> &getOMX_l(node_id node) const;
-
-    static node_location getPreferredCodecLocation(const char *name);
-
-    DISALLOW_EVIL_CONSTRUCTORS(MuxOMX);
-};
-
-MuxOMX::MuxOMX(const sp<IOMX> &mediaServerOMX, const sp<IOMX> &mediaCodecOMX)
-    : mMediaServerOMX(mediaServerOMX),
-      mMediaCodecOMX(mediaCodecOMX) {
-    ALOGI("MuxOMX ctor");
-}
-
-MuxOMX::~MuxOMX() {
-}
-
-bool MuxOMX::isLocalNode(node_id node) const {
-    Mutex::Autolock autoLock(mLock);
-
-    return isLocalNode_l(node);
-}
-
-bool MuxOMX::isLocalNode_l(node_id node) const {
-    return mNodeLocation.valueFor(node) == LOCAL;
-}
-
-// static
-MuxOMX::node_location MuxOMX::getPreferredCodecLocation(const char *name) {
-    if (sCodecProcessEnabled) {
-        // all codecs go to codec process unless excluded using system property, in which case
-        // all non-secure decoders, OMX.google.* codecs and encoders can go in the codec process
-        // (non-OMX.google.* encoders can be excluded using system property.)
-        if ((strcasestr(name, "decoder")
-                        && strcasestr(name, ".secure") != name + strlen(name) - 7)
-                || (strcasestr(name, "encoder")
-                        && !property_get_bool("media.stagefright.legacyencoder", false))
-                || !property_get_bool("media.stagefright.less-secure", false)
-                || !strncasecmp(name, "OMX.google.", 11)) {
-            return CODECPROCESS;
-        }
-        // everything else runs in the media server
-        return MEDIAPROCESS;
-    } else {
-#ifdef __LP64__
-        // 64 bit processes always run OMX remote on MediaServer
-        return MEDIAPROCESS;
-#else
-        // 32 bit processes run only OMX.google.* components locally
-        if (!strncasecmp(name, "OMX.google.", 11)) {
-            return LOCAL;
-        }
-        return MEDIAPROCESS;
-#endif
-    }
-}
-
-const sp<IOMX> &MuxOMX::getOMX(node_id node) const {
-    Mutex::Autolock autoLock(mLock);
-    return getOMX_l(node);
-}
-
-const sp<IOMX> &MuxOMX::getOMX_l(node_id node) const {
-    node_location loc = mNodeLocation.valueFor(node);
-    if (loc == LOCAL) {
-        return mLocalOMX;
-    } else if (loc == MEDIAPROCESS) {
-        return mMediaServerOMX;
-    } else if (loc == CODECPROCESS) {
-        return mMediaCodecOMX;
-    }
-    ALOGE("Couldn't determine node location for node %d: %d, using local", node, loc);
-    return mLocalOMX;
-}
-
-bool MuxOMX::livesLocally(node_id node, pid_t pid) {
-    return getOMX(node)->livesLocally(node, pid);
-}
-
-status_t MuxOMX::listNodes(List<ComponentInfo> *list) {
-    Mutex::Autolock autoLock(mLock);
-
-    if (mLocalOMX == NULL) {
-        mLocalOMX = new OMX;
-    }
-
-    return mLocalOMX->listNodes(list);
-}
-
-status_t MuxOMX::allocateNode(
-        const char *name, const sp<IOMXObserver> &observer,
-        sp<IBinder> *nodeBinder,
-        node_id *node) {
-    Mutex::Autolock autoLock(mLock);
-
-    sp<IOMX> omx;
-
-    node_location loc = getPreferredCodecLocation(name);
-    if (loc == CODECPROCESS) {
-        omx = mMediaCodecOMX;
-    } else if (loc == MEDIAPROCESS) {
-        omx = mMediaServerOMX;
-    } else {
-        if (mLocalOMX == NULL) {
-            mLocalOMX = new OMX;
-        }
-        omx = mLocalOMX;
-    }
-
-    status_t err = omx->allocateNode(name, observer, nodeBinder, node);
-    ALOGV("allocated node_id %x on %s OMX", *node, omx == mMediaCodecOMX ? "codecprocess" :
-            omx == mMediaServerOMX ? "mediaserver" : "local");
-
-
-    if (err != OK) {
-        return err;
-    }
-
-    mNodeLocation.add(*node, loc);
-
-    return OK;
-}
-
-status_t MuxOMX::freeNode(node_id node) {
-    Mutex::Autolock autoLock(mLock);
-
-    // exit if we have already freed the node
-    if (mNodeLocation.indexOfKey(node) < 0) {
-        ALOGD("MuxOMX::freeNode: node %d seems to be released already --- ignoring.", node);
-        return OK;
-    }
-
-    status_t err = getOMX_l(node)->freeNode(node);
-
-    if (err != OK) {
-        return err;
-    }
-
-    mNodeLocation.removeItem(node);
-
-    return OK;
-}
-
-status_t MuxOMX::sendCommand(
-        node_id node, OMX_COMMANDTYPE cmd, OMX_S32 param) {
-    return getOMX(node)->sendCommand(node, cmd, param);
-}
-
-status_t MuxOMX::getParameter(
-        node_id node, OMX_INDEXTYPE index,
-        void *params, size_t size) {
-    return getOMX(node)->getParameter(node, index, params, size);
-}
-
-status_t MuxOMX::setParameter(
-        node_id node, OMX_INDEXTYPE index,
-        const void *params, size_t size) {
-    return getOMX(node)->setParameter(node, index, params, size);
-}
-
-status_t MuxOMX::getConfig(
-        node_id node, OMX_INDEXTYPE index,
-        void *params, size_t size) {
-    return getOMX(node)->getConfig(node, index, params, size);
-}
-
-status_t MuxOMX::setConfig(
-        node_id node, OMX_INDEXTYPE index,
-        const void *params, size_t size) {
-    return getOMX(node)->setConfig(node, index, params, size);
-}
-
-status_t MuxOMX::getState(
-        node_id node, OMX_STATETYPE* state) {
-    return getOMX(node)->getState(node, state);
-}
-
-status_t MuxOMX::storeMetaDataInBuffers(
-        node_id node, OMX_U32 port_index, OMX_BOOL enable, MetadataBufferType *type) {
-    return getOMX(node)->storeMetaDataInBuffers(node, port_index, enable, type);
-}
-
-status_t MuxOMX::prepareForAdaptivePlayback(
-        node_id node, OMX_U32 port_index, OMX_BOOL enable,
-        OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight) {
-    return getOMX(node)->prepareForAdaptivePlayback(
-            node, port_index, enable, maxFrameWidth, maxFrameHeight);
-}
-
-status_t MuxOMX::configureVideoTunnelMode(
-        node_id node, OMX_U32 portIndex, OMX_BOOL enable,
-        OMX_U32 audioHwSync, native_handle_t **sidebandHandle) {
-    return getOMX(node)->configureVideoTunnelMode(
-            node, portIndex, enable, audioHwSync, sidebandHandle);
-}
-
-status_t MuxOMX::enableNativeBuffers(
-        node_id node, OMX_U32 port_index, OMX_BOOL graphic, OMX_BOOL enable) {
-    return getOMX(node)->enableNativeBuffers(node, port_index, graphic, enable);
-}
-
-status_t MuxOMX::getGraphicBufferUsage(
-        node_id node, OMX_U32 port_index, OMX_U32* usage) {
-    return getOMX(node)->getGraphicBufferUsage(node, port_index, usage);
-}
-
-status_t MuxOMX::useBuffer(
-        node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-        buffer_id *buffer, OMX_U32 allottedSize) {
-    return getOMX(node)->useBuffer(node, port_index, params, buffer, allottedSize);
-}
-
-status_t MuxOMX::useGraphicBuffer(
-        node_id node, OMX_U32 port_index,
-        const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer) {
-    return getOMX(node)->useGraphicBuffer(
-            node, port_index, graphicBuffer, buffer);
-}
-
-status_t MuxOMX::updateGraphicBufferInMeta(
-        node_id node, OMX_U32 port_index,
-        const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer) {
-    return getOMX(node)->updateGraphicBufferInMeta(
-            node, port_index, graphicBuffer, buffer);
-}
-
-status_t MuxOMX::updateNativeHandleInMeta(
-        node_id node, OMX_U32 port_index,
-        const sp<NativeHandle> &nativeHandle, buffer_id buffer) {
-    return getOMX(node)->updateNativeHandleInMeta(
-            node, port_index, nativeHandle, buffer);
-}
-
-status_t MuxOMX::createInputSurface(
-        node_id node, OMX_U32 port_index, android_dataspace dataSpace,
-        sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
-    status_t err = getOMX(node)->createInputSurface(
-            node, port_index, dataSpace, bufferProducer, type);
-    return err;
-}
-
-status_t MuxOMX::createPersistentInputSurface(
-        sp<IGraphicBufferProducer> *bufferProducer,
-        sp<IGraphicBufferConsumer> *bufferConsumer) {
-    sp<IOMX> omx;
-    {
-        Mutex::Autolock autoLock(mLock);
-        if (property_get_bool("media.stagefright.legacyencoder", false)) {
-            omx = mMediaServerOMX;
-        } else {
-            omx = mMediaCodecOMX;
-        }
-    }
-    return omx->createPersistentInputSurface(
-            bufferProducer, bufferConsumer);
-}
-
-status_t MuxOMX::setInputSurface(
-        node_id node, OMX_U32 port_index,
-        const sp<IGraphicBufferConsumer> &bufferConsumer, MetadataBufferType *type) {
-    return getOMX(node)->setInputSurface(node, port_index, bufferConsumer, type);
-}
-
-status_t MuxOMX::signalEndOfInputStream(node_id node) {
-    return getOMX(node)->signalEndOfInputStream(node);
-}
-
-status_t MuxOMX::allocateSecureBuffer(
-        node_id node, OMX_U32 port_index, size_t size,
-        buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle) {
-    return getOMX(node)->allocateSecureBuffer(
-            node, port_index, size, buffer, buffer_data, native_handle);
-}
-
-status_t MuxOMX::allocateBufferWithBackup(
-        node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-        buffer_id *buffer, OMX_U32 allottedSize) {
-    return getOMX(node)->allocateBufferWithBackup(
-            node, port_index, params, buffer, allottedSize);
-}
-
-status_t MuxOMX::freeBuffer(
-        node_id node, OMX_U32 port_index, buffer_id buffer) {
-    return getOMX(node)->freeBuffer(node, port_index, buffer);
-}
-
-status_t MuxOMX::fillBuffer(node_id node, buffer_id buffer, int fenceFd) {
-    return getOMX(node)->fillBuffer(node, buffer, fenceFd);
-}
-
-status_t MuxOMX::emptyBuffer(
-        node_id node,
-        buffer_id buffer,
-        OMX_U32 range_offset, OMX_U32 range_length,
-        OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
-    return getOMX(node)->emptyBuffer(
-            node, buffer, range_offset, range_length, flags, timestamp, fenceFd);
-}
-
-status_t MuxOMX::getExtensionIndex(
-        node_id node,
-        const char *parameter_name,
-        OMX_INDEXTYPE *index) {
-    return getOMX(node)->getExtensionIndex(node, parameter_name, index);
-}
-
-status_t MuxOMX::setInternalOption(
-        node_id node,
-        OMX_U32 port_index,
-        InternalOptionType type,
-        const void *data,
-        size_t size) {
-    return getOMX(node)->setInternalOption(node, port_index, type, data, size);
-}
-
 OMXClient::OMXClient() {
-    char value[PROPERTY_VALUE_MAX];
-    if (property_get("media.stagefright.codecremote", value, NULL)
-            && (!strcmp("0", value) || !strcasecmp("false", value))) {
-        sCodecProcessEnabled = false;
-    }
 }
 
 status_t OMXClient::connect() {
     sp<IServiceManager> sm = defaultServiceManager();
-    sp<IBinder> playerbinder = sm->getService(String16("media.player"));
-    sp<IMediaPlayerService> mediaservice = interface_cast<IMediaPlayerService>(playerbinder);
-
-    if (mediaservice.get() == NULL) {
-        ALOGE("Cannot obtain IMediaPlayerService");
-        return NO_INIT;
-    }
-
-    sp<IOMX> mediaServerOMX = mediaservice->getOMX();
-    if (mediaServerOMX.get() == NULL) {
-        ALOGE("Cannot obtain mediaserver IOMX");
-        return NO_INIT;
-    }
-
-    // If we don't want to use the codec process, and the media server OMX
-    // is local, use it directly instead of going through MuxOMX
-    if (!sCodecProcessEnabled &&
-            mediaServerOMX->livesLocally(0 /* node */, getpid())) {
-        mOMX = mediaServerOMX;
-        return OK;
-    }
-
     sp<IBinder> codecbinder = sm->getService(String16("media.codec"));
     sp<IMediaCodecService> codecservice = interface_cast<IMediaCodecService>(codecbinder);
 
@@ -532,22 +44,17 @@
         return NO_INIT;
     }
 
-    sp<IOMX> mediaCodecOMX = codecservice->getOMX();
-    if (mediaCodecOMX.get() == NULL) {
+    mOMX = codecservice->getOMX();
+    if (mOMX.get() == NULL) {
         ALOGE("Cannot obtain mediacodec IOMX");
         return NO_INIT;
     }
 
-    mOMX = new MuxOMX(mediaServerOMX, mediaCodecOMX);
-
     return OK;
 }
 
 void OMXClient::disconnect() {
-    if (mOMX.get() != NULL) {
-        mOMX.clear();
-        mOMX = NULL;
-    }
+    mOMX.clear();
 }
 
 }  // namespace android
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index 5ce2b76..de5ea9c 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -566,7 +566,7 @@
 
     if (mDataSource->readAt(data_offset + 8, mSyncSamples,
             (size_t)allocSize) != (ssize_t)allocSize) {
-        delete mSyncSamples;
+        delete[] mSyncSamples;
         mSyncSamples = NULL;
         return ERROR_IO;
     }
@@ -992,4 +992,3 @@
 }
 
 }  // namespace android
-
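
The one-line SampleTable fix above pairs an array allocation with delete[] on the error path; releasing a new[]-ed array with plain delete is undefined behavior. A tiny sketch of the corrected pattern (readAt is a stand-in that simulates an I/O failure):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical reader: returns false to simulate an I/O failure.
    bool readAt(uint32_t* /*dst*/, size_t /*count*/) { return false; }

    int main() {
        size_t count = 16;
        uint32_t* syncSamples = new uint32_t[count];  // allocated with new[]
        if (!readAt(syncSamples, count)) {
            delete[] syncSamples;   // must be delete[], not delete
            syncSamples = nullptr;
            std::puts("read failed, buffer released");
        }
        delete[] syncSamples;       // delete[] on nullptr is a safe no-op
        return 0;
    }
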
diff --git a/media/libstagefright/SimpleDecodingSource.cpp b/media/libstagefright/SimpleDecodingSource.cpp
index 2503a32..ea7d5af 100644
--- a/media/libstagefright/SimpleDecodingSource.cpp
+++ b/media/libstagefright/SimpleDecodingSource.cpp
@@ -17,7 +17,7 @@
 #include <gui/Surface.h>
 
 #include <media/ICrypto.h>
-#include <media/stagefright/foundation/ABuffer.h>
+#include <media/MediaCodecBuffer.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/foundation/ALooper.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -230,7 +230,7 @@
                 break;
             }
 
-            sp<ABuffer> in_buffer;
+            sp<MediaCodecBuffer> in_buffer;
             if (res == OK) {
                 res = mCodec->getInputBuffer(in_ix, &in_buffer);
             }
@@ -344,7 +344,7 @@
             return res;
         }
 
-        sp<ABuffer> out_buffer;
+        sp<MediaCodecBuffer> out_buffer;
         res = mCodec->getOutputBuffer(out_ix, &out_buffer);
         if (res != OK) {
             ALOGW("[%s] could not get output buffer #%zu",
diff --git a/media/libstagefright/SkipCutBuffer.cpp b/media/libstagefright/SkipCutBuffer.cpp
index d30be88..ee9016d 100644
--- a/media/libstagefright/SkipCutBuffer.cpp
+++ b/media/libstagefright/SkipCutBuffer.cpp
@@ -88,7 +88,8 @@
     buffer->set_range(0, copied);
 }
 
-void SkipCutBuffer::submit(const sp<ABuffer>& buffer) {
+template <typename T>
+void SkipCutBuffer::submitInternal(const sp<T>& buffer) {
     if (mCutBuffer == NULL) {
         // passthrough mode
         return;
@@ -120,6 +121,14 @@
     buffer->setRange(0, copied);
 }
 
+void SkipCutBuffer::submit(const sp<ABuffer>& buffer) {
+    submitInternal(buffer);
+}
+
+void SkipCutBuffer::submit(const sp<MediaCodecBuffer>& buffer) {
+    submitInternal(buffer);
+}
+
 void SkipCutBuffer::clear() {
     mWriteHead = mReadHead = 0;
     mFrontPadding = mSkip;
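
SkipCutBuffer::submit above is refactored into a private template so the same trimming logic serves both the ABuffer and MediaCodecBuffer overloads. A sketch of that "one template, thin public overloads" shape with placeholder buffer types:

    #include <cstdio>
    #include <memory>

    // Two unrelated buffer types that happen to expose the same interface.
    struct BufferA {
        size_t size() const { return 10; }
        void setRange(size_t, size_t n) { std::printf("A keeps %zu\n", n); }
    };
    struct BufferB {
        size_t size() const { return 20; }
        void setRange(size_t, size_t n) { std::printf("B keeps %zu\n", n); }
    };

    class Trimmer {
    public:
        // Public overloads keep the header free of templates.
        void submit(const std::shared_ptr<BufferA>& b) { submitInternal(b); }
        void submit(const std::shared_ptr<BufferB>& b) { submitInternal(b); }

    private:
        template <typename T>
        void submitInternal(const std::shared_ptr<T>& buffer) {
            // Shared logic: here, pretend we trim the first two bytes.
            size_t keep = buffer->size() > 2 ? buffer->size() - 2 : 0;
            buffer->setRange(2, keep);
        }
    };

    int main() {
        Trimmer t;
        t.submit(std::make_shared<BufferA>());
        t.submit(std::make_shared<BufferB>());
    }
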
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index be5067d..883a4dd 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -27,8 +27,8 @@
 
 #include <media/ICrypto.h>
 #include <media/IMediaHTTPService.h>
+#include <media/MediaCodecBuffer.h>
 
-#include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/ColorConverter.h>
@@ -55,8 +55,6 @@
     : mParsedMetaData(false),
       mAlbumArt(NULL) {
     ALOGV("StagefrightMetadataRetriever()");
-
-    DataSource::RegisterDefaultSniffers();
 }
 
 StagefrightMetadataRetriever::~StagefrightMetadataRetriever() {
@@ -223,7 +221,7 @@
         return NULL;
     }
 
-    Vector<sp<ABuffer> > inputBuffers;
+    Vector<sp<MediaCodecBuffer> > inputBuffers;
     err = decoder->getInputBuffers(&inputBuffers);
     if (err != OK) {
         ALOGW("failed to get input buffers: %d (%s)", err, asString(err));
@@ -232,7 +230,7 @@
         return NULL;
     }
 
-    Vector<sp<ABuffer> > outputBuffers;
+    Vector<sp<MediaCodecBuffer> > outputBuffers;
     err = decoder->getOutputBuffers(&outputBuffers);
     if (err != OK) {
         ALOGW("failed to get output buffers: %d (%s)", err, asString(err));
@@ -264,7 +262,7 @@
         size_t inputIndex = -1;
         int64_t ptsUs = 0ll;
         uint32_t flags = 0;
-        sp<ABuffer> codecBuffer = NULL;
+        sp<MediaCodecBuffer> codecBuffer = NULL;
 
         while (haveMoreInputs) {
             err = decoder->dequeueInputBuffer(&inputIndex, kBufferTimeOutUs);
@@ -376,7 +374,7 @@
     }
 
     ALOGV("successfully decoded video frame.");
-    sp<ABuffer> videoFrameBuffer = outputBuffers.itemAt(index);
+    sp<MediaCodecBuffer> videoFrameBuffer = outputBuffers.itemAt(index);
 
     if (thumbNailTime >= 0) {
         if (timeUs != thumbNailTime) {
@@ -418,6 +416,22 @@
             && trackMeta->findInt32(kKeySARHeight, &sarHeight)
             && sarHeight != 0) {
         frame->mDisplayWidth = (frame->mDisplayWidth * sarWidth) / sarHeight;
+    } else {
+        int32_t width, height;
+        if (trackMeta->findInt32(kKeyDisplayWidth, &width)
+                && trackMeta->findInt32(kKeyDisplayHeight, &height)
+                && frame->mDisplayWidth > 0 && frame->mDisplayHeight > 0
+                && width > 0 && height > 0) {
+            if (frame->mDisplayHeight * (int64_t)width / height > (int64_t)frame->mDisplayWidth) {
+                frame->mDisplayHeight =
+                        (int32_t)(height * (int64_t)frame->mDisplayWidth / width);
+            } else {
+                frame->mDisplayWidth =
+                        (int32_t)(frame->mDisplayHeight * (int64_t)width / height);
+            }
+            ALOGV("thumbNail width and height are overridden to %d x %d",
+                    frame->mDisplayWidth, frame->mDisplayHeight);
+        }
     }
 
     int32_t srcFormat;
@@ -754,9 +768,9 @@
 
     if (numTracks == 1) {
         const char *fileMIME;
-        CHECK(meta->findCString(kKeyMIMEType, &fileMIME));
 
-        if (!strcasecmp(fileMIME, "video/x-matroska")) {
+        if (meta->findCString(kKeyMIMEType, &fileMIME) &&
+                !strcasecmp(fileMIME, "video/x-matroska")) {
             sp<MetaData> trackMeta = mExtractor->getTrackMetaData(0);
             const char *trackMIME;
             CHECK(trackMeta->findCString(kKeyMIMEType, &trackMIME));
@@ -769,11 +783,6 @@
             }
         }
     }
-
-    // To check whether the media file is drm-protected
-    if (mExtractor->getDrmFlag()) {
-        mMetaData.add(METADATA_KEY_IS_DRM, String8("1"));
-    }
 }
 
 void StagefrightMetadataRetriever::clearMetadata() {
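
The new else-branch above derives the thumbnail display size from the container-declared display width/height when no pixel-aspect-ratio is present, shrinking one side so the frame matches the intended aspect ratio. The same arithmetic as a standalone sketch:

    #include <cstdint>
    #include <cstdio>

    // Adjust (displayWidth, displayHeight) of a decoded frame so that it matches
    // the container-declared aspect ratio width:height, never growing either side.
    void overrideDisplaySize(int32_t& displayWidth, int32_t& displayHeight,
                             int32_t width, int32_t height) {
        if (displayWidth <= 0 || displayHeight <= 0 || width <= 0 || height <= 0) {
            return;  // nothing sensible to do
        }
        if (displayHeight * (int64_t)width / height > (int64_t)displayWidth) {
            // Frame is too tall for the target ratio: shrink the height.
            displayHeight = (int32_t)(height * (int64_t)displayWidth / width);
        } else {
            // Frame is too wide (or exact): derive the width from the height.
            displayWidth = (int32_t)(displayHeight * (int64_t)width / height);
        }
    }

    int main() {
        int32_t w = 1920, h = 1080;          // decoded frame size
        overrideDisplaySize(w, h, 4, 3);     // container declares 4:3
        std::printf("%d x %d\n", w, h);      // -> 1440 x 1080
    }
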
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 36be7a0..ec02fb9 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -41,9 +41,9 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/AudioSystem.h>
 #include <media/MediaPlayerInterface.h>
-#include <hardware/audio.h>
 #include <media/stagefright/Utils.h>
 #include <media/AudioParameter.h>
+#include <system/audio.h>
 
 namespace android {
 
@@ -637,6 +637,11 @@
         msg->setInt32("track-id", trackID);
     }
 
+    const char *lang;
+    if (meta->findCString(kKeyMediaLanguage, &lang)) {
+        msg->setString("language", lang);
+    }
+
     if (!strncasecmp("video/", mime, 6)) {
         int32_t width, height;
         if (!meta->findInt32(kKeyWidth, &width)
@@ -647,6 +652,13 @@
         msg->setInt32("width", width);
         msg->setInt32("height", height);
 
+        int32_t displayWidth, displayHeight;
+        if (meta->findInt32(kKeyDisplayWidth, &displayWidth)
+                && meta->findInt32(kKeyDisplayHeight, &displayHeight)) {
+            msg->setInt32("display-width", displayWidth);
+            msg->setInt32("display-height", displayHeight);
+        }
+
         int32_t sarWidth, sarHeight;
         if (meta->findInt32(kKeySARWidth, &sarWidth)
                 && meta->findInt32(kKeySARHeight, &sarHeight)) {
@@ -1273,6 +1285,11 @@
         meta->setInt32(kKeyMaxBitRate, maxBitrate);
     }
 
+    AString lang;
+    if (msg->findString("language", &lang)) {
+        meta->setCString(kKeyMediaLanguage, lang.c_str());
+    }
+
     if (mime.startsWith("video/")) {
         int32_t width;
         int32_t height;
@@ -1290,6 +1307,13 @@
             meta->setInt32(kKeySARHeight, sarHeight);
         }
 
+        int32_t displayWidth, displayHeight;
+        if (msg->findInt32("display-width", &displayWidth)
+                && msg->findInt32("display-height", &displayHeight)) {
+            meta->setInt32(kKeyDisplayWidth, displayWidth);
+            meta->setInt32(kKeyDisplayHeight, displayHeight);
+        }
+
         int32_t colorFormat;
         if (msg->findInt32("color-format", &colorFormat)) {
             meta->setInt32(kKeyColorFormat, colorFormat);
@@ -1505,6 +1529,7 @@
     { MEDIA_MIMETYPE_AUDIO_AAC,         AUDIO_FORMAT_AAC },
     { MEDIA_MIMETYPE_AUDIO_VORBIS,      AUDIO_FORMAT_VORBIS },
     { MEDIA_MIMETYPE_AUDIO_OPUS,        AUDIO_FORMAT_OPUS},
+    { MEDIA_MIMETYPE_AUDIO_AC3,         AUDIO_FORMAT_AC3},
     { 0, AUDIO_FORMAT_INVALID }
 };
 
@@ -1634,9 +1659,7 @@
         return AString("<URI suppressed>");
     }
 
-    char prop[PROPERTY_VALUE_MAX];
-    if (property_get("media.stagefright.log-uri", prop, "false") &&
-        (!strcmp(prop, "1") || !strcmp(prop, "true"))) {
+    if (property_get_bool("media.stagefright.log-uri", false)) {
         return uri;
     }
 
@@ -1747,6 +1770,45 @@
     *sync = settings;
 }
 
+void writeToAMessage(const sp<AMessage> &msg, const BufferingSettings &buffering) {
+    msg->setInt32("init-mode", buffering.mInitialBufferingMode);
+    msg->setInt32("rebuffer-mode", buffering.mRebufferingMode);
+    msg->setInt32("init-ms", buffering.mInitialWatermarkMs);
+    msg->setInt32("init-kb", buffering.mInitialWatermarkKB);
+    msg->setInt32("rebuffer-low-ms", buffering.mRebufferingWatermarkLowMs);
+    msg->setInt32("rebuffer-high-ms", buffering.mRebufferingWatermarkHighMs);
+    msg->setInt32("rebuffer-low-kb", buffering.mRebufferingWatermarkLowKB);
+    msg->setInt32("rebuffer-high-kb", buffering.mRebufferingWatermarkHighKB);
+}
+
+void readFromAMessage(const sp<AMessage> &msg, BufferingSettings *buffering /* nonnull */) {
+    int32_t value;
+    if (msg->findInt32("init-mode", &value)) {
+        buffering->mInitialBufferingMode = (BufferingMode)value;
+    }
+    if (msg->findInt32("rebuffer-mode", &value)) {
+        buffering->mRebufferingMode = (BufferingMode)value;
+    }
+    if (msg->findInt32("init-ms", &value)) {
+        buffering->mInitialWatermarkMs = value;
+    }
+    if (msg->findInt32("init-kb", &value)) {
+        buffering->mInitialWatermarkKB = value;
+    }
+    if (msg->findInt32("rebuffer-low-ms", &value)) {
+        buffering->mRebufferingWatermarkLowMs = value;
+    }
+    if (msg->findInt32("rebuffer-high-ms", &value)) {
+        buffering->mRebufferingWatermarkHighMs = value;
+    }
+    if (msg->findInt32("rebuffer-low-kb", &value)) {
+        buffering->mRebufferingWatermarkLowKB = value;
+    }
+    if (msg->findInt32("rebuffer-high-kb", &value)) {
+        buffering->mRebufferingWatermarkHighKB = value;
+    }
+}
+
 AString nameForFd(int fd) {
     const size_t SIZE = 256;
     char buffer[SIZE];
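
The writeToAMessage/readFromAMessage additions above serialize a BufferingSettings struct into named int32 fields and read them back field by field, leaving any missing key at its previous value. A reduced sketch of that round trip over a plain key/value map (key names follow the hunk; the struct is simplified):

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>

    struct BufferingSettings {
        int32_t initialWatermarkMs = 0;
        int32_t rebufferLowMs = 0;
        int32_t rebufferHighMs = 0;
    };

    using Message = std::map<std::string, int32_t>;

    void writeToMessage(Message& msg, const BufferingSettings& b) {
        msg["init-ms"] = b.initialWatermarkMs;
        msg["rebuffer-low-ms"] = b.rebufferLowMs;
        msg["rebuffer-high-ms"] = b.rebufferHighMs;
    }

    // Only overwrite fields that are actually present in the message.
    void readFromMessage(const Message& msg, BufferingSettings* b) {
        if (auto it = msg.find("init-ms"); it != msg.end()) b->initialWatermarkMs = it->second;
        if (auto it = msg.find("rebuffer-low-ms"); it != msg.end()) b->rebufferLowMs = it->second;
        if (auto it = msg.find("rebuffer-high-ms"); it != msg.end()) b->rebufferHighMs = it->second;
    }

    int main() {
        BufferingSettings in{2000, 500, 5000}, out;
        Message msg;
        writeToMessage(msg, in);
        readFromMessage(msg, &out);
        std::cout << out.initialWatermarkMs << " " << out.rebufferLowMs << " "
                  << out.rebufferHighMs << "\n";   // 2000 500 5000
    }
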
diff --git a/media/libstagefright/WVMExtractor.cpp b/media/libstagefright/WVMExtractor.cpp
deleted file mode 100644
index 1c170b8..0000000
--- a/media/libstagefright/WVMExtractor.cpp
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "WVMExtractor"
-#include <utils/Log.h>
-
-#include "include/WVMExtractor.h"
-
-#include <arpa/inet.h>
-#include <utils/String8.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/Utils.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <dlfcn.h>
-
-#include <utils/Errors.h>
-
-/* The extractor lifetime is short - just long enough to get
- * the media sources constructed - so the shared lib needs to remain open
- * beyond the lifetime of the extractor.  So keep the handle as a global
- * rather than a member of the extractor
- */
-void *gVendorLibHandle = NULL;
-
-namespace android {
-
-WVMExtractor::WVMExtractor(const sp<DataSource> &source)
-    : mDataSource(source)
-{
-    if (!getVendorLibHandle()) {
-        return;
-    }
-
-    typedef WVMLoadableExtractor *(*GetInstanceFunc)(sp<DataSource>);
-    GetInstanceFunc getInstanceFunc =
-        (GetInstanceFunc) dlsym(gVendorLibHandle,
-                "_ZN7android11GetInstanceENS_2spINS_10DataSourceEEE");
-
-    if (getInstanceFunc) {
-        if (source->DrmInitialization(
-                MEDIA_MIMETYPE_CONTAINER_WVM) != NULL) {
-            mImpl = (*getInstanceFunc)(source);
-            CHECK(mImpl != NULL);
-            setDrmFlag(true);
-        } else {
-            ALOGE("Drm manager failed to initialize.");
-        }
-    } else {
-        ALOGE("Failed to locate GetInstance in libwvm.so");
-    }
-}
-
-static void init_routine()
-{
-    gVendorLibHandle = dlopen("libwvm.so", RTLD_NOW);
-    if (gVendorLibHandle == NULL) {
-        ALOGE("Failed to open libwvm.so: %s", dlerror());
-    }
-}
-
-bool WVMExtractor::getVendorLibHandle()
-{
-    static pthread_once_t sOnceControl = PTHREAD_ONCE_INIT;
-    pthread_once(&sOnceControl, init_routine);
-
-    return gVendorLibHandle != NULL;
-}
-
-WVMExtractor::~WVMExtractor() {
-}
-
-size_t WVMExtractor::countTracks() {
-    return (mImpl != NULL) ? mImpl->countTracks() : 0;
-}
-
-sp<IMediaSource> WVMExtractor::getTrack(size_t index) {
-    if (mImpl == NULL) {
-        return NULL;
-    }
-    return mImpl->getTrack(index);
-}
-
-sp<MetaData> WVMExtractor::getTrackMetaData(size_t index, uint32_t flags) {
-    if (mImpl == NULL) {
-        return NULL;
-    }
-    return mImpl->getTrackMetaData(index, flags);
-}
-
-sp<MetaData> WVMExtractor::getMetaData() {
-    if (mImpl == NULL) {
-        return NULL;
-    }
-    return mImpl->getMetaData();
-}
-
-int64_t WVMExtractor::getCachedDurationUs(status_t *finalStatus) {
-    if (mImpl == NULL) {
-        return 0;
-    }
-
-    return mImpl->getCachedDurationUs(finalStatus);
-}
-
-status_t WVMExtractor::getEstimatedBandwidthKbps(int32_t *kbps) {
-    if (mImpl == NULL) {
-        return UNKNOWN_ERROR;
-    }
-
-    return mImpl->getEstimatedBandwidthKbps(kbps);
-}
-
-
-void WVMExtractor::setAdaptiveStreamingMode(bool adaptive) {
-    if (mImpl != NULL) {
-        mImpl->setAdaptiveStreamingMode(adaptive);
-    }
-}
-
-void WVMExtractor::setCryptoPluginMode(bool cryptoPluginMode) {
-    if (mImpl != NULL) {
-        mImpl->setCryptoPluginMode(cryptoPluginMode);
-    }
-}
-
-void WVMExtractor::setUID(uid_t uid) {
-    if (mImpl != NULL) {
-        mImpl->setUID(uid);
-    }
-}
-
-status_t WVMExtractor::getError() {
-    if (mImpl == NULL) {
-       return UNKNOWN_ERROR;
-    }
-
-    return mImpl->getError();
-}
-
-void WVMExtractor::setError(status_t err) {
-    if (mImpl != NULL) {
-        mImpl->setError(err);
-    }
-}
-
-bool SniffWVM(
-    const sp<DataSource> &source, String8 *mimeType, float *confidence,
-        sp<AMessage> *) {
-
-    if (!WVMExtractor::getVendorLibHandle()) {
-        return false;
-    }
-
-    typedef WVMLoadableExtractor *(*SnifferFunc)(const sp<DataSource>&);
-    SnifferFunc snifferFunc =
-        (SnifferFunc) dlsym(gVendorLibHandle,
-                            "_ZN7android15IsWidevineMediaERKNS_2spINS_10DataSourceEEE");
-
-    if (snifferFunc) {
-        if ((*snifferFunc)(source)) {
-            *mimeType = MEDIA_MIMETYPE_CONTAINER_WVM;
-            *confidence = 10.0f;
-            return true;
-        }
-    } else {
-        ALOGE("IsWidevineMedia not found in libwvm.so");
-    }
-
-    return false;
-}
-
-} //namespace android
-
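The removed extractor above resolves its vendor entry point lazily: a pthread_once() guard opens the shared library exactly once per process, the handle is kept global so it outlives any single extractor, and dlsym() looks up a mangled C++ symbol. A minimal standalone sketch of that same pattern, using placeholder library and symbol names rather than the real Widevine ones:

#include <dlfcn.h>
#include <pthread.h>
#include <cstdio>

// Kept global so the library outlives any single user, mirroring the
// rationale in the removed comment above.
static void *gLibHandle = nullptr;

static void openLibOnce() {
    // Runs at most once per process, even with concurrent callers.
    gLibHandle = dlopen("libexample_plugin.so", RTLD_NOW);  // placeholder name
    if (gLibHandle == nullptr) {
        fprintf(stderr, "dlopen failed: %s\n", dlerror());
    }
}

typedef void *(*FactoryFunc)();

void *createFromVendorLib() {
    static pthread_once_t sOnceControl = PTHREAD_ONCE_INIT;
    pthread_once(&sOnceControl, openLibOnce);
    if (gLibHandle == nullptr) {
        return nullptr;
    }
    // Placeholder symbol; a real plugin would export a mangled C++ name.
    FactoryFunc factory = (FactoryFunc)dlsym(gLibHandle, "example_create_instance");
    return factory != nullptr ? (*factory)() : nullptr;
}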
diff --git a/media/libstagefright/avc_utils.cpp b/media/libstagefright/avc_utils.cpp
index 0396dc6..9f15cf7 100644
--- a/media/libstagefright/avc_utils.cpp
+++ b/media/libstagefright/avc_utils.cpp
@@ -447,7 +447,8 @@
     return meta;
 }
 
-bool IsIDR(const sp<ABuffer> &buffer) {
+template <typename T>
+bool IsIDRInternal(const sp<T> &buffer) {
     const uint8_t *data = buffer->data();
     size_t size = buffer->size();
 
@@ -469,6 +470,14 @@
     return foundIDR;
 }
 
+bool IsIDR(const sp<ABuffer> &buffer) {
+    return IsIDRInternal(buffer);
+}
+
+bool IsIDR(const sp<MediaCodecBuffer> &buffer) {
+    return IsIDRInternal(buffer);
+}
+
 bool IsAVCReferenceFrame(const sp<ABuffer> &accessUnit) {
     const uint8_t *data = accessUnit->data();
     size_t size = accessUnit->size();
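The hunk above keeps a single implementation by moving the body into a templated helper (IsIDRInternal) and exposing one thin, non-template overload per buffer type. A hedged sketch of the same pattern with placeholder buffer types (FooBuffer/BarBuffer are illustrative, not stagefright classes):

#include <cstddef>
#include <cstdint>

// Two unrelated buffer-like types that happen to share data()/size().
struct FooBuffer {
    const uint8_t *data() const { return mData; }
    size_t size() const { return mSize; }
    const uint8_t *mData; size_t mSize;
};
struct BarBuffer {
    const uint8_t *data() const { return mData; }
    size_t size() const { return mSize; }
    const uint8_t *mData; size_t mSize;
};

namespace {
// Shared body; only relies on the common data()/size() interface.
template <typename T>
bool startsWithStartCodeInternal(const T &buffer) {
    return buffer.size() >= 4 && buffer.data()[0] == 0
            && buffer.data()[1] == 0 && buffer.data()[2] == 0
            && buffer.data()[3] == 1;
}
}  // namespace

// Public non-template overloads keep callers and headers unchanged.
bool startsWithStartCode(const FooBuffer &buffer) {
    return startsWithStartCodeInternal(buffer);
}
bool startsWithStartCode(const BarBuffer &buffer) {
    return startsWithStartCodeInternal(buffer);
}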
diff --git a/media/libstagefright/codec2/Android.mk b/media/libstagefright/codec2/Android.mk
new file mode 100644
index 0000000..a463205
--- /dev/null
+++ b/media/libstagefright/codec2/Android.mk
@@ -0,0 +1,20 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+        C2.cpp    \
+
+LOCAL_C_INCLUDES += \
+        $(TOP)/frameworks/av/media/libstagefright/codec2/include \
+        $(TOP)/frameworks/native/include/media/hardware \
+
+LOCAL_MODULE:= libstagefright_codec2
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
+LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
+
+include $(BUILD_SHARED_LIBRARY)
+
+################################################################################
+
+include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/media/libstagefright/codec2/C2.cpp b/media/libstagefright/codec2/C2.cpp
new file mode 100644
index 0000000..a51b073
--- /dev/null
+++ b/media/libstagefright/codec2/C2.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <C2.h>
+#include <C2Buffer.h>
+#include <C2Component.h>
+#include <C2Config.h>
+#include <C2Param.h>
+#include <C2ParamDef.h>
+#include <C2Work.h>
+
+namespace android {
+
+/**
+ * There is nothing here yet. This library is built to see what symbols and methods get
+ * defined as part of the API include files.
+ *
+ * Going forward, the Codec2 library will contain utility methods that are useful for
+ * Codec2 clients.
+ */
+
+} // namespace android
+
diff --git a/media/libstagefright/codec2/include/C2.h b/media/libstagefright/codec2/include/C2.h
new file mode 100644
index 0000000..7d00a03
--- /dev/null
+++ b/media/libstagefright/codec2/include/C2.h
@@ -0,0 +1,287 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef C2_H_
+#define C2_H_
+
+#include <string>
+#include <vector>
+#include <list>
+
+#ifdef __ANDROID__
+
+#include <utils/Errors.h>       // for status_t
+#include <utils/Timers.h>       // for nsecs_t
+
+namespace android {
+
+#else
+
+#include <errno.h>
+typedef int64_t nsecs_t;
+
+enum {
+    GRALLOC_USAGE_SW_READ_OFTEN,
+    GRALLOC_USAGE_RENDERSCRIPT,
+    GRALLOC_USAGE_HW_TEXTURE,
+    GRALLOC_USAGE_HW_COMPOSER,
+    GRALLOC_USAGE_HW_VIDEO_ENCODER,
+    GRALLOC_USAGE_PROTECTED,
+    GRALLOC_USAGE_SW_WRITE_OFTEN,
+    GRALLOC_USAGE_HW_RENDER,
+};
+
+#endif
+
+/** \mainpage Codec2
+ *
+ * Codec2 is a frame-based data processing API used by Android.
+ *
+ * The framework accesses components via the \ref API.
+ */
+
+/** \ingroup API
+ *
+ * The Codec2 API defines the operation of data processing components and their interaction with
+ * the rest of the system.
+ *
+ * Coding Conventions
+ *
+ * Mitigating Binary Compatibility.
+ *
+ * While full binary compatibility is not a goal of the API (due to our use of STL), we try to
+ * mitigate binary breaks by adhering to the following conventions:
+ *
+ * - at most one vtable with placeholder virtual methods
+ * - all optional/placeholder virtual methods returning a status_t, with C2_NOT_IMPLEMENTED not
+ *   requiring any update to input/output arguments.
+ * - limiting symbol export of inline methods
+ * - use of pimpl (or shared-pimpl)
+ *
+ * Naming
+ *
+ * - all classes and types are prefixed with C2
+ * - classes for internal use are prefixed with _C2
+ * - enum values in the global namespace are prefixed with C2_ and are all caps
+ * - enum values inside classes have no C2_ prefix as the class already has it
+ * - supporting two kinds of enum naming: all-caps and kCamelCase
+ * \todo revisit kCamelCase for param-type
+ *
+ * Aspects
+ *
+ * Aspects define certain common behavior across a group of objects.
+ * - classes whose name matches _C2.*Aspect
+ * - only protected constructors
+ * - no destructor and copyable
+ * - all methods are inline or static (this is the opposite of the interface paradigm, where all
+ *   methods are virtual, which would not work due to the at-most-one-vtable rule.)
+ * - only private variables (this prevents subclasses from interfering with the aspects.)
+ */
+
+/// \defgroup types Common Types
+/// @{
+
+/**
+ * C2String: basic string implementation
+ */
+typedef std::string C2String;
+typedef const char *C2StringLiteral;
+
+/**
+ * C2Error: status codes used.
+ */
+typedef int32_t C2Error;
+enum {
+#ifndef __ANDROID__
+    OK                  = 0,
+    BAD_VALUE           = -EINVAL,
+    BAD_INDEX           = -EOVERFLOW,
+    UNKNOWN_TRANSACTION = -EBADMSG,
+    ALREADY_EXISTS      = -EEXIST,
+    NAME_NOT_FOUND      = -ENOENT,
+    INVALID_OPERATION   = -ENOSYS,
+    NO_MEMORY           = -ENOMEM,
+    PERMISSION_DENIED   = -EPERM,
+    TIMED_OUT           = -ETIMEDOUT,
+    UNKNOWN_ERROR       = -EINVAL,
+#endif
+
+    C2_OK               = OK,                   ///< operation completed successfully
+
+    // bad input
+    C2_BAD_VALUE        = BAD_VALUE,            ///< argument has invalid value (user error)
+    C2_BAD_INDEX        = BAD_INDEX,            ///< argument uses invalid index (user error)
+    C2_UNSUPPORTED      = UNKNOWN_TRANSACTION,  ///< argument/index is valid but not supported \todo is this really BAD_INDEX/VALUE?
+
+    // bad sequencing of events
+    C2_DUPLICATE        = ALREADY_EXISTS,       ///< object already exists
+    C2_NOT_FOUND        = NAME_NOT_FOUND,       ///< object not found
+    C2_BAD_STATE        = INVALID_OPERATION,    ///< operation is not permitted in the current state
+
+    // bad environment
+    C2_NO_MEMORY        = NO_MEMORY,            ///< not enough memory to complete operation
+    C2_NO_PERMISSION    = PERMISSION_DENIED,    ///< missing permission to complete operation
+    C2_TIMED_OUT        = TIMED_OUT,            ///< operation did not complete within timeout
+
+    // bad versioning
+    C2_NOT_IMPLEMENTED  = UNKNOWN_TRANSACTION,  ///< operation is not implemented (optional only) \todo for now reuse error code
+
+    // unknown fatal
+    C2_CORRUPTED        = UNKNOWN_ERROR,        ///< some unexpected error prevented the operation
+};
+
+/// @}
+
+/// \defgroup utils Utilities
+/// @{
+
+#define C2_DO_NOT_COPY(type, args...) \
+    type args& operator=(const type args&) = delete; \
+    type(const type args&) = delete; \
+
+#define C2_PURE __attribute__((pure))
+#define C2_CONST __attribute__((const))
+#define C2_HIDE __attribute__((visibility("hidden")))
+#define C2_INTERNAL __attribute__((internal_linkage))
+
+#define DEFINE_OTHER_COMPARISON_OPERATORS(type) \
+    inline bool operator!=(const type &other) { return !(*this == other); } \
+    inline bool operator<=(const type &other) { return (*this == other) || (*this < other); } \
+    inline bool operator>=(const type &other) { return !(*this < other); } \
+    inline bool operator>(const type &other) { return !(*this < other) && !(*this == other); }
+
+#define DEFINE_FIELD_BASED_COMPARISON_OPERATORS(type, field) \
+    inline bool operator<(const type &other) const { return field < other.field; } \
+    inline bool operator==(const type &other) const { return field == other.field; } \
+    DEFINE_OTHER_COMPARISON_OPERATORS(type)
+
+/// \cond INTERNAL
+
+/// \defgroup utils_internal
+/// @{
+
+template<typename... T> struct c2_types;
+
+/** specialization for a single type */
+template<typename T>
+struct c2_types<T> {
+    typedef typename std::decay<T>::type wide_type;
+    typedef wide_type narrow_type;
+    typedef wide_type mintype;
+};
+
+/** specialization for two types */
+template<typename T, typename U>
+struct c2_types<T, U> {
+    static_assert(std::is_floating_point<T>::value == std::is_floating_point<U>::value,
+                  "mixing floating point and non-floating point types is disallowed");
+    static_assert(std::is_signed<T>::value == std::is_signed<U>::value,
+                  "mixing signed and unsigned types is disallowed");
+
+    typedef typename std::decay<
+            decltype(true ? std::declval<T>() : std::declval<U>())>::type wide_type;
+    typedef typename std::decay<
+            typename std::conditional<sizeof(T) < sizeof(U), T, U>::type>::type narrow_type;
+    typedef typename std::conditional<
+            std::is_signed<T>::value, wide_type, narrow_type>::type mintype;
+};
+
+/// @}
+
+/// \endcond
+
+/**
+ * Type support utility class. Only supports similar classes, such as:
+ * - all floating point
+ * - all unsigned/all signed
+ * - all pointer
+ */
+template<typename T, typename U, typename... V>
+struct c2_types<T, U, V...> {
+    /** Common type that accommodates all template parameter types. */
+    typedef typename c2_types<typename c2_types<T, U>::wide_type, V...>::wide_type wide_type;
+    /** Narrowest type of the template parameter types. */
+    typedef typename c2_types<typename c2_types<T, U>::narrow_type, V...>::narrow_type narrow_type;
+    /** Type that accommodates the minimum value for any input for the template parameter types. */
+    typedef typename c2_types<typename c2_types<T, U>::mintype, V...>::mintype mintype;
+};
+
+/**
+ *  \ingroup utils_internal
+ * specialization for two values */
+template<typename T, typename U>
+inline constexpr typename c2_types<T, U>::wide_type c2_max(const T a, const U b) {
+    typedef typename c2_types<T, U>::wide_type wide_type;
+    return ({ wide_type a_(a), b_(b); a_ > b_ ? a_ : b_; });
+}
+
+/**
+ * Finds the maximum value of a list of "similarly typed" values.
+ *
+ * This is an extension to std::max where the types do not have to be identical, and the smallest
+ * resulting type is used that accommodates the argument types.
+ *
+ * \note Value types must be similar, e.g. all floating point, all pointers, all signed, or all
+ * unsigned.
+ *
+ * @return the largest of the input arguments.
+ */
+template<typename T, typename U, typename... V>
+constexpr typename c2_types<T, U, V...>::wide_type c2_max(const T a, const U b, const V ... c) {
+    typedef typename c2_types<T, U, V...>::wide_type wide_type;
+    return ({ wide_type a_(a), b_(c2_max(b, c...)); a_ > b_ ? a_ : b_; });
+}
+
+/**
+ *  \ingroup utils_internal
+ * specialization for two values */
+template<typename T, typename U>
+inline constexpr typename c2_types<T, U>::mintype c2_min(const T a, const U b) {
+    typedef typename c2_types<T, U>::wide_type wide_type;
+    return ({
+        wide_type a_(a), b_(b);
+        static_cast<typename c2_types<T, U>::mintype>(a_ < b_ ? a_ : b_);
+    });
+}
+
+/**
+ * Finds the minimum value of a list of "similarly typed" values.
+ *
+ * This is an extension to std::min where the types do not have to be identical, and the smallest
+ * resulting type is used that accommodates the argument types.
+ *
+ * \note Value types must be similar, e.g. all floating point, all pointers, all signed, or all
+ * unsigned.
+ *
+ * @return the smallest of the input arguments.
+ */
+template<typename T, typename U, typename... V>
+constexpr typename c2_types<T, U, V...>::mintype c2_min(const T a, const U b, const V ... c) {
+    typedef typename c2_types<U, V...>::mintype rest_type;
+    typedef typename c2_types<T, rest_type>::wide_type wide_type;
+    return ({
+        wide_type a_(a), b_(c2_min(b, c...));
+        static_cast<typename c2_types<T, rest_type>::mintype>(a_ < b_ ? a_ : b_);
+    });
+}
+
+/// @}
+
+#ifdef __ANDROID__
+} // namespace android
+#endif
+
+#endif  // C2_H_
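As defined above, c2_min()/c2_max() accept arguments of different (but similar) integral or floating-point widths: the comparison happens in a widened type, and c2_min() narrows its result to the smallest type that can hold the minimum. A small usage sketch, assuming C2.h from this change is on the include path:

#include <C2.h>

#include <cassert>
#include <cstdint>
#include <type_traits>

#ifdef __ANDROID__
using namespace android;
#endif

int main() {
    uint32_t big = 10;
    uint8_t small = 200;

    // The comparison happens in the wide type, so 200 is not truncated.
    auto widest = c2_max(big, small);
    static_assert(std::is_same<decltype(widest), uint32_t>::value, "widened");
    assert(widest == 200u);

    // c2_min narrows its result; for unsigned inputs that is the smaller type.
    auto narrowest = c2_min(big, small);
    static_assert(std::is_same<decltype(narrowest), uint8_t>::value, "narrowed");
    assert(narrowest == 10u);

    // On Android builds the C2Error values alias the utils/Errors.h codes.
    C2Error err = C2_OK;
    assert(err == 0);
    return 0;
}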
diff --git a/media/libstagefright/codec2/include/C2Buffer.h b/media/libstagefright/codec2/include/C2Buffer.h
new file mode 100644
index 0000000..9f6b487
--- /dev/null
+++ b/media/libstagefright/codec2/include/C2Buffer.h
@@ -0,0 +1,1777 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef C2BUFFER_H_
+#define C2BUFFER_H_
+
+#include <C2.h>
+#include <C2Param.h> // for C2Info
+
+#include <list>
+#include <memory>
+
+typedef int C2Fence;
+
+#ifdef __ANDROID__
+
+// #include <system/window.h>
+#include <cutils/native_handle.h>
+#include <hardware/gralloc.h> // TODO: remove
+
+typedef native_handle_t C2Handle;
+
+#else
+
+typedef void* C2Handle;
+
+#endif
+
+namespace android {
+
+/// \defgroup buffer Buffers
+/// @{
+
+/// \defgroup buffer_sync Synchronization
+/// @{
+
+/**
+ * Synchronization is accomplished using event and fence objects.
+ *
+ * These are cross-process extensions of promise/future infrastructure.
+ * Events are analogous to std::promise<void>, whereas fences are to std::shared_future<void>.
+ *
+ * Fences and events are shareable/copyable.
+ *
+ * Fences are used in two scenarios, and all copied instances refer to the same event.
+ * \todo do events need to be copyable or should they be unique?
+ *
+ * acquire sync fence object: signaled when it is safe for the component or client to access
+ * (the contents of) an object.
+ *
+ * release sync fence object: \todo
+ *
+ * Fences can be backed by hardware. Hardware fences are guaranteed to signal NO MATTER WHAT within
+ * a short (platform specific) amount of time; this guarantee is usually less than 15 msecs.
+ */
+
+/**
+ * Fence object used by components and the framework.
+ *
+ * Implements the waiting for an event, analogous to a 'future'.
+ *
+ * To be implemented by vendors if using HW fences.
+ */
+class C2Fence {
+public:
+    /**
+     * Waits for a fence to be signaled with a timeout.
+     *
+     * \todo a mechanism to cancel a wait - for now the only way to do this is to abandon the
+     * event, but fences are shared so canceling a wait will cancel all waits.
+     *
+     * \param timeoutNs           the maximum time to wait in nsecs
+     *
+     * \retval C2_OK            the fence has been signaled
+     * \retval C2_TIMED_OUT     the fence has not been signaled within the timeout
+     * \retval C2_BAD_STATE     the fence has been abandoned without being signaled (it will never
+     *                          be signaled)
+     * \retval C2_NO_PERMISSION no permission to wait for the fence (unexpected - system)
+     * \retval C2_CORRUPTED     some unknown error prevented waiting for the fence (unexpected)
+     */
+    C2Error wait(nsecs_t timeoutNs);
+
+    /**
+     * Used to check if this fence is valid (if there is a chance for it to be signaled.)
+     * A fence becomes invalid if the controlling event is destroyed without it signaling the fence.
+     *
+     * \return whether this fence is valid
+     */
+    bool valid() const;
+
+    /**
+     * Used to check if this fence has been signaled (is ready).
+     *
+     * \return whether this fence has been signaled
+     */
+    bool ready() const;
+
+    /**
+     * Returns a file descriptor that can be used to wait for this fence in a select system call.
+     * \note The returned file descriptor, if valid, must be closed by the caller.
+     *
+     * This can be used in e.g. poll() system calls. This file becomes readable (POLLIN) when the
+     * fence is signaled, and bad (POLLERR) if the fence is abandoned.
+     *
+     * \return a file descriptor representing this fence (with ownership), or -1 if the fence
+     * has already been signaled (\todo or abandoned).
+     *
+     * \todo this must be compatible with fences used by gralloc
+     */
+    int fd() const;
+
+    /**
+     * Returns whether this fence is a hardware-backed fence.
+     * \return whether this is a hardware fence
+     */
+    bool isHW() const;
+
+private:
+    class Impl;
+    std::shared_ptr<Impl> mImpl;
+};
+
+/**
+ * Event object used by components and the framework.
+ *
+ * Implements the signaling of an event, analogous to a 'promise'.
+ *
+ * Hardware backed events do not go through this object, and must be exposed directly as fences
+ * by vendors.
+ */
+class C2Event {
+public:
+    /**
+     * Returns a fence for this event.
+     */
+    C2Fence fence() const;
+
+    /**
+     * Signals (all) associated fence(s).
+     * This has no effect if the event was already signaled or abandoned.
+     *
+     * \retval C2_OK            the fence(s) were successfully signaled
+     * \retval C2_BAD_STATE     the fence(s) have already been abandoned or merged (caller error)
+     * \retval C2_ALREADY_EXISTS the fence(s) have already been signaled (caller error)
+     * \retval C2_NO_PERMISSION no permission to signal the fence (unexpected - system)
+     * \retval C2_CORRUPTED     some unknown error prevented signaling the fence(s) (unexpected)
+     */
+    C2Error fire();
+
+    /**
+     * Trigger this event from the merging of the supplied fences. This means that it will be
+     * abandoned if any of these fences have been abandoned, and it will be fired if all of these
+     * fences have been signaled.
+     *
+     * \retval C2_OK            the merging was successfully done
+     * \retval C2_NO_MEMORY     not enough memory to perform the merging
+     * \retval C2_ALREADY_EXISTS    the fence has already been merged (caller error)
+     * \retval C2_BAD_STATE     the fence has already been signaled or abandoned (caller error)
+     * \retval C2_NO_PERMISSION no permission to merge the fence (unexpected - system)
+     * \retval C2_CORRUPTED     some unknown error prevented merging the fence(s) (unexpected)
+     */
+    C2Error merge(std::vector<C2Fence> fences);
+
+    /**
+     * Abandons the event and any associated fence(s).
+     * \note Call this to explicitly abandon an event before it is destructed to avoid a warning.
+     *
+     * This has no effect if the event was already signaled or abandoned.
+     *
+     * \retval C2_OK            the fence(s) were successfully signaled
+     * \retval C2_BAD_STATE     the fence(s) have already been signaled or merged (caller error)
+     * \retval C2_ALREADY_EXISTS    the fence(s) have already been abandoned (caller error)
+     * \retval C2_NO_PERMISSION no permission to abandon the fence (unexpected - system)
+     * \retval C2_CORRUPTED     some unknown error prevented signaling the fence(s) (unexpected)
+     */
+    C2Error abandon();
+
+private:
+    class Impl;
+    std::shared_ptr<Impl> mImpl;
+};
+
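Taken together, C2Event and C2Fence mirror a promise/future pair. A hedged usage sketch of the intended flow, assuming both objects are default-constructible as declared above and that this header is on the include path:

#include <C2Buffer.h>

using namespace android;

// Producer side: owns the event, signals it when the data is ready.
void producerDone(C2Event &event) {
    event.fire();  // signals every fence obtained from this event
}

// Consumer side: waits on a fence copied from the event.
bool consumerWait(C2Fence fence) {
    // Wait up to 10 ms; C2_TIMED_OUT means the producer has not fired yet.
    C2Error res = fence.wait(10 * 1000 * 1000);
    return res == C2_OK;
}

void example() {
    C2Event event;                   // the "promise"
    C2Fence fence = event.fence();   // the shareable "future"
    producerDone(event);
    (void)consumerWait(fence);
}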
+/// \addtogroup buf_internal Internal
+/// @{
+
+/**
+ * Interface for objects that encapsulate an updatable error value.
+ */
+struct _C2InnateError {
+    inline C2Error error() const { return mError; }
+
+protected:
+    _C2InnateError(C2Error error) : mError(error) { }
+
+    C2Error mError; // this error is updatable by the object
+};
+
+/// @}
+
+/**
+ * This is a utility template for objects protected by an acquire fence, so that errors during
+ * acquiring the object are propagated to the object itself.
+ */
+template<typename T>
+class C2Acquirable : public C2Fence {
+public:
+    /**
+     * Acquires the object protected by an acquire fence. Any errors during the mapping will be
+     * passed to the object.
+     *
+     * \return acquired object potentially invalidated if waiting for the fence failed.
+     */
+    T get();
+
+protected:
+    C2Acquirable(C2Error error, C2Fence fence, T t) : C2Fence(fence), mInitialError(error), mT(t) { }
+
+private:
+    C2Error mInitialError;
+    T mT; // TODO: move instead of copy
+};
+
+/// @}
+
+/// \defgroup linear Linear Data Blocks
+/// @{
+
+/**************************************************************************************************
+  LINEAR ASPECTS, BLOCKS AND VIEWS
+**************************************************************************************************/
+
+/**
+ * Common aspect for all objects that have a linear capacity.
+ */
+class _C2LinearCapacityAspect {
+/// \name Linear capacity interface
+/// @{
+public:
+    inline uint32_t capacity() const { return mCapacity; }
+
+protected:
+
+#if UINTPTR_MAX == 0xffffffff
+    static_assert(sizeof(size_t) == sizeof(uint32_t), "size_t is too big");
+#else
+    static_assert(sizeof(size_t) > sizeof(uint32_t), "size_t is too small");
+    // explicitly disable construction from size_t
+    inline explicit _C2LinearCapacityAspect(size_t capacity) = delete;
+#endif
+
+    inline explicit _C2LinearCapacityAspect(uint32_t capacity)
+      : mCapacity(capacity) { }
+
+    inline explicit _C2LinearCapacityAspect(const _C2LinearCapacityAspect *parent)
+        : mCapacity(parent == nullptr ? 0 : parent->capacity()) { }
+
+private:
+    const uint32_t mCapacity;
+/// @}
+};
+
+/**
+ * Aspect for objects that have a linear range.
+ *
+ * This class is copiable.
+ */
+class _C2LinearRangeAspect : public _C2LinearCapacityAspect {
+/// \name Linear range interface
+/// @{
+public:
+    inline uint32_t offset() const { return mOffset; }
+    inline uint32_t size() const { return mSize; }
+
+protected:
+    inline explicit _C2LinearRangeAspect(const _C2LinearCapacityAspect *parent)
+        : _C2LinearCapacityAspect(parent),
+          mOffset(0),
+          mSize(capacity()) { }
+
+    inline _C2LinearRangeAspect(const _C2LinearCapacityAspect *parent, size_t offset, size_t size)
+        : _C2LinearCapacityAspect(parent),
+          mOffset(c2_min(offset, capacity())),
+          mSize(c2_min(size, capacity() - mOffset)) { }
+
+    // subsection of the two [offset, offset + size] ranges
+    inline _C2LinearRangeAspect(const _C2LinearRangeAspect *parent, size_t offset, size_t size)
+        : _C2LinearCapacityAspect(parent == nullptr ? 0 : parent->capacity()),
+          mOffset(c2_min(c2_max(offset, parent == nullptr ? 0 : parent->offset()), capacity())),
+          mSize(c2_min(c2_min(size, parent == nullptr ? 0 : parent->size()), capacity() - mOffset)) { }
+
+private:
+    friend class _C2EditableLinearRange;
+    // invariants 0 <= mOffset <= mOffset + mSize <= capacity()
+    uint32_t mOffset;
+    uint32_t mSize;
+/// @}
+};
+
+/**
+ * Aspect for objects that have an editable linear range.
+ *
+ * This class is copiable.
+ */
+class _C2EditableLinearRange : public _C2LinearRangeAspect {
+protected:
+    inline explicit _C2EditableLinearRange(const _C2LinearCapacityAspect *parent)
+        : _C2LinearRangeAspect(parent) { }
+
+    inline _C2EditableLinearRange(const _C2LinearCapacityAspect *parent, size_t offset, size_t size)
+        : _C2LinearRangeAspect(parent, offset, size) { }
+
+    // subsection of the two [offset, offset + size] ranges
+    inline _C2EditableLinearRange(const _C2LinearRangeAspect *parent, size_t offset, size_t size)
+        : _C2LinearRangeAspect(parent, offset, size) { }
+
+/// \name Editable linear range interface
+/// @{
+
+    /**
+     * Sets the offset to |offset|, while trying to keep the end of the buffer unchanged (e.g.
+     * size will grow if offset is decreased, and may shrink if offset is increased.) Returns
+     * true if successful, which is the case iff 0 <= |offset| <= capacity().
+     *
+     * Note: setting offset and size will yield different results depending on the order of the
+     * operations. Always set offset first to ensure proper size.
+     */
+    inline bool setOffset(uint32_t offset) {
+        if (offset > capacity()) {
+            return false;
+        }
+
+        if (offset > mOffset + mSize) {
+            mSize = 0;
+        } else {
+            mSize = mOffset + mSize - offset;
+        }
+        mOffset = offset;
+        return true;
+    }
+    /**
+     * Sets the size to |size|. Returns true if successful, which is the case
+     * iff 0 <= |size| <= capacity() - offset().
+     *
+     * Note: setting offset and size will yield different results depending on the order of the
+     * operations. Always set offset first to ensure proper size.
+     */
+    inline bool setSize(uint32_t size) {
+        if (size > capacity() - mOffset) {
+            return false;
+        } else {
+            mSize = size;
+            return true;
+        }
+    }
+    /**
+     * Sets the offset to |offset| with best effort. Same as setOffset() except that offset will
+     * be clamped to the buffer capacity.
+     *
+     * Note: setting offset and size (even using best effort) will yield different results depending
+     * on the order of the operations. Always set offset first to ensure proper size.
+     */
+    inline void setOffset_be(uint32_t offset) {
+        if (offset > capacity()) {
+            offset = capacity();
+        }
+        if (offset > mOffset + mSize) {
+            mSize = 0;
+        } else {
+            mSize = mOffset + mSize - offset;
+        }
+        mOffset = offset;
+    }
+    /**
+     * Sets the size to |size| with best effort. Same as setSize() except that the selected region
+     * will be clamped to the buffer capacity (e.g. size is clamped to [0, capacity() - offset()]).
+     *
+     * Note: setting offset and size (even using best effort) will yield different results depending
+     * on the order of the operations. Always set offset first to ensure proper size.
+     */
+    inline void setSize_be(uint32_t size) {
+        mSize = std::min(size, capacity() - mOffset);
+    }
+/// @}
+};
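The "always set offset first" note above can be made concrete with a small worked example. The setters are protected, so the two structs below are hypothetical test scaffolding that re-exports them purely for illustration:

#include <C2Buffer.h>
#include <cassert>

using namespace android;

struct TestCapacity : public _C2LinearCapacityAspect {
    explicit TestCapacity(uint32_t capacity) : _C2LinearCapacityAspect(capacity) { }
};

struct TestRange : public _C2EditableLinearRange {
    explicit TestRange(const _C2LinearCapacityAspect *parent)
        : _C2EditableLinearRange(parent) { }
    using _C2EditableLinearRange::setOffset;
    using _C2EditableLinearRange::setSize;
};

void orderingExample() {
    TestCapacity cap(100u);

    // Wrong order: the size set first is clipped when the offset moves.
    TestRange clipped(&cap);      // offset 0, size 100
    clipped.setSize(40u);         // range [0, 40)
    clipped.setOffset(30u);       // range [30, 40) -> size is now 10
    assert(clipped.size() == 10u);

    // Right order: offset first, then size, as the note above recommends.
    TestRange intended(&cap);
    intended.setOffset(30u);      // range [30, 100)
    intended.setSize(40u);        // range [30, 70)
    assert(intended.size() == 40u);
}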
+
+// ================================================================================================
+//  BLOCKS
+// ================================================================================================
+
+/**
+ * Blocks are sections of allocations. They can be either 1D or 2D.
+ */
+
+class C2LinearAllocation;
+
+class C2Block1D : public _C2LinearRangeAspect {
+public:
+    const C2Handle *handle() const;
+
+protected:
+    C2Block1D(std::shared_ptr<C2LinearAllocation> alloc);
+    C2Block1D(std::shared_ptr<C2LinearAllocation> alloc, size_t offset, size_t size);
+
+private:
+    class Impl;
+    std::shared_ptr<Impl> mImpl;
+};
+
+/**
+ * Read view provides read-only access for a linear memory segment.
+ *
+ * This class is copiable.
+ */
+class C2ReadView : public _C2LinearCapacityAspect {
+public:
+    /**
+     * \return pointer to the start of the block or nullptr on error.
+     */
+    const uint8_t *data();
+
+    /**
+     * Returns a portion of this view.
+     *
+     * \param offset  the start offset of the portion. \note This is clamped to the capacity of this
+     *              view.
+     * \param size    the size of the portion. \note This is clamped to the remaining data from offset.
+     *
+     * \return a read view containing a portion of this view
+     */
+    C2ReadView subView(size_t offset, size_t size) const;
+
+    /**
+     * \return error during the creation/mapping of this view.
+     */
+    C2Error error();
+
+private:
+    class Impl;
+    std::shared_ptr<Impl> mImpl;
+};
+
+/**
+ * Write view provides read/write access for a linear memory segment.
+ *
+ * This class is copiable. \todo movable only?
+ */
+class C2WriteView : public _C2EditableLinearRange {
+public:
+    /**
+     * Start of the block.
+     *
+     * \return pointer to the start of the block or nullptr on error.
+     */
+    uint8_t *base();
+
+    /**
+     * \return pointer to the block at the current offset or nullptr on error.
+     */
+    uint8_t *data();
+
+    /**
+     * \return error during the creation/mapping of this view.
+     */
+    C2Error error();
+
+private:
+    class Impl;
+    /// \todo should this be unique_ptr to make this movable only - to avoid inconsistent regions
+    /// between copies.
+    std::shared_ptr<Impl> mImpl;
+};
+
+/**
+ * A constant (read-only) linear block (portion of an allocation) with an acquire fence.
+ * Blocks are unmapped when created, and can be mapped into a read view on demand.
+ *
+ * This class is copiable and contains a reference to the allocation that it is based on.
+ */
+class C2ConstLinearBlock : public C2Block1D {
+public:
+    /**
+     * Maps this block into memory and returns a read view for it.
+     *
+     * \return a read view for this block.
+     */
+    C2Acquirable<C2ReadView> map() const;
+
+    /**
+     * Returns a portion of this block.
+     *
+     * \param offset  the start offset of the portion. \note This is clamped to the capacity of this
+     *              block.
+     * \param size    the size of the portion. \note This is clamped to the remaining data from offset.
+     *
+     * \return a constant linear block containing a portion of this block
+     */
+    C2ConstLinearBlock subBlock(size_t offset, size_t size) const;
+
+    /**
+     * Returns the acquire fence for this block.
+     *
+     * \return a fence that must be waited on before reading the block.
+     */
+    C2Fence fence() const { return mFence; }
+
+private:
+    C2Fence mFence;
+};
+
+/**
+ * Linear block is a writeable 1D block. Once written, it can be shared in whole or in parts with
+ * consumers/readers as read-only const linear block(s).
+ */
+class C2LinearBlock : public C2Block1D {
+public:
+    /**
+     * Maps this block into memory and returns a write view for it.
+     *
+     * \return a write view for this block.
+     */
+    C2Acquirable<C2WriteView> map();
+
+    /**
+     * Creates a read-only const linear block for a portion of this block; optionally protected
+     * by an acquire fence. There are two ways to use this:
+     *
+     * 1) share ready block after writing data into the block. In this case no fence shall be
+     *    supplied, and the block shall not be modified after calling this method.
+     * 2) share block metadata before actually (finishing) writing the data into the block. In
+     *    this case a fence must be supplied that will be triggered when the data is written.
+     *    The block shall be modified only until firing the event for the fence.
+     */
+    C2ConstLinearBlock share(size_t offset, size_t size, C2Fence fence);
+};
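The write/share contract described above boils down to: map the block, write through the view, then publish a read-only sub-range. A hedged sketch of that flow, assuming a C2LinearBlock obtained from an allocator (not shown) and a default-constructed C2Fence standing in for "no fence":

#include <C2Buffer.h>
#include <cstring>

using namespace android;

// Fills a freshly allocated linear block and shares its first |len| bytes.
C2ConstLinearBlock fillAndShare(C2LinearBlock &block, size_t len) {
    C2Acquirable<C2WriteView> acquirable = block.map();
    C2WriteView view = acquirable.get();       // waits on the acquire fence
    if (view.error() == C2_OK && view.size() >= len) {
        memset(view.data(), 0, len);           // write the payload here
    }
    // Scenario 1 from the comment above: the data is final, so no fence is
    // needed (a default C2Fence is assumed here as the "no fence" placeholder).
    return block.share(0 /* offset */, len, C2Fence());
}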
+
+/// @}
+
+/**************************************************************************************************
+  CIRCULAR BLOCKS AND VIEWS
+**************************************************************************************************/
+
+/// \defgroup circular Circular buffer support
+/// @{
+
+/**
+ * Circular blocks can be used to share data between a writer and a reader (and/or other consumers)
+ * in a memory-efficient way by reusing a section of memory. Circular blocks are a bit more complex
+ * than single reader/single writer schemes to facilitate block-based consuming of data.
+ *
+ * They can operate in two modes:
+ *
+ * 1) one writer that creates blocks to be consumed (this model can be used by components)
+ *
+ * 2) one writer that writes continuously, and one reader that can create blocks to be consumed
+ *    by further recipients (this model is used by the framework, and cannot be used by components.)
+ *
+ * Circular blocks have four segments with running pointers:
+ *  - reserved: data reserved and available for the writer
+ *  - committed: data committed by the writer and available to the reader (if present)
+ *  - used: data used by consumers (if present)
+ *  - available: unused data available to be reserved
+ */
+class C2CircularBlock : public C2Block1D {
+    // TODO: add methods
+
+private:
+    size_t mReserved __unused;   // end of reserved section
+    size_t mCommitted __unused;  // end of committed section
+    size_t mUsed __unused;       // end of used section
+    size_t mFree __unused;       // end of free section
+};
+
+class _C2CircularBlockSegment : public _C2LinearCapacityAspect {
+public:
+    /**
+     * Returns the available size for this segment.
+     *
+     * \return currently available size for this segment
+     */
+    size_t available() const;
+
+    /**
+     * Reserve some space for this segment from its current start.
+     *
+     * \param size    desired space in bytes
+     * \param fence   a pointer to an acquire fence. If non-null, the reservation is asynchronous and
+     *              a fence will be stored here that will be signaled when the reservation is
+     *              complete. If null, the reservation is synchronous.
+     *
+     * \retval C2_OK            the space was successfully reserved
+     * \retval C2_NO_MEMORY     the space requested cannot be reserved
+     * \retval C2_TIMED_OUT     the reservation timed out \todo when?
+     * \retval C2_CORRUPTED     some unknown error prevented reserving space. (unexpected)
+     */
+    C2Error reserve(size_t size, C2Fence *fence /* nullable */);
+
+    /**
+     * Abandons a portion of this segment. This will move the beginning of this segment.
+     *
+     * \note This method is only allowed if this segment is producing blocks.
+     *
+     * \param size    number of bytes to abandon
+     *
+     * \retval C2_OK            the data was successfully abandoned
+     * \retval C2_TIMED_OUT     the operation timed out (unexpected)
+     * \retval C2_CORRUPTED     some unknown error prevented abandoning the data (unexpected)
+     */
+    C2Error abandon(size_t size);
+
+    /**
+     * Share a portion as block(s) with consumers (these are moved to the used section).
+     *
+     * \note This method is only allowed if this segment is producing blocks.
+     * \note Share does not move the beginning of the segment. (\todo add abandon/offset?)
+     *
+     * \param size    number of bytes to share
+     * \param fence   fence to be used for the section
+     * \param blocks  list where the blocks of the section are appended to
+     *
+     * \retval C2_OK            the portion was successfully shared
+     * \retval C2_NO_MEMORY     not enough memory to share the portion
+     * \retval C2_TIMED_OUT     the operation timed out (unexpected)
+     * \retval C2_CORRUPTED     some unknown error prevented sharing the data (unexpected)
+     */
+    C2Error share(size_t size, C2Fence fence, std::list<C2ConstLinearBlock> &blocks);
+
+    /**
+     * Returns the beginning offset of this segment from the start of this circular block.
+     *
+     * @return beginning offset
+     */
+    size_t begin();
+
+    /**
+     * Returns the end offset of this segment from the start of this circular block.
+     *
+     * @return end offset
+     */
+    size_t end();
+};
+
+/**
+ * A circular write-view is a dynamic mapped view for a segment of a circular block. Care must be
+ * taken when using this view so that only the section owned by the segment is modified.
+ */
+class C2CircularWriteView : public _C2LinearCapacityAspect {
+public:
+    /**
+     * Start of the circular block.
+     * \note the segment does not own this pointer.
+     *
+     * \return pointer to the start of the circular block or nullptr on error.
+     */
+    uint8_t *base();
+
+    /**
+     * \return error during the creation/mapping of this view.
+     */
+    C2Error error();
+};
+
+/**
+ * The writer of a circular buffer.
+ *
+ * Can commit data to a reader (not supported for components) OR share data blocks directly with a
+ * consumer.
+ *
+ * If a component supports outputting data into circular buffers, it must allocate a circular
+ * block and use a circular writer.
+ */
+class C2CircularWriter : public _C2CircularBlockSegment {
+public:
+    /**
+     * Commits a portion of this segment to the next segment. This moves the beginning of the
+     * segment.
+     *
+     * \param size    number of bytes to commit to the next segment
+     * \param fence   fence used for the commit (the fence must signal before the data is committed)
+     */
+    C2Error commit(size_t size, C2Fence fence);
+
+    /**
+     * Maps this block into memory and returns a write view for it.
+     *
+     * \return a write view for this block.
+     */
+    C2Acquirable<C2CircularWriteView> map();
+};
+
+/// @}
+
+/// \defgroup graphic Graphic Data Blocks
+/// @{
+
+/**
+ * Interface for objects that have a width and height (planar capacity).
+ */
+class _C2PlanarCapacityAspect {
+/// \name Planar capacity interface
+/// @{
+public:
+    inline uint32_t width() const { return mWidth; }
+    inline uint32_t height() const { return mHeight; }
+
+protected:
+    inline _C2PlanarCapacityAspect(uint32_t width, uint32_t height)
+      : mWidth(width), mHeight(height) { }
+
+    inline _C2PlanarCapacityAspect(const _C2PlanarCapacityAspect *parent)
+        : mWidth(parent == nullptr ? 0 : parent->width()),
+          mHeight(parent == nullptr ? 0 : parent->height()) { }
+
+private:
+    const uint32_t mWidth;
+    const uint32_t mHeight;
+/// @}
+};
+
+/**
+ * C2Rect: rectangle type with non-negative coordinates.
+ *
+ * \note This struct has public fields without getters/setters. All methods are inline.
+ */
+struct C2Rect {
+// public:
+    uint32_t mLeft;
+    uint32_t mTop;
+    uint32_t mWidth;
+    uint32_t mHeight;
+
+    inline C2Rect(uint32_t width, uint32_t height)
+        : C2Rect(width, height, 0, 0) { }
+
+    inline C2Rect(uint32_t width, uint32_t height, uint32_t left, uint32_t top)
+        : mLeft(left), mTop(top), mWidth(width), mHeight(height) { }
+
+    // utility methods
+
+    inline bool isEmpty() const {
+        return mWidth == 0 || mHeight == 0;
+    }
+
+    inline bool isValid() const {
+        return mLeft <= ~mWidth && mTop <= ~mHeight;
+    }
+
+    inline operator bool() const {
+        return isValid() && !isEmpty();
+    }
+
+    inline bool operator!() const {
+        return !bool(*this);
+    }
+
+    inline bool contains(const C2Rect &other) const {
+        if (!isValid() || !other.isValid()) {
+            return false;
+        } else if (other.isEmpty()) {
+            return true;
+        } else {
+            return mLeft <= other.mLeft && mTop <= other.mTop
+                    && mLeft + mWidth >= other.mLeft + other.mWidth
+                    && mTop + mHeight >= other.mTop + other.mHeight;
+        }
+    }
+
+    inline bool operator==(const C2Rect &other) const {
+        if (!isValid()) {
+            return !other.isValid();
+        } else if (isEmpty()) {
+            return other.isEmpty();
+        } else {
+            return mLeft == other.mLeft && mTop == other.mTop
+                    && mWidth == other.mWidth && mHeight == other.mHeight;
+        }
+    }
+
+    inline bool operator!=(const C2Rect &other) const {
+        return !operator==(other);
+    }
+
+    inline bool operator>=(const C2Rect &other) const {
+        return contains(other);
+    }
+
+    inline bool operator>(const C2Rect &other) const {
+        return contains(other) && !operator==(other);
+    }
+
+    inline bool operator<=(const C2Rect &other) const {
+        return other.contains(*this);
+    }
+
+    inline bool operator<(const C2Rect &other) const {
+        return other.contains(*this) && !operator==(other);
+    }
+};
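A short usage sketch of the rectangle semantics defined above (width and height come first in the constructor, with an optional left/top origin), assuming this header is on the include path:

#include <C2Buffer.h>
#include <cassert>

using namespace android;

void rectExample() {
    C2Rect full(1920u, 1080u);             // 1920x1080 at (0, 0)
    C2Rect crop(640u, 480u, 100u, 50u);    // 640x480 at (100, 50)

    assert(full.contains(crop));           // crop lies entirely inside full
    assert(crop <= full);                  // operator<= means "is contained by"
    assert(!crop.isEmpty() && crop.isValid());
}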
+
+/**
+ * C2PlaneInfo: information on the layout of flexible planes.
+ *
+ * Public fields without getters/setters.
+ */
+struct C2PlaneInfo {
+// public:
+    enum Channel : uint32_t {
+        Y,
+        R,
+        G,
+        B,
+        A,
+        Cr,
+        Cb,
+    } mChannel;
+
+    int32_t mColInc;               // column increment in bytes. may be negative
+    int32_t mRowInc;               // row increment in bytes. may be negative
+    uint32_t mHorizSubsampling;    // subsampling compared to width
+    uint32_t mVertSubsampling;     // subsampling compared to height
+
+    uint32_t mBitDepth;
+    uint32_t mAllocatedDepth;
+
+    inline ssize_t minOffset(uint32_t width, uint32_t height) {
+        ssize_t offs = 0;
+        if (width > 0 && mColInc < 0) {
+            offs += mColInc * (ssize_t)(width - 1);
+        }
+        if (height > 0 && mRowInc < 0) {
+            offs += mRowInc * (ssize_t)(height - 1);
+        }
+        return offs;
+    }
+
+    inline ssize_t maxOffset(uint32_t width, uint32_t height, uint32_t allocatedDepth) {
+        ssize_t offs = (allocatedDepth + 7) >> 3;
+        if (width > 0 && mColInc > 0) {
+            offs += mColInc * (ssize_t)(width - 1);
+        }
+        if (height > 0 && mRowInc > 0) {
+            offs += mRowInc * (ssize_t)(height - 1);
+        }
+        return offs;
+    }
+};
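To make the offset helpers above concrete, here is a worked example for a hypothetical interleaved 8-bit chroma plane (2-byte column step, 128-byte row stride); all numbers are purely illustrative:

#include <C2Buffer.h>
#include <cassert>

using namespace android;

void planeOffsetExample() {
    C2PlaneInfo cb = {};
    cb.mChannel = C2PlaneInfo::Cb;
    cb.mColInc = 2;              // interleaved with another channel
    cb.mRowInc = 128;            // bytes per row
    cb.mHorizSubsampling = 2;
    cb.mVertSubsampling = 2;
    cb.mBitDepth = 8;
    cb.mAllocatedDepth = 8;

    // For a 32x8 plane the furthest byte touched is
    // 1 (sample size) + 2*(32-1) + 128*(8-1) = 959.
    assert(cb.maxOffset(32, 8, cb.mAllocatedDepth) == 959);
    // All increments are positive, so no sample starts before offset 0.
    assert(cb.minOffset(32, 8) == 0);
}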
+
+struct C2PlaneLayout {
+public:
+    enum Type : uint32_t {
+        MEDIA_IMAGE_TYPE_UNKNOWN = 0,
+        MEDIA_IMAGE_TYPE_YUV = 0x100,
+        MEDIA_IMAGE_TYPE_YUVA,
+        MEDIA_IMAGE_TYPE_RGB,
+        MEDIA_IMAGE_TYPE_RGBA,
+    };
+
+    Type mType;
+    uint32_t mNumPlanes;               // number of planes
+
+    enum PlaneIndex : uint32_t {
+        Y = 0,
+        U = 1,
+        V = 2,
+        R = 0,
+        G = 1,
+        B = 2,
+        A = 3,
+        MAX_NUM_PLANES = 4,
+    };
+
+    C2PlaneInfo mPlanes[MAX_NUM_PLANES];
+};
+
+/**
+ * Aspect for objects that have a planar section (crop rectangle).
+ *
+ * This class is copiable.
+ */
+class _C2PlanarSection : public _C2PlanarCapacityAspect {
+/// \name Planar section interface
+/// @{
+public:
+    // crop can be an empty rect, does not have to line up with subsampling
+    // NOTE: we do not support floating-point crop
+    inline const C2Rect crop() { return mCrop; }
+
+    /**
+     *  Sets crop to crop intersected with [(0,0) .. (width, height)]
+     */
+    inline void setCrop_be(const C2Rect &crop);
+
+    /**
+     * If crop is within the dimensions of this object, it sets crop to it.
+     *
+     * \return true iff crop is within the dimensions of this object
+     */
+    inline bool setCrop(const C2Rect &crop);
+
+private:
+    C2Rect mCrop;
+/// @}
+};
+
+class C2Block2D : public _C2PlanarSection {
+public:
+    const C2Handle *handle() const;
+
+private:
+    class Impl;
+    std::shared_ptr<Impl> mImpl;
+};
+
+/**
+ * Graphic view provides read or read-write access for a graphic block.
+ *
+ * This class is copiable.
+ *
+ * \note Due to the subsampling of graphic buffers, a read view must still contain a crop rectangle
+ * to ensure subsampling is followed. This results in nearly identical interface between read and
+ * write views, so C2GraphicView can encompass both of them.
+ */
+class C2GraphicView : public _C2PlanarSection {
+public:
+    /**
+     * \return pointer to the start of the block or nullptr on error.
+     */
+    const uint8_t *data() const;
+
+    /**
+     * \return pointer to the start of the block or nullptr on error.
+     */
+    uint8_t *data();
+
+    /**
+     * Returns a section of this view.
+     *
+     * \param rect    the dimension of the section. \note This is clamped to the crop of this view.
+     *
+     * \return a read view containing the requested section of this view
+     */
+    const C2GraphicView subView(const C2Rect &rect) const;
+    C2GraphicView subView(const C2Rect &rect);
+
+    /**
+     * \return error during the creation/mapping of this view.
+     */
+    C2Error error() const;
+
+private:
+    class Impl;
+    std::shared_ptr<Impl> mImpl;
+};
+
+/**
+ * A constant (read-only) graphic block (portion of an allocation) with an acquire fence.
+ * Blocks are unmapped when created, and can be mapped into a read view on demand.
+ *
+ * This class is copiable and contains a reference to the allocation that it is based on.
+ */
+class C2ConstGraphicBlock : public C2Block2D {
+public:
+    /**
+     * Maps this block into memory and returns a read view for it.
+     *
+     * \return a read view for this block.
+     */
+    C2Acquirable<const C2GraphicView> map() const;
+
+    /**
+     * Returns a section of this block.
+     *
+     * \param rect    the coordinates of the section. \note This is clamped to the crop rectangle of
+     *              this block.
+     *
+     * \return a constant graphic block containing a portion of this block
+     */
+    C2ConstGraphicBlock subBlock(const C2Rect &rect) const;
+
+    /**
+     * Returns the acquire fence for this block.
+     *
+     * \return a fence that must be waited on before reading the block.
+     */
+    C2Fence fence() const { return mFence; }
+
+private:
+    C2Fence mFence;
+};
+
+/**
+ * Graphic block is a writeable 2D block. Once written, it can be shared in whole or in part with
+ * consumers/readers as read-only const graphic block.
+ */
+class C2GraphicBlock : public C2Block2D {
+public:
+    /**
+     * Maps this block into memory and returns a write view for it.
+     *
+     * \return a write view for this block.
+     */
+    C2Acquirable<C2GraphicView> map();
+
+    /**
+     * Creates a read-only const graphic block for a portion of this block; optionally protected
+     * by an acquire fence. There are two ways to use this:
+     *
+     * 1) share ready block after writing data into the block. In this case no fence shall be
+     *    supplied, and the block shall not be modified after calling this method.
+     * 2) share block metadata before actually (finishing) writing the data into the block. In
+     *    this case a fence must be supplied that will be triggered when the data is written.
+     *    The block shall be modified only until firing the event for the fence.
+     */
+    C2ConstGraphicBlock share(const C2Rect &crop, C2Fence fence);
+};
+
+/// @}
+
+/// \defgroup buffer_obj Buffer objects
+/// @{
+
+// ================================================================================================
+//  BUFFERS
+// ================================================================================================
+
+/// \todo: Do we still need this?
+///
+// There are 2 kinds of buffers: linear or graphic. Linear buffers can contain a single block, or
+// a list of blocks (LINEAR_CHUNKS). Support for list of blocks is optional, and can allow consuming
+// data from circular buffers or scattered data sources without extra memcpy. Currently, list of
+// graphic blocks is not supported.
+
+class C2LinearBuffer;   // read-write buffer
+class C2GraphicBuffer;  // read-write buffer
+class C2LinearChunksBuffer;
+
+/**
+ * C2BufferData: the main, non-meta data of a buffer. A buffer can contain either linear blocks
+ * or graphic blocks, and can contain either a single block or multiple blocks. This is determined
+ * by its type.
+ */
+class C2BufferData {
+public:
+    /**
+     *  The type of buffer data.
+     */
+    enum Type : uint32_t {
+        LINEAR,             ///< the buffer contains a single linear block
+        LINEAR_CHUNKS,      ///< the buffer contains one or more linear blocks
+        GRAPHIC,            ///< the buffer contains a single graphic block
+        GRAPHIC_CHUNKS,     ///< the buffer contains one or more graphic blocks
+    };
+
+    /**
+     * Gets the type of this buffer (data).
+     * \return the type of this buffer data.
+     */
+    Type type() const;
+
+    /**
+     * Gets the linear blocks of this buffer.
+     * \return a constant list of const linear blocks of this buffer.
+     * \retval empty list if this buffer does not contain linear block(s).
+     */
+    const std::list<C2ConstLinearBlock> linearBlocks() const;
+
+    /**
+     * Gets the graphic blocks of this buffer.
+     * \return a constant list of const graphic blocks of this buffer.
+     * \retval empty list if this buffer does not contain graphic block(s).
+     */
+    const std::list<C2ConstGraphicBlock> graphicBlocks() const;
+
+private:
+    class Impl;
+    std::shared_ptr<Impl> mImpl;
+
+protected:
+    // no public constructor
+    // C2BufferData(const std::shared_ptr<const Impl> &impl) : mImpl(impl) {}
+};
+
+/**
+ * C2Buffer: buffer base class. These are always used as shared_ptrs. Though the underlying buffer
+ * objects (native buffers, ion buffers, or dmabufs) are reference-counted by the system,
+ * C2Buffers hold only a single reference.
+ *
+ * These objects cannot be used on the stack.
+ */
+class C2Buffer {
+public:
+    /**
+     * Gets the buffer's data.
+     *
+     * \return the buffer's data.
+     */
+    const C2BufferData data() const;
+
+    /**
+     * These will still work if used in onDeathNotify.
+     */
+#if 0
+    inline std::shared_ptr<C2LinearBuffer> asLinearBuffer() const {
+        return mType == LINEAR ? std::shared_ptr::reinterpret_cast<C2LinearBuffer>(this) : nullptr;
+    }
+
+    inline std::shared_ptr<C2GraphicBuffer> asGraphicBuffer() const {
+        return mType == GRAPHIC ? std::shared_ptr::reinterpret_cast<C2GraphicBuffer>(this) : nullptr;
+    }
+
+    inline std::shared_ptr<C2CircularBuffer> asCircularBuffer() const {
+        return mType == CIRCULAR ? std::shared_ptr::reinterpret_cast<C2CircularBuffer>(this) : nullptr;
+    }
+#endif
+
+    ///@name Pre-destroy notification handling
+    ///@{
+
+    /**
+     * Register for notification just prior to the destruction of this object.
+     */
+    typedef void (*OnDestroyNotify) (const C2Buffer *buf, void *arg);
+
+    /**
+     * Registers for a pre-destroy notification. This is called just prior to the destruction of
+     * this buffer (when this buffer is no longer valid.)
+     *
+     * \param onDestroyNotify   the notification callback
+     * \param arg               an arbitrary parameter passed to the callback
+     *
+     * \retval C2_OK        the registration was successful.
+     * \retval C2_DUPLICATE a notification was already registered for this callback and argument
+     * \retval C2_NO_MEMORY not enough memory to register for this callback
+     * \retval C2_CORRUPTED an unknown error prevented the registration (unexpected)
+     */
+    C2Error registerOnDestroyNotify(OnDestroyNotify *onDestroyNotify, void *arg = nullptr);
+
+    /**
+     * Unregisters a previously registered pre-destroy notification.
+     *
+     * \param onDestroyNotify   the notification callback
+     * \param arg               an arbitrary parameter passed to the callback
+     *
+     * \retval C2_OK        the unregistration was successful.
+     * \retval C2_NOT_FOUND the notification was not found
+     * \retval C2_CORRUPTED an unknown error prevented the registration (unexpected)
+     */
+    C2Error unregisterOnDestroyNotify(OnDestroyNotify *onDestroyNotify, void *arg = nullptr);
+
+    ///@}
+
+    virtual ~C2Buffer() = default;
+
+    ///@name Buffer-specific arbitrary metadata handling
+    ///@{
+
+    /**
+     * Gets the list of metadata associated with this buffer.
+     *
+     * \return a constant list of info objects associated with this buffer.
+     */
+    const std::list<std::shared_ptr<const C2Info>> infos() const;
+
+    /**
+     * Attaches (or updates) an (existing) metadata for this buffer.
+     * If the metadata is stream specific, the stream information will be reset.
+     *
+     * \param info Metadata to update
+     *
+     * \retval C2_OK        the metadata was successfully attached/updated.
+     * \retval C2_NO_MEMORY not enough memory to attach the metadata (this return value is not
+     *                      used if the same kind of metadata is already attached to the buffer).
+     */
+    C2Error setInfo(const std::shared_ptr<C2Info> &info);
+
+    /**
+     * Checks if there is a certain type of metadata attached to this buffer.
+     *
+     * \param index the parameter type of the metadata
+     *
+     * \return true iff there is a metadata with the parameter type attached to this buffer.
+     */
+    bool hasInfo(C2Param::Type index) const;
+    std::shared_ptr<C2Info> removeInfo(C2Param::Type index) const;
+    ///@}
+
+protected:
+    // no public constructor
+    inline C2Buffer() = default;
+
+private:
+//    Type _mType;
+};
+
+/**
+ * An extension of C2Info objects that can contain arbitrary buffer data.
+ *
+ * \note This object is not describable and contains opaque data.
+ */
+class C2InfoBuffer {
+public:
+    /**
+     * Gets the index of this info object.
+     *
+     * \return the parameter index.
+     */
+    const C2Param::Index index() const;
+
+    /**
+     * Gets the buffer's data.
+     *
+     * \return the buffer's data.
+     */
+    const C2BufferData data() const;
+};
+
+/// @}
+
+/**************************************************************************************************
+  ALLOCATIONS
+**************************************************************************************************/
+
+/// \defgroup allocator Allocation and memory placement
+/// @{
+
+/**
+ * Buffer/memory usage bits. These are used by the allocators to select optimal memory type/pool and
+ * buffer layout.
+ *
+ * \note This struct has public fields without getters/setters. All methods are inline.
+ */
+struct C2MemoryUsage {
+// public:
+    // TODO: match these to gralloc1.h
+    enum Consumer : uint64_t {
+        kSoftwareRead        = GRALLOC_USAGE_SW_READ_OFTEN,
+        kRenderScriptRead    = GRALLOC_USAGE_RENDERSCRIPT,
+        kTextureRead         = GRALLOC_USAGE_HW_TEXTURE,
+        kHardwareComposer    = GRALLOC_USAGE_HW_COMPOSER,
+        kHardwareEncoder     = GRALLOC_USAGE_HW_VIDEO_ENCODER,
+        kProtectedRead       = GRALLOC_USAGE_PROTECTED,
+    };
+
+    enum Producer : uint64_t {
+        kSoftwareWrite       = GRALLOC_USAGE_SW_WRITE_OFTEN,
+        kRenderScriptWrite   = GRALLOC_USAGE_RENDERSCRIPT,
+        kTextureWrite        = GRALLOC_USAGE_HW_RENDER,
+        kCompositionTarget   = GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_RENDER,
+        kHardwareDecoder     = GRALLOC_USAGE_HW_VIDEO_ENCODER,
+        kProtectedWrite      = GRALLOC_USAGE_PROTECTED,
+    };
+
+    uint64_t mConsumer; // e.g. input
+    uint64_t mProducer; // e.g. output
+};
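A small sketch of filling in the usage bits above for a CPU-only producer/consumer pair, which a client would then pass as the usage argument of an allocation's map() (see C2LinearAllocation below):

#include <C2Buffer.h>

using namespace android;

// CPU-only usage: the producer writes with the CPU, the consumer reads with it.
C2MemoryUsage softwareOnlyUsage() {
    C2MemoryUsage usage;
    usage.mConsumer = C2MemoryUsage::kSoftwareRead;
    usage.mProducer = C2MemoryUsage::kSoftwareWrite;
    return usage;
}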
+
+/**
+ * \ingroup linear allocator
+ * 1D allocation interface.
+ */
+class C2LinearAllocation : public _C2LinearCapacityAspect {
+public:
+    /**
+     * Maps a portion of an allocation starting from |offset| with |size| into local process memory.
+     * Stores the starting address into |addr|, or NULL if the operation was unsuccessful.
+     * |fenceFd| is a file descriptor referring to an acquire sync fence object, or -1 if it is
+     * already safe to access the buffer contents.
+     *
+     * \param offset          starting position of the portion to be mapped (this does not have to
+     *                      be page aligned)
+     * \param size            size of the portion to be mapped (this does not have to be page
+     *                      aligned)
+     * \param usage           the desired usage. \todo this must be kSoftwareRead and/or
+     *                      kSoftwareWrite.
+     * \param fenceFd         a pointer to a file descriptor if an async mapping is requested. If
+     *                      not-null, an acquire fence FD will be stored here on success, or -1
+     *                      on failure. If null, the mapping will be synchronous.
+     * \param addr            a pointer to where the starting address of the mapped portion will be
+     *                      stored. On failure, nullptr will be stored here.
+     *
+     * \todo Only one portion can be mapped at the same time - this is true for gralloc, but there
+     *       is no need for this for 1D buffers.
+     * \todo Do we need to support sync operation as we could just wait for the fence?
+     *
+     * \retval C2_OK        the operation was successful
+     * \retval C2_NO_PERMISSION no permission to map the portion
+     * \retval C2_TIMED_OUT the operation timed out
+     * \retval C2_NO_MEMORY not enough memory to complete the operation
+     * \retval C2_BAD_VALUE the parameters (offset/size) are invalid or outside the allocation, or
+     *                      the usage flags are invalid (caller error)
+     * \retval C2_CORRUPTED some unknown error prevented the operation from completing (unexpected)
+     */
+    virtual C2Error map(
+            size_t offset, size_t size, C2MemoryUsage usage, int *fenceFd /* nullable */,
+            void **addr /* nonnull */) = 0;
+
+    /**
+     * Unmaps a portion of an allocation at |addr| with |size|. These must be parameters previously
+     * passed to |map|; otherwise, this operation is a no-op.
+     *
+     * \param addr            starting address of the mapped region
+     * \param size            size of the mapped region
+     * \param fenceFd         a pointer to a file descriptor if an async unmapping is requested. If
+     *                      not-null, a release fence FD will be stored here on success, or -1
+     *                      on failure. This fence signals when the original allocation contains
+     *                      any changes that happened to the mapped region. If null, the unmapping
+     *                      will be synchronous.
+     *
+     * \retval C2_OK        the operation was successful
+     * \retval C2_TIMED_OUT the operation timed out
+     * \retval C2_BAD_VALUE the parameters (addr/size) do not correspond to previously mapped
+     *                      regions (caller error)
+     * \retval C2_CORRUPTED some unknown error prevented the operation from completing (unexpected)
+     * \retval C2_NO_PERMISSION no permission to unmap the portion (unexpected - system)
+     */
+    virtual C2Error unmap(void *addr, size_t size, int *fenceFd /* nullable */) = 0;
+
+    /**
+     * Returns true if this is a valid allocation.
+     *
+     * \todo remove?
+     */
+    virtual bool isValid() const = 0;
+
+    /**
+     * Returns a pointer to the allocation handle.
+     */
+    virtual const C2Handle *handle() const = 0;
+
+    /**
+     * Returns true if this is the same allocation as |other|.
+     */
+    virtual bool equals(const std::shared_ptr<C2LinearAllocation> &other) const = 0;
+
+protected:
+    // \todo should we limit allocation directly?
+    C2LinearAllocation(size_t capacity) : _C2LinearCapacityAspect(c2_min(capacity, UINT32_MAX)) {}
+    virtual ~C2LinearAllocation() = default;
+};
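+
+/*
+ * A minimal sketch of mapping a linear allocation synchronously for software writing (illustrative
+ * only): |alloc| is a hypothetical allocation obtained from an allocator; passing nullptr for
+ * |fenceFd| requests a synchronous map/unmap as documented above.
+ *
+ *     std::shared_ptr<C2LinearAllocation> alloc = ...;
+ *     C2MemoryUsage usage = { 0, C2MemoryUsage::kSoftwareWrite };
+ *     void *addr = nullptr;
+ *     if (alloc->map(0, 4096, usage, nullptr, &addr) == C2_OK) {  // offset 0, size 4096
+ *         // write up to 4096 bytes starting at |addr| ...
+ *         alloc->unmap(addr, 4096, nullptr);                      // synchronous unmap
+ *     }
+ */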
+
+/**
+ * \ingroup graphic allocator
+ * 2D allocation interface.
+ */
+class C2GraphicAllocation : public _C2PlanarCapacityAspect {
+public:
+    /**
+     * Maps a rectangular section (as defined by |rect|) of a 2D allocation into local process
+     * memory for flexible access. On success, it fills out |layout| with the plane specifications
+     * and fills the |addr| array with pointers to the first byte of the top-left pixel of each
+     * plane used. Otherwise, it leaves |layout| and |addr| untouched. |fenceFd| is a file
+     * descriptor referring to an acquire sync fence object, or -1 if it is already safe to access
+     * the buffer contents.
+     *
+     * \note Only one portion of the graphic allocation can be mapped at the same time. (This is
+     * a gralloc1 limitation.)
+     *
+     * \param rect            section to be mapped (this does not have to be aligned)
+     * \param usage           the desired usage. \todo this must be kSoftwareRead and/or
+     *                      kSoftwareWrite.
+     * \param fenceFd         a pointer to a file descriptor if an async mapping is requested. If
+     *                      not-null, an acquire fence FD will be stored here on success, or -1
+     *                      on failure. If null, the mapping will be synchronous.
+     * \param layout          a pointer to where the mapped planes' descriptors will be
+     *                      stored. On failure, nullptr will be stored here.
+     *
+     * \todo Do we need to support sync operation as we could just wait for the fence?
+     *
+     * \retval C2_OK        the operation was successful
+     * \retval C2_NO_PERMISSION no permission to map the section
+     * \retval C2_ALREADY_EXISTS there is already a mapped region (caller error)
+     * \retval C2_TIMED_OUT the operation timed out
+     * \retval C2_NO_MEMORY not enough memory to complete the operation
+     * \retval C2_BAD_VALUE the parameters (rect) are invalid or outside the allocation, or the
+     *                      usage flags are invalid (caller error)
+     * \retval C2_CORRUPTED some unknown error prevented the operation from completing (unexpected)
+     */
+    virtual C2Error map(
+            C2Rect rect, C2MemoryUsage usage, int *fenceFd,
+            // TODO: return <addr, size> buffers with plane sizes
+            C2PlaneLayout *layout /* nonnull */, uint8_t **addr /* nonnull */) = 0;
+
+    /**
+     * Unmaps the last mapped rectangular section.
+     *
+     * \param fenceFd         a pointer to a file descriptor if an async unmapping is requested. If
+     *                      not-null, a release fence FD will be stored here on success, or -1
+     *                      on failure. This fence signals when the original allocation contains
+     *                      any changes that happened to the mapped section. If null, the unmapping
+     *                      will be synchronous.
+     *
+     * \retval C2_OK        the operation was successful
+     * \retval C2_TIMED_OUT the operation timed out
+     * \retval C2_NOT_FOUND there is no mapped region (caller error)
+     * \retval C2_CORRUPTED some unknown error prevented the operation from completing (unexpected)
+     * \retval C2_NO_PERMISSION no permission to unmap the section (unexpected - system)
+     */
+    virtual C2Error unmap(C2Fence *fenceFd /* nullable */) = 0;
+
+    /**
+     * Returns true if this is a valid allocation.
+     *
+     * \todo remove?
+     */
+    virtual bool isValid() const = 0;
+
+    /**
+     * Returns a pointer to the allocation handle.
+     */
+    virtual const C2Handle *handle() const = 0;
+
+    /**
+     * Returns true if this is the same allocation as |other|.
+     */
+    virtual bool equals(const std::shared_ptr<const C2GraphicAllocation> &other) = 0;
+
+protected:
+    virtual ~C2GraphicAllocation();
+};
+
+/**
+ *  Allocators are used by the framework to allocate memory (allocations) for buffers. They can
+ *  support either 1D or 2D allocations.
+ *
+ *  \note In theory they could support both, but in practice, we will use only one or the other.
+ *
+ *  Never constructed on stack.
+ *
+ *  Allocators are provided by vendors.
+ */
+class C2Allocator {
+public:
+    /**
+     * Allocates a 1D allocation of given |capacity| and |usage|. If successful, the allocation is
+     * stored in |allocation|. Otherwise, |allocation| is set to 'nullptr'.
+     *
+     * \param capacity        the size of requested allocation (the allocation could be slightly
+     *                      larger, e.g. to account for any system-required alignment)
+     * \param usage           the memory usage info for the requested allocation. \note that the
+     *                      returned allocation may be later used/mapped with different usage.
+     *                      The allocator should lay out the buffer to be optimized for this usage,
+     *                      but must support any usage. One exception: protected buffers can
+     *                      only be used in a protected scenario.
+     * \param allocation      pointer to where the allocation shall be stored on success. nullptr
+     *                      will be stored here on failure
+     *
+     * \retval C2_OK        the allocation was successful
+     * \retval C2_NO_MEMORY not enough memory to complete the allocation
+     * \retval C2_TIMED_OUT the allocation timed out
+     * \retval C2_NO_PERMISSION     no permission to complete the allocation
+     * \retval C2_BAD_VALUE capacity or usage are not supported (invalid) (caller error)
+     * \retval C2_UNSUPPORTED       this allocator does not support 1D allocations
+     * \retval C2_CORRUPTED some unknown, unrecoverable error occurred during allocation (unexpected)
+     */
+    virtual C2Error allocateLinearBuffer(
+            uint32_t capacity __unused, C2MemoryUsage usage __unused,
+            std::shared_ptr<C2LinearAllocation> *allocation /* nonnull */) {
+        *allocation = nullptr;
+        return C2_UNSUPPORTED;
+    }
+
+    /**
+     * (Re)creates a 1D allocation from a native |handle|. If successful, the allocation is stored
+     * in |allocation|. Otherwise, |allocation| is set to 'nullptr'.
+     *
+     * \param handle      the handle for the existing allocation
+     * \param allocation  pointer to where the allocation shall be stored on success. nullptr
+     *                  will be stored here on failure
+     *
+     * \retval C2_OK        the allocation was recreated successfully
+     * \retval C2_NO_MEMORY not enough memory to recreate the allocation
+     * \retval C2_TIMED_OUT the recreation timed out (unexpected)
+     * \retval C2_NO_PERMISSION     no permission to recreate the allocation
+     * \retval C2_BAD_VALUE invalid handle (caller error)
+     * \retval C2_UNSUPPORTED       this allocator does not support 1D allocations
+     * \retval C2_CORRUPTED some unknown, unrecoverable error occurred during recreation (unexpected)
+     */
+    virtual C2Error recreateLinearBuffer(
+            const C2Handle *handle __unused,
+            std::shared_ptr<C2LinearAllocation> *allocation /* nonnull */) {
+        *allocation = nullptr;
+        return C2_UNSUPPORTED;
+    }
+
+    /**
+     * Allocates a 2D allocation of given |width|, |height|, |format| and |usage|. If successful,
+     * the allocation is stored in |allocation|. Otherwise, |allocation| is set to 'nullptr'.
+     *
+     * \param width           the width of requested allocation (the allocation could be slightly
+     *                      larger, e.g. to account for any system-required alignment)
+     * \param height          the height of requested allocation (the allocation could be slightly
+     *                      larger, e.g. to account for any system-required alignment)
+     * \param format          the pixel format of requested allocation. This could be a vendor
+     *                      specific format.
+     * \param usage           the memory usage info for the requested allocation. \note that the
+     *                      returned allocation may be later used/mapped with different usage.
+     *                      The allocator should lay out the buffer to be optimized for this usage,
+     *                      but must support any usage. One exception: protected buffers can
+     *                      only be used in a protected scenario.
+     * \param allocation      pointer to where the allocation shall be stored on success. nullptr
+     *                      will be stored here on failure
+     *
+     * \retval C2_OK        the allocation was successful
+     * \retval C2_NO_MEMORY not enough memory to complete the allocation
+     * \retval C2_TIMED_OUT the allocation timed out
+     * \retval C2_NO_PERMISSION     no permission to complete the allocation
+     * \retval C2_BAD_VALUE width, height, format or usage are not supported (invalid) (caller error)
+     * \retval C2_UNSUPPORTED       this allocator does not support 2D allocations
+     * \retval C2_CORRUPTED some unknown, unrecoverable error occurred during allocation (unexpected)
+     */
+    virtual C2Error allocateGraphicBuffer(
+            uint32_t width __unused, uint32_t height __unused, uint32_t format __unused,
+            C2MemoryUsage usage __unused,
+            std::shared_ptr<C2GraphicAllocation> *allocation /* nonnull */) {
+        *allocation = nullptr;
+        return C2_UNSUPPORTED;
+    }
+
+    /**
+     * (Re)creates a 2D allocation from a native handle.  If successful, the allocation is stored
+     * in |allocation|. Otherwise, |allocation| is set to 'nullptr'.
+     *
+     * \param handle      the handle for the existing allocation
+     * \param allocation  pointer to where the allocation shall be stored on success. nullptr
+     *                  will be stored here on failure
+     *
+     * \retval C2_OK        the allocation was recreated successfully
+     * \retval C2_NO_MEMORY not enough memory to recreate the allocation
+     * \retval C2_TIMED_OUT the recreation timed out (unexpected)
+     * \retval C2_NO_PERMISSION     no permission to recreate the allocation
+     * \retval C2_BAD_VALUE invalid handle (caller error)
+     * \retval C2_UNSUPPORTED       this allocator does not support 2D allocations
+     * \retval C2_CORRUPTED some unknown, unrecoverable error occurred during recreation (unexpected)
+     */
+    virtual C2Error recreateGraphicBuffer(
+            const C2Handle *handle __unused,
+            std::shared_ptr<C2GraphicAllocation> *allocation /* nonnull */) {
+        *allocation = nullptr;
+        return C2_UNSUPPORTED;
+    }
+
+protected:
+    C2Allocator() = default;
+
+    virtual ~C2Allocator() = default;
+};
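+
+/*
+ * A minimal sketch of requesting a 1D allocation from an allocator (illustrative only): |allocator|
+ * is hypothetical; a real instance would come from a C2AllocatorStore.
+ *
+ *     std::shared_ptr<C2Allocator> allocator = ...;
+ *     C2MemoryUsage usage = { C2MemoryUsage::kSoftwareRead, C2MemoryUsage::kSoftwareWrite };
+ *     std::shared_ptr<C2LinearAllocation> allocation;
+ *     C2Error err = allocator->allocateLinearBuffer(1024, usage, &allocation);
+ *     if (err != C2_OK) {
+ *         // |allocation| is nullptr here; C2_UNSUPPORTED means this allocator is 2D-only
+ *     }
+ */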
+
+/**
+ *  Block allocators are used by components to allocate memory for output buffers. They can
+ *  support either linear (1D), circular (1D) or graphic (2D) allocations.
+ *
+ *  Never constructed on stack.
+ *
+ *  Block allocators are provided by the framework.
+ */
+class C2BlockAllocator {
+public:
+    /**
+     * Allocates a linear writeable block of given |capacity| and |usage|. If successful, the
+     * block is stored in |block|. Otherwise, |block| is set to 'nullptr'.
+     *
+     * \param capacity        the size of requested block.
+     * \param usage           the memory usage info for the requested allocation. \note that the
+     *                      returned allocation may be later used/mapped with different usage.
+     *                      The allocator shall lay out the buffer to be optimized for this usage,
+     *                      but must support any usage. One exception: protected buffers can
+     *                      only be used in a protected scenario.
+     * \param block      pointer to where the allocated block shall be stored on success. nullptr
+     *                      will be stored here on failure
+     *
+     * \retval C2_OK        the allocation was successful
+     * \retval C2_NO_MEMORY not enough memory to complete the allocation
+     * \retval C2_TIMED_OUT the allocation timed out
+     * \retval C2_NO_PERMISSION     no permission to complete the allocation
+     * \retval C2_BAD_VALUE capacity or usage are not supported (invalid) (caller error)
+     * \retval C2_UNSUPPORTED       this allocator does not support linear allocations
+     * \retval C2_CORRUPTED some unknown, unrecoverable error occurred during allocation (unexpected)
+     */
+    virtual C2Error allocateLinearBlock(
+            uint32_t capacity __unused, C2MemoryUsage usage __unused,
+            std::shared_ptr<C2LinearBlock> *block /* nonnull */) {
+        *block = nullptr;
+        return C2_UNSUPPORTED;
+    }
+
+    /**
+     * Allocates a circular writeable block of given |capacity| and |usage|. If successful, the
+     * block is stored in |block|. Otherwise, |block| is set to 'nullptr'.
+     *
+     * \param capacity        the size of requested circular block. (the allocation could be slightly
+     *                      larger, e.g. to account for any system-required alignment)
+     * \param usage           the memory usage info for the requested allocation. \note that the
+     *                      returned allocation may be later used/mapped with different usage.
+     *                      The allocator shall lay out the buffer to be optimized for this usage,
+     *                      but must support any usage. One exception: protected buffers can
+     *                      only be used in a protected scenario.
+     * \param block      pointer to where the allocated block shall be stored on success. nullptr
+     *                      will be stored here on failure
+     *
+     * \retval C2_OK            the allocation was successful
+     * \retval C2_NO_MEMORY     not enough memory to complete the allocation
+     * \retval C2_TIMED_OUT     the allocation timed out
+     * \retval C2_NO_PERMISSION     no permission to complete the allocation
+     * \retval C2_BAD_VALUE     capacity or usage are not supported (invalid) (caller error)
+     * \retval C2_UNSUPPORTED   this allocator does not support circular allocations
+     * \retval C2_CORRUPTED     some unknown, unrecoverable error occurred during allocation (unexpected)
+     */
+    virtual C2Error allocateCircularBlock(
+            uint32_t capacity __unused, C2MemoryUsage usage __unused,
+            std::shared_ptr<C2CircularBlock> *block /* nonnull */) {
+        *block = nullptr;
+        return C2_UNSUPPORTED;
+    }
+
+    /**
+     * Allocates a 2D graphic block of given |width|, |height|, |format| and |usage|. If successful,
+     * the allocation is stored in |block|. Otherwise, |block| is set to 'nullptr'.
+     *
+     * \param width           the width of requested allocation (the allocation could be slightly
+     *                      larger, e.g. to account for any system-required alignment)
+     * \param height          the height of requested allocation (the allocation could be slightly
+     *                      larger, e.g. to account for any system-required alignment)
+     * \param format          the pixel format of requested allocation. This could be a vendor
+     *                      specific format.
+     * \param usage           the memory usage info for the requested allocation. \note that the
+     *                      returned allocation may be later used/mapped with different usage.
+     *                      The allocator should lay out the buffer to be optimized for this usage,
+     *                      but must support any usage. One exception: protected buffers can
+     *                      only be used in a protected scenario.
+     * \param block      pointer to where the allocation shall be stored on success. nullptr
+     *                      will be stored here on failure
+     *
+     * \retval C2_OK            the allocation was successful
+     * \retval C2_NO_MEMORY     not enough memory to complete the allocation
+     * \retval C2_TIMED_OUT     the allocation timed out
+     * \retval C2_NO_PERMISSION     no permission to complete the allocation
+     * \retval C2_BAD_VALUE     width, height, format or usage are not supported (invalid) (caller error)
+     * \retval C2_UNSUPPORTED   this allocator does not support 2D allocations
+     * \retval C2_CORRUPTED     some unknown, unrecoverable error occurred during allocation (unexpected)
+     */
+    virtual C2Error allocateGraphicBlock(
+            uint32_t width __unused, uint32_t height __unused, uint32_t format __unused,
+            C2MemoryUsage usage __unused,
+            std::shared_ptr<C2GraphicBlock> *block /* nonnull */) {
+        *block = nullptr;
+        return C2_UNSUPPORTED;
+    }
+
+protected:
+    C2BlockAllocator() = default;
+
+    virtual ~C2BlockAllocator() = default;
+};
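+
+/*
+ * A minimal sketch of requesting output blocks from a block allocator (illustrative only):
+ * |blockAllocator| is hypothetical; in practice the framework provides one to the component.
+ *
+ *     std::shared_ptr<C2BlockAllocator> blockAllocator = ...;
+ *     C2MemoryUsage usage = { C2MemoryUsage::kSoftwareRead, C2MemoryUsage::kSoftwareWrite };
+ *     std::shared_ptr<C2LinearBlock> block;
+ *     if (blockAllocator->allocateLinearBlock(64 * 1024, usage, &block) != C2_OK) {
+ *         // |block| is nullptr; fall back or report the error
+ *     }
+ */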
+
+/// @}
+
+/// \cond INTERNAL
+
+/// \todo These are no longer used
+
+/// \addtogroup linear
+/// @{
+
+/** \deprecated */
+class C2LinearBuffer
+    : public C2Buffer, public _C2LinearRangeAspect,
+      public std::enable_shared_from_this<C2LinearBuffer> {
+public:
+    /** \todo what is this? */
+    const C2Handle *handle() const;
+
+protected:
+    inline C2LinearBuffer(const C2ConstLinearBlock &block);
+
+private:
+    class Impl;
+    Impl *mImpl;
+};
+
+class C2ReadCursor;
+
+class C2WriteCursor {
+public:
+    uint32_t remaining() const; // remaining data to be read
+    void commit(); // commits the current position. discard data before current position
+    void reset() const;  // resets position to the last committed position
+    // slices off at most |size| bytes, and moves cursor ahead by the number of bytes
+    // sliced off.
+    C2ReadCursor slice(uint32_t size) const;
+    // reserves at most |size| bytes for writing, and moves the cursor ahead by the number of
+    // bytes reserved.
+    C2WriteCursor reserve(uint32_t size);
+    // bool read(T&);
+    // bool write(T&);
+    C2Fence waitForSpace(uint32_t size);
+};
+
+/// @}
+
+/// \addtogroup graphic
+/// @{
+
+struct C2ColorSpace {
+//public:
+    enum Standard {
+        BT601,
+        BT709,
+        BT2020,
+        // TODO
+    };
+
+    enum Range {
+        LIMITED,
+        FULL,
+        // TODO
+    };
+
+    enum TransferFunction {
+        BT709Transfer,
+        BT2020Transfer,
+        HybridLogGamma2,
+        HybridLogGamma4,
+        // TODO
+    };
+};
+
+/** \deprecated */
+class C2GraphicBuffer : public C2Buffer {
+public:
+    // constant attributes
+    inline uint32_t width() const  { return mWidth; }
+    inline uint32_t height() const { return mHeight; }
+    inline uint32_t format() const { return mFormat; }
+    inline const C2MemoryUsage usage() const { return mUsage; }
+
+    // modifiable attributes
+
+
+    virtual const C2ColorSpace colorSpace() const = 0;
+    // best effort
+    virtual void setColorSpace_be(const C2ColorSpace &colorSpace) = 0;
+    virtual bool setColorSpace(const C2ColorSpace &colorSpace) = 0;
+
+    const C2Handle *handle() const;
+
+protected:
+    uint32_t mWidth;
+    uint32_t mHeight;
+    uint32_t mFormat;
+    C2MemoryUsage mUsage;
+
+    class Impl;
+    Impl *mImpl;
+};
+
+/// @}
+
+/// \endcond
+
+/// @}
+
+}  // namespace android
+
+#endif  // C2BUFFER_H_
diff --git a/media/libstagefright/codec2/include/C2Component.h b/media/libstagefright/codec2/include/C2Component.h
new file mode 100644
index 0000000..1ee9302
--- /dev/null
+++ b/media/libstagefright/codec2/include/C2Component.h
@@ -0,0 +1,685 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef C2COMPONENT_H_
+
+#define C2COMPONENT_H_
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <list>
+#include <memory>
+#include <vector>
+#include <functional>
+
+#include <C2Param.h>
+#include <C2Work.h>
+
+namespace android {
+
+/// \defgroup components Components
+/// @{
+
+class C2Component;
+
+class C2ComponentListener {
+public:
+    virtual void onWorkDone(std::weak_ptr<C2Component> component,
+                            std::vector<std::unique_ptr<C2Work>> workItems) = 0;
+
+    virtual void onTripped(std::weak_ptr<C2Component> component,
+                           std::vector<std::shared_ptr<C2SettingResult>> settingResult) = 0;
+
+    virtual void onError(std::weak_ptr<C2Component> component,
+                         uint32_t errorCode) = 0;
+
+    // virtual void onTunnelReleased(<from>, <to>) = 0;
+
+    // virtual void onComponentReleased(<id>) = 0;
+
+protected:
+    virtual ~C2ComponentListener();
+};
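+
+/*
+ * A minimal listener sketch (illustrative only): overrides the callbacks declared above; the
+ * handling shown in the comments is a placeholder for client logic.
+ *
+ *     class MyListener : public C2ComponentListener {
+ *     public:
+ *         void onWorkDone(std::weak_ptr<C2Component> component,
+ *                         std::vector<std::unique_ptr<C2Work>> workItems) override {
+ *             // recycle the returned work items
+ *         }
+ *         void onTripped(std::weak_ptr<C2Component> component,
+ *                        std::vector<std::shared_ptr<C2SettingResult>> settingResult) override {
+ *             // reconfigure based on the reported setting results
+ *         }
+ *         void onError(std::weak_ptr<C2Component> component, uint32_t errorCode) override {
+ *             // tear down or restart the component
+ *         }
+ *     };
+ */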
+
+/**
+ * Component interface object. This object contains all of the configuration of a potential or
+ * actual component. It can be created and used independently of an actual C2Component instance to
+ * query support and parameters for various component settings and configurations for a potential
+ * component. Actual components also expose this interface.
+ */
+
+class C2ComponentInterface {
+public:
+    // ALWAYS AVAILABLE METHODS
+    // =============================================================================================
+
+    /**
+     * Returns the name of this component or component interface object.
+     * This is a unique name for this component or component interface 'class'; however, multiple
+     * instances of this component SHALL have the same name.
+     *
+     * This method MUST be supported in any state. This call does not change the state nor the
+     * internal states of the component.
+     *
+     * This method MUST be "non-blocking" and return within 1ms.
+     *
+     * \return the name of this component or component interface object.
+     * \retval an empty string if there was not enough memory to allocate the actual name.
+     */
+    virtual C2String getName() const = 0;
+
+    /**
+     * Returns a unique ID for this component or interface object.
+     * This ID is used as work targets, unique work IDs, and when configuring tunneling.
+     *
+     * This method MUST be supported in any state. This call does not change the state nor the
+     * internal states of the component.
+     *
+     * This method MUST be "non-blocking" and return within 1ms.
+     *
+     * \return a unique node ID for this component or component interface instance.
+     */
+    virtual node_id getId() const = 0;
+
+    /**
+     * Queries a set of parameters from the component or interface object.
+     * Querying is performed at best effort: the component SHALL query all supported parameters,
+     * skipping unsupported ones as well as heap-allocated parameters that could not be allocated.
+     * Any errors are communicated in the return value. Additionally, preallocated (e.g. stack)
+     * parameters that could not be queried are invalidated. Parameters to be allocated on the
+     * heap are omitted from the result.
+     *
+     * \note Parameter values do not depend on the order of query.
+     *
+     * \todo This method cannot be used to query info-buffers. Is that a problem?
+     *
+     * This method MUST be supported in any state. This call does not change the state nor the
+     * internal states of the component.
+     *
+     * This method MUST be "non-blocking" and return within 1ms.
+     *
+     * \param[in,out] stackParams   a list of params queried. These are initialized specific to each
+     *                      setting; e.g. size and index are set and rest of the members are
+     *                      cleared.
+     *                      \note Flexible settings that are of incorrect size will be invalidated.
+     * \param[in] heapParamIndices a vector of param indices for params to be queried and returned on the
+     *                      heap. These parameters will be returned in heapParams. Unsupported param
+     *                      indices will be ignored.
+     * \param[out] heapParams    a list of params to which the supported heap parameters will be
+     *                      appended in the order they appear in heapParamIndices.
+     *
+     * \retval C2_OK        all parameters could be queried
+     * \retval C2_BAD_INDEX all supported parameters could be queried, but some parameters were not
+     *                      supported
+     * \retval C2_NO_MEMORY could not allocate memory for a supported parameter
+     * \retval C2_CORRUPTED some unknown error prevented the querying of the parameters
+     *                      (unexpected)
+     */
+    virtual status_t query_nb(
+        const std::vector<C2Param* const> &stackParams,
+        const std::vector<C2Param::Index> &heapParamIndices,
+        std::vector<std::unique_ptr<C2Param>>* const heapParams) const = 0;
+
+    /**
+     * Sets a set of parameters for the component or interface object.
+     * Tuning is performed at best effort: the component SHALL update all supported configuration at
+     * best effort (unless configured otherwise) and skip unsupported ones. Any errors are
+     * communicated in the return value and in |failures|.
+     *
+     * \note Parameter tuning DOES depend on the order of the tuning parameters. E.g. some parameter
+     * update may allow some subsequent parameter update.
+     *
+     * This method MUST be supported in any state.
+     *
+     * This method MUST be "non-blocking" and return within 1ms.
+     *
+     * \param[in,out] params          a list of parameter updates. These will be updated to the actual
+     *                      parameter values after the updates (this is because tuning is performed
+     *                      at best effort).
+     *                      \todo params that could not be updated are not marked here, so are
+     *                      confusing - are they "existing" values or intended to be configured
+     *                      values?
+     * \param[out] failures        a list of parameter failures
+     *
+     * \retval C2_OK        all parameters could be updated successfully
+     * \retval C2_BAD_INDEX all supported parameters could be updated successfully, but some
+     *                      parameters were not supported
+     * \retval C2_BAD_VALUE some supported parameters could not be updated successfully because
+     *                      they contained unsupported values. These are returned in |failures|.
+     * \retval C2_NO_MEMORY some supported parameters could not be updated successfully because
+     *                      they contained unsupported values, but could not allocate a failure
+     *                      object for them.
+     * \retval C2_CORRUPTED some unknown error prevented the update of the parameters
+     *                      (unexpected)
+     */
+    virtual status_t config_nb(
+            const std::vector<C2Param* const> &params,
+            std::vector<std::unique_ptr<C2SettingResult>>* const failures) = 0;
+
+    /**
+     * Atomically sets a set of parameters for the component or interface object.
+     *
+     * \note This method is used mainly for reserving resources for a component.
+     *
+     * The component SHALL update all supported configuration at
+     * best effort (TBD) (unless configured otherwise) and skip unsupported ones. Any errors are
+     * communicated in the return value and in |failures|.
+     *
+     * \note Parameter tuning DOES depend on the order of the tuning parameters. E.g. some parameter
+     * update may allow some subsequent parameter update.
+     *
+     * This method MUST be supported in any state.
+     *
+     * This method may be momentarily blocking, but MUST return within 5ms.
+     *
+     * \param[in,out] params          a list of parameter updates. These will be updated to the actual
+     *                      parameter values after the updates (this is because tuning is performed
+     *                      at best effort).
+     *                      \todo params that could not be updated are not marked here, so are
+     *                      confusing - are they "existing" values or intended to be configured
+     *                      values?
+     * \param[out] failures        a list of parameter failures
+     *
+     * \retval C2_OK        all parameters could be updated successfully
+     * \retval C2_BAD_INDEX all supported parameters could be updated successfully, but some
+     *                      parameters were not supported
+     * \retval C2_BAD_VALUE some supported parameters could not be updated successfully because
+     *                      they contained unsupported values. These are returned in |failures|.
+     * \retval C2_NO_MEMORY some supported parameters could not be updated successfully because
+     *                      they contained unsupported values, but could not allocate a failure
+     *                      object for them.
+     * \retval C2_CORRUPTED some unknown error prevented the update of the parameters
+     *                      (unexpected)
+     */
+    virtual status_t commit_sm(
+            const std::vector<C2Param* const> &params,
+            std::vector<std::unique_ptr<C2SettingResult>>* const failures) = 0;
+
+    // TUNNELING
+    // =============================================================================================
+
+    /**
+     * Creates a tunnel from this component to the target component.
+     *
+     * If the component is successfully created, subsequent work items queued may include a
+     * tunneled path between these components.
+     *
+     * This method MUST be supported in any state.
+     *
+     * This method may be momentarily blocking, but MUST return within 5ms.
+     *
+     * \retval C2_OK        the tunnel was successfully created
+     * \retval C2_BAD_INDEX the target component does not exist
+     * \retval C2_ALREADY_EXISTS the tunnel already exists
+     * \retval C2_UNSUPPORTED  the tunnel is not supported
+     *
+     * \retval C2_TIMED_OUT could not create the tunnel within the time limit (unexpected)
+     * \retval C2_CORRUPTED some unknown error prevented the creation of the tunnel (unexpected)
+     */
+    virtual status_t createTunnel_sm(node_id targetComponent) = 0;
+
+    /**
+     * Releases a tunnel from this component to the target component.
+     *
+     * The release of a tunnel is delayed while there are pending work items for the tunnel.
+     * After releasing a tunnel, subsequent work items queued MUST NOT include a tunneled
+     * path between these components.
+     *
+     * This method MUST be supported in any state.
+     *
+     * This method may be momentarily blocking, but MUST return within 5ms.
+     *
+     * \retval C2_OK        the tunnel was marked for release successfully
+     * \retval C2_BAD_INDEX the target component does not exist
+     * \retval C2_NOT_FOUND the tunnel does not exist
+     *
+     * \retval C2_TIMED_OUT could not mark the tunnel for release within the time limit (unexpected)
+     * \retval C2_CORRUPTED some unknown error prevented the release of the tunnel (unexpected)
+     */
+    virtual status_t releaseTunnel_sm(node_id targetComponent) = 0;
+
+
+    // REFLECTION MECHANISM (USED FOR EXTENSION)
+    // =============================================================================================
+
+    /**
+     * Returns the parameter reflector.
+     *
+     * This is used to describe parameter fields.
+     *
+     * \return a shared parameter reflector object.
+     */
+    virtual std::shared_ptr<C2ParamReflector> getParamReflector() const = 0;
+
+    /**
+     * Returns the set of supported parameters.
+     *
+     * \param[out] params a vector of supported parameters will be appended to this vector.
+     *
+     * \retval C2_OK        the operation completed successfully.
+     * \retval C2_NO_MEMORY not enough memory to complete this method.
+     */
+    virtual status_t getSupportedParams(
+            std::vector<std::shared_ptr<C2ParamDescriptor>> * const params) const = 0;
+
+    /**
+     *
+     * \todo should this take a list considering that setting some fields may further limit other
+     * fields in the same list?
+     */
+    virtual status_t getSupportedValues(
+            const std::vector<const C2ParamField> fields,
+            std::vector<C2FieldSupportedValues>* const values) const = 0;
+
+    virtual ~C2ComponentInterface() = default;
+};
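+
+/*
+ * A minimal query sketch against a component interface (illustrative only): |intf| is hypothetical,
+ * and C2ComponentDomainInfo (from C2Config.h) is assumed to behave like any other stack-allocated
+ * C2Param when queried.
+ *
+ *     std::shared_ptr<C2ComponentInterface> intf = ...;
+ *     C2ComponentDomainInfo domain;                       // stack-allocated query target
+ *     std::vector<std::unique_ptr<C2Param>> heapParams;   // stays empty in this query
+ *     if (intf->query_nb({ &domain }, {}, &heapParams) == C2_OK) {
+ *         // |domain| now describes whether the component is a video, audio or other component
+ *     }
+ */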
+
+class C2Component {
+public:
+    // METHODS AVAILABLE WHEN RUNNING
+    // =============================================================================================
+
+    /**
+     * Queues up work for the component.
+     *
+     * This method MUST be supported in running (including tripped) states.
+     *
+     * This method MUST be "non-blocking" and return within 1ms
+     *
+     * It is acceptable for this method to return OK and return an error value using the
+     * onWorkDone() callback.
+     *
+     * \retval C2_OK        the work was successfully queued
+     * \retval C2_BAD_INDEX some component(s) in the work do(es) not exist
+     * \retval C2_UNSUPPORTED  the components are not tunneled
+     *
+     * \retval C2_NO_MEMORY not enough memory to queue the work
+     * \retval C2_CORRUPTED some unknown error prevented queuing the work (unexpected)
+     */
+    virtual status_t queue_nb(std::list<std::unique_ptr<C2Work>>* const items) = 0;
+
+    /**
+     * Announces a work to be queued later for the component. This reserves a slot for the queue
+     * to ensure correct work ordering even if the work is queued later.
+     *
+     * This method MUST be supported in running (including tripped) states.
+     *
+     * This method MUST be "non-blocking" and return within 1 ms
+     *
+     * \retval C2_OK        the work announcement has been successfully recorded
+     * \retval C2_BAD_INDEX some component(s) in the work outline do(es) not exist
+     * \retval C2_UNSUPPORTED  the components are not tunneled
+     *
+     * \retval C2_NO_MEMORY not enough memory to record the work announcement
+     * \retval C2_CORRUPTED some unknown error prevented recording the announcement (unexpected)
+     *
+     * \todo Can this be rolled into queue_nb?
+     */
+    virtual status_t announce_nb(const std::vector<C2WorkOutline> &items) = 0;
+
+    /**
+     * Discards and abandons any pending work for the component, and optionally any component
+     * downstream.
+     *
+     * \todo define this: we could flush all work before last item queued for component across all
+     *                    components linked to this; flush only work items that are queued to this
+     *                    component
+     * \todo return work # of last flushed item; or all flushed (but not returned items)
+     * \todo we could make flush take a work item and flush all work before/after that item to allow
+     *       TBD (slicing/seek?)
+     * \todo we could simply take a list of numbers and flush those... this is bad for decoders
+     *       also, what would happen to fine-grained references?
+     *
+     * This method MUST be supported in running (including tripped) states.
+     *
+     * This method may be momentarily blocking, but must return within 5ms.
+     *
+     * Work that could be immediately abandoned/discarded SHALL be returned in |flushedWork|; this
+     * can be done in an arbitrary order.
+     *
+     * Work that could not be abandoned or discarded immediately SHALL be marked to be
+     * discarded at the earliest opportunity, and SHALL be returned via the onWorkDone() callback.
+     *
+     * \param flushThrough    flush work from this component and all components connected downstream
+     *                      from it via tunneling.
+     *
+     * \retval C2_OK        the work was successfully flushed or marked for discarding
+     * \retval C2_TIMED_OUT the flush could not be completed within the time limit (unexpected)
+     * \retval C2_CORRUPTED some unknown error prevented flushing from completion (unexpected)
+     */
+    virtual status_t flush_sm(bool flushThrough, std::list<std::unique_ptr<C2Work>>* const flushedWork) = 0;
+
+    /**
+     * Drains the component, and optionally downstream components
+     *
+     * \todo define this; we could place EOS to all upstream components, just this component, or
+     *       all upstream and downstream components.
+     * \todo should EOS carry over to downstream components?
+     *
+     * Marks last work item as "end-of-stream", so component is notified not to wait for further
+     * work before it processes work already queued. This method is called to set the end-of-stream
+     * flag after work has been queued. Client can continue to queue further work immediately after
+     * this method returns.
+     *
+     * This method MUST be supported in running (including tripped) states.
+     *
+     * This method MUST be "non-blocking" and return within 1ms.
+     *
+     * Work that is completed SHALL be returned via the onWorkDone() callback.
+     *
+     * \param drainThrough    marks the last work item with a persistent "end-of-stream" marker that
+     *                      will drain downstream components.
+     *
+     * \todo this may confuse work-ordering downstream; could be a mode enum
+     *
+     * \retval C2_OK        the drain request has been successfully recorded
+     * \retval C2_TIMED_OUT the drain could not be initiated within the time limit (unexpected)
+     * \retval C2_CORRUPTED some unknown error prevented the drain from being initiated (unexpected)
+     */
+    virtual status_t drain_nb(bool drainThrough) = 0;
+
+    // STATE CHANGE METHODS
+    // =============================================================================================
+
+    /**
+     * Starts the component.
+     *
+     * This method MUST be supported in stopped state.
+     *
+     * \todo This method MUST return within 500ms. Seems this should be able to return quickly, as
+     * there are no immediate guarantees. Though there are guarantees for responsiveness immediately
+     * after start returns.
+     *
+     * \todo Could we just start a ComponentInterface to get a Component?
+     *
+     * \retval C2_OK        the component was started successfully
+     * \retval C2_NO_MEMORY not enough memory to start the component
+     * \retval C2_TIMED_OUT the component could not be started within the time limit (unexpected)
+     * \retval C2_CORRUPTED some unknown error prevented starting the component (unexpected)
+     */
+    virtual status_t start() = 0;
+
+    /**
+     * Stops the component.
+     *
+     * This method MUST be supported in running (including tripped) state.
+     *
+     * This method MUST return within 500ms.
+     *
+     * Upon this call, all pending work SHALL be abandoned.
+     *
+     * \todo should this return completed work, since client will just free it? Perhaps just to
+     * verify accounting.
+     *
+     * This does not alter any settings and tunings that may have resulted in a tripped state.
+     * (Is this material given the definition? Perhaps in case we want to start again.)
+     */
+    virtual status_t stop() = 0;
+
+    /**
+     * Resets the component.
+     *
+     * This method MUST be supported in running (including tripped) state.
+     *
+     * This method MUST be supported during any other call (\todo or just blocking ones?)
+     *
+     * This method MUST return within 500ms.
+     *
+     * After this call returns all work is/must be abandoned, all references should be released.
+     *
+     * \todo should this return completed work, since client will just free it? Also, if it unblocks
+     * a stop, where should completed work be returned?
+     *
+     * This brings settings back to their default - "guaranteeing" no tripped space.
+     *
+     * \todo reclaim support - it seems that since ownership is passed, this will allow reclaiming stuff.
+     */
+    virtual void reset() = 0;
+
+    /**
+     * Releases the component.
+     *
+     * This method MUST be supported in any state. (\todo Or shall we force reset() first to bring
+     * to a known state?)
+     *
+     * This method MUST return within 500ms.
+     *
+     * \todo should this return completed work, since client will just free it? Also, if it unblocks
+     * a stop, where should completed work be returned?
+     *
+     * TODO: does it matter if this call has a short time limit? Yes, as upon return all references
+     * shall be abandoned.
+     */
+    virtual void release() = 0;
+
+    /**
+     * Returns the interface for this component.
+     *
+     * \return the component interface
+     */
+    virtual std::shared_ptr<C2ComponentInterface> intf() = 0;
+
+protected:
+    virtual ~C2Component() = default;
+};
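+
+/*
+ * A minimal component lifecycle sketch (illustrative only): |component| and |work| are
+ * hypothetical; finished work is returned through the listener's onWorkDone() callback.
+ *
+ *     std::shared_ptr<C2Component> component = ...;   // obtained from a C2ComponentStore
+ *     if (component->start() == C2_OK) {
+ *         std::list<std::unique_ptr<C2Work>> work;    // work items prepared by the client
+ *         component->queue_nb(&work);                 // ownership of the queued items passes on
+ *         component->drain_nb(false);                 // mark end-of-stream for this component only
+ *         component->stop();                          // abandons any still-pending work
+ *     }
+ *     component->release();
+ */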
+
+class C2FrameInfoParser {
+public:
+    /**
+     * \return the content type supported by this info parser.
+     *
+     * \todo this may be redundant
+     */
+    virtual C2StringLiteral getType() const = 0;
+
+    /**
+     * \return a vector of supported parameter indices parsed by this info parser.
+     *
+     * \todo sticky vs. non-sticky params? this may be communicated by param-reflector.
+     */
+    virtual const std::vector<C2Param::Index> getParsedParams() const = 0;
+
+    /**
+     * Resets this info parser. This brings this parser to its initial state after creation.
+     *
+     * This method SHALL return within 5ms.
+     *
+     * \retval C2_OK        the info parser was reset
+     * \retval C2_TIMED_OUT could not reset the parser within the time limit (unexpected)
+     * \retval C2_CORRUPTED some unknown error prevented the resetting of the parser (unexpected)
+     */
+    virtual status_t reset() { return C2_OK; }
+
+    virtual status_t parseFrame(C2BufferPack &frame);
+
+    virtual ~C2FrameInfoParser() = default;
+};
+
+struct C2ComponentInfo {
+    // TBD
+
+};
+
+class C2AllocatorStore {
+public:
+    // TBD
+
+    enum Type {
+        LINEAR,     ///< basic linear allocator type
+        GRALLOC,    ///< basic gralloc allocator type
+    };
+
+    /**
+     * Creates an allocator.
+     *
+     * \param type      the type of allocator to create
+     * \param allocator shared pointer where the created allocator is stored. Cleared on failure
+     *                  and updated on success.
+     *
+     * \retval C2_OK        the allocator was created successfully
+     * \retval C2_TIMED_OUT could not create the allocator within the time limit (unexpected)
+     * \retval C2_CORRUPTED some unknown error prevented the creation of the allocator (unexpected)
+     *
+     * \retval C2_NOT_FOUND no such allocator
+     * \retval C2_NO_MEMORY not enough memory to create the allocator
+     */
+    virtual status_t createAllocator(Type type, std::shared_ptr<C2Allocator>* const allocator) = 0;
+
+    virtual ~C2AllocatorStore() = default;
+};
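+
+/*
+ * A minimal sketch of obtaining an allocator from the store (illustrative only): |store| is
+ * hypothetical; the concrete store implementation is provided by the framework/vendor.
+ *
+ *     std::shared_ptr<C2AllocatorStore> store = ...;
+ *     std::shared_ptr<C2Allocator> allocator;
+ *     if (store->createAllocator(C2AllocatorStore::LINEAR, &allocator) == C2_OK) {
+ *         // |allocator| can now serve allocateLinearBuffer() requests
+ *     }
+ */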
+
+class C2ComponentStore {
+public:
+    /**
+     * Creates a component.
+     *
+     * This method SHALL return within 100ms.
+     *
+     * \param name          name of the component to create
+     * \param component     shared pointer where the created component is stored. Cleared on
+     *                      failure and updated on success.
+     *
+     * \retval C2_OK        the component was created successfully
+     * \retval C2_TIMED_OUT could not create the component within the time limit (unexpected)
+     * \retval C2_CORRUPTED some unknown error prevented the creation of the component (unexpected)
+     *
+     * \retval C2_NOT_FOUND no such component
+     * \retval C2_NO_MEMORY not enough memory to create the component
+     */
+    virtual status_t createComponent(C2String name, std::shared_ptr<C2Component>* const component);
+
+    /**
+     * Creates a component interface.
+     *
+     * This method SHALL return within 100ms.
+     *
+     * \param name          name of the component interface to create
+     * \param interface     shared pointer where the created interface is stored
+     *
+     * \retval C2_OK        the component interface was created successfully
+     * \retval C2_TIMED_OUT could not create the component interface within the time limit
+     *                      (unexpected)
+     * \retval C2_CORRUPTED some unknown error prevented the creation of the component interface
+     *                      (unexpected)
+     *
+     * \retval C2_NOT_FOUND no such component interface
+     * \retval C2_NO_MEMORY not enough memory to create the component interface
+     *
+     * \todo Do we need an interface, or could this just be a component that is never started?
+     */
+    virtual status_t createInterface(C2String name, std::shared_ptr<C2ComponentInterface>* const interface);
+
+    /**
+     * Returns the list of components supported by this component store.
+     *
+     * This method SHALL return within 1ms.
+     *
+     * \return a vector of component information.
+     */
+    virtual std::vector<std::unique_ptr<const C2ComponentInfo>> getComponents();
+
+    // -------------------------------------- UTILITY METHODS --------------------------------------
+
+    // on-demand buffer layout conversion (swizzling)
+    virtual status_t copyBuffer(std::shared_ptr<C2GraphicBuffer> src, std::shared_ptr<C2GraphicBuffer> dst);
+
+    // status_t selectPreferredColor(formats<A>, formats<B>);
+
+    // GLOBAL SETTINGS
+    // system-wide stride & slice-height (???)
+
+    /**
+     * Queries a set of system-wide parameters.
+     * Querying is performed at best effort: the store SHALL query all supported parameters,
+     * skipping unsupported ones as well as heap-allocated parameters that could not be allocated.
+     * Any errors are communicated in the return value. Additionally, preallocated (e.g. stack)
+     * parameters that could not be queried are invalidated. Parameters to be allocated on the
+     * heap are omitted from the result.
+     *
+     * \note Parameter values do not depend on the order of query.
+     *
+     * This method MUST be "non-blocking" and return within 1ms.
+     *
+     * \param stackParams     a list of params queried. These are initialized specific to each
+     *                      setting; e.g. size and index are set and rest of the members are
+     *                      cleared.
+     *                      NOTE: Flexible settings that are of incorrect size will be invalidated.
+     * \param heapParamIndices a vector of param indices for params to be queried and returned on the
+     *                      heap. These parameters will be returned in heapParams. Unsupported param
+     *                      indices will be ignored.
+     * \param heapParams      a list of params to which the supported heap parameters will be
+     *                      appended in the order they appear in heapParamIndices.
+     *
+     * \retval C2_OK        all parameters could be queried
+     * \retval C2_BAD_INDEX all supported parameters could be queried, but some parameters were not
+     *                      supported
+     * \retval C2_NO_MEMORY could not allocate memory for a supported parameter
+     * \retval C2_CORRUPTED some unknown error prevented the querying of the parameters
+     *                      (unexpected)
+     */
+    virtual status_t query_nb(
+        const std::vector<C2Param* const> &stackParams,
+        const std::vector<C2Param::Index> &heapParamIndices,
+        std::vector<std::unique_ptr<C2Param>>* const heapParams) = 0;
+
+    /**
+     * Sets a set of system-wide parameters.
+     *
+     * \note There are no settable system-wide parameters defined thus far, but may be added in the
+     * future.
+     *
+     * Tuning is performed at best effort: the store SHALL update all supported configuration at
+     * best effort (unless configured otherwise) and skip unsupported ones. Any errors are
+     * communicated in the return value and in |failures|.
+     *
+     * \note Parameter tuning DOES depend on the order of the tuning parameters. E.g. some parameter
+     * update may allow some subsequent parameter update.
+     *
+     * This method MUST be "non-blocking" and return within 1ms.
+     *
+     * \param params          a list of parameter updates. These will be updated to the actual
+     *                      parameter values after the updates (this is because tuning is performed
+     *                      at best effort).
+     *                      \todo params that could not be updated are not marked here, so are
+     *                      confusing - are they "existing" values or intended to be configured
+     *                      values?
+     * \param failures        a list of parameter failures
+     *
+     * \retval C2_OK        all parameters could be updated successfully
+     * \retval C2_BAD_INDEX all supported parameters could be updated successfully, but some
+     *                      parameters were not supported
+     * \retval C2_BAD_VALUE some supported parameters could not be updated successfully because
+     *                      they contained unsupported values. These are returned in |failures|.
+     * \retval C2_NO_MEMORY some supported parameters could not be updated successfully because
+     *                      they contained unsupported values, but could not allocate a failure
+     *                      object for them.
+     * \retval C2_CORRUPTED some unknown error prevented the update of the parameters
+     *                      (unexpected)
+     */
+    virtual status_t config_nb(
+            const std::vector<C2Param* const> &params,
+            std::list<std::unique_ptr<C2SettingResult>>* const failures) = 0;
+
+    virtual ~C2ComponentStore() = default;
+};
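+
+/*
+ * A minimal sketch of creating a component by name (illustrative only): |store| and the component
+ * name are hypothetical placeholders, and C2String is assumed to be constructible from a literal.
+ *
+ *     std::shared_ptr<C2ComponentStore> store = ...;
+ *     std::shared_ptr<C2Component> component;
+ *     if (store->createComponent("some.vendor.decoder", &component) == C2_OK) {
+ *         std::shared_ptr<C2ComponentInterface> intf = component->intf();
+ *         // configure via |intf|, then component->start()
+ *     }
+ */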
+
+// ================================================================================================
+
+/// @}
+
+}  // namespace android
+
+#endif  // C2COMPONENT_H_
diff --git a/media/libstagefright/codec2/include/C2Config.h b/media/libstagefright/codec2/include/C2Config.h
new file mode 100644
index 0000000..30e9193
--- /dev/null
+++ b/media/libstagefright/codec2/include/C2Config.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef C2CONFIG_H_
+#define C2CONFIG_H_
+
+#include <C2ParamDef.h>
+
+namespace android {
+
+/// \defgroup config Component configuration
+/// @{
+
+#ifndef DEFINE_C2_ENUM_VALUE_AUTO_HELPER
+#define DEFINE_C2_ENUM_VALUE_AUTO_HELPER(name, type, prefix, ...)
+#define DEFINE_C2_ENUM_VALUE_CUSTOM_HELPER(name, type, names, ...)
+#endif
+
+#define C2ENUM(name, type, ...) \
+enum name : type { __VA_ARGS__ }; \
+DEFINE_C2_ENUM_VALUE_AUTO_HELPER(name, type, NULL, __VA_ARGS__)
+
+#define C2ENUM_CUSTOM_PREFIX(name, type, prefix, ...) \
+enum name : type { __VA_ARGS__ }; \
+DEFINE_C2_ENUM_VALUE_AUTO_HELPER(name, type, prefix, __VA_ARGS__)
+
+#define C2ENUM_CUSTOM_NAMES(name, type, names, ...) \
+enum name : type { __VA_ARGS__ }; \
+DEFINE_C2_ENUM_VALUE_CUSTOM_HELPER(name, type, names, __VA_ARGS__)
+
+enum C2ParamIndexKind : uint32_t {
+    /// domain
+    kParamIndexDomain,
+
+    /// configuration descriptors
+    kParamIndexSupportedParams,
+    kParamIndexRequiredParams,
+    kParamIndexReadOnlyParams,
+    kParamIndexRequestedInfos,
+
+    /// latency
+    kParamIndexLatency,
+
+    // generic time behavior
+    kParamIndexTemporal,
+
+    /// port configuration
+    kParamIndexMime,
+    kParamIndexStreamCount,
+    kParamIndexFormat,
+
+    // video info
+
+    kParamIndexStructStart = 0x1,
+    kParamIndexVideoSize,
+    kParamIndexMaxVideoSizeHint,
+
+    kParamIndexParamStart = 0x800,
+};
+
+C2ENUM(C2DomainKind, int32_t,
+    C2DomainVideo,
+    C2DomainAudio,
+    C2DomainOther = C2DomainAudio + 1
+);
+
+// read-only
+
+typedef C2GlobalParam<C2Info, C2SimpleValueStruct<C2DomainKind>, kParamIndexDomain> C2ComponentDomainInfo;
+// typedef C2GlobalParam<C2Info, C2Uint32Value, kParamIndexDomain> C2ComponentDomainInfo;
+//DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<C2DomainKind>, { C2FIELD(mValue, "value") });
+
+// read-only
+typedef C2GlobalParam<C2Info, C2Uint32Array, kParamIndexSupportedParams> C2SupportedParamsInfo;
+
+/// \todo do we define it as a param?
+// read-only
+typedef C2GlobalParam<C2Info, C2Uint32Array, kParamIndexRequiredParams> C2RequiredParamsInfo;
+
+// read-only
+typedef C2GlobalParam<C2Info, C2Uint32Array, kParamIndexReadOnlyParams> C2ReadOnlyParamsInfo;
+
+// read-only
+typedef C2GlobalParam<C2Info, C2Uint32Array, kParamIndexRequestedInfos> C2RequestedInfosInfo;
+
+// read-only
+//typedef C2GlobalParam<C2Info, C2Uint32Value, kParamIndexRequestedInfos> C2RequestedInfosInfo;
+
+/// latency
+
+typedef C2PortParam<C2Info, C2Uint32Value, kParamIndexLatency> C2PortLatencyInfo;
+
+typedef C2GlobalParam<C2Info, C2Uint32Value, kParamIndexLatency> C2ComponentLatencyInfo;
+
+/// \todo
+typedef C2GlobalParam<C2Info, C2Uint32Value, kParamIndexTemporal> C2ComponentTemporalInfo;
+
+/// port configuration
+
+typedef C2PortParam<C2Tuning, C2StringValue, kParamIndexMime> C2PortMimeConfig;
+
+typedef C2PortParam<C2Tuning, C2Uint32Value, kParamIndexStreamCount> C2PortStreamCountConfig;
+
+typedef C2StreamParam<C2Tuning, C2StringValue, kParamIndexMime> C2StreamMimeConfig;
+
+C2ENUM(C2FormatKind, uint32_t,
+    C2FormatCompressed,
+    C2FormatAudio = 1,
+    C2FormatVideo = 4,
+)
+
+typedef C2StreamParam<C2Tuning, C2Uint32Value, kParamIndexFormat> C2StreamFormatConfig;
+
+/*
+   Component description fields:
+
+// format (video/compressed/audio/other-do we need other?) per stream
+
+// likely some of these are exposed as separate settings:
+
+struct C2BaseTuning {
+    // latency characteristics
+    uint32_t latency;
+    bool temporal;               // seems this only makes sense if latency is 1..., so this could be captured as latency = 0
+    uint32_t delay;
+
+    uint32_t numInputStreams;    // RW? - or suggestion only: RO
+    uint32_t numOutputStreams;   // RW
+                                 //
+    // refs characteristics (per stream?)
+    uint32_t maxInputRefs;       // RO
+    uint32_t maxOutputRefs;      // RO
+    uint32_t maxInputMemory;     // RO - max time refs are held for
+    uint32_t maxOutputMemory;    // RO
+
+    // per stream
+    bool compressed;
+    // format... video/compressed/audio/other?
+    // actual "audio/video" format type
+    uint32_t width/height? is this needed, or just queue...
+    // mime...
+};
+*/
+
+
+
+
+
+
+// overall component
+//   => C: domain: audio or video
+//   => C: kind: decoder, encoder or filter
+//   => "mime" class
+
+//   => C: temporal (bool) => does this depend on ordering?
+//   => I: latency
+//   => I: history max duration...
+//   => I: history max frames kept...
+//   => I: reordering depth
+//   => I: frc (bool) (perhaps ratio?)
+//   => I: current frc
+
+//   - pause
+//   => last frame 'number' processed
+//   => current frame 'number' processed
+//   => invalid settings =>[]
+
+// video decoder configuration:                                 // audio
+//   - encoding                                                 // -encoding
+//   - hint: max width/height                                   // -hint: sample rate, channels
+//   - hint: profile/level                                      // -hint: tools used
+//   - hint: framerate (bitrate?)                               // -hint: bitrate
+//   - default: color space (from container)
+//   - hint: color format                                       // -hint: pcm-encoding
+//   - hint: # of views (e.g. MVC)                              // -hint?: channel groups
+//   - default: HDR static info (from container)                // -hint?: channel mappings
+//   - hint: rotation (e.g. for allocator)
+
+// => # of streams required and their formats? (setting?)
+// => # of streams produced and their formats? (tuning)
+
+// => output
+//   - # of views                                               // -channel groups && channel mappings
+//   - width/height/crop/color format/color space/HDR static info (from buffers)
+//     (as required by the allocator & framework)
+//   - SEI (or equivalent) <= [port]
+//     - CC
+//   - reference info
+
+// video encoder configurations
+//   - encoding                                                 // - encoding
+//   - hint: width/height                                       // - hint: sample rate, channels
+//   - hint: frame rate
+//   - hint: max width/height (? does this differ from width/height?)
+//   - # of input (e.g. MVC)                                    // - hint: # groups and mappings
+//   - # of output (e.g. SVC) => bitrates/width/height/framerates? per stream
+//   - hint: profile/level                                      // - hint: profile/level
+//   - HDR static info + (info: HDR)
+//   - color space
+//   - hint: color format?                                      // - hint: pcm encoding
+//   - SEI
+//     - CC
+//   - reference directive
+//   - hint: bitrate (or quality)                               // - hint: bitrate/quality
+//   - optional: codec-specific parameters                      // - optional: csd
+
+// => output                                                    // => output
+//   - layers per stream?                                       // E-AC3?... DTS?...Dolby-Vision?
+//   - reference info
+
+
+// RM:
+//   - need SPS for full knowledge => component should return max. (component can use less)
+//   - critical parameters? (interlaced? profile? level?)
+
+struct C2VideoSizeStruct {
+    int32_t mWidth;     ///< video width
+    int32_t mHeight;    ///< video height
+
+    DEFINE_AND_DESCRIBE_C2STRUCT(VideoSize)
+    C2FIELD(mWidth, "width")
+    C2FIELD(mHeight, "height")
+};
+
+// video size for video decoder [OUT]
+typedef C2StreamParam<C2Info, C2VideoSizeStruct> C2VideoSizeStreamInfo;
+
+// max video size for video decoder [IN]
+typedef C2PortParam<C2Setting, C2VideoSizeStruct, kParamIndexMaxVideoSizeHint> C2MaxVideoSizeHintPortSetting;
+
+// video encoder size [IN]
+typedef C2StreamParam<C2Tuning, C2VideoSizeStruct> C2VideoSizeStreamTuning;
+
+/// @}
+
+} // namespace android
+
+#endif
diff --git a/media/libstagefright/codec2/include/C2Param.h b/media/libstagefright/codec2/include/C2Param.h
new file mode 100644
index 0000000..fd43061
--- /dev/null
+++ b/media/libstagefright/codec2/include/C2Param.h
@@ -0,0 +1,1171 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef C2PARAM_H_
+#define C2PARAM_H_
+
+#include <C2.h>
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <list>
+#include <string>
+#include <type_traits>
+
+#define C2_PACK __attribute__((packed))
+
+namespace android {
+
+/// \addtogroup Parameters
+/// @{
+
+/// \defgroup internal Internal helpers.
+
+/*!
+ * \file
+ * PARAMETERS: SETTINGs, TUNINGs, and INFOs
+ * ===
+ *
+ * These represent miscellaneous control and metadata information and are likely copied into
+ * kernel space. Therefore, these are C-like structures designed to carry just a small amount of
+ * information. We are using C++ to be able to add constructors, as well as non-virtual and class
+ * methods.
+ *
+ * ==Specification details:
+ *
+ * Restrictions:
+ *   - must be POD struct, e.g. no vtable (no virtual destructor)
+ *   - must have the same size in 64-bit and 32-bit mode (no size_t)
+ *   - as such, no pointer members
+ *
+ * Behavior:
+ * - Params can be global (not related to input or output), related to input or output,
+ *   or related to an input/output stream.
+ * - All params are queried/set using a unique param index, which incorporates a potential stream
+ *   index and/or port.
+ * - Querying (supported) params MUST never fail.
+ * - All params MUST have default values.
+ * - If some fields have "unsupported" or "invalid" values during setting, this SHOULD be
+ *   communicated to the app.
+ *   a) Ideally, this should be avoided. When setting parameters, in general, the component should
+ *     make a "best effort" to apply all settings. It should change "invalid/unsupported" values
+ *     to the nearest supported values.
+ *   - This is communicated to the client by changing the source values in tune()/
+ *     configure().
+ *   b) If falling back to a supported value is absolutely impossible, the component SHALL return
+ *     an error for the specific setting, but should continue to apply other settings.
+ *     TODO: this currently may result in unintended results.
+ *
+ * **NOTE:** unlike OMX, params are not versioned. Instead, a new struct with new base index
+ * SHALL be added as new versions are required.
+ *
+ * The proper subtype (Setting, Info or Param) is incorporated into the class type. Define structs
+ * to define multiple subtyped versions of related parameters.
+ *
+ * ==Implementation details:
+ *
+ * - Use macros to define parameters
+ * - All parameters must have a default constructor
+ *   - This is only used for instantiating the class in source (e.g. it will not be used
+ *     when the framework builds a parameter from key/value pairs).
+ */
+
+/// \ingroup internal
+struct _C2ParamManipulator;
+
+/**
+ * Parameter base class.
+ */
+struct C2Param {
+    // param index encompasses the following:
+    //
+    // - type (setting, tuning, info, struct)
+    // - vendor extension flag
+    // - flexible parameter flag
+    // - direction (global, input, output)
+    // - stream flag
+    // - stream ID (usually 0)
+    //
+    // layout:
+    //
+    //      +------+-----+---+------+--------+----+------+--------------+
+    //      | kind | dir | - |stream|streamID|flex|vendor|  base index  |
+    //      +------+-----+---+------+--------+----+------+--------------+
+    //  bit: 31..30 29..28      25   24 .. 17  16    15   14    ..     0
+    //
+public:
+    /**
+     * C2Param kinds, usable as bitmaps.
+     */
+    enum Kind : uint32_t {
+        NONE    = 0,
+        STRUCT  = (1 << 0),
+        INFO    = (1 << 1),
+        SETTING = (1 << 2),
+        TUNING  = (1 << 3) | SETTING, // tunings are settings
+    };
+
+    /**
+     * Base index (including the vendor extension bit) is a global index for
+     * C2 parameter structs (i.e. the same index cannot be reused for different
+     * structs across different components).
+     */
+    struct BaseIndex {
+    protected:
+        enum : uint32_t {
+            kTypeMask      = 0xC0000000,
+            kTypeStruct    = 0x00000000,
+            kTypeTuning    = 0x40000000,
+            kTypeSetting   = 0x80000000,
+            kTypeInfo      = 0xC0000000,
+
+            kDirMask       = 0x30000000,
+            kDirGlobal     = 0x20000000,
+            kDirUndefined  = 0x30000000, // MUST have all bits set
+            kDirInput      = 0x00000000,
+            kDirOutput     = 0x10000000,
+
+            kStreamFlag    = 0x02000000,
+            kStreamIdMask  = 0x01FE0000,
+            kStreamIdShift = 17,
+            kStreamIdMax   = kStreamIdMask >> kStreamIdShift,
+            kStreamMask    = kStreamFlag | kStreamIdMask,
+
+            kFlexibleFlag  = 0x00010000,
+            kVendorFlag    = 0x00008000,
+            kParamMask     = 0x0000FFFF,
+            kBaseMask      = kParamMask | kFlexibleFlag,
+        };
+
+    public:
+        enum : uint32_t {
+            kVendorStart = kVendorFlag, ///< vendor structs SHALL start after this
+            _kFlexibleFlag = kFlexibleFlag, // TODO: this is only needed for testing
+        };
+
+        /// constructor/conversion from uint32_t
+        inline BaseIndex(uint32_t index) : mIndex(index) { }
+
+        // no conversion from uint64_t
+        inline BaseIndex(uint64_t index) = delete;
+
+        /// returns true iff this is a vendor extension parameter
+        inline bool isVendor() const { return mIndex & kVendorFlag; }
+
+        /// returns true iff this is a flexible parameter (with variable size)
+        inline bool isFlexible() const { return mIndex & kFlexibleFlag; }
+
+        /// returns the base type: the index for the underlying struct
+        inline unsigned int baseIndex() const { return mIndex & kBaseMask; }
+
+        /// returns the param index for the underlying struct
+        inline unsigned int paramIndex() const { return mIndex & kParamMask; }
+
+        DEFINE_FIELD_BASED_COMPARISON_OPERATORS(BaseIndex, mIndex)
+
+    protected:
+        uint32_t mIndex;
+    };
+
+    /**
+     * type encompasses the parameter kind (tuning, setting, info), whether the
+     * parameter is global, input or output, and whether it is for a stream.
+     */
+    struct Type : public BaseIndex {
+        /// returns true iff this is a global parameter (not for input nor output)
+        inline bool isGlobal() const { return (mIndex & kDirMask) == kDirGlobal; }
+        /// returns true iff this is an input or input stream parameter
+        inline bool forInput() const { return (mIndex & kDirMask) == kDirInput; }
+        /// returns true iff this is an output or output stream parameter
+        inline bool forOutput() const { return (mIndex & kDirMask) == kDirOutput; }
+
+        /// returns true iff this is a stream parameter
+        inline bool forStream() const { return mIndex & kStreamFlag; }
+        /// returns true iff this is a port (input or output) parameter
+        inline bool forPort() const   { return !forStream() && !isGlobal(); }
+
+        /// returns the parameter type: the parameter index without the stream ID
+        inline uint32_t type() const { return mIndex & (~kStreamIdMask); }
+
+        /// return the kind of this param
+        inline Kind kind() const {
+            switch (mIndex & kTypeMask) {
+                case kTypeStruct: return STRUCT;
+                case kTypeInfo: return INFO;
+                case kTypeSetting: return SETTING;
+                case kTypeTuning: return TUNING;
+                default: return NONE; // should not happen
+            }
+        }
+
+        /// constructor/conversion from uint32_t
+        inline Type(uint32_t index) : BaseIndex(index) { }
+
+        // no conversion from uint64_t
+        inline Type(uint64_t index) = delete;
+
+    private:
+        friend struct C2Param;   // for setPort()
+        friend struct C2Tuning;  // for kTypeTuning
+        friend struct C2Setting; // for kTypeSetting
+        friend struct C2Info;    // for kTypeInfo
+        // for kDirGlobal
+        template<typename T, typename S, int I, class F> friend struct C2GlobalParam;
+        template<typename T, typename S, int I, class F> friend struct C2PortParam;   // for kDir*
+        template<typename T, typename S, int I, class F> friend struct C2StreamParam; // for kDir*
+        friend struct _C2ParamInspector; // for testing
+
+        /**
+         * Sets the port/stream direction.
+         * @return true on success, false if could not set direction (e.g. it is global param).
+         */
+        inline bool setPort(bool output) {
+            if (isGlobal()) {
+                return false;
+            } else {
+                mIndex = (mIndex & ~kDirMask) | (output ? kDirOutput : kDirInput);
+                return true;
+            }
+        }
+    };
+
+    /**
+     * index encompasses all remaining information: basically the stream ID.
+     */
+    struct Index : public Type {
+        /// returns the index as uint32_t
+        inline operator uint32_t() const { return mIndex; }
+
+        /// constructor/conversion from uint32_t
+        inline Index(uint32_t index) : Type(index) { }
+
+        // no conversion from uint64_t
+        inline Index(uint64_t index) = delete;
+
+        /// returns the stream ID or ~0 if not a stream
+        inline unsigned stream() const {
+            return forStream() ? rawStream() : ~0U;
+        }
+
+    private:
+        friend struct C2Param;           // for setStream, makeStreamId, isValid
+        friend struct _C2ParamInspector; // for testing
+
+        /**
+         * @return true if the type is valid, e.g. direction is not undefined AND
+         * stream is 0 if not a stream param.
+         */
+        inline bool isValid() const {
+            // there is no Type::isValid (even though some of this check could be
+            // performed on types) as this is only used on index...
+            return (forStream() ? rawStream() < kStreamIdMax : rawStream() == 0)
+                    && (mIndex & kDirMask) != kDirUndefined;
+        }
+
+        /// returns the raw stream ID field
+        inline unsigned rawStream() const {
+            return (mIndex & kStreamIdMask) >> kStreamIdShift;
+        }
+
+        /// returns the streamId bitfield for a given |stream|. If stream is invalid,
+        /// returns an invalid bitfield.
+        inline static uint32_t makeStreamId(unsigned stream) {
+            // saturate stream ID (max value is invalid)
+            if (stream > kStreamIdMax) {
+                stream = kStreamIdMax;
+            }
+            return (stream << kStreamIdShift) & kStreamIdMask;
+        }
+
+        /**
+         * Sets the stream index.
+         * \return true on success, false if could not set index (e.g. not a stream param).
+         */
+        inline bool setStream(unsigned stream) {
+            if (forStream()) {
+                mIndex = (mIndex & ~kStreamIdMask) | makeStreamId(stream);
+                return this->stream() < kStreamIdMax;
+            }
+            return false;
+        }
+    };
+
+public:
+    // public getters for Index methods
+
+    /// returns true iff this is a vendor extension parameter
+    inline bool isVendor() const { return _mIndex.isVendor(); }
+    /// returns true iff this is a flexible parameter
+    inline bool isFlexible() const { return _mIndex.isFlexible(); }
+    /// returns true iff this is a global parameter (not for input nor output)
+    inline bool isGlobal() const { return _mIndex.isGlobal(); }
+    /// returns true iff this is an input or input stream parameter
+    inline bool forInput() const { return _mIndex.forInput(); }
+    /// returns true iff this is an output or output stream parameter
+    inline bool forOutput() const { return _mIndex.forOutput(); }
+
+    /// returns true iff this is a stream parameter
+    inline bool forStream() const { return _mIndex.forStream(); }
+    /// returns true iff this is a port (input or output) parameter
+    inline bool forPort() const   { return _mIndex.forPort(); }
+
+    /// returns the stream ID or ~0 if not a stream
+    inline unsigned stream() const { return _mIndex.stream(); }
+
+    /// returns the parameter type: the parameter index without the stream ID
+    inline uint32_t type() const { return _mIndex.type(); }
+
+    /// returns the kind of this parameter
+    inline Kind kind() const { return _mIndex.kind(); }
+
+    /// returns the size of the parameter or 0 if the parameter is invalid
+    inline size_t size() const { return _mSize; }
+
+    /// returns true iff the parameter is valid
+    inline operator bool() const { return _mIndex.isValid() && _mSize > 0; }
+
+    /// returns true iff the parameter is invalid
+    inline bool operator!() const { return !operator bool(); }
+
+    // equality is done by memcmp (use equals() to prevent any overread)
+    inline bool operator==(const C2Param &o) const {
+        return equals(o) && memcmp(this, &o, _mSize) == 0;
+    }
+    inline bool operator!=(const C2Param &o) const { return !operator==(o); }
+
+    /// safe(r) type cast from pointer and size
+    inline static C2Param* From(void *addr, size_t len) {
+        // _mSize must fit into size
+        if (len < sizeof(_mSize) + offsetof(C2Param, _mSize)) {
+            return nullptr;
+        }
+        // _mSize must match length
+        C2Param *param = (C2Param*)addr;
+        if (param->_mSize != len) {
+            return nullptr;
+        }
+        return param;
+    }
+
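+    // Illustrative only: From() can validate an untyped buffer (e.g. one received over IPC)
+    // before it is treated as a C2Param. The |data| and |size| names below are hypothetical.
+    //
+    //     C2Param *param = C2Param::From(data, size);
+    //     if (param && param->forInput()) { /* safe to inspect further */ }
+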
+#if 0
+    template<typename P, class=decltype(C2Param(P()))>
+    P *As() { return P::From(this); }
+    template<typename P>
+    const P *As() const { return const_cast<const P*>(P::From(const_cast<C2Param*>(this))); }
+#endif
+
+protected:
+    /// sets the stream field. Returns true iff successful.
+    inline bool setStream(unsigned stream) {
+        return _mIndex.setStream(stream);
+    }
+
+    /// sets the port (direction). Returns true iff successful.
+    inline bool setPort(bool output) {
+        return _mIndex.setPort(output);
+    }
+
+public:
+    /// invalidate this parameter. There is no recovery from this call; i.e. the parameter
+    /// cannot be 'corrected' to be valid.
+    inline void invalidate() { _mSize = 0; }
+
+    // if other is the same kind of (valid) param as this, copy it into this and return true.
+    // otherwise, do not copy anything, and return false.
+    inline bool updateFrom(const C2Param &other) {
+        if (other._mSize == _mSize && other._mIndex == _mIndex && _mSize > 0) {
+            memcpy(this, &other, _mSize);
+            return true;
+        }
+        return false;
+    }
+
+protected:
+    // returns |o| if it is a null ptr, or if it can suitably be a param of given |type| (e.g. has
+    // same type (ignoring stream ID), and size). Otherwise, returns null. If |checkDir| is false,
+    // allow undefined or different direction (e.g. as constructed from C2PortParam() vs.
+    // C2PortParam::input), but still require equivalent type (stream, port or global); otherwise,
+    // return null.
+    inline static const C2Param* ifSuitable(
+            const C2Param* o, size_t size, Type type, size_t flexSize = 0, bool checkDir = true) {
+        if (o == nullptr || o->_mSize < size || (flexSize && ((o->_mSize - size) % flexSize))) {
+            return nullptr;
+        } else if (checkDir) {
+            return o->_mIndex.type() == type.mIndex ? o : nullptr;
+        } else if (o->_mIndex.isGlobal()) {
+            return nullptr;
+        } else {
+            return ((o->_mIndex.type() ^ type.mIndex) & ~Type::kDirMask) ? nullptr : o;
+        }
+    }
+
+    /// base constructor
+    inline C2Param(uint32_t paramSize, Index paramIndex)
+        : _mSize(paramSize),
+          _mIndex(paramIndex) {
+        if (paramSize > sizeof(C2Param)) {
+            memset(this + 1, 0, paramSize - sizeof(C2Param));
+        }
+    }
+
+    /// base constructor with stream set
+    inline C2Param(uint32_t paramSize, Index paramIndex, unsigned stream)
+        : _mSize(paramSize),
+          _mIndex(paramIndex | Index::makeStreamId(stream)) {
+        if (paramSize > sizeof(C2Param)) {
+            memset(this + 1, 0, paramSize - sizeof(C2Param));
+        }
+        if (!forStream()) {
+            invalidate();
+        }
+    }
+
+private:
+    friend struct _C2ParamInspector; // for testing
+
+    /// returns the base type: the index for the underlying struct (for testing,
+    /// as this can also be obtained via the baseIndex enum)
+    inline uint32_t _baseIndex() const { return _mIndex.baseIndex(); }
+
+    /// returns true iff |o| has the same size and index as this. This performs the
+    /// basic check for equality.
+    inline bool equals(const C2Param &o) const {
+        return _mSize == o._mSize && _mIndex == o._mIndex;
+    }
+
+    uint32_t _mSize;
+    Index _mIndex;
+};
+
+/// \ingroup internal
+/// allow C2Params access to private methods, e.g. constructors
+#define C2PARAM_MAKE_FRIENDS \
+    template<typename U, typename S, int I, class F> friend struct C2GlobalParam; \
+    template<typename U, typename S, int I, class F> friend struct C2PortParam; \
+    template<typename U, typename S, int I, class F> friend struct C2StreamParam; \
+
+/**
+ * Setting base structure for component method signatures. Wrap constructors.
+ */
+struct C2Setting : public C2Param {
+protected:
+    template<typename ...Args>
+    inline C2Setting(const Args(&... args)) : C2Param(args...) { }
+public: // TODO
+    enum : uint32_t { indexFlags = Type::kTypeSetting };
+};
+
+/**
+ * Tuning base structure for component method signatures. Wrap constructors.
+ */
+struct C2Tuning : public C2Setting {
+protected:
+    template<typename ...Args>
+    inline C2Tuning(const Args(&... args)) : C2Setting(args...) { }
+public: // TODO
+    enum : uint32_t { indexFlags = Type::kTypeTuning };
+};
+
+/**
+ * Info base structure for component method signatures. Wrap constructors.
+ */
+struct C2Info : public C2Param {
+protected:
+    template<typename ...Args>
+    inline C2Info(const Args(&... args)) : C2Param(args...) { }
+public: // TODO
+    enum : uint32_t { indexFlags = Type::kTypeInfo };
+};
+
+/**
+ * Structure uniquely specifying a field in an arbitrary structure.
+ *
+ * \note This structure is used differently in C2FieldDescriptor to
+ * identify array fields, such that _mSize is the size of each element. This is
+ * because the field descriptor contains the array-length, and we want to keep
+ * a relevant element size for variable length arrays.
+ */
+struct _C2FieldId {
+//public:
+    /**
+     * Constructor used for C2FieldDescriptor that removes the array extent.
+     *
+     * \param[in] offset pointer to the field in an object at address 0.
+     */
+    template<typename T, class B=typename std::remove_extent<T>::type>
+    inline _C2FieldId(T* offset)
+        : // offset is from "0" so will fit on 32-bits
+          _mOffset((uint32_t)(uintptr_t)(offset)),
+          _mSize(sizeof(B)) { }
+
+    /**
+     * Direct constructor from offset and size.
+     *
+     * \param[in] offset offset of the field.
+     * \param[in] size size of the field.
+     */
+    inline _C2FieldId(size_t offset, size_t size)
+        : _mOffset(offset), _mSize(size) {}
+
+    /**
+     * Constructor used to identify a field in an object.
+     *
+     * \tparam U type of the object that contains this field (passed as an unnamed pointer).
+     *         This is needed in case the field is in an (inherited) base class, in which case
+     *         T will be that base class.
+     * \param[in] pm member pointer to the field
+     */
+    template<typename R, typename T, typename U, typename B=typename std::remove_extent<R>::type>
+    inline _C2FieldId(U *, R T::* pm)
+        : _mOffset((uint32_t)(uintptr_t)(&(((U*)256)->*pm)) - 256u),
+          _mSize(sizeof(B)) { }
+
+    /**
+     * Constructor used to identify a field in an object.
+     *
+     * \tparam T type of the object that contains this field
+     * \param[in] pm member pointer to the field
+     */
+    template<typename R, typename T, typename B=typename std::remove_extent<R>::type>
+    inline _C2FieldId(R T::* pm)
+        : _mOffset((uint32_t)(uintptr_t)(&(((T*)0)->*pm))),
+          _mSize(sizeof(B)) { }
+
+    inline bool operator==(const _C2FieldId &other) const {
+        return _mOffset == other._mOffset && _mSize == other._mSize;
+    }
+
+    inline bool operator<(const _C2FieldId &other) const {
+        return _mOffset < other._mOffset ||
+            // NOTE: order parent structure before sub field
+            (_mOffset == other._mOffset && _mSize > other._mSize);
+    }
+
+    DEFINE_OTHER_COMPARISON_OPERATORS(_C2FieldId)
+
+#if 0
+    inline uint32_t offset() const { return _mOffset; }
+    inline uint32_t size() const { return _mSize; }
+#endif
+
+#if defined(FRIEND_TEST)
+    friend void PrintTo(const _C2FieldId &d, ::std::ostream*);
+#endif
+
+private:
+    uint32_t _mOffset; // offset of field
+    uint32_t _mSize;   // size of field
+};
+
+/**
+ * Structure uniquely specifying a field in a configuration
+ */
+struct C2ParamField {
+//public:
+    // TODO: fix what this is for T[] (for now size becomes T[1])
+    template<typename S, typename T>
+    inline C2ParamField(S* param, T* offset)
+        : _mIndex(param->index()),
+          _mFieldId(offset) {}
+
+    template<typename R, typename T, typename U>
+    inline C2ParamField(U *p, R T::* pm) : _mIndex(p->type()), _mFieldId(p, pm) { }
+
+    inline bool operator==(const C2ParamField &other) const {
+        return _mIndex == other._mIndex && _mFieldId == other._mFieldId;
+    }
+
+    inline bool operator<(const C2ParamField &other) const {
+        return _mIndex < other._mIndex ||
+            (_mIndex == other._mIndex && _mFieldId < other._mFieldId);
+    }
+
+    DEFINE_OTHER_COMPARISON_OPERATORS(C2ParamField)
+
+private:
+    C2Param::Index _mIndex;
+    _C2FieldId _mFieldId;
+};
+
+/**
+ * A shared (union) representation of numeric values
+ */
+class C2Value {
+public:
+    /// A union of supported primitive types.
+    union Primitive {
+        int32_t  i32;   ///< int32_t value
+        uint32_t u32;   ///< uint32_t value
+        int64_t  i64;   ///< int64_t value
+        uint64_t u64;   ///< uint64_t value
+        float    fp;    ///< float value
+
+        // constructors - implicit
+        Primitive(int32_t value)  : i32(value) { }
+        Primitive(uint32_t value) : u32(value) { }
+        Primitive(int64_t value)  : i64(value) { }
+        Primitive(uint64_t value) : u64(value) { }
+        Primitive(float value)    : fp(value)  { }
+
+        Primitive() : u64(0) { }
+
+    private:
+        friend class C2Value;
+        template<typename T> const T &ref() const;
+    };
+
+    enum Type {
+        NO_INIT,
+        INT32,
+        UINT32,
+        INT64,
+        UINT64,
+        FLOAT,
+    };
+
+    template<typename T> static constexpr Type typeFor();
+
+    // constructors - implicit
+    template<typename T>
+    C2Value(T value)  : mType(typeFor<T>()),  mValue(value) { }
+
+    C2Value() : mType(NO_INIT) { }
+
+    inline Type type() const { return mType; }
+
+    template<typename T>
+    inline bool get(T *value) const {
+        if (mType == typeFor<T>()) {
+            *value = mValue.ref<T>();
+            return true;
+        }
+        return false;
+    }
+
+private:
+    Type mType;
+    Primitive mValue;
+};
+
+template<> const int32_t &C2Value::Primitive::ref<int32_t>() const { return i32; }
+template<> const int64_t &C2Value::Primitive::ref<int64_t>() const { return i64; }
+template<> const uint32_t &C2Value::Primitive::ref<uint32_t>() const { return u32; }
+template<> const uint64_t &C2Value::Primitive::ref<uint64_t>() const { return u64; }
+template<> const float &C2Value::Primitive::ref<float>() const { return fp; }
+
+template<> constexpr C2Value::Type C2Value::typeFor<int32_t>() { return INT32; }
+template<> constexpr C2Value::Type C2Value::typeFor<int64_t>() { return INT64; }
+template<> constexpr C2Value::Type C2Value::typeFor<uint32_t>() { return UINT32; }
+template<> constexpr C2Value::Type C2Value::typeFor<uint64_t>() { return UINT64; }
+template<> constexpr C2Value::Type C2Value::typeFor<float>() { return FLOAT; }
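+
+// Illustrative only: C2Value carries a value together with its type, so a typed read fails when
+// the stored type does not match. Variable names below are hypothetical.
+//
+//     C2Value v = uint32_t(44100);
+//     uint32_t rate;
+//     int32_t wrong;
+//     bool ok = v.get(&rate);    // true - stored type is UINT32
+//     bool bad = v.get(&wrong);  // false - requested type does not match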
+
+/**
+ * field descriptor. A field is uniquely defined by an index into a parameter.
+ * (Note: Stream-id is not captured as a field.)
+ *
+ * Ordering of fields is by offset. In case of structures, it is depth first,
+ * with a structure taking an index just before and in addition to its members.
+ */
+struct C2FieldDescriptor {
+//public:
+    /** field types and flags
+     * \note: only 32-bit and 64-bit fields are supported (e.g. no boolean, as that
+     * is represented using INT32).
+     */
+    enum Type : uint32_t {
+        // primitive types
+        INT32   = C2Value::INT32,  ///< 32-bit signed integer
+        UINT32  = C2Value::UINT32, ///< 32-bit unsigned integer
+        INT64   = C2Value::INT64,  ///< 64-bit signed integer
+        UINT64  = C2Value::UINT64, ///< 64-bit unsigned integer
+        FLOAT   = C2Value::FLOAT,  ///< 32-bit floating point
+
+        // array types
+        STRING = 0x100, ///< fixed-size string (POD)
+        BLOB,           ///< blob. Blobs have no sub-elements and can be thought of as byte arrays;
+                        ///< however, bytes cannot be individually addressed by clients.
+
+        // complex types
+        STRUCT_FLAG = 0x10000, ///< structs. Marked with this flag in addition to their baseIndex.
+    };
+
+    typedef std::pair<C2String, C2Value::Primitive> named_value_type;
+    typedef std::vector<const named_value_type> named_values_type;
+    //typedef std::pair<std::vector<C2String>, std::vector<C2Value::Primitive>> named_values_type;
+
+    /**
+     * Template specialization that returns the named values for a type.
+     *
+     * \todo hide from client.
+     *
+     * \return a vector of name-value pairs.
+     */
+    template<typename B>
+    static named_values_type namedValuesFor(const B &);
+
+    inline C2FieldDescriptor(uint32_t type, uint32_t length, C2StringLiteral name, size_t offset, size_t size)
+        : _mType((Type)type), _mLength(length), _mName(name), _mFieldId(offset, size) { }
+
+    template<typename T, class B=typename std::remove_extent<T>::type>
+    inline C2FieldDescriptor(const T* offset, const char *name)
+        : _mType(this->getType((B*)nullptr)),
+          _mLength(std::is_array<T>::value ? std::extent<T>::value : 1),
+          _mName(name),
+          _mNamedValues(namedValuesFor(*(B*)0)),
+          _mFieldId(offset) {}
+
+/*
+    template<typename T, typename B=typename std::remove_extent<T>::type>
+    inline C2FieldDescriptor<T, B, false>(T* offset, const char *name)
+        : _mType(this->getType((B*)nullptr)),
+          _mLength(std::is_array<T>::value ? std::extent<T>::value : 1),
+          _mName(name),
+          _mFieldId(offset) {}
+*/
+
+    /// \deprecated
+    template<typename T, typename S, class B=typename std::remove_extent<T>::type>
+    constexpr inline C2FieldDescriptor(S*, T S::* field, const char *name)
+        : _mType(this->getType((B*)nullptr)),
+          _mLength(std::is_array<T>::value ? std::extent<T>::value : 1),
+          _mName(name),
+          _mFieldId(&(((S*)0)->*field)) {}
+
+    /// returns the type of this field
+    inline Type type() const { return _mType; }
+    /// returns the length of the field in case it is an array. Returns 0 for
+    /// T[] arrays, returns 1 for T[1] arrays as well as if the field is not an array.
+    inline size_t length() const { return _mLength; }
+    /// returns the name of the field
+    inline C2StringLiteral name() const { return _mName; }
+
+    const named_values_type &namedValues() const { return _mNamedValues; }
+
+#if defined(FRIEND_TEST)
+    friend void PrintTo(const C2FieldDescriptor &, ::std::ostream*);
+    friend bool operator==(const C2FieldDescriptor &, const C2FieldDescriptor &);
+    FRIEND_TEST(C2ParamTest_ParamFieldList, VerifyStruct);
+#endif
+
+private:
+    const Type _mType;
+    const uint32_t _mLength; // the last member can be arbitrary length if it is T[] array,
+                       // extending to the end of the parameter (this is marked with
+                       // 0). T[0]-s are not fields.
+    const C2StringLiteral _mName;
+    const named_values_type _mNamedValues;
+
+    const _C2FieldId _mFieldId;   // field identifier (offset and size)
+
+    // NOTE: We do not capture default value(s) here as that may depend on the component.
+    // NOTE: We also do not capture bestEffort, as 1) this should be true for most fields,
+    // 2) this is at parameter granularity.
+
+    // type resolution
+    inline static Type getType(int32_t*)  { return INT32; }
+    inline static Type getType(uint32_t*) { return UINT32; }
+    inline static Type getType(int64_t*)  { return INT64; }
+    inline static Type getType(uint64_t*) { return UINT64; }
+    inline static Type getType(float*)    { return FLOAT; }
+    inline static Type getType(char*)     { return STRING; }
+    inline static Type getType(uint8_t*)  { return BLOB; }
+
+    template<typename T,
+             class=typename std::enable_if<std::is_enum<T>::value>::type>
+    inline static Type getType(T*) {
+        typename std::underlying_type<T>::type underlying(0);
+        return getType(&underlying);
+    }
+
+    // verify C2Struct by having a fieldList and a baseIndex.
+    template<typename T,
+             class=decltype(T::baseIndex + 1), class=decltype(T::fieldList)>
+    inline static Type getType(T*) {
+        static_assert(!std::is_base_of<C2Param, T>::value, "cannot use C2Params as fields");
+        return (Type)(T::baseIndex | STRUCT_FLAG);
+    }
+};
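+
+// Illustrative only: this is what the C2FIELD macro below effectively produces for a member of a
+// struct, here shown for the mWidth member of C2VideoSizeStruct (defined in C2Config.h).
+//
+//     C2FieldDescriptor widthField(&((C2VideoSizeStruct*)nullptr)->mWidth, "width");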
+
+#define DEFINE_NO_NAMED_VALUES_FOR(type) \
+template<> inline C2FieldDescriptor::named_values_type C2FieldDescriptor::namedValuesFor(const type &) { \
+    return named_values_type(); \
+}
+
+// We cannot subtype the constructor for enumerated types, so instead define no named values for
+// non-enumerated integral types.
+DEFINE_NO_NAMED_VALUES_FOR(int32_t)
+DEFINE_NO_NAMED_VALUES_FOR(uint32_t)
+DEFINE_NO_NAMED_VALUES_FOR(int64_t)
+DEFINE_NO_NAMED_VALUES_FOR(uint64_t)
+DEFINE_NO_NAMED_VALUES_FOR(uint8_t)
+DEFINE_NO_NAMED_VALUES_FOR(char)
+DEFINE_NO_NAMED_VALUES_FOR(float)
+
+/**
+ * Describes the fields of a structure.
+ */
+struct C2StructDescriptor {
+public:
+    /// Returns the parameter type
+    inline C2Param::BaseIndex baseIndex() const { return _mType.baseIndex(); }
+
+    // Returns the number of fields in this param (not counting any recursive fields).
+    // Must be at least 1 for valid params.
+    inline size_t numFields() const { return _mFields.size(); }
+
+    // Returns the list of immediate fields (not counting any recursive fields).
+    typedef std::vector<const C2FieldDescriptor>::const_iterator field_iterator;
+    inline field_iterator cbegin() const { return _mFields.cbegin(); }
+    inline field_iterator cend() const { return _mFields.cend(); }
+
+    // only supplying const iterator - but these are needed for range based loops
+    inline field_iterator begin() const { return _mFields.cbegin(); }
+    inline field_iterator end() const { return _mFields.cend(); }
+
+    template<typename T>
+    inline C2StructDescriptor(T*)
+        : C2StructDescriptor(T::baseIndex, T::fieldList) { }
+
+    inline C2StructDescriptor(
+            C2Param::BaseIndex type,
+            std::initializer_list<const C2FieldDescriptor> fields)
+        : _mType(type), _mFields(fields) { }
+
+private:
+    const C2Param::BaseIndex _mType;
+    const std::vector<const C2FieldDescriptor> _mFields;
+};
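+
+// Illustrative only: a struct declared with the DEFINE/DESCRIBE macros exposes baseIndex and
+// fieldList, so its descriptor can be built from a typed null pointer. C2VideoSizeStruct is
+// defined in C2Config.h; the snippet is just an example.
+//
+//     C2StructDescriptor desc((C2VideoSizeStruct*)nullptr);
+//     for (const C2FieldDescriptor &field : desc) {
+//         // field.name() yields "width", then "height"
+//     }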
+
+/**
+ * Describes parameters for a component.
+ */
+struct C2ParamDescriptor {
+public:
+    /**
+     * Returns whether setting this param is required to configure this component.
+     * This can only be true for builtin params for platform-defined components (e.g. video and
+     * audio encoders/decoders, video/audio filters).
+     * For vendor-defined components, it can be true even for vendor-defined params,
+     * but it is not recommended, in case the component becomes platform-defined.
+     */
+    inline bool isRequired() const { return _mIsRequired; }
+
+    /**
+     * Returns whether this parameter is persistent. This is always true for C2Tuning and C2Setting,
+     * but may be false for C2Info. If true, this parameter persists across frames and applies to
+     * the current and subsequent frames. If false, this C2Info parameter only applies to the
+     * current frame and is not assumed to have the same value (or even be present) on subsequent
+     * frames, unless it is specified for those frames.
+     */
+    inline bool isPersistent() const { return _mIsPersistent; }
+
+    /// Returns the name of this param.
+    /// This defaults to the underlying C2Struct's name, but could be altered for a component.
+    inline C2String name() const { return _mName; }
+
+    /// Returns the parameter type
+    /// \todo fix this
+    inline C2Param::Type type() const { return _mType; }
+
+    template<typename T>
+    inline C2ParamDescriptor(bool isRequired, C2StringLiteral name, const T*)
+        : _mIsRequired(isRequired),
+          _mIsPersistent(true),
+          _mName(name),
+          _mType(T::typeIndex) { }
+
+    inline C2ParamDescriptor(
+            bool isRequired, C2StringLiteral name, C2Param::Type type)
+        : _mIsRequired(isRequired),
+          _mIsPersistent(true),
+          _mName(name),
+          _mType(type) { }
+
+private:
+    const bool _mIsRequired;
+    const bool _mIsPersistent;
+    const C2String _mName;
+    const C2Param::Type _mType;
+};
+
+/// \ingroup internal
+/// Define a structure without baseIndex.
+#define DEFINE_C2STRUCT_NO_BASE(name) \
+public: \
+    typedef C2##name##Struct _type; /**< type name shorthand */ \
+    const static std::initializer_list<const C2FieldDescriptor> fieldList; /**< structure fields */
+
+/// Define a structure with matching baseIndex.
+#define DEFINE_C2STRUCT(name) \
+public: \
+    enum : uint32_t { baseIndex = kParamIndex##name }; \
+    DEFINE_C2STRUCT_NO_BASE(name)
+
+/// Define a flexible structure with matching baseIndex.
+#define DEFINE_FLEX_C2STRUCT(name, flexMember) \
+public: \
+    FLEX(C2##name##Struct, flexMember) \
+    enum : uint32_t { baseIndex = kParamIndex##name | C2Param::BaseIndex::_kFlexibleFlag }; \
+    DEFINE_C2STRUCT_NO_BASE(name)
+
+/// \ingroup internal
+/// Describe a structure of a templated structure.
+#define DESCRIBE_TEMPLATED_C2STRUCT(strukt, list) \
+    template<> \
+    const std::initializer_list<const C2FieldDescriptor> strukt::fieldList = list;
+
+/// \deprecated
+/// Describe the fields of a structure using an initializer list.
+#define DESCRIBE_C2STRUCT(name, list) \
+    const std::initializer_list<const C2FieldDescriptor> C2##name##Struct::fieldList = list;
+
+/**
+ * Describe a field of a structure.
+ * These must be in order.
+ *
+ * There are two ways to use this macro:
+ *
+ *  ~~~~~~~~~~~~~ (.cpp)
+ *  struct C2VideoWidthStruct {
+ *      int32_t mWidth;
+ *      C2VideoWidthStruct() {} // optional default constructor
+ *      C2VideoWidthStruct(int32_t _width) : mWidth(_width) {}
+ *
+ *      DEFINE_AND_DESCRIBE_C2STRUCT(VideoWidth)
+ *      C2FIELD(mWidth, "width")
+ *  };
+ *  ~~~~~~~~~~~~~
+ *
+ *  ~~~~~~~~~~~~~ (.cpp)
+ *  struct C2VideoWidthStruct {
+ *      int32_t mWidth;
+ *      C2VideoWidthStruct() = default; // optional default constructor
+ *      C2VideoWidthStruct(int32_t _width) : mWidth(_width) {}
+ *
+ *      DEFINE_C2STRUCT(VideoWidth)
+ *  } C2_PACK;
+ *
+ *  DESCRIBE_C2STRUCT(VideoWidth, {
+ *      C2FIELD(mWidth, "width")
+ *  })
+ *  ~~~~~~~~~~~~~
+ *
+ *  For flexible structures (those ending in T[]), use the flexible macros:
+ *
+ *  ~~~~~~~~~~~~~ (.cpp)
+ *  struct C2VideoFlexWidthsStruct {
+ *      int32_t mWidths[];
+ *      C2VideoFlexWidthsStruct(); // must have a default constructor
+ *
+ *  private:
+ *      // may have private constructors taking number of widths as the first argument
+ *      // This is used by the C2Param factory methods, e.g.
+ *      //   C2VideoFlexWidthsGlobalParam::alloc_unique(size_t, int32_t);
+ *      C2VideoFlexWidthsStruct(size_t flexCount, int32_t value) {
+ *          for (size_t i = 0; i < flexCount; ++i) {
+ *              mWidths[i] = value;
+ *          }
+ *      }
+ *
+ *      // If the last argument is T[N] or std::initializer_list<T>, the flexCount will
+ *      // be automatically calculated and passed by the C2Param factory methods, e.g.
+ *      //   int widths[] = { 1, 2, 3 };
+ *      //   C2VideoFlexWidthsGlobalParam::alloc_unique(widths);
+ *      template<unsigned N>
+ *      C2VideoFlexWidthsStruct(size_t flexCount, const int32_t(&init)[N]) {
+ *          for (size_t i = 0; i < flexCount; ++i) {
+ *              mWidths[i] = init[i];
+ *          }
+ *      }
+ *
+ *      DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(VideoFlexWidths, mWidths)
+ *      C2FIELD(mWidths, "widths")
+ *  };
+ *  ~~~~~~~~~~~~~
+ *
+ *  ~~~~~~~~~~~~~ (.cpp)
+ *  struct C2VideoFlexWidthsStruct {
+ *      int32_t mWidths[];
+ *      C2VideoFlexWidthsStruct(); // must have a default constructor
+ *
+ *      DEFINE_FLEX_C2STRUCT(VideoFlexWidths, mWidths)
+ *  } C2_PACK;
+ *
+ *  DESCRIBE_C2STRUCT(VideoFlexWidths, {
+ *      C2FIELD(mWidths, "widths")
+ *  })
+ *  ~~~~~~~~~~~~~
+ *
+ */
+#define C2FIELD(member, name) \
+  C2FieldDescriptor(&((_type*)(nullptr))->member, name),
+
+/// \deprecated
+#define C2SOLE_FIELD(member, name) \
+  C2FieldDescriptor(&_type::member, name, 0)
+
+/// Define a structure with matching baseIndex and start describing its fields.
+/// This must be at the end of the structure definition.
+#define DEFINE_AND_DESCRIBE_C2STRUCT(name) \
+    DEFINE_C2STRUCT(name) }  C2_PACK; \
+    const std::initializer_list<const C2FieldDescriptor> C2##name##Struct::fieldList = {
+
+/// Define a flexible structure with matching baseIndex and start describing its fields.
+/// This must be at the end of the structure definition.
+#define DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(name, flexMember) \
+    DEFINE_FLEX_C2STRUCT(name, flexMember) } C2_PACK; \
+    const std::initializer_list<const C2FieldDescriptor> C2##name##Struct::fieldList = {
+
+/**
+ * Parameter reflector class.
+ *
+ * This class centralizes the description of parameter structures. This can be shared
+ * by multiple components as describing a parameter does not imply support of that
+ * parameter. However, each supported parameter and any dependent structures within
+ * must be described by the parameter reflector provided by a component.
+ */
+class C2ParamReflector {
+public:
+    /**
+     *  Describes a parameter structure.
+     *
+     *  \param[in] paramIndex the base index of the parameter structure
+     *
+     *  \return the description of the parameter structure
+     *  \retval nullptr if the parameter is not supported by this reflector
+     *
+     *  This method shall not block and shall return immediately.
+     *
+     *  \note this class does not take a set of indices because we would then prefer
+     *  to also return any dependent structures, and we don't want this logic to be
+     *  repeated in each reflector. Alternatively, this could just return a map of all
+     *  descriptions, but we want to conserve memory if the client only wants the
+     *  description of a few indices.
+     */
+    virtual std::unique_ptr<C2StructDescriptor> describe(C2Param::BaseIndex paramIndex) = 0;
+
+protected:
+    virtual ~C2ParamReflector() = default;
+};
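+
+// Illustrative only: a minimal reflector for a component that describes a single struct might
+// look like the sketch below (MyReflector is a hypothetical name).
+//
+//     class MyReflector : public C2ParamReflector {
+//         std::unique_ptr<C2StructDescriptor> describe(C2Param::BaseIndex paramIndex) override {
+//             if (paramIndex.paramIndex() == kParamIndexVideoSize) {
+//                 return std::unique_ptr<C2StructDescriptor>(
+//                         new C2StructDescriptor((C2VideoSizeStruct*)nullptr));
+//             }
+//             return nullptr;
+//         }
+//     };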
+
+/**
+ * Typed supported values for a field.
+ *
+ * This can be either a range or a set of values. The range can be linear or geometric with a
+ * clear minimum and maximum value, and can have an optional step size or geometric ratio. Values
+ * can optionally represent flags.
+ *
+ * \note Do not use flags to represent bitfields. Use individual values or separate fields instead.
+ */
+template<typename T>
+struct C2TypedFieldSupportedValues {
+//public:
+    enum Type {
+        RANGE,      ///< a numeric range that can be continuous or discrete
+        VALUES,     ///< a list of values
+        FLAGS       ///< a list of flags that can be OR-ed
+    };
+
+    Type type;
+
+    struct {
+        T min;
+        T max;
+        T step;
+        T nom;
+        T denom;
+    } range;
+    std::vector<T> values;
+
+    C2TypedFieldSupportedValues(T min, T max, T step = T(std::is_floating_point<T>::value ? 0 : 1))
+        : type(RANGE),
+          range{min, max, step, (T)1, (T)1} { }
+
+    C2TypedFieldSupportedValues(T min, T max, T nom, T den) :
+        type(RANGE),
+        range{min, max, (T)0, nom, den} { }
+
+    C2TypedFieldSupportedValues(bool flags, std::initializer_list<T> list) :
+        type(flags ? FLAGS : VALUES),
+        values(list) {}
+};
+
+/**
+ * Generic supported values for a field.
+ *
+ * This can be either a range or a set of values. The range can be linear or geometric with a
+ * clear minimum and maximum value, and can have an optional step size or geometric ratio. Values
+ * can optionally represent flags.
+ *
+ * \note Do not use flags to represent bitfields. Use individual values or separate fields instead.
+ */
+struct C2FieldSupportedValues {
+//public:
+    enum Type {
+        RANGE,      ///< a numeric range that can be continuous or discrete
+        VALUES,     ///< a list of values
+        FLAGS       ///< a list of flags that can be OR-ed
+    };
+
+    Type type;
+
+    typedef C2Value::Primitive Primitive;
+
+    struct {
+        Primitive min;
+        Primitive max;
+        Primitive step;
+        Primitive nom;
+        Primitive denom;
+    } range;
+    std::vector<Primitive> values;
+
+    template<typename T>
+    C2FieldSupportedValues(T min, T max, T step = T(std::is_floating_point<T>::value ? 0 : 1))
+        : type(RANGE),
+          range{min, max, step, (T)1, (T)1} { }
+
+    template<typename T>
+    C2FieldSupportedValues(T min, T max, T nom, T den) :
+        type(RANGE),
+        range{min, max, (T)0, nom, den} { }
+
+    template<typename T>
+    C2FieldSupportedValues(bool flags, std::initializer_list<T> list)
+        : type(flags ? FLAGS : VALUES),
+          range{(T)0, (T)0, (T)0, (T)0, (T)0} {
+        for (T value : list) {
+            values.emplace_back(value);
+        }
+    }
+
+    template<typename T, typename E=decltype(C2FieldDescriptor::namedValuesFor(*(T*)0))>
+    C2FieldSupportedValues(bool flags, const T*)
+        : type(flags ? FLAGS : VALUES),
+          range{(T)0, (T)0, (T)0, (T)0, (T)0} {
+        C2FieldDescriptor::named_values_type named = C2FieldDescriptor::namedValuesFor(*(T*)0);
+        for (const C2FieldDescriptor::named_value_type &item : named) {
+            values.emplace_back(item.second);
+        }
+    }
+};
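+
+// Illustrative only: supported values can describe a continuous or discrete range, a list of
+// values, or OR-able flags. The constants below are hypothetical.
+//
+//     C2FieldSupportedValues bitrate(4000, 64000000, 1000);      // range with a step
+//     C2FieldSupportedValues profiles(false, { 1, 2, 8 });       // discrete values
+//     C2FieldSupportedValues features(true, { 0x1, 0x2, 0x4 });  // flags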
+
+/// @}
+
+}  // namespace android
+
+#endif  // C2PARAM_H_
diff --git a/media/libstagefright/codec2/include/C2ParamDef.h b/media/libstagefright/codec2/include/C2ParamDef.h
new file mode 100644
index 0000000..f369617
--- /dev/null
+++ b/media/libstagefright/codec2/include/C2ParamDef.h
@@ -0,0 +1,901 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** \file
+ * Templates used to declare parameters.
+ */
+#ifndef C2PARAM_DEF_H_
+#define C2PARAM_DEF_H_
+
+#include <type_traits>
+
+#include <C2Param.h>
+
+namespace android {
+
+/// \addtogroup Parameters
+/// @{
+
+/* ======================== UTILITY TEMPLATES FOR PARAMETER DEFINITIONS ======================== */
+
+/// \addtogroup internal
+/// @{
+
+/// Helper class that checks if a type has equality and inequality operators.
+struct C2_HIDE _C2Comparable_impl
+{
+    template<typename S, typename=decltype(S() == S())>
+    static std::true_type __testEQ(int);
+    template<typename>
+    static std::false_type __testEQ(...);
+
+    template<typename S, typename=decltype(S() != S())>
+    static std::true_type __testNE(int);
+    template<typename>
+    static std::false_type __testNE(...);
+};
+
+/**
+ * Helper template that returns if a type has equality and inequality operators.
+ *
+ * Use as _C2Comparable<typename S>::value.
+ */
+template<typename S>
+struct C2_HIDE _C2Comparable
+    : public std::integral_constant<bool, decltype(_C2Comparable_impl::__testEQ<S>(0))::value
+                        || decltype(_C2Comparable_impl::__testNE<S>(0))::value> {
+};
+
+///  Helper class that checks if a type has a baseIndex constant.
+struct C2_HIDE _C2BaseIndexHelper_impl
+{
+    template<typename S, int=S::baseIndex>
+    static std::true_type __testBaseIndex(int);
+    template<typename>
+    static std::false_type __testBaseIndex(...);
+};
+
+/// Helper template that verifies a type's baseIndex and creates it if the type does not have one.
+template<typename S, int BaseIndex,
+        bool HasBase=decltype(_C2BaseIndexHelper_impl::__testBaseIndex<S>(0))::value>
+struct C2_HIDE C2BaseIndexOverride {
+    // TODO: what if we allow structs without baseIndex?
+    static_assert(BaseIndex == S::baseIndex, "baseIndex differs from structure");
+};
+
+/// Specialization for types without a baseIndex.
+template<typename S, int BaseIndex>
+struct C2_HIDE C2BaseIndexOverride<S, BaseIndex, false> {
+public:
+    enum : uint32_t {
+        baseIndex = BaseIndex, ///< baseIndex override.
+    };
+};
+
+/// Helper template that adds a baseIndex to a type if it does not have one.
+template<typename S, int BaseIndex>
+struct C2_HIDE C2AddBaseIndex : public S, public C2BaseIndexOverride<S, BaseIndex> {};
+
+/**
+ * \brief Helper class to check struct requirements for parameters.
+ *
+ * Features:
+ *  - verify default constructor, no virtual methods, and no equality operators.
+ *  - expose typeIndex, and non-flex flexSize.
+ */
+template<typename S, int BaseIndex, unsigned TypeIndex>
+struct C2_HIDE C2StructCheck {
+    static_assert(
+            std::is_default_constructible<S>::value, "C2 structure must have default constructor");
+    static_assert(!std::is_polymorphic<S>::value, "C2 structure must not have virtual methods");
+    static_assert(!_C2Comparable<S>::value, "C2 structure must not have operator== or !=");
+
+public:
+    enum : uint32_t {
+        typeIndex = BaseIndex | TypeIndex
+    };
+
+protected:
+    enum : uint32_t {
+        flexSize = 0, // TODO: is this still needed? this may be confusing.
+    };
+};
+
+/// Helper class that checks if a type has an integer flexSize member.
+struct C2_HIDE _C2Flexible_impl {
+    /// specialization for types that have a flexSize member
+    template<typename S, unsigned=S::flexSize>
+    static std::true_type __testFlexSize(int);
+    template<typename>
+    static std::false_type __testFlexSize(...);
+};
+
+/// Helper template that returns if a type has an integer flexSize member.
+template<typename S>
+struct C2_HIDE _C2Flexible
+    : public std::integral_constant<bool, decltype(_C2Flexible_impl::__testFlexSize<S>(0))::value> {
+};
+
+/// Macro to test if a type is flexible (has a flexSize member).
+#define IF_FLEXIBLE(S) ENABLE_IF(_C2Flexible<S>::value)
+/// Shorthand for std::enable_if
+#define ENABLE_IF(cond) typename std::enable_if<cond>::type
+
+/// Helper template that exposes the flexible subtype of a struct.
+template<typename S, typename E=void>
+struct C2_HIDE _C2FlexHelper {
+    typedef void flexType;
+    enum : uint32_t { flexSize = 0 };
+};
+
+/// Specialization for flexible types.
+template<typename S>
+struct C2_HIDE _C2FlexHelper<S,
+        typename std::enable_if<!std::is_void<typename S::flexMemberType>::value>::type> {
+    typedef typename _C2FlexHelper<typename S::flexMemberType>::flexType flexType;
+    enum : uint32_t { flexSize = _C2FlexHelper<typename S::flexMemberType>::flexSize };
+};
+
+/// Specialization for flex arrays.
+template<typename S>
+struct C2_HIDE _C2FlexHelper<S[],
+        typename std::enable_if<std::is_void<typename _C2FlexHelper<S>::flexType>::value>::type> {
+    typedef S flexType;
+    enum : uint32_t { flexSize = sizeof(S) };
+};
+
+/**
+ * \brief Helper class to check flexible struct requirements and add common operations.
+ *
+ * Features:
+ *  - expose baseIndex and fieldList (this is normally inherited from the struct, but flexible
+ *    structs cannot be base classes and thus inherited from)
+ *  - disable copy assignment and construction (TODO: this is already done in the FLEX macro for the
+ *    flexible struct, so may not be needed here)
+ */
+template<typename S, int BaseIndex, unsigned TypeIndex>
+struct C2_HIDE C2FlexStructCheck : public C2StructCheck<S, BaseIndex, TypeIndex> {
+public:
+    enum : uint32_t {
+        /// \hideinitializer
+        baseIndex = BaseIndex | C2Param::BaseIndex::_kFlexibleFlag, ///< flexible struct base-index
+    };
+
+    const static std::initializer_list<const C2FieldDescriptor> fieldList; // TODO assign here
+
+    // default constructor needed because of the disabled copy constructor
+    inline C2FlexStructCheck() = default;
+
+protected:
+    // cannot copy flexible params
+    C2FlexStructCheck(const C2FlexStructCheck<S, BaseIndex, TypeIndex> &) = delete;
+    C2FlexStructCheck& operator= (const C2FlexStructCheck<S, BaseIndex, TypeIndex> &) = delete;
+
+    // constants used for helper methods
+    enum : uint32_t {
+        /// \hideinitializer
+        flexSize = _C2FlexHelper<S>::flexSize, ///< size of flexible type
+        /// \hideinitializer
+        maxSize = (uint32_t)std::min((size_t)UINT32_MAX, SIZE_MAX), // TODO: is this always u32 max?
+        /// \hideinitializer
+        baseSize = sizeof(S) + sizeof(C2Param), ///< size of the base param
+    };
+
+    /// returns the allocated size of this param with flexCount, or 0 if it would overflow.
+    inline static size_t calcSize(size_t flexCount, size_t size = baseSize) {
+        if (flexCount <= (maxSize - size) / S::flexSize) {
+            return size + S::flexSize * flexCount;
+        }
+        return 0;
+    }
+
+    /// dynamic new operator usable for params of type S
+    inline void* operator new(size_t size, size_t flexCount) noexcept {
+        // TODO: assert(size == baseSize);
+        size = calcSize(flexCount, size);
+        if (size > 0) {
+            return ::operator new(size);
+        }
+        return nullptr;
+    }
+};
+
+// TODO: this probably does not work.
+/// Expose fieldList from the subclass.
+template<typename S, int BaseIndex, unsigned TypeIndex>
+const std::initializer_list<const C2FieldDescriptor> C2FlexStructCheck<S, BaseIndex, TypeIndex>::fieldList = S::fieldList;
+
+/// Define From() cast operators for params.
+#define DEFINE_CAST_OPERATORS(_type) \
+    inline static _type* From(C2Param *other) { \
+        return (_type*)C2Param::ifSuitable( \
+                other, sizeof(_type),_type::typeIndex, _type::flexSize, \
+                (_type::typeIndex & T::Index::kDirUndefined) != T::Index::kDirUndefined); \
+    } \
+    inline static const _type* From(const C2Param *other) { \
+        return const_cast<const _type*>(From(const_cast<C2Param *>(other))); \
+    } \
+    inline static _type* From(std::nullptr_t) { return nullptr; } \
+
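+// A hedged usage sketch of the generated From() casts (C2MyParam is an illustrative parameter
+// type, not defined in this header): From() returns nullptr when the generic parameter does not
+// match the target type, so the result must be null-checked.
+//   C2MyParam *mine = C2MyParam::From(someC2ParamPtr);  // nullptr if not a C2MyParam
+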
+/**
+ * Define flexible allocators (alloc_shared or alloc_unique) for flexible params.
+ *  - P::alloc_xyz(flexCount, args...): allocate for given flex-count.
+ *  - P::alloc_xyz(args..., T[]): allocate for size of (and with) init array.
+ *  - P::alloc_xyz(T[]): allocate for size of (and with) init array with no other args.
+ *  - P::alloc_xyz(args..., std::initializer_list<T>): allocate for size of (and with) initializer
+ *    list.
+ */
+#define DEFINE_FLEXIBLE_ALLOC(_type, S, ptr) \
+    template<typename ...Args> \
+    inline static std::ptr##_ptr<_type> alloc_##ptr(size_t flexCount, const Args(&... args)) { \
+        return std::ptr##_ptr<_type>(new(flexCount) _type(flexCount, args...)); \
+    } \
+    /* NOTE: unfortunately this is not supported by clang yet */ \
+    template<typename ...Args, typename U=typename S::flexType, unsigned N> \
+    inline static std::ptr##_ptr<_type> alloc_##ptr(const Args(&... args), const U(&init)[N]) { \
+        return std::ptr##_ptr<_type>(new(N) _type(N, args..., init)); \
+    } \
+    /* so for now, specialize for no args */ \
+    template<typename U=typename S::flexType, unsigned N> \
+    inline static std::ptr##_ptr<_type> alloc_##ptr(const U(&init)[N]) { \
+        return std::ptr##_ptr<_type>(new(N) _type(N, init)); \
+    } \
+    template<typename ...Args, typename U=typename S::flexType> \
+    inline static std::ptr##_ptr<_type> alloc_##ptr( \
+            const Args(&... args), const std::initializer_list<U> &init) { \
+        return std::ptr##_ptr<_type>(new(init.size()) _type(init.size(), args..., init)); \
+    } \
+
+/**
+ * Define flexible methods alloc_shared, alloc_unique and flexCount.
+ */
+#define DEFINE_FLEXIBLE_METHODS(_type, S) \
+    DEFINE_FLEXIBLE_ALLOC(_type, S, shared) \
+    DEFINE_FLEXIBLE_ALLOC(_type, S, unique) \
+    inline size_t flexCount() const { \
+        static_assert(sizeof(_type) == _type::baseSize, "incorrect baseSize"); \
+        size_t sz = this->size(); \
+        if (sz >= sizeof(_type)) { \
+            return (sz - sizeof(_type)) / _type::flexSize; \
+        } \
+        return 0; \
+    } \
+
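+// A hedged usage sketch following the signatures documented above (P stands for an illustrative
+// flexible parameter type whose flexType is int32_t; it is not defined in this header):
+//   std::shared_ptr<P> a = P::alloc_shared(3u);           // room for 3 flexible elements
+//   std::shared_ptr<P> b = P::alloc_shared({ 1, 2, 3 });  // sized and filled from the list
+//   size_t n = a->flexCount();                            // 3 for the allocation above
+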
+/// Mark flexible member variable and make structure flexible.
+#define FLEX(cls, m) \
+    C2_DO_NOT_COPY(cls) \
+private: \
+    C2PARAM_MAKE_FRIENDS \
+    /* default constructor with flexCount */ \
+    inline cls(size_t) : cls() {} \
+    /** \if 0 */ \
+    template<typename, typename> friend struct _C2FlexHelper; \
+    typedef decltype(m) flexMemberType; \
+public: \
+    /* constexpr static flexMemberType cls::* flexMember = &cls::m; */ \
+    typedef typename _C2FlexHelper<flexMemberType>::flexType flexType; \
+    static_assert(\
+            !std::is_void<flexType>::value, \
+            "member is not flexible, or a flexible array of a flexible type"); \
+    enum : uint32_t { flexSize = _C2FlexHelper<flexMemberType>::flexSize }; \
+    /** \endif */ \
+
+/// @}
+
+/**
+ * Global-parameter template.
+ *
+ * Base template to define a global setting/tuning or info based on a structure and
+ * an optional BaseIndex. Global parameters are not tied to a port (input or output).
+ *
+ * Parameters wrap structures by prepending a (parameter) header. The fields of the wrapped
+ * structure can be accessed directly, and constructors and potential public methods are also
+ * wrapped.
+ *
+ * \tparam T param type C2Setting, C2Tuning or C2Info
+ * \tparam S wrapped structure
+ * \tparam BaseIndex optional base-index override. Must be specified for common/reused structures.
+ */
+template<typename T, typename S, int BaseIndex=S::baseIndex, class Flex=void>
+struct C2_HIDE C2GlobalParam : public T, public S, public C2BaseIndexOverride<S, BaseIndex>,
+        public C2StructCheck<S, BaseIndex, T::indexFlags | T::Type::kDirGlobal> {
+private:
+    typedef C2GlobalParam<T, S, BaseIndex> _type;
+
+public:
+    /// Wrapper around base structure's constructor.
+    template<typename ...Args>
+    inline C2GlobalParam(const Args(&... args)) : T(sizeof(_type), _type::typeIndex), S(args...) { }
+
+    DEFINE_CAST_OPERATORS(_type)
+};
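+
+// For illustration only (the typedef name and index are assumptions), a global tuning wrapping a
+// simple 32-bit value could be declared and used as:
+//   typedef C2GlobalParam<C2Tuning, C2Int32Value, kParamIndexMyGlobalTuning> C2MyGlobalTuning;
+//   C2MyGlobalTuning tuning(42);  // fields of C2Int32Value (mValue) are accessible directly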
+
+/**
+ * Global-parameter template for flexible structures.
+ *
+ * Base template to define a global setting/tuning or info based on a flexible structure and
+ * an optional BaseIndex. Global parameters are not tied to a port (input or output).
+ *
+ * \tparam T param type C2Setting, C2Tuning or C2Info
+ * \tparam S wrapped flexible structure
+ * \tparam BaseIndex optional base-index override. Must be specified for common/reused structures.
+ *
+ * Parameters wrap structures by prepending a (parameter) header. The fields and methods of flexible
+ * structures can be accessed via the m member variable; however, the constructors of the structure
+ * are wrapped directly. (This is because flexible types cannot be subclassed.)
+ */
+template<typename T, typename S, int BaseIndex>
+struct C2_HIDE C2GlobalParam<T, S, BaseIndex, IF_FLEXIBLE(S)>
+    : public T, public C2FlexStructCheck<S, BaseIndex, T::indexFlags | T::Type::kDirGlobal> {
+private:
+    typedef C2GlobalParam<T, S, BaseIndex> _type;
+
+    /// Wrapper around base structure's constructor.
+    template<typename ...Args>
+    inline C2GlobalParam(size_t flexCount, const Args(&... args))
+        : T(_type::calcSize(flexCount), _type::typeIndex), m(flexCount, args...) { }
+
+public:
+    S m; ///< wrapped flexible structure
+
+    DEFINE_FLEXIBLE_METHODS(_type, S)
+    DEFINE_CAST_OPERATORS(_type)
+};
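+
+// For illustration only (names are assumptions), a flexible global info carrying a string:
+//   typedef C2GlobalParam<C2Info, C2StringValue, kParamIndexMyName> C2MyNameInfo;
+//   std::unique_ptr<C2MyNameInfo> name = C2MyNameInfo::alloc_unique("codec2");
+//   // the wrapped flexible structure is reached through the m member: name->m.mValue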
+
+/**
+ * Port-parameter template.
+ *
+ * Base template to define a port setting/tuning or info based on a structure and
+ * an optional BaseIndex. Port parameters are tied to a port (input or output), but not to a
+ * specific stream.
+ *
+ * \tparam T param type C2Setting, C2Tuning or C2Info
+ * \tparam S wrapped structure
+ * \tparam BaseIndex optional base-index override. Must be specified for common/reused structures.
+ *
+ * Parameters wrap structures by prepending a (parameter) header. The fields of the wrapped
+ * structure can be accessed directly, and constructors and potential public methods are also
+ * wrapped.
+ *
+ * There are 3 flavors of port parameters: unspecified, input and output. Parameters with
+ * unspecified port expose a setPort method, and add an initial port parameter to the constructor.
+ */
+template<typename T, typename S, int BaseIndex=S::baseIndex, class Flex=void>
+struct C2_HIDE C2PortParam : public T, public S, public C2BaseIndexOverride<S, BaseIndex>,
+        private C2StructCheck<S, BaseIndex, T::indexFlags | T::Index::kDirUndefined> {
+private:
+    typedef C2PortParam<T, S, BaseIndex> _type;
+
+public:
+    /// Default constructor.
+    inline C2PortParam() : T(sizeof(_type), _type::typeIndex) { }
+    template<typename ...Args>
+    /// Wrapper around base structure's constructor while specifying port/direction.
+    inline C2PortParam(bool _output, const Args(&... args))
+        : T(sizeof(_type), _output ? output::typeIndex : input::typeIndex), S(args...) { }
+    /// Set port/direction.
+    inline void setPort(bool output) { C2Param::setPort(output); }
+
+    DEFINE_CAST_OPERATORS(_type)
+
+    /// Specialization for an input port parameter.
+    struct input : public T, public S, public C2BaseIndexOverride<S, BaseIndex>,
+            public C2StructCheck<S, BaseIndex, T::indexFlags | T::Index::kDirInput> {
+        /// Wrapper around base structure's constructor.
+        template<typename ...Args>
+        inline input(const Args(&... args)) : T(sizeof(_type), input::typeIndex), S(args...) { }
+
+        DEFINE_CAST_OPERATORS(input)
+
+    };
+
+    /// Specialization for an output port parameter.
+    struct output : public T, public S, public C2BaseIndexOverride<S, BaseIndex>,
+            public C2StructCheck<S, BaseIndex, T::indexFlags | T::Index::kDirOutput> {
+        /// Wrapper around base structure's constructor.
+        template<typename ...Args>
+        inline output(const Args(&... args)) : T(sizeof(_type), output::typeIndex), S(args...) { }
+
+        DEFINE_CAST_OPERATORS(output)
+    };
+};
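+
+// For illustration only (names are assumptions), a port setting wrapping an unsigned value:
+//   typedef C2PortParam<C2Setting, C2Uint32Value, kParamIndexMyPortSetting> C2MyPortSetting;
+//   C2MyPortSetting::input in(16u);   // input-port flavor, initial value 16
+//   C2MyPortSetting any(true, 16u);   // unspecified flavor constructed as an output port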
+
+/**
+ * Port-parameter template for flexible structures.
+ *
+ * Base template to define a port setting/tuning or info based on a flexible structure and
+ * an optional BaseIndex. Port parameters are tied to a port (input or output), but not to a
+ * specific stream.
+ *
+ * \tparam T param type C2Setting, C2Tuning or C2Info
+ * \tparam S wrapped flexible structure
+ * \tparam BaseIndex optional base-index override. Must be specified for common/reused structures.
+ *
+ * Parameters wrap structures by prepending a (parameter) header. The fields and methods of flexible
+ * structures can be accessed via the m member variable; however, the constructors of the structure
+ * are wrapped directly. (This is because flexible types cannot be subclassed.)
+ *
+ * There are 3 flavors of port parameters: unspecified, input and output. Parameters with
+ * unspecified port expose a setPort method, and add an initial port parameter to the constructor.
+ */
+template<typename T, typename S, int BaseIndex>
+struct C2_HIDE C2PortParam<T, S, BaseIndex, IF_FLEXIBLE(S)>
+    : public T, public C2FlexStructCheck<S, BaseIndex, T::indexFlags | T::Type::kDirUndefined> {
+private:
+    typedef C2PortParam<T, S, BaseIndex> _type;
+
+    /// Default constructor for basic allocation: new(flexCount) P.
+    inline C2PortParam(size_t flexCount) : T(_type::calcSize(flexCount), _type::typeIndex) { }
+    template<typename ...Args>
+    /// Wrapper around base structure's constructor while also specifying port/direction.
+    inline C2PortParam(size_t flexCount, bool _output, const Args(&... args))
+        : T(_type::calcSize(flexCount), _output ? output::typeIndex : input::typeIndex),
+          m(flexCount, args...) { }
+
+public:
+    /// Set port/direction.
+    inline void setPort(bool output) { C2Param::setPort(output); }
+
+    S m; ///< wrapped flexible structure
+
+    DEFINE_FLEXIBLE_METHODS(_type, S)
+    DEFINE_CAST_OPERATORS(_type)
+
+    /// Specialization for an input port parameter.
+    struct input : public T, public C2BaseIndexOverride<S, BaseIndex>,
+            public C2FlexStructCheck<S, BaseIndex, T::indexFlags | T::Index::kDirInput> {
+    private:
+        /// Wrapper around base structure's constructor while also specifying port/direction.
+        template<typename ...Args>
+        inline input(size_t flexCount, const Args(&... args))
+            : T(_type::calcSize(flexCount), input::typeIndex), m(flexCount, args...) { }
+
+    public:
+        S m; ///< wrapped flexible structure
+
+        DEFINE_FLEXIBLE_METHODS(input, S)
+        DEFINE_CAST_OPERATORS(input)
+    };
+
+    /// Specialization for an output port parameter.
+    struct output : public T, public C2BaseIndexOverride<S, BaseIndex>,
+            public C2FlexStructCheck<S, BaseIndex, T::indexFlags | T::Index::kDirOutput> {
+    private:
+        /// Wrapper around base structure's constructor while also specifying port/direction.
+        template<typename ...Args>
+        inline output(size_t flexCount, const Args(&... args))
+            : T(_type::calcSize(flexCount), output::typeIndex), m(flexCount, args...) { }
+
+    public:
+        S m; ///< wrapped flexible structure
+
+        DEFINE_FLEXIBLE_METHODS(output, S)
+        DEFINE_CAST_OPERATORS(output)
+    };
+};
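+
+// For illustration only (names are assumptions), a flexible output-port info carrying a blob:
+//   typedef C2PortParam<C2Info, C2BlobValue, kParamIndexMyBlob> C2MyBlobInfo;
+//   std::shared_ptr<C2MyBlobInfo::output> blob = C2MyBlobInfo::output::alloc_shared(100u);
+//   // the flexible bytes are reached through the m member: blob->m.mValue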
+
+/**
+ * Stream-parameter template.
+ *
+ * Base template to define a stream setting/tuning or info based on a structure and
+ * an optional BaseIndex. Stream parameters are tied to a specific stream on a port (input or
+ * output).
+ *
+ * \tparam T param type C2Setting, C2Tuning or C2Info
+ * \tparam S wrapped structure
+ * \tparam BaseIndex optional base-index override. Must be specified for common/reused structures.
+ *
+ * Parameters wrap structures by prepending a (parameter) header. The fields of the wrapped
+ * structure can be accessed directly, and constructors and potential public methods are also
+ * wrapped.
+ *
+ * There are 3 flavors of stream parameters: unspecified port, input and output. All of these expose
+ * a setStream method and an extra initial streamID parameter for the constructor. Moreover,
+ * parameters with unspecified port expose a setPort method, and add an additional initial port
+ * parameter to the constructor.
+ */
+template<typename T, typename S, int BaseIndex=S::baseIndex, class Flex=void>
+struct C2_HIDE C2StreamParam : public T, public S, public C2BaseIndexOverride<S, BaseIndex>,
+        private C2StructCheck<S, BaseIndex,
+                T::indexFlags | T::Index::kStreamFlag | T::Index::kDirUndefined> {
+private:
+    typedef C2StreamParam<T, S, BaseIndex> _type;
+
+public:
+    /// Default constructor. Port/direction and stream-ID is undefined.
+    inline C2StreamParam() : T(sizeof(_type), _type::typeIndex) { }
+    /// Wrapper around base structure's constructor while also specifying port/direction and
+    /// stream-ID.
+    template<typename ...Args>
+    inline C2StreamParam(bool _output, unsigned stream, const Args(&... args))
+        : T(sizeof(_type), _output ? output::typeIndex : input::typeIndex, stream),
+          S(args...) { }
+    /// Set port/direction.
+    inline void setPort(bool output) { C2Param::setPort(output); }
+    /// Set stream-id. \retval true if the stream-id was successfully set.
+    inline bool setStream(unsigned stream) { return C2Param::setStream(stream); }
+
+    DEFINE_CAST_OPERATORS(_type)
+
+    /// Specialization for an input stream parameter.
+    struct input : public T, public S, public C2BaseIndexOverride<S, BaseIndex>,
+            public C2StructCheck<S, BaseIndex,
+                    T::indexFlags | T::Index::kStreamFlag | T::Type::kDirInput> {
+        /// Default constructor. Stream-ID is undefined.
+        inline input() : T(sizeof(_type), input::typeIndex) { }
+        /// Wrapper around base structure's constructor while also specifying stream-ID.
+        template<typename ...Args>
+        inline input(unsigned stream, const Args(&... args))
+            : T(sizeof(_type), input::typeIndex, stream), S(args...) { }
+        /// Set stream-id. \retval true if the stream-id was successfully set.
+        inline bool setStream(unsigned stream) { return C2Param::setStream(stream); }
+
+        DEFINE_CAST_OPERATORS(input)
+    };
+
+    /// Specialization for an output stream parameter.
+    struct output : public T, public S, public C2BaseIndexOverride<S, BaseIndex>,
+            public C2StructCheck<S, BaseIndex,
+                    T::indexFlags | T::Index::kStreamFlag | T::Type::kDirOutput> {
+        /// Default constructor. Stream-ID is undefined.
+        inline output() : T(sizeof(_type), output::typeIndex) { }
+        /// Wrapper around base structure's constructor while also specifying stream-ID.
+        template<typename ...Args>
+        inline output(unsigned stream, const Args(&... args))
+            : T(sizeof(_type), output::typeIndex, stream), S(args...) { }
+        /// Set stream-id. \retval true if the stream-id was successfully set.
+        inline bool setStream(unsigned stream) { return C2Param::setStream(stream); }
+
+        DEFINE_CAST_OPERATORS(output)
+    };
+};
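+
+// For illustration only (names are assumptions), a per-stream tuning on output stream 0:
+//   typedef C2StreamParam<C2Tuning, C2Int64Value, kParamIndexMyStreamTuning> C2MyStreamTuning;
+//   C2MyStreamTuning::output tuning(0u, 100);  // stream-ID 0, initial value 100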
+
+/**
+ * Stream-parameter template for flexible structures.
+ *
+ * Base template to define a stream setting/tuning or info based on a flexible structure and
+ * an optional BaseIndex. Stream parameters are tied to a specific stream on a port (input or
+ * output).
+ *
+ * \tparam T param type C2Setting, C2Tuning or C2Info
+ * \tparam S wrapped flexible structure
+ * \tparam BaseIndex optional base-index override. Must be specified for common/reused structures.
+ *
+ * Parameters wrap structures by prepending a (parameter) header. The fields and methods of flexible
+ * structures can be accessed via the m member variable; however, the constructors of the structure
+ * are wrapped directly. (This is because flexible types cannot be subclassed.)
+ *
+ * There are 3 flavors of stream parameters: unspecified port, input and output. All of these expose
+ * a setStream method and an extra initial streamID parameter for the constructor. Moreover,
+ * parameters with unspecified port expose a setPort method, and add an additional initial port
+ * parameter to the constructor.
+ */
+template<typename T, typename S, int BaseIndex>
+struct C2_HIDE C2StreamParam<T, S, BaseIndex, IF_FLEXIBLE(S)>
+    : public T, public C2BaseIndexOverride<S, BaseIndex>,
+      private C2FlexStructCheck<S, BaseIndex,
+              T::indexFlags | T::Index::kStreamFlag | T::Index::kDirUndefined> {
+private:
+    typedef C2StreamParam<T, S> _type;
+    /// Default constructor. Port/direction and stream-ID is undefined.
+    inline C2StreamParam(size_t flexCount) : T(_type::calcSize(flexCount), _type::typeIndex, 0u) { }
+    /// Wrapper around base structure's constructor while also specifying port/direction and
+    /// stream-ID.
+    template<typename ...Args>
+    inline C2StreamParam(size_t flexCount, bool _output, unsigned stream, const Args(&... args))
+        : T(_type::calcSize(flexCount), _output ? output::typeIndex : input::typeIndex, stream),
+          m(flexCount, args...) { }
+
+public:
+    S m; ///< wrapped flexible structure
+
+    /// Set port/direction.
+    inline void setPort(bool output) { C2Param::setPort(output); }
+    /// Set stream-id. \retval true if the stream-id was successfully set.
+    inline bool setStream(unsigned stream) { return C2Param::setStream(stream); }
+
+    DEFINE_FLEXIBLE_METHODS(_type, S)
+    DEFINE_CAST_OPERATORS(_type)
+
+    /// Specialization for an input stream parameter.
+    struct input : public T, public C2BaseIndexOverride<S, BaseIndex>,
+            public C2FlexStructCheck<S, BaseIndex,
+                    T::indexFlags | T::Index::kStreamFlag | T::Type::kDirInput> {
+    private:
+        /// Default constructor. Stream-ID is undefined.
+        inline input(size_t flexCount) : T(_type::calcSize(flexCount), input::typeIndex) { }
+        /// Wrapper around base structure's constructor while also specifying stream-ID.
+        template<typename ...Args>
+        inline input(size_t flexCount, unsigned stream, const Args(&... args))
+            : T(_type::calcSize(flexCount), input::typeIndex, stream), m(flexCount, args...) { }
+
+    public:
+        S m; ///< wrapped flexible structure
+
+        /// Set stream-id. \retval true if the stream-id was successfully set.
+        inline bool setStream(unsigned stream) { return C2Param::setStream(stream); }
+
+        DEFINE_FLEXIBLE_METHODS(input, S)
+        DEFINE_CAST_OPERATORS(input)
+    };
+
+    /// Specialization for an output stream parameter.
+    struct output : public T, public C2BaseIndexOverride<S, BaseIndex>,
+            public C2FlexStructCheck<S, BaseIndex,
+                    T::indexFlags | T::Index::kStreamFlag | T::Type::kDirOutput> {
+    private:
+        /// Default constructor. Stream-ID is undefined.
+        inline output(size_t flexCount) : T(_type::calcSize(flexCount), output::typeIndex) { }
+        /// Wrapper around base structure's constructor while also specifying stream-ID.
+        template<typename ...Args>
+        inline output(size_t flexCount, unsigned stream, const Args(&... args))
+            : T(_type::calcSize(flexCount), output::typeIndex, stream), m(flexCount, args...) { }
+
+    public:
+        S m; ///< wrapped flexible structure
+
+        /// Set stream-id. \retval true if the stream-id was successfully set.
+        inline bool setStream(unsigned stream) { return C2Param::setStream(stream); }
+
+        DEFINE_FLEXIBLE_METHODS(output, S)
+        DEFINE_CAST_OPERATORS(output)
+    };
+};
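+
+// For illustration only (names are assumptions), a flexible per-stream info of 32-bit values:
+//   typedef C2StreamParam<C2Info, C2Int32Array, kParamIndexMyValues> C2MyValuesStreamInfo;
+//   std::unique_ptr<C2MyValuesStreamInfo::input> v =
+//           C2MyValuesStreamInfo::input::alloc_unique(4u, 0u);  // 4 values on stream-ID 0
+//   // the values are reached through the m member: v->m.mValues[0] .. v->m.mValues[3]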
+
+/* ======================== SIMPLE VALUE PARAMETERS ======================== */
+
+/**
+ * \ingroup internal
+ * A structure template encapsulating a single element with default constructors and no base-index.
+ */
+template<typename T>
+struct C2SimpleValueStruct {
+    T mValue; ///< simple value of the structure
+    // Default constructor.
+    inline C2SimpleValueStruct() = default;
+    // Constructor with an initial value.
+    inline C2SimpleValueStruct(T value) : mValue(value) {}
+    DEFINE_C2STRUCT_NO_BASE(SimpleValue)
+};
+
+// TODO: move this and next to some generic place
+/**
+ * Interface to a block of (mapped) memory containing an array of some type (T).
+ */
+template<typename T>
+struct C2MemoryBlock {
+    /// \returns the number of elements in this block.
+    virtual size_t size() const = 0;
+    /// \returns a const pointer to the start of this block. Care must be taken to not read outside
+    /// the block.
+    virtual const T *data() const = 0; // TODO: should this be friend access only in some C2Memory module?
+    /// \returns a pointer to the start of this block. Care must be taken to not read or write
+    /// outside the block.
+    inline T *data() { return const_cast<T*>(data()); }
+protected:
+    // TODO: for now it should never be deleted as C2MemoryBlock
+    virtual ~C2MemoryBlock() = default;
+};
+
+/**
+ * Interface to a block of memory containing a constant (constexpr) array of some type (T).
+ */
+template<typename T>
+struct C2ConstMemoryBlock : public C2MemoryBlock<T> {
+    virtual const T * data() const { return mData; }
+    virtual size_t size() const { return mSize; }
+
+    /// Constructor.
+    template<unsigned N>
+    inline constexpr C2ConstMemoryBlock(const T(&init)[N]) : mData(init), mSize(N) {}
+
+private:
+    const T *mData;
+    const size_t mSize;
+};
+
+/// \addtogroup internal
+/// @{
+
+/// Helper class to initialize flexible arrays with various initializers.
+struct _C2ValueArrayHelper {
+    // char[]-s are used as null-terminated strings, so the last element is never initialized.
+
+    /// Initialize a flexible array using a constexpr memory block.
+    template<typename T>
+    static void init(T(&array)[], size_t arrayLen, const C2MemoryBlock<T> &block) {
+        // reserve last element for terminal 0 for strings
+        if (arrayLen && std::is_same<T, char>::value) {
+            --arrayLen;
+        }
+        if (block.data()) {
+            memcpy(array, block.data(), std::min(arrayLen, block.size()) * sizeof(T));
+        }
+    }
+
+    /// Initialize a flexible array using an initializer list.
+    template<typename T>
+    static void init(T(&array)[], size_t arrayLen, const std::initializer_list<T> &init) {
+        size_t ix = 0;
+        // reserve last element for terminal 0 for strings
+        if (arrayLen && std::is_same<T, char>::value) {
+            --arrayLen;
+        }
+        for (const T &item : init) {
+            if (ix == arrayLen) {
+                break;
+            }
+            array[ix++] = item;
+        }
+    }
+
+    /// Initialize a flexible array using another flexible array.
+    template<typename T, unsigned N>
+    static void init(T(&array)[], size_t arrayLen, const T(&str)[N]) {
+        // reserve last element for terminal 0 for strings
+        if (arrayLen && std::is_same<T, char>::value) {
+            --arrayLen;
+        }
+        if (arrayLen) {
+            strncpy(array, str, std::min(arrayLen, (size_t)N));
+        }
+    }
+};
+
+/**
+ * Specialization for flexible blob and string arrays. A structure template encapsulating a single
+ * flexible array member with default flexible constructors and no base-index. This type cannot be
+ * constructed on its own as its size is 0.
+ *
+ * \internal This is different from C2SimpleArrayStruct<T[]> simply because its member is named
+ * mValue to reflect that this is a single value.
+ */
+template<typename T>
+struct C2SimpleValueStruct<T[]> {
+    static_assert(std::is_same<T, char>::value || std::is_same<T, uint8_t>::value,
+                  "C2SimpleValueStruct<T[]> is only for BLOB or STRING");
+    T mValue[];
+
+    inline C2SimpleValueStruct() = default;
+    DEFINE_C2STRUCT_NO_BASE(SimpleValue)
+    FLEX(C2SimpleValueStruct, mValue)
+
+private:
+    inline C2SimpleValueStruct(size_t flexCount, const C2MemoryBlock<T> &block) {
+        _C2ValueArrayHelper::init(mValue, flexCount, block);
+    }
+
+    inline C2SimpleValueStruct(size_t flexCount, const std::initializer_list<T> &init) {
+        _C2ValueArrayHelper::init(mValue, flexCount, init);
+    }
+
+    template<unsigned N>
+    inline C2SimpleValueStruct(size_t flexCount, const T(&init)[N]) {
+        _C2ValueArrayHelper::init(mValue, flexCount, init);
+    }
+};
+
+/// @}
+
+/**
+ * A structure template encapsulating a single flexible array element of a specific type (T) with
+ * default constructors and no base-index. This type cannot be constructed on its own as its size
+ * is 0. Instead, it is meant to be used as a parameter, e.g.
+ *
+ *   typedef C2StreamParam<C2Info, C2SimpleArrayStruct<C2MyFancyStruct>,
+ *           kParamIndexMyFancyArrayStreamParam> C2MyFancyArrayStreamInfo;
+ */
+template<typename T>
+struct C2SimpleArrayStruct {
+    static_assert(!std::is_same<T, char>::value && !std::is_same<T, uint8_t>::value,
+                  "use C2SimpleValueStruct<T[]> is for BLOB or STRING");
+
+    T mValues[]; ///< array member
+    /// Default constructor
+    inline C2SimpleArrayStruct() = default;
+    DEFINE_C2STRUCT_NO_BASE(SimpleArray)
+    FLEX(C2SimpleArrayStruct, mValues)
+
+private:
+    /// Construct from a C2MemoryBlock.
+    /// Used only by the flexible parameter allocators (alloc_unique & alloc_shared).
+    inline C2SimpleArrayStruct(size_t flexCount, const C2MemoryBlock<T> &block) {
+        _C2ValueArrayHelper::init(mValues, flexCount, block);
+    }
+
+    /// Construct from an initializer list.
+    /// Used only by the flexible parameter allocators (alloc_unique & alloc_shared).
+    inline C2SimpleArrayStruct(size_t flexCount, const std::initializer_list<T> &init) {
+        _C2ValueArrayHelper::init(mValues, flexCount, init);
+    }
+
+    /// Construct from another flexible array.
+    /// Used only by the flexible parameter allocators (alloc_unique & alloc_shared).
+    template<unsigned N>
+    inline C2SimpleArrayStruct(size_t flexCount, const T(&init)[N]) {
+        _C2ValueArrayHelper::init(mValues, flexCount, init);
+    }
+};
+
+/**
+ * \addtogroup simplevalue Simple value and array structures.
+ * @{
+ *
+ * Simple value structures.
+ *
+ * Structures containing a single simple value. These can be reused to easily define simple
+ * parameters of various types:
+ *
+ *   typedef C2PortParam<C2Tuning, C2Int32Value, kParamIndexMyIntegerPortParam>
+ *           C2MyIntegerPortParamTuning;
+ *
+ * They contain a single member (mValue or mValues) that is described as "value" or "values".
+ */
+/// A 32-bit signed integer parameter in mValue, described as "value"
+typedef C2SimpleValueStruct<int32_t> C2Int32Value;
+/// A 32-bit signed integer array parameter in mValues, described as "values"
+typedef C2SimpleArrayStruct<int32_t> C2Int32Array;
+/// A 32-bit unsigned integer parameter in mValue, described as "value"
+typedef C2SimpleValueStruct<uint32_t> C2Uint32Value;
+/// A 32-bit unsigned integer array parameter in mValues, described as "values"
+typedef C2SimpleArrayStruct<uint32_t> C2Uint32Array;
+/// A 64-bit signed integer parameter in mValue, described as "value"
+typedef C2SimpleValueStruct<int64_t> C2Int64Value;
+/// A 64-bit signed integer array parameter in mValues, described as "values"
+typedef C2SimpleArrayStruct<int64_t> C2Int64Array;
+/// A 64-bit unsigned integer parameter in mValue, described as "value"
+typedef C2SimpleValueStruct<uint64_t> C2Uint64Value;
+/// A 64-bit unsigned integer array parameter in mValues, described as "values"
+typedef C2SimpleArrayStruct<uint64_t> C2Uint64Array;
+/// A float parameter in mValue, described as "value"
+typedef C2SimpleValueStruct<float> C2FloatValue;
+/// A float array parameter in mValues, described as "values"
+typedef C2SimpleArrayStruct<float> C2FloatArray;
+/// A blob flexible parameter in mValue, described as "value"
+typedef C2SimpleValueStruct<uint8_t[]> C2BlobValue;
+/// A string flexible parameter in mValue, described as "value"
+typedef C2SimpleValueStruct<char[]> C2StringValue;
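+
+// A hedged sketch (names are assumptions): the flexible typedefs (C2BlobValue, C2StringValue) can
+// only back parameters created through the alloc_shared/alloc_unique helpers, e.g.:
+//   typedef C2GlobalParam<C2Setting, C2BlobValue, kParamIndexMyExtraData> C2MyExtraDataSetting;
+//   std::shared_ptr<C2MyExtraDataSetting> xd = C2MyExtraDataSetting::alloc_shared(64u);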
+
+#if 1
+template<typename T>
+const std::initializer_list<const C2FieldDescriptor> C2SimpleValueStruct<T>::fieldList = { C2FIELD(mValue, "value") };
+template<typename T>
+const std::initializer_list<const C2FieldDescriptor> C2SimpleValueStruct<T[]>::fieldList = { C2FIELD(mValue, "value") };
+template<typename T>
+const std::initializer_list<const C2FieldDescriptor> C2SimpleArrayStruct<T>::fieldList = { C2FIELD(mValues, "values") };
+#else
+// This seems to be handled by the template above
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<int32_t>, { C2FIELD(mValue, "value") });
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<uint32_t>, { C2FIELD(mValue, "value") });
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<int64_t>, { C2FIELD(mValue, "value") });
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<uint64_t>, { C2FIELD(mValue, "value") });
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<float>, { C2FIELD(mValue, "value") });
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<uint8_t[]>, { C2FIELD(mValue, "value") });
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<char[]>, { C2FIELD(mValue, "value") });
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleArrayStruct<int32_t>, { C2FIELD(mValues, "values") });
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleArrayStruct<uint32_t>, { C2FIELD(mValues, "values") });
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleArrayStruct<int64_t>, { C2FIELD(mValues, "values") });
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleArrayStruct<uint64_t>, { C2FIELD(mValues, "values") });
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleArrayStruct<float>, { C2FIELD(mValues, "values") });
+#endif
+
+/// @}
+
+/// @}
+
+}  // namespace android
+
+#endif  // C2PARAM_DEF_H_
diff --git a/media/libstagefright/codec2/include/C2Work.h b/media/libstagefright/codec2/include/C2Work.h
new file mode 100644
index 0000000..a42d11a
--- /dev/null
+++ b/media/libstagefright/codec2/include/C2Work.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef C2WORK_H_
+
+#define C2WORK_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <C2Param.h>
+#include <C2Buffer.h>
+#include <C2Config.h>
+
+#include <memory>
+#include <list>
+#include <vector>
+
+typedef int status_t;
+
+namespace android {
+
+/// \defgroup work Work and data processing
+/// @{
+
+struct C2SettingResult {
+    enum Failure {
+        READ_ONLY,  ///< parameter is read-only and cannot be set
+        MISMATCH,   ///< parameter mismatches input data
+        BAD_VALUE,  ///< parameter does not accept value
+        BAD_TYPE,   ///< parameter is not supported
+        BAD_PORT,   ///< parameter is not supported on the specific port
+        BAD_INDEX,  ///< parameter is not supported on the specific stream
+        CONFLICT,   ///< parameter is in conflict with another setting
+    };
+
+    C2ParamField field;
+    Failure failure;
+    std::unique_ptr<C2FieldSupportedValues> supportedValues; //< if different from normal (e.g. in conflict w/another param or input data)
+    std::list<C2ParamField> conflictingFields;
+};
+
+// ================================================================================================
+//  WORK
+// ================================================================================================
+
+// node_id-s
+typedef uint32_t node_id;
+
+enum flags_t : uint32_t {
+    BUFFERFLAG_CODEC_CONFIG,
+    BUFFERFLAG_DROP_FRAME,
+    BUFFERFLAG_END_OF_STREAM,
+};
+
+enum {
+    kParamIndexWorkOrdinal,
+};
+
+struct C2WorkOrdinalStruct {
+    uint64_t timestamp;
+    uint64_t frame_index;    // submission ordinal on the initial component
+    uint64_t custom_ordinal; // can be given by the component, e.g. decode order
+
+    DEFINE_AND_DESCRIBE_C2STRUCT(WorkOrdinal)
+    C2FIELD(timestamp, "timestamp")
+    C2FIELD(frame_index, "frame-index")
+    C2FIELD(custom_ordinal, "custom-ordinal")
+};
+
+struct C2BufferPack {
+//public:
+    flags_t  flags;
+    C2WorkOrdinalStruct ordinal;
+    std::vector<std::shared_ptr<C2Buffer>> buffers;
+    //< for initial work item, these may also come from the parser - if provided
+    //< for output buffers, these are the responses to requestedInfos
+    std::list<std::unique_ptr<C2Info>>       infos;
+    std::list<std::shared_ptr<C2InfoBuffer>> infoBuffers;
+};
+
+struct C2Worklet {
+//public:
+    // IN
+    node_id component;
+
+    std::list<std::unique_ptr<C2Param>> tunings; //< tunings to be applied before processing this
+                                                 // worklet
+    std::list<C2Param::Type> requestedInfos;
+    std::vector<std::shared_ptr<C2BlockAllocator>> allocators; //< This vector shall be the same size as
+                                                          //< output.buffers.
+
+    // OUT
+    C2BufferPack output;
+    std::list<std::unique_ptr<C2SettingResult>> failures;
+};
+
+/**
+ * This structure holds information about a single work item.
+ *
+ * This structure shall be passed by the client to the component for the first worklet. As such,
+ * worklets must not be empty. The ownership of this object is passed.
+ *
+ * input:
+ *      The input data to be processed. This is provided by the client with ownership. When the work
+ *      is returned, the input buffer-pack's buffer vector shall contain nullptrs.
+ *
+ * worklets:
+ *      The chain of components and associated allocators, tunings and info requests that the data
+ *      must pass through. If this has more than a single element, the tunnels between successive
+ *      components of the worklet chain must have been (successfully) pre-registered at the time
+ *      the work is submitted. Allocating the output buffers in the worklets is the responsibility
+ *      of each component. Upon work submission, each output buffer-pack shall be an appropriately
+ *      sized vector containing nullptrs. When the work is completed/returned to the client, the
+ *      output buffer-packs are expected to contain the allocated output buffers.
+ *
+ * worklets_processed:
+ *      It shall be initialized to 0 by the client when the work is submitted.
+ *      It shall contain the number of worklets that were successfully processed when the work is
+ *      returned. If this is less than the number of worklets, result must not be success.
+ *      It must be in the range of [0, worklets.size()].
+ *
+ * result:
+ *      The final outcome of the work. If 0 when work is returned, it is assumed that all worklets
+ *      have been processed.
+ */
+struct C2Work {
+//public:
+    // pre-chain infos (for portions of a tunneling chain that happened before this work-chain for
+    // this work item - due to framework-facilitated (non-tunneled) work-chaining)
+    std::list<std::pair<std::unique_ptr<C2PortMimeConfig>, std::unique_ptr<C2Info>>> preChainInfos;
+    std::list<std::pair<std::unique_ptr<C2PortMimeConfig>, std::unique_ptr<C2Buffer>>> preChainInfoBlobs;
+
+    C2BufferPack input;
+    std::list<std::unique_ptr<C2Worklet>> worklets;
+
+    uint32_t worklets_processed;
+    status_t result;
+};
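+
+// A hedged construction sketch (values are illustrative; buffer setup is omitted): the client
+// fills in the input pack and one worklet, and initializes the bookkeeping fields.
+//   C2Work work;
+//   work.input.flags = BUFFERFLAG_END_OF_STREAM;
+//   work.input.ordinal.timestamp = 0;
+//   work.input.ordinal.frame_index = 0;
+//   work.worklets.emplace_back(new C2Worklet);
+//   work.worklets_processed = 0;
+//   work.result = 0;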
+
+struct C2WorkOutline {
+//public:
+    C2WorkOrdinalStruct ordinal;
+    std::list<node_id> chain;
+};
+
+/// @}
+
+}  // namespace android
+
+#endif  // C2WORK_H_
diff --git a/media/libstagefright/codec2/tests/Android.mk b/media/libstagefright/codec2/tests/Android.mk
new file mode 100644
index 0000000..49c4253
--- /dev/null
+++ b/media/libstagefright/codec2/tests/Android.mk
@@ -0,0 +1,37 @@
+# Build the unit tests.
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+
+LOCAL_MODULE := codec2_test
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_SRC_FILES := \
+	vndk/C2UtilTest.cpp \
+	C2_test.cpp \
+	C2Param_test.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+	libcutils \
+	libstagefright_codec2 \
+	liblog
+
+LOCAL_C_INCLUDES := \
+	frameworks/av/media/libstagefright/codec2/include \
+	frameworks/av/media/libstagefright/codec2/vndk/include \
+	$(TOP)/frameworks/native/include/media/openmax \
+
+LOCAL_CFLAGS += -Werror -Wall -std=c++14
+LOCAL_CLANG := true
+
+include $(BUILD_NATIVE_TEST)
+
+# Include subdirectory makefiles
+# ============================================================
+
+# If we're building with ONE_SHOT_MAKEFILE (mm, mmm), then what the framework
+# team really wants is to build the stuff defined by this makefile.
+ifeq (,$(ONE_SHOT_MAKEFILE))
+include $(call first-makefiles-under,$(LOCAL_PATH))
+endif
diff --git a/media/libstagefright/codec2/tests/C2Param_test.cpp b/media/libstagefright/codec2/tests/C2Param_test.cpp
new file mode 100644
index 0000000..ec82c84
--- /dev/null
+++ b/media/libstagefright/codec2/tests/C2Param_test.cpp
@@ -0,0 +1,2687 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2Param_test"
+
+#include <gtest/gtest.h>
+
+#include <util/C2ParamUtils.h>
+#include <C2ParamDef.h>
+
+namespace android {
+
+void PrintTo(const _C2FieldId &id, ::std::ostream* os) {
+    *os << "@" << id._mOffset << "+" << id._mSize;
+}
+
+void PrintTo(const C2FieldDescriptor &fd, ::std::ostream *os) {
+    using FD=C2FieldDescriptor;
+    switch (fd.type()) {
+    case FD::INT32: *os << "i32"; break;
+    case FD::INT64: *os << "i64"; break;
+    case FD::UINT32: *os << "u32"; break;
+    case FD::UINT64: *os << "u64"; break;
+    case FD::FLOAT: *os << "float"; break;
+    case FD::STRING: *os << "char"; break;
+    case FD::BLOB: *os << "u8"; break;
+    default:
+        if (fd.type() & FD::STRUCT_FLAG) {
+            *os << "struct-" << (fd.type() & ~FD::STRUCT_FLAG);
+        } else {
+            *os << "type-" << fd.type();
+        }
+    }
+    *os << " " << fd.name();
+    if (fd.length() > 1) {
+        *os << "[" << fd.length() << "]";
+    } else if (fd.length() == 0) {
+        *os << "[]";
+    }
+    *os << " (";
+    PrintTo(fd._mFieldId, os);
+    *os << "*" << fd.length() << ")";
+}
+
+enum C2ParamIndexType {
+    kParamIndexNumber,
+    kParamIndexNumbers,
+    kParamIndexNumber2,
+    kParamIndexVendorStart = C2Param::BaseIndex::kVendorStart,
+    kParamIndexVendorNumbers,
+};
+
+void ffff(int(*)(int)) {}
+
+/* ============================= STRUCT DECLARATION AND DESCRIPTION ============================= */
+
+typedef C2FieldDescriptor FD;
+
+class C2ParamTest : public ::testing::Test {
+};
+
+class C2ParamTest_ParamFieldList
+        : public ::testing::TestWithParam<std::initializer_list<const C2FieldDescriptor>> {
+};
+
+enum {
+    kParamIndexSize,
+    kParamIndexTestA,
+    kParamIndexTestB,
+    kParamIndexTestFlexS32,
+    kParamIndexTestFlexEndS32,
+    kParamIndexTestFlexS64,
+    kParamIndexTestFlexEndS64,
+    kParamIndexTestFlexSize,
+    kParamIndexTestFlexEndSize,
+};
+
+struct C2SizeStruct {
+    int32_t mNumber;
+    int32_t mHeight;
+    enum : uint32_t { baseIndex = kParamIndexSize };                        // <= needed for C2FieldDescriptor
+    const static std::initializer_list<const C2FieldDescriptor> fieldList;  // <= needed for C2FieldDescriptor
+    const static FD::Type TYPE = (FD::Type)(baseIndex | FD::STRUCT_FLAG);
+};
+
+DEFINE_NO_NAMED_VALUES_FOR(C2SizeStruct)
+
+// Test 1. define a structure without any helper methods
+
+bool operator==(const C2FieldDescriptor &a, const C2FieldDescriptor &b) {
+    return a.type() == b.type()
+            && a.length() == b.length()
+            && strcmp(a.name(), b.name()) == 0
+            && a._mFieldId == b._mFieldId;
+}
+
+struct C2TestStruct_A {
+    int32_t mSigned32;
+    int64_t mSigned64[2];
+    uint32_t mUnsigned32[1];
+    uint64_t mUnsigned64;
+    float mFloat;
+    C2SizeStruct mSize[3];
+    uint8_t mBlob[100];
+    char mString[100];
+    bool mYesNo[100];
+
+    const static std::initializer_list<const C2FieldDescriptor> fieldList;
+    // enum : uint32_t { baseIndex = kParamIndexTest };
+    // typedef C2TestStruct_A _type;
+} __attribute__((packed));
+
+const std::initializer_list<const C2FieldDescriptor> C2TestStruct_A::fieldList =
+    { { FD::INT32,    1, "s32",   0, 4 },
+      { FD::INT64,    2, "s64",   4, 8 },
+      { FD::UINT32,   1, "u32",  20, 4 },
+      { FD::UINT64,   1, "u64",  24, 8 },
+      { FD::FLOAT,    1, "fp",   32, 4 },
+      { C2SizeStruct::TYPE, 3, "size", 36, 8 },
+      { FD::BLOB,   100, "blob", 60, 1 },
+      { FD::STRING, 100, "str", 160, 1 },
+      { FD::BLOB,   100, "y-n", 260, 1 } };
+
+TEST_P(C2ParamTest_ParamFieldList, VerifyStruct) {
+    std::vector<const C2FieldDescriptor> fields = GetParam(), expected = C2TestStruct_A::fieldList;
+
+    // verify first field descriptor
+    EXPECT_EQ(FD::INT32, fields[0].type());
+    EXPECT_STREQ("s32", fields[0].name());
+    EXPECT_EQ(1u, fields[0].length());
+    EXPECT_EQ(_C2FieldId(0, 4), fields[0]._mFieldId);
+
+    EXPECT_EQ(expected[0], fields[0]);
+    EXPECT_EQ(expected[1], fields[1]);
+    EXPECT_EQ(expected[2], fields[2]);
+    EXPECT_EQ(expected[3], fields[3]);
+    EXPECT_EQ(expected[4], fields[4]);
+    EXPECT_EQ(expected[5], fields[5]);
+    EXPECT_EQ(expected[6], fields[6]);
+    EXPECT_EQ(expected[7], fields[7]);
+    for (size_t i = 8; i < fields.size() && i < expected.size(); ++i) {
+        EXPECT_EQ(expected[i], fields[i]);
+    }
+}
+
+INSTANTIATE_TEST_CASE_P(InitializerList, C2ParamTest_ParamFieldList, ::testing::Values(C2TestStruct_A::fieldList));
+
+// define fields using C2FieldDescriptor pointer constructor
+const std::initializer_list<const C2FieldDescriptor> C2TestStruct_A_FD_PTR_fieldList =
+    { C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mSigned32,   "s32"),
+      C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mSigned64,   "s64"),
+      C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mUnsigned32, "u32"),
+      C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mUnsigned64, "u64"),
+      C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mFloat,      "fp"),
+      C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mSize,       "size"),
+      C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mBlob,       "blob"),
+      C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mString,     "str"),
+    //  C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mYesNo,      "y-n")
+    };
+
+INSTANTIATE_TEST_CASE_P(PointerConstructor, C2ParamTest_ParamFieldList, ::testing::Values(C2TestStruct_A_FD_PTR_fieldList));
+
+// define fields using C2FieldDescriptor member-pointer constructor
+const std::initializer_list<const C2FieldDescriptor> C2TestStruct_A_FD_MEM_PTR_fieldList =
+    { C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mSigned32,   "s32"),
+      C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mSigned64,   "s64"),
+      C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mUnsigned32, "u32"),
+      C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mUnsigned64, "u64"),
+      C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mFloat,      "fp"),
+      C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mSize,       "size"),
+      C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mBlob,       "blob"),
+      C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mString,     "str"),
+    //  C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mYesNo,      "y-n")
+    };
+
+INSTANTIATE_TEST_CASE_P(MemberPointerConstructor, C2ParamTest_ParamFieldList, ::testing::Values(C2TestStruct_A_FD_MEM_PTR_fieldList));
+
+// Test 2. define a structure with two-step helper methods
+
+struct C2TestAStruct {
+    int32_t mSigned32;
+    int64_t mSigned64[2];
+    uint32_t mUnsigned32[1];
+    uint64_t mUnsigned64;
+    float mFloat;
+    C2SizeStruct mSize[3];
+    uint8_t mBlob[100];
+    char mString[100];
+    bool mYesNo[100];
+
+private: // test access level
+    DEFINE_C2STRUCT(TestA)
+} C2_PACK;
+
+DESCRIBE_C2STRUCT(TestA, {
+    C2FIELD(mSigned32, "s32")
+    C2FIELD(mSigned64, "s64")
+    C2FIELD(mUnsigned32, "u32")
+    C2FIELD(mUnsigned64, "u64")
+    C2FIELD(mFloat, "fp")
+    C2FIELD(mSize, "size")
+    C2FIELD(mBlob, "blob")
+    C2FIELD(mString, "str")
+    // C2FIELD(mYesNo, "y-n")
+}) // ; optional
+
+INSTANTIATE_TEST_CASE_P(DescribeStruct2Step, C2ParamTest_ParamFieldList, ::testing::Values(C2TestAStruct::fieldList));
+
+// Test 3. define a structure with one-step helper method
+
+struct C2TestBStruct {
+    int32_t mSigned32;
+    int64_t mSigned64[2];
+    uint32_t mUnsigned32[1];
+    uint64_t mUnsigned64;
+    float mFloat;
+    C2SizeStruct mSize[3];
+    uint8_t mBlob[100];
+    char mString[100];
+    bool mYesNo[100];
+
+private: // test access level
+    DEFINE_AND_DESCRIBE_C2STRUCT(TestB)
+
+    C2FIELD(mSigned32, "s32")
+    C2FIELD(mSigned64, "s64")
+    C2FIELD(mUnsigned32, "u32")
+    C2FIELD(mUnsigned64, "u64")
+    C2FIELD(mFloat, "fp")
+    C2FIELD(mSize, "size")
+    C2FIELD(mBlob, "blob")
+    C2FIELD(mString, "str")
+    // C2FIELD(mYesNo, "y-n")
+};
+
+INSTANTIATE_TEST_CASE_P(DescribeStruct1Step, C2ParamTest_ParamFieldList, ::testing::Values(C2TestBStruct::fieldList));
+
+// Test 4. flexible members
+
+template<typename T>
+class C2ParamTest_FlexParamFieldList : public ::testing::Test {
+protected:
+    using Type=FD::Type;
+
+    // static std::initializer_list<std::initializer_list<const C2FieldDescriptor>>
+    static std::vector<std::vector<const C2FieldDescriptor>>
+            GetLists();
+
+    constexpr static Type flexType =
+            std::is_same<T, int32_t>::value ? FD::INT32 :
+            std::is_same<T, int64_t>::value ? FD::INT64 :
+            std::is_same<T, uint32_t>::value ? FD::UINT32 :
+            std::is_same<T, uint64_t>::value ? FD::UINT64 :
+            std::is_same<T, float>::value ? FD::FLOAT :
+            std::is_same<T, uint8_t>::value ? FD::BLOB :
+            std::is_same<T, char>::value ? FD::STRING :
+            std::is_same<T, C2SizeStruct>::value ? C2SizeStruct::TYPE : (Type)0;
+    constexpr static size_t flexSize = sizeof(T);
+};
+
+typedef ::testing::Types<int32_t, int64_t, C2SizeStruct> FlexTypes;
+TYPED_TEST_CASE(C2ParamTest_FlexParamFieldList, FlexTypes);
+
+TYPED_TEST(C2ParamTest_FlexParamFieldList, VerifyStruct) {
+    for (auto a : this->GetLists()) {
+        std::vector<const C2FieldDescriptor> fields = a;
+        if (fields.size() > 1) {
+            EXPECT_EQ(2u, fields.size());
+            EXPECT_EQ(C2FieldDescriptor(FD::INT32, 1, "s32", 0, 4), fields[0]);
+            EXPECT_EQ(C2FieldDescriptor(this->flexType, 0, "flex", 4, this->flexSize),
+                      fields[1]);
+        } else {
+            EXPECT_EQ(1u, fields.size());
+            EXPECT_EQ(C2FieldDescriptor(this->flexType, 0, "flex", 0, this->flexSize),
+                      fields[0]);
+        }
+    }
+}
+
+struct C2TestStruct_FlexS32 {
+    int32_t mFlex[];
+
+    const static std::initializer_list<const C2FieldDescriptor> fieldList;
+    // enum : uint32_t { baseIndex = kParamIndexTestFlex, flexSize = 4 };
+    // typedef C2TestStruct_FlexS32 _type;
+    // typedef int32_t flexType;
+};
+
+const std::initializer_list<const C2FieldDescriptor> C2TestStruct_FlexS32::fieldList = {
+    { FD::INT32, 0, "flex", 0, 4 }
+};
+
+struct C2TestStruct_FlexEndS32 {
+    int32_t mSigned32;
+    int32_t mFlex[];
+
+    const static std::initializer_list<const C2FieldDescriptor> fieldList;
+    // enum : uint32_t { baseIndex = kParamIndexTestFlexEnd, flexSize = 4 };
+    // typedef C2TestStruct_FlexEnd _type;
+    // typedef int32_t flexType;
+};
+
+const std::initializer_list<const C2FieldDescriptor> C2TestStruct_FlexEndS32::fieldList = {
+    { FD::INT32, 1, "s32", 0, 4 },
+    { FD::INT32, 0, "flex", 4, 4 },
+};
+
+const static std::initializer_list<const C2FieldDescriptor> C2TestStruct_FlexEndS32_ptr_fieldList = {
+    C2FieldDescriptor(&((C2TestStruct_FlexEndS32*)0)->mSigned32, "s32"),
+    C2FieldDescriptor(&((C2TestStruct_FlexEndS32*)0)->mFlex, "flex"),
+};
+
+struct C2TestFlexS32Struct {
+    int32_t mFlexSigned32[];
+private: // test access level
+    C2TestFlexS32Struct() {}
+
+    DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(TestFlexS32, mFlexSigned32)
+    C2FIELD(mFlexSigned32, "flex")
+};
+
+struct C2TestFlexEndS32Struct {
+    int32_t mSigned32;
+    int32_t mFlexSigned32[];
+private: // test access level
+    C2TestFlexEndS32Struct() {}
+
+    DEFINE_FLEX_C2STRUCT(TestFlexEndS32, mFlexSigned32)
+} C2_PACK;
+
+DESCRIBE_C2STRUCT(TestFlexEndS32, {
+    C2FIELD(mSigned32, "s32")
+    C2FIELD(mFlexSigned32, "flex")
+}) // ; optional
+
+template<>
+std::vector<std::vector<const C2FieldDescriptor>>
+//std::initializer_list<std::initializer_list<const C2FieldDescriptor>>
+C2ParamTest_FlexParamFieldList<int32_t>::GetLists() {
+    return {
+        C2TestStruct_FlexS32::fieldList,
+        C2TestStruct_FlexEndS32::fieldList,
+        C2TestStruct_FlexEndS32_ptr_fieldList,
+        C2TestFlexS32Struct::fieldList,
+        C2TestFlexEndS32Struct::fieldList,
+    };
+}
+
+struct C2TestStruct_FlexS64 {
+    int64_t mFlexSigned64[];
+
+    const static std::initializer_list<const C2FieldDescriptor> fieldList;
+    // enum : uint32_t { baseIndex = kParamIndexTestFlexS64, flexSize = 8 };
+    // typedef C2TestStruct_FlexS64 _type;
+    // typedef int64_t flexType;
+};
+
+const std::initializer_list<const C2FieldDescriptor> C2TestStruct_FlexS64::fieldList = {
+    { FD::INT64, 0, "flex", 0, 8 }
+};
+
+struct C2TestStruct_FlexEndS64 {
+    int32_t mSigned32;
+    int64_t mSigned64Flex[];
+
+    const static std::initializer_list<const C2FieldDescriptor> fieldList;
+    // enum : uint32_t { baseIndex = C2TestStruct_FlexEndS64, flexSize = 8 };
+    // typedef C2TestStruct_FlexEndS64 _type;
+    // typedef int64_t flexType;
+};
+
+const std::initializer_list<const C2FieldDescriptor> C2TestStruct_FlexEndS64::fieldList = {
+    { FD::INT32, 1, "s32", 0, 4 },
+    { FD::INT64, 0, "flex", 4, 8 },
+};
+
+struct C2TestFlexS64Struct {
+    int64_t mFlexSigned64[];
+    C2TestFlexS64Struct() {}
+
+    DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(TestFlexS64, mFlexSigned64)
+    C2FIELD(mFlexSigned64, "flex")
+};
+
+struct C2TestFlexEndS64Struct {
+    int32_t mSigned32;
+    int64_t mFlexSigned64[];
+    C2TestFlexEndS64Struct() {}
+
+    DEFINE_FLEX_C2STRUCT(TestFlexEndS64, mFlexSigned64)
+} C2_PACK;
+
+DESCRIBE_C2STRUCT(TestFlexEndS64, {
+    C2FIELD(mSigned32, "s32")
+    C2FIELD(mFlexSigned64, "flex")
+}) // ; optional
+
+template<>
+std::vector<std::vector<const C2FieldDescriptor>>
+//std::initializer_list<std::initializer_list<const C2FieldDescriptor>>
+C2ParamTest_FlexParamFieldList<int64_t>::GetLists() {
+    return {
+        C2TestStruct_FlexS64::fieldList,
+        C2TestStruct_FlexEndS64::fieldList,
+        C2TestFlexS64Struct::fieldList,
+        C2TestFlexEndS64Struct::fieldList,
+    };
+}
+
+struct C2TestStruct_FlexSize {
+    C2SizeStruct mFlexSize[];
+
+    const static std::initializer_list<const C2FieldDescriptor> fieldList;
+    // enum : uint32_t { baseIndex = kParamIndexTestFlexSize, flexSize = 8 };
+    // typedef C2TestStruct_FlexSize _type;
+    // typedef C2SizeStruct flexType;
+};
+
+const std::initializer_list<const C2FieldDescriptor> C2TestStruct_FlexSize::fieldList = {
+    { C2SizeStruct::TYPE, 0, "flex", 0, sizeof(C2SizeStruct) }
+};
+
+struct C2TestStruct_FlexEndSize {
+    int32_t mSigned32;
+    C2SizeStruct mSizeFlex[];
+
+    const static std::initializer_list<const C2FieldDescriptor> fieldList;
+    // enum : uint32_t { baseIndex = C2TestStruct_FlexEndSize, flexSize = 8 };
+    // typedef C2TestStruct_FlexEndSize _type;
+    // typedef C2SizeStruct flexType;
+};
+
+const std::initializer_list<const C2FieldDescriptor> C2TestStruct_FlexEndSize::fieldList = {
+    { FD::INT32, 1, "s32", 0, 4 },
+    { C2SizeStruct::TYPE, 0, "flex", 4, sizeof(C2SizeStruct) },
+};
+
+struct C2TestFlexSizeStruct {
+    C2SizeStruct mFlexSize[];
+    C2TestFlexSizeStruct() {}
+
+    DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(TestFlexSize, mFlexSize)
+    C2FIELD(mFlexSize, "flex")
+};
+
+struct C2TestFlexEndSizeStruct {
+    int32_t mSigned32;
+    C2SizeStruct mFlexSize[];
+    C2TestFlexEndSizeStruct() {}
+
+    DEFINE_FLEX_C2STRUCT(TestFlexEndSize, mFlexSize)
+} C2_PACK;
+
+DESCRIBE_C2STRUCT(TestFlexEndSize, {
+    C2FIELD(mSigned32, "s32")
+    C2FIELD(mFlexSize, "flex")
+}) // ; optional
+
+template<>
+std::vector<std::vector<const C2FieldDescriptor>>
+//std::initializer_list<std::initializer_list<const C2FieldDescriptor>>
+C2ParamTest_FlexParamFieldList<C2SizeStruct>::GetLists() {
+    return {
+        C2TestStruct_FlexSize::fieldList,
+        C2TestStruct_FlexEndSize::fieldList,
+        C2TestFlexSizeStruct::fieldList,
+        C2TestFlexEndSizeStruct::fieldList,
+    };
+}
+
+TEST_F(C2ParamTest, FieldId) {
+    // pointer constructor
+    EXPECT_EQ(_C2FieldId(0, 4), _C2FieldId(&((C2TestStruct_A*)0)->mSigned32));
+    EXPECT_EQ(_C2FieldId(4, 8), _C2FieldId(&((C2TestStruct_A*)0)->mSigned64));
+    EXPECT_EQ(_C2FieldId(20, 4), _C2FieldId(&((C2TestStruct_A*)0)->mUnsigned32));
+    EXPECT_EQ(_C2FieldId(24, 8), _C2FieldId(&((C2TestStruct_A*)0)->mUnsigned64));
+    EXPECT_EQ(_C2FieldId(32, 4), _C2FieldId(&((C2TestStruct_A*)0)->mFloat));
+    EXPECT_EQ(_C2FieldId(36, 8), _C2FieldId(&((C2TestStruct_A*)0)->mSize));
+    EXPECT_EQ(_C2FieldId(60, 1), _C2FieldId(&((C2TestStruct_A*)0)->mBlob));
+    EXPECT_EQ(_C2FieldId(160, 1), _C2FieldId(&((C2TestStruct_A*)0)->mString));
+    EXPECT_EQ(_C2FieldId(260, 1), _C2FieldId(&((C2TestStruct_A*)0)->mYesNo));
+
+    EXPECT_EQ(_C2FieldId(0, 4), _C2FieldId(&((C2TestFlexEndSizeStruct*)0)->mSigned32));
+    EXPECT_EQ(_C2FieldId(4, 8), _C2FieldId(&((C2TestFlexEndSizeStruct*)0)->mFlexSize));
+
+    // member pointer constructor
+    EXPECT_EQ(_C2FieldId(0, 4), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mSigned32));
+    EXPECT_EQ(_C2FieldId(4, 8), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mSigned64));
+    EXPECT_EQ(_C2FieldId(20, 4), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mUnsigned32));
+    EXPECT_EQ(_C2FieldId(24, 8), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mUnsigned64));
+    EXPECT_EQ(_C2FieldId(32, 4), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mFloat));
+    EXPECT_EQ(_C2FieldId(36, 8), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mSize));
+    EXPECT_EQ(_C2FieldId(60, 1), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mBlob));
+    EXPECT_EQ(_C2FieldId(160, 1), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mString));
+    EXPECT_EQ(_C2FieldId(260, 1), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mYesNo));
+
+    EXPECT_EQ(_C2FieldId(0, 4), _C2FieldId((C2TestFlexEndSizeStruct*)0, &C2TestFlexEndSizeStruct::mSigned32));
+    EXPECT_EQ(_C2FieldId(4, 8), _C2FieldId((C2TestFlexEndSizeStruct*)0, &C2TestFlexEndSizeStruct::mFlexSize));
+
+    // member pointer sans type pointer
+    EXPECT_EQ(_C2FieldId(0, 4), _C2FieldId(&C2TestStruct_A::mSigned32));
+    EXPECT_EQ(_C2FieldId(4, 8), _C2FieldId(&C2TestStruct_A::mSigned64));
+    EXPECT_EQ(_C2FieldId(20, 4), _C2FieldId(&C2TestStruct_A::mUnsigned32));
+    EXPECT_EQ(_C2FieldId(24, 8), _C2FieldId(&C2TestStruct_A::mUnsigned64));
+    EXPECT_EQ(_C2FieldId(32, 4), _C2FieldId(&C2TestStruct_A::mFloat));
+    EXPECT_EQ(_C2FieldId(36, 8), _C2FieldId(&C2TestStruct_A::mSize));
+    EXPECT_EQ(_C2FieldId(60, 1), _C2FieldId(&C2TestStruct_A::mBlob));
+    EXPECT_EQ(_C2FieldId(160, 1), _C2FieldId(&C2TestStruct_A::mString));
+    EXPECT_EQ(_C2FieldId(260, 1), _C2FieldId(&C2TestStruct_A::mYesNo));
+
+    EXPECT_EQ(_C2FieldId(0, 4), _C2FieldId(&C2TestFlexEndSizeStruct::mSigned32));
+    EXPECT_EQ(_C2FieldId(4, 8), _C2FieldId(&C2TestFlexEndSizeStruct::mFlexSize));
+
+    typedef C2GlobalParam<C2Info, C2TestAStruct> C2TestAInfo;
+    typedef C2GlobalParam<C2Info, C2TestFlexEndSizeStruct> C2TestFlexEndSizeInfo;
+
+    // pointer constructor in C2Param
+    EXPECT_EQ(_C2FieldId(8, 4), _C2FieldId(&((C2TestAInfo*)0)->mSigned32));
+    EXPECT_EQ(_C2FieldId(12, 8), _C2FieldId(&((C2TestAInfo*)0)->mSigned64));
+    EXPECT_EQ(_C2FieldId(28, 4), _C2FieldId(&((C2TestAInfo*)0)->mUnsigned32));
+    EXPECT_EQ(_C2FieldId(32, 8), _C2FieldId(&((C2TestAInfo*)0)->mUnsigned64));
+    EXPECT_EQ(_C2FieldId(40, 4), _C2FieldId(&((C2TestAInfo*)0)->mFloat));
+    EXPECT_EQ(_C2FieldId(44, 8), _C2FieldId(&((C2TestAInfo*)0)->mSize));
+    EXPECT_EQ(_C2FieldId(68, 1), _C2FieldId(&((C2TestAInfo*)0)->mBlob));
+    EXPECT_EQ(_C2FieldId(168, 1), _C2FieldId(&((C2TestAInfo*)0)->mString));
+    EXPECT_EQ(_C2FieldId(268, 1), _C2FieldId(&((C2TestAInfo*)0)->mYesNo));
+
+    EXPECT_EQ(_C2FieldId(8, 4), _C2FieldId(&((C2TestFlexEndSizeInfo*)0)->m.mSigned32));
+    EXPECT_EQ(_C2FieldId(12, 8), _C2FieldId(&((C2TestFlexEndSizeInfo*)0)->m.mFlexSize));
+
+    // member pointer in C2Param
+    EXPECT_EQ(_C2FieldId(8, 4), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mSigned32));
+    EXPECT_EQ(_C2FieldId(12, 8), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mSigned64));
+    EXPECT_EQ(_C2FieldId(28, 4), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mUnsigned32));
+    EXPECT_EQ(_C2FieldId(32, 8), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mUnsigned64));
+    EXPECT_EQ(_C2FieldId(40, 4), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mFloat));
+    EXPECT_EQ(_C2FieldId(44, 8), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mSize));
+    EXPECT_EQ(_C2FieldId(68, 1), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mBlob));
+    EXPECT_EQ(_C2FieldId(168, 1), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mString));
+    EXPECT_EQ(_C2FieldId(268, 1), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mYesNo));
+
+    // NOTE: cannot use a member pointer for flex params due to introduction of 'm'
+    // EXPECT_EQ(_C2FieldId(8, 4), _C2FieldId(&C2TestFlexEndSizeInfo::m.mSigned32));
+    // EXPECT_EQ(_C2FieldId(12, 8), _C2FieldId(&C2TestFlexEndSizeInfo::m.mFlexSize));
+
+}
+
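+// Compile-time type-check helpers for the test below: S32 accepts only a pointer to a plain
+// int32_t, FLX accepts only a pointer to a zero-extent array with element type U, and MP
+// checks which class a member pointer is bound to.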
+struct S32 {
+    template<typename T, class B=typename std::remove_extent<T>::type>
+    inline S32(const T*) {
+        static_assert(!std::is_array<T>::value, "should not be an array");
+        static_assert(std::is_same<B, int32_t>::value, "should be int32_t");
+    }
+};
+
+struct FLX {
+    template<typename U, typename T, class B=typename std::remove_extent<T>::type>
+    inline FLX(const T*, const U*) {
+        static_assert(std::is_array<T>::value, "should be an array");
+        static_assert(std::extent<T>::value == 0, "should be an array of 0 extent");
+        static_assert(std::is_same<B, U>::value, "should be type U");
+    }
+};
+
+struct MP {
+    template<typename U, typename T, typename ExpectedU, typename UnexpectedU>
+    inline MP(T U::*, const ExpectedU*, const UnexpectedU*) {
+        static_assert(!std::is_same<U, UnexpectedU>::value, "should not be member pointer of the base type");
+        static_assert(std::is_same<U, ExpectedU>::value, "should be member pointer of the derived type");
+    }
+
+    template<typename U, typename T, typename B, typename D>
+    inline MP(T D::*, const D*) { }
+};
+
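+// Never called at runtime; instantiating S32/FLX/MP here is enough to trigger their
+// static_asserts if array/element types are not propagated as expected.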
+void compiledStatic_arrayTypePropagationTest() {
+    (void)S32(&((C2TestFlexEndS32Struct *)0)->mSigned32);
+    (void)FLX(&((C2TestFlexEndS32Struct *)0)->mFlexSigned32, (int32_t*)0);
+    (void)FLX(&((C2TestFlexS32Struct *)0)->mFlexSigned32, (int32_t*)0);
+
+    typedef C2GlobalParam<C2Info, C2TestAStruct> C2TestAInfo;
+
+    // TRICKY: &derivedClass::baseMember has type of baseClass::*
+    static_assert(std::is_same<decltype(&C2TestAInfo::mSigned32), int32_t C2TestAStruct::*>::value,
+                  "base member pointer should have base class in type");
+
+    // therefore, member pointer expands to baseClass::* in templates
+    (void)MP(&C2TestAInfo::mSigned32,
+             (C2TestAStruct*)0 /* expected */, (C2TestAInfo*)0 /* unexpected */);
+    // but can be cast to derivedClass::*
+    (void)MP((int32_t C2TestAInfo::*)&C2TestAInfo::mSigned32,
+             (C2TestAInfo*)0 /* expected */, (C2TestAStruct*)0 /* unexpected */);
+
+    // TRICKY: baseClass::* does not autoconvert to derivedClass::* even in templates
+    // (void)MP(&C2TestAInfo::mSigned32, (C2TestAInfo*)0);
+}
+
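+// Dereferencing a field through a base-struct member pointer, or through the same pointer
+// cast to the derived param class, must land on the same address once the 8-byte param
+// header is accounted for.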
+TEST_F(C2ParamTest, MemberPointerCast) {
+    typedef C2GlobalParam<C2Info, C2TestAStruct> C2TestAInfo;
+
+    static_assert(offsetof(C2TestAInfo, mSigned32) == 8, "offset should be 8");
+    constexpr int32_t C2TestAStruct::* s32ptr = &C2TestAInfo::mSigned32;
+    constexpr int32_t C2TestAInfo::* s32ptr_derived = (int32_t C2TestAStruct::*)&C2TestAInfo::mSigned32;
+    constexpr int32_t C2TestAInfo::* s32ptr_cast2derived = (int32_t C2TestAInfo::*)s32ptr;
+    C2TestAInfo *info = (C2TestAInfo *)256;
+    C2TestAStruct *strukt = (C2TestAStruct *)info;
+    int32_t *info_s32_derived = &(info->*s32ptr_derived);
+    int32_t *info_s32_cast2derived = &(info->*s32ptr_cast2derived);
+    int32_t *info_s32 = &(info->*s32ptr);
+    int32_t *strukt_s32 = &(strukt->*s32ptr);
+
+    EXPECT_EQ(256u, (uintptr_t)info);
+    EXPECT_EQ(264u, (uintptr_t)strukt);
+    EXPECT_EQ(264u, (uintptr_t)info_s32_derived);
+    EXPECT_EQ(264u, (uintptr_t)info_s32_cast2derived);
+    EXPECT_EQ(264u, (uintptr_t)info_s32);
+    EXPECT_EQ(264u, (uintptr_t)strukt_s32);
+
+    typedef C2GlobalParam<C2Info, C2TestFlexEndSizeStruct> C2TestFlexEndSizeInfo;
+    static_assert(offsetof(C2TestFlexEndSizeInfo, m.mSigned32) == 8, "offset should be 8");
+    static_assert(offsetof(C2TestFlexEndSizeInfo, m.mFlexSize) == 12, "offset should be 12");
+}
+
+/* ===================================== PARAM USAGE TESTS ===================================== */
+
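+// Params used by the remaining tests: a fixed-size struct holding a single int32_t and a
+// flexible struct holding a trailing int32_t array, each exposed as global, port and stream
+// tunings below.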
+struct C2NumberStruct {
+    int32_t mNumber;
+    C2NumberStruct() {}
+    C2NumberStruct(int32_t _number) : mNumber(_number) {}
+
+    DEFINE_AND_DESCRIBE_C2STRUCT(Number)
+    C2FIELD(mNumber, "number")
+};
+
+struct C2NumbersStruct {
+    int32_t mNumbers[];
+    C2NumbersStruct() {}
+
+    DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(Numbers, mNumbers)
+    C2FIELD(mNumbers, "numbers")
+};
+static_assert(sizeof(C2NumbersStruct) == 0, "C2NumbersStruct has incorrect size");
+
+typedef C2GlobalParam<C2Tuning, C2NumberStruct> C2NumberTuning;
+typedef   C2PortParam<C2Tuning, C2NumberStruct> C2NumberPortTuning;
+typedef C2StreamParam<C2Tuning, C2NumberStruct> C2NumberStreamTuning;
+
+typedef C2GlobalParam<C2Tuning, C2NumbersStruct> C2NumbersTuning;
+typedef   C2PortParam<C2Tuning, C2NumbersStruct> C2NumbersPortTuning;
+typedef C2StreamParam<C2Tuning, C2NumbersStruct> C2NumbersStreamTuning;
+
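+// The block below is compiled out (#if 0); it sketches alternative usage of the param
+// helpers and is apparently kept for reference only.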
+//
+#if 0
+
+void test() {
+    C2NumberStruct s(10);
+    (void)C2NumberStruct::fieldList;
+};
+
+typedef C2StreamParam<C2Tuning, C2Int64Value, kParamIndexNumberB> C2NumberConfig4;
+typedef C2PortParam<C2Tuning, C2Int32Value, kParamIndexNumber> C2NumberConfig3;
+typedef C2GlobalParam<C2Tuning, C2StringValue, kParamIndexNumber> C2VideoNameConfig;
+
+void test3() {
+    C2NumberConfig3 s(10);
+    s.mValue = 11;
+    s = 12;
+    (void)C2NumberConfig3::fieldList;
+    std::shared_ptr<C2VideoNameConfig> n = C2VideoNameConfig::alloc_shared(25);
+    strcpy(n->m.mValue, "lajos");
+    C2NumberConfig4 t(false, 0, 11);
+    t.mValue = 15;
+};
+
+struct C2NumbersStruct {
+    int32_t mNumbers[];
+    enum { baseIndex = kParamIndexNumber };
+    const static std::initializer_list<const C2FieldDescriptor> fieldList;
+    C2NumbersStruct() {}
+
+    FLEX(C2NumbersStruct, mNumbers);
+};
+
+static_assert(sizeof(C2NumbersStruct) == 0, "C2NumbersStruct should be empty");
+
+
+typedef C2GlobalParam<C2Info, C2NumbersStruct> C2NumbersInfo;
+
+const std::initializer_list<const C2FieldDescriptor> C2NumbersStruct::fieldList =
+//    { { FD::INT32, 0, "widths" } };
+    { C2FieldDescriptor(&((C2NumbersStruct*)(nullptr))->mNumbers, "number") };
+
+typedef C2PortParam<C2Tuning, C2NumberStruct> C2NumberConfig;
+
+std::list<const C2FieldDescriptor> myList = C2NumberConfig::fieldList;
+
+    std::unique_ptr<android::C2ParamDescriptor> __test_describe(uint32_t paramType) {
+        std::list<const C2FieldDescriptor> fields = describeC2Params<C2NumberConfig>();
+
+        auto widths = C2NumbersInfo::alloc_shared(5);
+        widths->flexCount();
+        widths->m.mNumbers[4] = 1;
+
+        test();
+        test3();
+
+        C2NumberConfig outputWidth(false, 123);
+
+        C2Param::Index index(paramType);
+        switch (paramType) {
+        case C2NumberConfig::baseIndex:
+            return std::unique_ptr<C2ParamDescriptor>(new C2ParamDescriptor{
+                true /* isRequired */,
+                "number",
+                index,
+            });
+        }
+        return nullptr;
+    }
+
+
+} // namespace android
+
+#endif
+//
+
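+// canCallSetPort(x) is a compile-time predicate: it is true only when x.setPort(bool) is
+// well-formed, i.e. x is a non-const param whose type still allows changing its direction.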
+template<typename T>
+bool canSetPort(T &o, bool output) { return o.setPort(output); }
+bool canSetPort(...) { return false; }
+
+template<typename S, typename=decltype(((S*)0)->setPort(true))>
+static std::true_type _canCallSetPort(int);
+template<typename>
+static std::false_type _canCallSetPort(...);
+#define canCallSetPort(x) decltype(_canCallSetPort<std::remove_reference<decltype(x)>::type>(0))::value
+
+/* ======================================= STATIC TESTS ======================================= */
+
+static_assert(_C2Comparable<int>::value, "int is not comparable");
+static_assert(!_C2Comparable<void>::value, "void is comparable");
+
+struct C2_HIDE _test0 {
+    bool operator==(const _test0&);
+    bool operator!=(const _test0&);
+};
+struct C2_HIDE _test1 {
+    bool operator==(const _test1&);
+};
+struct C2_HIDE _test2 {
+    bool operator!=(const _test2&);
+};
+static_assert(_C2Comparable<_test0>::value, "class with == and != is not comparable");
+static_assert(_C2Comparable<_test1>::value, "class with == is not comparable");
+static_assert(_C2Comparable<_test2>::value, "class with != is not comparable");
+
+/* ======================================= C2PARAM TESTS ======================================= */
+
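+// Purely compile-time layout checks: the bodies below consist only of static_asserts, so
+// simply compiling this file verifies the expected sizes, offsets and type indices.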
+struct _C2ParamInspector {
+    static void StaticTest();
+    static void StaticFlexTest();
+};
+
+// TEST_F(_C2ParamInspector, StaticTest) {
+void _C2ParamInspector::StaticTest() {
+    typedef C2Param::Index I;
+
+    // C2NumberStruct: baseIndex = kIndex                          (args)
+    static_assert(C2NumberStruct::baseIndex == kParamIndexNumber, "bad index");
+    static_assert(sizeof(C2NumberStruct) == 4, "bad size");
+
+    // C2NumberTuning:             kIndex | tun | global           (args)
+    static_assert(C2NumberTuning::baseIndex == kParamIndexNumber, "bad index");
+    static_assert(C2NumberTuning::typeIndex == (kParamIndexNumber | I::kTypeTuning | I::kDirGlobal), "bad index");
+    static_assert(sizeof(C2NumberTuning) == 12, "bad size");
+
+    static_assert(offsetof(C2NumberTuning, _mSize) == 0, "bad size");
+    static_assert(offsetof(C2NumberTuning, _mIndex) == 4, "bad offset");
+    static_assert(offsetof(C2NumberTuning, mNumber) == 8, "bad offset");
+
+    // C2NumberPortTuning:         kIndex | tun | port             (bool, args)
+    static_assert(sizeof(C2NumberPortTuning) == 12, "bad size");
+    // C2NumberPortTuning::input:  kIndex | tun | port | input     (args)
+    // C2NumberPortTuning::output: kIndex | tun | port | output    (args)
+    static_assert(C2NumberPortTuning::input::baseIndex ==
+                  kParamIndexNumber, "bad index");
+    static_assert(C2NumberPortTuning::input::typeIndex ==
+                  (kParamIndexNumber | I::kTypeTuning | I::kDirInput), "bad index");
+    static_assert(C2NumberPortTuning::output::baseIndex ==
+                  kParamIndexNumber, "bad index");
+    static_assert(C2NumberPortTuning::output::typeIndex ==
+                  (kParamIndexNumber | I::kTypeTuning | I::kDirOutput), "bad index");
+    static_assert(sizeof(C2NumberPortTuning::input) == 12, "bad size");
+    static_assert(sizeof(C2NumberPortTuning::output) == 12, "bad size");
+    static_assert(offsetof(C2NumberPortTuning::input, _mSize) == 0, "bad size");
+    static_assert(offsetof(C2NumberPortTuning::input, _mIndex) == 4, "bad offset");
+    static_assert(offsetof(C2NumberPortTuning::input, mNumber) == 8, "bad offset");
+    static_assert(offsetof(C2NumberPortTuning::output, _mSize) == 0, "bad size");
+    static_assert(offsetof(C2NumberPortTuning::output, _mIndex) == 4, "bad offset");
+    static_assert(offsetof(C2NumberPortTuning::output, mNumber) == 8, "bad offset");
+
+    // C2NumberStreamTuning:       kIndex | tun | str              (bool, uint, args)
+    static_assert(sizeof(C2NumberStreamTuning) == 12u, "bad size");
+    // C2NumberStreamTuning::input kIndex | tun | str | input      (int, args)
+    // C2NumberStreamTuning::output kIx   | tun | str | output     (int, args)
+    static_assert(C2NumberStreamTuning::input::baseIndex ==
+                  kParamIndexNumber, "bad index");
+    static_assert(C2NumberStreamTuning::input::typeIndex ==
+                  (kParamIndexNumber | I::kTypeTuning | I::kDirInput | I::kStreamFlag), "bad index");
+    static_assert(C2NumberStreamTuning::output::baseIndex ==
+                  kParamIndexNumber, "bad index");
+    static_assert(C2NumberStreamTuning::output::typeIndex ==
+                  (kParamIndexNumber | I::kTypeTuning | I::kDirOutput | I::kStreamFlag), "bad index");
+    static_assert(sizeof(C2NumberStreamTuning::input) == 12u, "bad size");
+    static_assert(sizeof(C2NumberStreamTuning::output) == 12u, "bad size");
+    static_assert(offsetof(C2NumberStreamTuning::input, _mSize) == 0, "bad size");
+    static_assert(offsetof(C2NumberStreamTuning::input, _mIndex) == 4, "bad offset");
+    static_assert(offsetof(C2NumberStreamTuning::input, mNumber) == 8, "bad offset");
+    static_assert(offsetof(C2NumberStreamTuning::output, _mSize) == 0, "bad size");
+    static_assert(offsetof(C2NumberStreamTuning::output, _mIndex) == 4, "bad offset");
+    static_assert(offsetof(C2NumberStreamTuning::output, mNumber) == 8, "bad offset");
+}
+
+void _C2ParamInspector::StaticFlexTest() {
+    typedef C2Param::Index I;
+
+    // C2NumbersStruct: baseIndex = kIndex                          (args)
+    static_assert(C2NumbersStruct::baseIndex == (I::kFlexibleFlag | kParamIndexNumbers), "bad index");
+    static_assert(sizeof(C2NumbersStruct) == 0, "bad size");
+
+    // C2NumbersTuning:             kIndex | tun | global           (args)
+    static_assert(C2NumbersTuning::baseIndex == (I::kFlexibleFlag | kParamIndexNumbers), "bad index");
+    static_assert(C2NumbersTuning::typeIndex == (I::kFlexibleFlag | kParamIndexNumbers | I::kTypeTuning | I::kDirGlobal), "bad index");
+    static_assert(sizeof(C2NumbersTuning) == 8, "bad size");
+
+    static_assert(offsetof(C2NumbersTuning, _mSize) == 0, "bad size");
+    static_assert(offsetof(C2NumbersTuning, _mIndex) == 4, "bad offset");
+    static_assert(offsetof(C2NumbersTuning, m.mNumbers) == 8, "bad offset");
+
+    // C2NumbersPortTuning:         kIndex | tun | port             (bool, args)
+    static_assert(sizeof(C2NumbersPortTuning) == 8, "bad size");
+    // C2NumbersPortTuning::input:  kIndex | tun | port | input     (args)
+    // C2NumbersPortTuning::output: kIndex | tun | port | output    (args)
+    static_assert(C2NumbersPortTuning::input::baseIndex ==
+                  (I::kFlexibleFlag | kParamIndexNumbers), "bad index");
+    static_assert(C2NumbersPortTuning::input::typeIndex ==
+                  (I::kFlexibleFlag | kParamIndexNumbers | I::kTypeTuning | I::kDirInput), "bad index");
+    static_assert(C2NumbersPortTuning::output::baseIndex ==
+                  (I::kFlexibleFlag | kParamIndexNumbers), "bad index");
+    static_assert(C2NumbersPortTuning::output::typeIndex ==
+                  (I::kFlexibleFlag | kParamIndexNumbers | I::kTypeTuning | I::kDirOutput), "bad index");
+    static_assert(sizeof(C2NumbersPortTuning::input) == 8, "bad size");
+    static_assert(sizeof(C2NumbersPortTuning::output) == 8, "bad size");
+    static_assert(offsetof(C2NumbersPortTuning::input, _mSize) == 0, "bad size");
+    static_assert(offsetof(C2NumbersPortTuning::input, _mIndex) == 4, "bad offset");
+    static_assert(offsetof(C2NumbersPortTuning::input, m.mNumbers) == 8, "bad offset");
+    static_assert(offsetof(C2NumbersPortTuning::output, _mSize) == 0, "bad size");
+    static_assert(offsetof(C2NumbersPortTuning::output, _mIndex) == 4, "bad offset");
+    static_assert(offsetof(C2NumbersPortTuning::output, m.mNumbers) == 8, "bad offset");
+
+    // C2NumbersStreamTuning:       kIndex | tun | str              (bool, uint, args)
+    static_assert(sizeof(C2NumbersStreamTuning) == 8, "bad size");
+    // C2NumbersStreamTuning::input kIndex | tun | str | input      (int, args)
+    // C2NumbersStreamTuning::output kIx   | tun | str | output     (int, args)
+    static_assert(C2NumbersStreamTuning::input::baseIndex ==
+                  (I::kFlexibleFlag | kParamIndexNumbers), "bad index");
+    static_assert(C2NumbersStreamTuning::input::typeIndex ==
+                  (I::kFlexibleFlag | kParamIndexNumbers | I::kTypeTuning | I::kDirInput | I::kStreamFlag), "bad index");
+    static_assert(C2NumbersStreamTuning::output::baseIndex ==
+                  (I::kFlexibleFlag | kParamIndexNumbers), "bad index");
+    static_assert(C2NumbersStreamTuning::output::typeIndex ==
+                  (I::kFlexibleFlag | kParamIndexNumbers | I::kTypeTuning | I::kDirOutput | I::kStreamFlag), "bad index");
+    static_assert(sizeof(C2NumbersStreamTuning::input) == 8, "bad size");
+    static_assert(sizeof(C2NumbersStreamTuning::output) == 8, "bad size");
+    static_assert(offsetof(C2NumbersStreamTuning::input, _mSize) == 0, "bad size");
+    static_assert(offsetof(C2NumbersStreamTuning::input, _mIndex) == 4, "bad offset");
+    static_assert(offsetof(C2NumbersStreamTuning::input, m.mNumbers) == 8, "bad offset");
+    static_assert(offsetof(C2NumbersStreamTuning::output, _mSize) == 0, "bad size");
+    static_assert(offsetof(C2NumbersStreamTuning::output, _mIndex) == 4, "bad offset");
+    static_assert(offsetof(C2NumbersStreamTuning::output, m.mNumbers) == 8, "bad offset");
+}
+
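+// Runtime behavior of fixed-size params: validity, size(), direction/stream flags, equality,
+// setPort()/setStream(), type introspection and the From() down-casts.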
+TEST_F(C2ParamTest, ParamOpsTest) {
+    const C2NumberStruct str(100);
+    C2NumberStruct bstr;
+
+    {
+        EXPECT_EQ(100, str.mNumber);
+        bstr.mNumber = 100;
+
+        C2Param::BaseIndex index = C2NumberStruct::baseIndex;
+        EXPECT_FALSE(index.isVendor());
+        EXPECT_FALSE(index.isFlexible());
+        EXPECT_EQ(index.baseIndex(), kParamIndexNumber);
+        EXPECT_EQ(index.paramIndex(), kParamIndexNumber);
+    }
+
+    const C2NumberTuning tun(100);
+    C2NumberTuning btun;
+
+    {
+        // flags & invariables
+        for (const auto &p : { tun, btun }) {
+            EXPECT_TRUE((bool)p);
+            EXPECT_FALSE(!p);
+            EXPECT_EQ(12u, p.size());
+
+            EXPECT_FALSE(p.isVendor());
+            EXPECT_FALSE(p.isFlexible());
+            EXPECT_TRUE(p.isGlobal());
+            EXPECT_FALSE(p.forInput());
+            EXPECT_FALSE(p.forOutput());
+            EXPECT_FALSE(p.forStream());
+            EXPECT_FALSE(p.forPort());
+        }
+
+        // value
+        EXPECT_EQ(100, tun.mNumber);
+        EXPECT_EQ(0, btun.mNumber);
+        EXPECT_FALSE(tun == btun);
+        EXPECT_FALSE(tun.operator==(btun));
+        EXPECT_TRUE(tun != btun);
+        EXPECT_TRUE(tun.operator!=(btun));
+        btun.mNumber = 100;
+        EXPECT_EQ(tun, btun);
+
+        // index
+        EXPECT_EQ(C2Param::Type(tun.type()).baseIndex(), C2NumberStruct::baseIndex);
+        EXPECT_EQ(C2Param::Type(tun.type()).paramIndex(), kParamIndexNumber);
+        EXPECT_EQ(tun.type(), C2NumberTuning::typeIndex);
+        EXPECT_EQ(tun.stream(), ~0u);
+
+        C2Param::BaseIndex index = C2NumberTuning::baseIndex;
+        EXPECT_FALSE(index.isVendor());
+        EXPECT_FALSE(index.isFlexible());
+        EXPECT_EQ(index.baseIndex(), kParamIndexNumber);
+        EXPECT_EQ(index.paramIndex(), kParamIndexNumber);
+
+        C2Param::Type type = C2NumberTuning::typeIndex;
+        EXPECT_FALSE(type.isVendor());
+        EXPECT_FALSE(type.isFlexible());
+        EXPECT_TRUE(type.isGlobal());
+        EXPECT_FALSE(type.forInput());
+        EXPECT_FALSE(type.forOutput());
+        EXPECT_FALSE(type.forStream());
+        EXPECT_FALSE(type.forPort());
+
+        EXPECT_EQ(C2NumberTuning::From(nullptr), nullptr);
+        EXPECT_EQ(C2NumberTuning::From(&tun), &tun);
+        EXPECT_EQ(C2NumberPortTuning::From(&tun), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::input::From(&tun), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::output::From(&tun), nullptr);
+        EXPECT_EQ(C2NumberStreamTuning::From(&tun), nullptr);
+        EXPECT_EQ(C2NumberStreamTuning::input::From(&tun), nullptr);
+        EXPECT_EQ(C2NumberStreamTuning::output::From(&tun), nullptr);
+    }
+
+    const C2NumberPortTuning outp1(true, 100), inp1(false, 100);
+    C2NumberPortTuning boutp1, binp1, binp3(false, 100);
+    const C2NumberPortTuning::input inp2(100);
+    C2NumberPortTuning::input binp2;
+    const C2NumberPortTuning::output outp2(100);
+    C2NumberPortTuning::output boutp2;
+
+    {
+        static_assert(canCallSetPort(binp3), "should be able to");
+        static_assert(canCallSetPort(binp1), "should be able to");
+        static_assert(!canCallSetPort(inp1), "should not be able to (const)");
+        static_assert(!canCallSetPort(inp2), "should not be able to (const & type)");
+        static_assert(!canCallSetPort(binp2), "should not be able to (type)");
+
+        // flags & invariables
+        for (const auto &p : { outp1, inp1, boutp1 }) {
+            EXPECT_EQ(12u, p.size());
+            EXPECT_FALSE(p.isVendor());
+            EXPECT_FALSE(p.isFlexible());
+            EXPECT_FALSE(p.isGlobal());
+            EXPECT_FALSE(p.forStream());
+            EXPECT_TRUE(p.forPort());
+        }
+        for (const auto &p : { inp2, binp2 }) {
+            EXPECT_EQ(12u, p.size());
+            EXPECT_FALSE(p.isVendor());
+            EXPECT_FALSE(p.isFlexible());
+            EXPECT_FALSE(p.isGlobal());
+            EXPECT_FALSE(p.forStream());
+            EXPECT_TRUE(p.forPort());
+        }
+        for (const auto &p : { outp2, boutp2 }) {
+            EXPECT_EQ(12u, p.size());
+            EXPECT_FALSE(p.isVendor());
+            EXPECT_FALSE(p.isFlexible());
+            EXPECT_FALSE(p.isGlobal());
+            EXPECT_FALSE(p.forStream());
+            EXPECT_TRUE(p.forPort());
+        }
+
+        // port specific flags & invariables
+        EXPECT_FALSE(outp1.forInput());
+        EXPECT_TRUE(outp1.forOutput());
+
+        EXPECT_TRUE(inp1.forInput());
+        EXPECT_FALSE(inp1.forOutput());
+
+        for (const auto &p : { outp1, inp1 }) {
+            EXPECT_TRUE((bool)p);
+            EXPECT_FALSE(!p);
+            EXPECT_EQ(100, p.mNumber);
+        }
+        for (const auto &p : { outp2, boutp2 }) {
+            EXPECT_TRUE((bool)p);
+            EXPECT_FALSE(!p);
+
+            EXPECT_FALSE(p.forInput());
+            EXPECT_TRUE(p.forOutput());
+        }
+        for (const auto &p : { inp2, binp2 }) {
+            EXPECT_TRUE((bool)p);
+            EXPECT_FALSE(!p);
+
+            EXPECT_TRUE(p.forInput());
+            EXPECT_FALSE(p.forOutput());
+        }
+        for (const auto &p : { boutp1 } ) {
+            EXPECT_FALSE((bool)p);
+            EXPECT_TRUE(!p);
+
+            EXPECT_FALSE(p.forInput());
+            EXPECT_FALSE(p.forOutput());
+            EXPECT_EQ(0, p.mNumber);
+        }
+
+        // values
+        EXPECT_EQ(100, inp2.mNumber);
+        EXPECT_EQ(100, outp2.mNumber);
+        EXPECT_EQ(0, binp1.mNumber);
+        EXPECT_EQ(0, binp2.mNumber);
+        EXPECT_EQ(0, boutp1.mNumber);
+        EXPECT_EQ(0, boutp2.mNumber);
+
+        EXPECT_TRUE(inp1 != outp1);
+        EXPECT_TRUE(inp1 == inp2);
+        EXPECT_TRUE(outp1 == outp2);
+        EXPECT_TRUE(binp1 == boutp1);
+        EXPECT_TRUE(binp2 != boutp2);
+
+        EXPECT_TRUE(inp1 != binp1);
+        binp1.mNumber = 100;
+        EXPECT_TRUE(inp1 != binp1);
+        binp1.setPort(false /* output */);
+        EXPECT_TRUE((bool)binp1);
+        EXPECT_FALSE(!binp1);
+        EXPECT_TRUE(inp1 == binp1);
+
+        EXPECT_TRUE(inp2 != binp2);
+        binp2.mNumber = 100;
+        EXPECT_TRUE(inp2 == binp2);
+
+        binp1.setPort(true /* output */);
+        EXPECT_TRUE(outp1 == binp1);
+
+        EXPECT_TRUE(outp1 != boutp1);
+        boutp1.mNumber = 100;
+        EXPECT_TRUE(outp1 != boutp1);
+        boutp1.setPort(true /* output */);
+        EXPECT_TRUE((bool)boutp1);
+        EXPECT_FALSE(!boutp1);
+        EXPECT_TRUE(outp1 == boutp1);
+
+        EXPECT_TRUE(outp2 != boutp2);
+        boutp2.mNumber = 100;
+        EXPECT_TRUE(outp2 == boutp2);
+
+        boutp1.setPort(false /* output */);
+        EXPECT_TRUE(inp1 == boutp1);
+
+        // index
+        EXPECT_EQ(C2Param::Type(inp1.type()).baseIndex(), C2NumberStruct::baseIndex);
+        EXPECT_EQ(C2Param::Type(inp1.type()).paramIndex(), kParamIndexNumber);
+        EXPECT_EQ(inp1.type(), C2NumberPortTuning::input::typeIndex);
+        EXPECT_EQ(inp1.stream(), ~0u);
+
+        EXPECT_EQ(C2Param::Type(inp2.type()).baseIndex(), C2NumberStruct::baseIndex);
+        EXPECT_EQ(C2Param::Type(inp2.type()).paramIndex(), kParamIndexNumber);
+        EXPECT_EQ(inp2.type(), C2NumberPortTuning::input::typeIndex);
+        EXPECT_EQ(inp2.stream(), ~0u);
+
+        EXPECT_EQ(C2Param::Type(outp1.type()).baseIndex(), C2NumberStruct::baseIndex);
+        EXPECT_EQ(C2Param::Type(outp1.type()).paramIndex(), kParamIndexNumber);
+        EXPECT_EQ(outp1.type(), C2NumberPortTuning::output::typeIndex);
+        EXPECT_EQ(outp1.stream(), ~0u);
+
+        EXPECT_EQ(C2Param::Type(outp2.type()).baseIndex(), C2NumberStruct::baseIndex);
+        EXPECT_EQ(C2Param::Type(outp2.type()).paramIndex(), kParamIndexNumber);
+        EXPECT_EQ(outp2.type(), C2NumberPortTuning::output::typeIndex);
+        EXPECT_EQ(outp2.stream(), ~0u);
+
+        C2Param::BaseIndex index = C2NumberPortTuning::input::typeIndex;
+        EXPECT_FALSE(index.isVendor());
+        EXPECT_FALSE(index.isFlexible());
+        EXPECT_EQ(index.baseIndex(), kParamIndexNumber);
+        EXPECT_EQ(index.paramIndex(), kParamIndexNumber);
+
+        index = C2NumberPortTuning::output::typeIndex;
+        EXPECT_FALSE(index.isVendor());
+        EXPECT_FALSE(index.isFlexible());
+        EXPECT_EQ(index.baseIndex(), kParamIndexNumber);
+        EXPECT_EQ(index.paramIndex(), kParamIndexNumber);
+
+        C2Param::Type type = C2NumberPortTuning::input::typeIndex;
+        EXPECT_FALSE(type.isVendor());
+        EXPECT_FALSE(type.isFlexible());
+        EXPECT_FALSE(type.isGlobal());
+        EXPECT_TRUE(type.forInput());
+        EXPECT_FALSE(type.forOutput());
+        EXPECT_FALSE(type.forStream());
+        EXPECT_TRUE(type.forPort());
+
+        type = C2NumberPortTuning::output::typeIndex;
+        EXPECT_FALSE(type.isVendor());
+        EXPECT_FALSE(type.isFlexible());
+        EXPECT_FALSE(type.isGlobal());
+        EXPECT_FALSE(type.forInput());
+        EXPECT_TRUE(type.forOutput());
+        EXPECT_FALSE(type.forStream());
+        EXPECT_TRUE(type.forPort());
+
+        EXPECT_EQ(C2NumberPortTuning::From(nullptr), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::input::From(nullptr), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::output::From(nullptr), nullptr);
+        EXPECT_EQ(C2NumberTuning::From(&inp1), nullptr);
+        EXPECT_EQ(C2NumberTuning::From(&inp2), nullptr);
+        EXPECT_EQ(C2NumberTuning::From(&outp1), nullptr);
+        EXPECT_EQ(C2NumberTuning::From(&outp2), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::From(&inp1), &inp1);
+        EXPECT_EQ(C2NumberPortTuning::From(&inp2), (C2NumberPortTuning*)&inp2);
+        EXPECT_EQ(C2NumberPortTuning::From(&outp1), &outp1);
+        EXPECT_EQ(C2NumberPortTuning::From(&outp2), (C2NumberPortTuning*)&outp2);
+        EXPECT_EQ(C2NumberPortTuning::input::From(&inp1), (C2NumberPortTuning::input*)&inp1);
+        EXPECT_EQ(C2NumberPortTuning::input::From(&inp2), &inp2);
+        EXPECT_EQ(C2NumberPortTuning::input::From(&outp1), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::input::From(&outp2), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::output::From(&inp1), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::output::From(&inp2), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::output::From(&outp1), (C2NumberPortTuning::output*)&outp1);
+        EXPECT_EQ(C2NumberPortTuning::output::From(&outp2), &outp2);
+        EXPECT_EQ(C2NumberStreamTuning::From(&inp1), nullptr);
+        EXPECT_EQ(C2NumberStreamTuning::From(&inp2), nullptr);
+        EXPECT_EQ(C2NumberStreamTuning::From(&outp1), nullptr);
+        EXPECT_EQ(C2NumberStreamTuning::From(&outp2), nullptr);
+        EXPECT_EQ(C2NumberStreamTuning::input::From(&inp1), nullptr);
+        EXPECT_EQ(C2NumberStreamTuning::input::From(&inp2), nullptr);
+        EXPECT_EQ(C2NumberStreamTuning::input::From(&outp1), nullptr);
+        EXPECT_EQ(C2NumberStreamTuning::input::From(&outp2), nullptr);
+        EXPECT_EQ(C2NumberStreamTuning::output::From(&inp1), nullptr);
+        EXPECT_EQ(C2NumberStreamTuning::output::From(&inp2), nullptr);
+        EXPECT_EQ(C2NumberStreamTuning::output::From(&outp1), nullptr);
+        EXPECT_EQ(C2NumberStreamTuning::output::From(&outp2), nullptr);
+    }
+
+    const C2NumberStreamTuning outs1(true, 1u, 100), ins1(false, 1u, 100);
+    C2NumberStreamTuning bouts1, bins1, bins3(false, 1u, 100);
+    const C2NumberStreamTuning::input ins2(1u, 100);
+    C2NumberStreamTuning::input bins2;
+    const C2NumberStreamTuning::output outs2(1u, 100);
+    C2NumberStreamTuning::output bouts2;
+
+    {
+        static_assert(canCallSetPort(bins3), "should be able to");
+        static_assert(canCallSetPort(bins1), "should be able to");
+        static_assert(!canCallSetPort(ins1), "should not be able to (const)");
+        static_assert(!canCallSetPort(ins2), "should not be able to (const & type)");
+        static_assert(!canCallSetPort(bins2), "should not be able to (type)");
+
+        // flags & invariables
+        for (const auto &p : { outs1, ins1, bouts1 }) {
+            EXPECT_EQ(12u, p.size());
+            EXPECT_FALSE(p.isVendor());
+            EXPECT_FALSE(p.isFlexible());
+            EXPECT_FALSE(p.isGlobal());
+            EXPECT_TRUE(p.forStream());
+            EXPECT_FALSE(p.forPort());
+        }
+        for (const auto &p : { ins2, bins2 }) {
+            EXPECT_EQ(12u, p.size());
+            EXPECT_FALSE(p.isVendor());
+            EXPECT_FALSE(p.isFlexible());
+            EXPECT_FALSE(p.isGlobal());
+            EXPECT_TRUE(p.forStream());
+            EXPECT_FALSE(p.forPort());
+        }
+        for (const auto &p : { outs2, bouts2 }) {
+            EXPECT_EQ(12u, p.size());
+            EXPECT_FALSE(p.isVendor());
+            EXPECT_FALSE(p.isFlexible());
+            EXPECT_FALSE(p.isGlobal());
+            EXPECT_TRUE(p.forStream());
+            EXPECT_FALSE(p.forPort());
+        }
+
+        // port specific flags & invariables
+        EXPECT_FALSE(outs1.forInput());
+        EXPECT_TRUE(outs1.forOutput());
+
+        EXPECT_TRUE(ins1.forInput());
+        EXPECT_FALSE(ins1.forOutput());
+
+        for (const auto &p : { outs1, ins1 }) {
+            EXPECT_TRUE((bool)p);
+            EXPECT_FALSE(!p);
+            EXPECT_EQ(100, p.mNumber);
+            EXPECT_EQ(1u, p.stream());
+        }
+        for (const auto &p : { outs2, bouts2 }) {
+            EXPECT_TRUE((bool)p);
+            EXPECT_FALSE(!p);
+
+            EXPECT_FALSE(p.forInput());
+            EXPECT_TRUE(p.forOutput());
+        }
+        for (const auto &p : { ins2, bins2 }) {
+            EXPECT_TRUE((bool)p);
+            EXPECT_FALSE(!p);
+
+            EXPECT_TRUE(p.forInput());
+            EXPECT_FALSE(p.forOutput());
+        }
+        for (const auto &p : { bouts1 } ) {
+            EXPECT_FALSE((bool)p);
+            EXPECT_TRUE(!p);
+
+            EXPECT_FALSE(p.forInput());
+            EXPECT_FALSE(p.forOutput());
+            EXPECT_EQ(0, p.mNumber);
+        }
+
+        // values
+        EXPECT_EQ(100, ins2.mNumber);
+        EXPECT_EQ(100, outs2.mNumber);
+        EXPECT_EQ(0, bins1.mNumber);
+        EXPECT_EQ(0, bins2.mNumber);
+        EXPECT_EQ(0, bouts1.mNumber);
+        EXPECT_EQ(0, bouts2.mNumber);
+
+        EXPECT_EQ(1u, ins2.stream());
+        EXPECT_EQ(1u, outs2.stream());
+        EXPECT_EQ(0u, bins1.stream());
+        EXPECT_EQ(0u, bins2.stream());
+        EXPECT_EQ(0u, bouts1.stream());
+        EXPECT_EQ(0u, bouts2.stream());
+
+        EXPECT_TRUE(ins1 != outs1);
+        EXPECT_TRUE(ins1 == ins2);
+        EXPECT_TRUE(outs1 == outs2);
+        EXPECT_TRUE(bins1 == bouts1);
+        EXPECT_TRUE(bins2 != bouts2);
+
+        EXPECT_TRUE(ins1 != bins1);
+        bins1.mNumber = 100;
+        EXPECT_TRUE(ins1 != bins1);
+        bins1.setPort(false /* output */);
+        EXPECT_TRUE(ins1 != bins1);
+        bins1.setStream(1u);
+        EXPECT_TRUE(ins1 == bins1);
+
+        EXPECT_TRUE(ins2 != bins2);
+        bins2.mNumber = 100;
+        EXPECT_TRUE(ins2 != bins2);
+        bins2.setStream(1u);
+        EXPECT_TRUE(ins2 == bins2);
+
+        bins1.setPort(true /* output */);
+        EXPECT_TRUE(outs1 == bins1);
+
+        EXPECT_TRUE(outs1 != bouts1);
+        bouts1.mNumber = 100;
+        EXPECT_TRUE(outs1 != bouts1);
+        bouts1.setPort(true /* output */);
+        EXPECT_TRUE(outs1 != bouts1);
+        bouts1.setStream(1u);
+        EXPECT_TRUE(outs1 == bouts1);
+
+        EXPECT_TRUE(outs2 != bouts2);
+        bouts2.mNumber = 100;
+        EXPECT_TRUE(outs2 != bouts2);
+        bouts2.setStream(1u);
+        EXPECT_TRUE(outs2 == bouts2);
+
+        bouts1.setPort(false /* output */);
+        EXPECT_TRUE(ins1 == bouts1);
+
+        // index
+        EXPECT_EQ(C2Param::Type(ins1.type()).baseIndex(), C2NumberStruct::baseIndex);
+        EXPECT_EQ(C2Param::Type(ins1.type()).paramIndex(), kParamIndexNumber);
+        EXPECT_EQ(ins1.type(), C2NumberStreamTuning::input::typeIndex);
+
+        EXPECT_EQ(C2Param::Type(ins2.type()).baseIndex(), C2NumberStruct::baseIndex);
+        EXPECT_EQ(C2Param::Type(ins2.type()).paramIndex(), kParamIndexNumber);
+        EXPECT_EQ(ins2.type(), C2NumberStreamTuning::input::typeIndex);
+
+        EXPECT_EQ(C2Param::Type(outs1.type()).baseIndex(), C2NumberStruct::baseIndex);
+        EXPECT_EQ(C2Param::Type(outs1.type()).paramIndex(), kParamIndexNumber);
+        EXPECT_EQ(outs1.type(), C2NumberStreamTuning::output::typeIndex);
+
+        EXPECT_EQ(C2Param::Type(outs2.type()).baseIndex(), C2NumberStruct::baseIndex);
+        EXPECT_EQ(C2Param::Type(outs2.type()).paramIndex(), kParamIndexNumber);
+        EXPECT_EQ(outs2.type(), C2NumberStreamTuning::output::typeIndex);
+
+        C2Param::BaseIndex index = C2NumberStreamTuning::input::typeIndex;
+        EXPECT_FALSE(index.isVendor());
+        EXPECT_FALSE(index.isFlexible());
+        EXPECT_EQ(index.baseIndex(), kParamIndexNumber);
+        EXPECT_EQ(index.paramIndex(), kParamIndexNumber);
+
+        index = C2NumberStreamTuning::output::typeIndex;
+        EXPECT_FALSE(index.isVendor());
+        EXPECT_FALSE(index.isFlexible());
+        EXPECT_EQ(index.baseIndex(), kParamIndexNumber);
+        EXPECT_EQ(index.paramIndex(), kParamIndexNumber);
+
+        C2Param::Type type = C2NumberStreamTuning::input::typeIndex;
+        EXPECT_FALSE(type.isVendor());
+        EXPECT_FALSE(type.isFlexible());
+        EXPECT_FALSE(type.isGlobal());
+        EXPECT_TRUE(type.forInput());
+        EXPECT_FALSE(type.forOutput());
+        EXPECT_TRUE(type.forStream());
+        EXPECT_FALSE(type.forPort());
+
+        type = C2NumberStreamTuning::output::typeIndex;
+        EXPECT_FALSE(type.isVendor());
+        EXPECT_FALSE(type.isFlexible());
+        EXPECT_FALSE(type.isGlobal());
+        EXPECT_FALSE(type.forInput());
+        EXPECT_TRUE(type.forOutput());
+        EXPECT_TRUE(type.forStream());
+        EXPECT_FALSE(type.forPort());
+
+        EXPECT_EQ(C2NumberPortTuning::From(nullptr), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::input::From(nullptr), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::output::From(nullptr), nullptr);
+        EXPECT_EQ(C2NumberTuning::From(&ins1), nullptr);
+        EXPECT_EQ(C2NumberTuning::From(&ins2), nullptr);
+        EXPECT_EQ(C2NumberTuning::From(&outs1), nullptr);
+        EXPECT_EQ(C2NumberTuning::From(&outs2), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::From(&ins1), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::From(&ins2), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::From(&outs1), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::From(&outs2), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::input::From(&ins1), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::input::From(&ins2), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::input::From(&outs1), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::input::From(&outs2), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::output::From(&ins1), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::output::From(&ins2), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::output::From(&outs1), nullptr);
+        EXPECT_EQ(C2NumberPortTuning::output::From(&outs2), nullptr);
+        EXPECT_EQ(C2NumberStreamTuning::From(&ins1), &ins1);
+        EXPECT_EQ(C2NumberStreamTuning::From(&ins2), (C2NumberStreamTuning*)&ins2);
+        EXPECT_EQ(C2NumberStreamTuning::From(&outs1), &outs1);
+        EXPECT_EQ(C2NumberStreamTuning::From(&outs2), (C2NumberStreamTuning*)&outs2);
+        EXPECT_EQ(C2NumberStreamTuning::input::From(&ins1), (C2NumberStreamTuning::input*)&ins1);
+        EXPECT_EQ(C2NumberStreamTuning::input::From(&ins2), &ins2);
+        EXPECT_EQ(C2NumberStreamTuning::input::From(&outs1), nullptr);
+        EXPECT_EQ(C2NumberStreamTuning::input::From(&outs2), nullptr);
+        EXPECT_EQ(C2NumberStreamTuning::output::From(&ins1), nullptr);
+        EXPECT_EQ(C2NumberStreamTuning::output::From(&ins2), nullptr);
+        EXPECT_EQ(C2NumberStreamTuning::output::From(&outs1), (C2NumberStreamTuning::output*)&outs1);
+        EXPECT_EQ(C2NumberStreamTuning::output::From(&outs2), &outs2);
+
+    }
+
+    {
+        uint32_t videoWidth[] = { 12u, C2NumberStreamTuning::output::typeIndex, 100 };
+        C2Param *p1 = C2Param::From(videoWidth, sizeof(videoWidth));
+        EXPECT_NE(p1, nullptr);
+        EXPECT_EQ(12u, p1->size());
+        EXPECT_EQ(p1->type(), C2NumberStreamTuning::output::typeIndex);
+
+        p1 = C2Param::From(videoWidth, sizeof(videoWidth) + 2);
+        EXPECT_EQ(p1, nullptr);
+
+        p1 = C2Param::From(videoWidth, sizeof(videoWidth) - 2);
+        EXPECT_EQ(p1, nullptr);
+
+        p1 = C2Param::From(videoWidth, 3);
+        EXPECT_EQ(p1, nullptr);
+
+        p1 = C2Param::From(videoWidth, 0);
+        EXPECT_EQ(p1, nullptr);
+    }
+}
+
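+// C2AddBaseIndex should provide a baseIndex for a struct that lacks one (nobase -> 2) and
+// keep the value of a struct that already defines one (base -> 1).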
+void StaticTestAddBaseIndex() {
+    struct nobase {};
+    struct base { enum : uint32_t { baseIndex = 1 }; };
+    static_assert(C2AddBaseIndex<nobase, 2>::baseIndex == 2, "should be 2");
+    static_assert(C2AddBaseIndex<base, 1>::baseIndex == 1, "should be 1");
+}
+
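+// _C2FlexHelper should report the element type of a trailing flexible array member, directly
+// or through a nested flexible struct, and _C2Flexible should hold only for such types.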
+class TestFlexHelper {
+    struct _Flex {
+        int32_t a;
+        char b[];
+        _Flex() {}
+        FLEX(_Flex, b);
+    };
+
+    struct _BoFlex {
+        _Flex a;
+        _BoFlex() {}
+        FLEX(_BoFlex, a);
+    };
+
+    struct _NonFlex {
+    };
+
+
+    static void StaticTest() {
+        static_assert(std::is_same<_C2FlexHelper<char>::flexType, void>::value, "should be void");
+        static_assert(std::is_same<_C2FlexHelper<char[]>::flexType, char>::value, "should be char");
+        static_assert(std::is_same<_C2FlexHelper<_Flex>::flexType, char>::value, "should be char");
+
+        static_assert(std::is_same<_C2FlexHelper<_BoFlex>::flexType, char>::value, "should be char");
+
+        static_assert(_C2Flexible<_Flex>::value, "should be flexible");
+        static_assert(!_C2Flexible<_NonFlex>::value, "should not be flexible");
+    }
+};
+
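+// Same checks as ParamOpsTest, but for flexible params, which carry their payload in 'm' and
+// must be allocated with alloc_unique()/alloc_shared().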
+TEST_F(C2ParamTest, FlexParamOpsTest) {
+//    const C2NumbersStruct str{100};
+    C2NumbersStruct bstr;
+    {
+//        EXPECT_EQ(100, str->m.mNumbers[0]);
+        (void)&bstr.mNumbers[0];
+
+        C2Param::BaseIndex index = C2NumbersStruct::baseIndex;
+        EXPECT_FALSE(index.isVendor());
+        EXPECT_TRUE(index.isFlexible());
+        EXPECT_EQ(index.baseIndex(), kParamIndexNumbers | C2Param::BaseIndex::_kFlexibleFlag);
+        EXPECT_EQ(index.paramIndex(), kParamIndexNumbers);
+    }
+
+    std::unique_ptr<C2NumbersTuning> tun_ = C2NumbersTuning::alloc_unique(1);
+    tun_->m.mNumbers[0] = 100;
+    std::unique_ptr<const C2NumbersTuning> tun = std::move(tun_);
+    std::shared_ptr<C2NumbersTuning> btun = C2NumbersTuning::alloc_shared(1);
+
+    {
+        // flags & invariables
+        const C2NumbersTuning *T[] = { tun.get(), btun.get() };
+        for (const auto p : T) {
+            EXPECT_TRUE((bool)(*p));
+            EXPECT_FALSE(!(*p));
+            EXPECT_EQ(12u, p->size());
+
+            EXPECT_FALSE(p->isVendor());
+            EXPECT_TRUE(p->isFlexible());
+            EXPECT_TRUE(p->isGlobal());
+            EXPECT_FALSE(p->forInput());
+            EXPECT_FALSE(p->forOutput());
+            EXPECT_FALSE(p->forStream());
+            EXPECT_FALSE(p->forPort());
+        }
+
+        // value
+        EXPECT_EQ(100, tun->m.mNumbers[0]);
+        EXPECT_EQ(0, btun->m.mNumbers[0]);
+        EXPECT_FALSE(*tun == *btun);
+        EXPECT_FALSE(tun->operator==(*btun));
+        EXPECT_TRUE(*tun != *btun);
+        EXPECT_TRUE(tun->operator!=(*btun));
+        btun->m.mNumbers[0] = 100;
+        EXPECT_EQ(*tun, *btun);
+
+        // index
+        EXPECT_EQ(C2Param::Type(tun->type()).baseIndex(), C2NumbersStruct::baseIndex);
+        EXPECT_EQ(C2Param::Type(tun->type()).paramIndex(), kParamIndexNumbers);
+        EXPECT_EQ(tun->type(), C2NumbersTuning::typeIndex);
+        EXPECT_EQ(tun->stream(), ~0u);
+
+        C2Param::BaseIndex index = C2NumbersTuning::baseIndex;
+        EXPECT_FALSE(index.isVendor());
+        EXPECT_TRUE(index.isFlexible());
+        EXPECT_EQ(index.baseIndex(), kParamIndexNumbers | C2Param::BaseIndex::_kFlexibleFlag);
+        EXPECT_EQ(index.paramIndex(), kParamIndexNumbers);
+
+        C2Param::Type type = C2NumbersTuning::typeIndex;
+        EXPECT_FALSE(type.isVendor());
+        EXPECT_TRUE(type.isFlexible());
+        EXPECT_TRUE(type.isGlobal());
+        EXPECT_FALSE(type.forInput());
+        EXPECT_FALSE(type.forOutput());
+        EXPECT_FALSE(type.forStream());
+        EXPECT_FALSE(type.forPort());
+
+        EXPECT_EQ(C2NumbersTuning::From(nullptr), nullptr);
+        EXPECT_EQ(C2NumbersTuning::From(tun.get()), tun.get());
+        EXPECT_EQ(C2NumbersPortTuning::From(tun.get()), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::input::From(tun.get()), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::output::From(tun.get()), nullptr);
+        EXPECT_EQ(C2NumbersStreamTuning::From(tun.get()), nullptr);
+        EXPECT_EQ(C2NumbersStreamTuning::input::From(tun.get()), nullptr);
+        EXPECT_EQ(C2NumbersStreamTuning::output::From(tun.get()), nullptr);
+    }
+
+    std::unique_ptr<C2NumbersPortTuning> outp1_(C2NumbersPortTuning::alloc_unique(1, true)),
+            inp1_ = C2NumbersPortTuning::alloc_unique(1, false);
+    outp1_->m.mNumbers[0] = 100;
+    inp1_->m.mNumbers[0] = 100;
+    std::unique_ptr<const C2NumbersPortTuning> outp1 = std::move(outp1_);
+    std::unique_ptr<const C2NumbersPortTuning> inp1 = std::move(inp1_);
+    std::shared_ptr<C2NumbersPortTuning> boutp1(C2NumbersPortTuning::alloc_shared(1)),
+            binp1 = C2NumbersPortTuning::alloc_shared(1),
+            binp3 = C2NumbersPortTuning::alloc_shared(1, false);
+    binp3->m.mNumbers[0] = 100;
+    std::unique_ptr<C2NumbersPortTuning::input> inp2_(C2NumbersPortTuning::input::alloc_unique(1));
+    inp2_->m.mNumbers[0] = 100;
+    std::unique_ptr<const C2NumbersPortTuning::input> inp2 = std::move(inp2_);
+    std::shared_ptr<C2NumbersPortTuning::input> binp2(C2NumbersPortTuning::input::alloc_shared(1));
+    std::unique_ptr<C2NumbersPortTuning::output> outp2_(C2NumbersPortTuning::output::alloc_unique(1));
+    outp2_->m.mNumbers[0] = 100;
+    std::unique_ptr<const C2NumbersPortTuning::output> outp2 = std::move(outp2_);
+    std::shared_ptr<C2NumbersPortTuning::output> boutp2(C2NumbersPortTuning::output::alloc_shared(1));
+
+    {
+        static_assert(canCallSetPort(*binp3), "should be able to");
+        static_assert(canCallSetPort(*binp1), "should be able to");
+        static_assert(!canCallSetPort(*inp1), "should not be able to (const)");
+        static_assert(!canCallSetPort(*inp2), "should not be able to (const & type)");
+        static_assert(!canCallSetPort(*binp2), "should not be able to (type)");
+
+        // flags & invariables
+        const C2NumbersPortTuning *P[] = { outp1.get(), inp1.get(), boutp1.get() };
+        for (const auto p : P) {
+            EXPECT_EQ(12u, p->size());
+            EXPECT_FALSE(p->isVendor());
+            EXPECT_TRUE(p->isFlexible());
+            EXPECT_FALSE(p->isGlobal());
+            EXPECT_FALSE(p->forStream());
+            EXPECT_TRUE(p->forPort());
+        }
+        const C2NumbersPortTuning::input *PI[] = { inp2.get(), binp2.get() };
+        for (const auto p : PI) {
+            EXPECT_EQ(12u, p->size());
+            EXPECT_FALSE(p->isVendor());
+            EXPECT_TRUE(p->isFlexible());
+            EXPECT_FALSE(p->isGlobal());
+            EXPECT_FALSE(p->forStream());
+            EXPECT_TRUE(p->forPort());
+        }
+        const C2NumbersPortTuning::output *PO[] = { outp2.get(), boutp2.get() };
+        for (const auto p : PO) {
+            EXPECT_EQ(12u, p->size());
+            EXPECT_FALSE(p->isVendor());
+            EXPECT_TRUE(p->isFlexible());
+            EXPECT_FALSE(p->isGlobal());
+            EXPECT_FALSE(p->forStream());
+            EXPECT_TRUE(p->forPort());
+        }
+
+        // port specific flags & invariables
+        EXPECT_FALSE(outp1->forInput());
+        EXPECT_TRUE(outp1->forOutput());
+
+        EXPECT_TRUE(inp1->forInput());
+        EXPECT_FALSE(inp1->forOutput());
+
+        const C2NumbersPortTuning *P2[] = { outp1.get(), inp1.get() };
+        for (const auto p : P2) {
+            EXPECT_TRUE((bool)(*p));
+            EXPECT_FALSE(!(*p));
+            EXPECT_EQ(100, p->m.mNumbers[0]);
+        }
+        for (const auto p : PO) {
+            EXPECT_TRUE((bool)(*p));
+            EXPECT_FALSE(!(*p));
+
+            EXPECT_FALSE(p->forInput());
+            EXPECT_TRUE(p->forOutput());
+        }
+        for (const auto p : PI) {
+            EXPECT_TRUE((bool)(*p));
+            EXPECT_FALSE(!(*p));
+
+            EXPECT_TRUE(p->forInput());
+            EXPECT_FALSE(p->forOutput());
+        }
+        const C2NumbersPortTuning *P3[] = { boutp1.get() };
+        for (const auto p : P3) {
+            EXPECT_FALSE((bool)(*p));
+            EXPECT_TRUE(!(*p));
+
+            EXPECT_FALSE(p->forInput());
+            EXPECT_FALSE(p->forOutput());
+            EXPECT_EQ(0, p->m.mNumbers[0]);
+        }
+
+        // values
+        EXPECT_EQ(100, inp2->m.mNumbers[0]);
+        EXPECT_EQ(100, outp2->m.mNumbers[0]);
+        EXPECT_EQ(0, binp1->m.mNumbers[0]);
+        EXPECT_EQ(0, binp2->m.mNumbers[0]);
+        EXPECT_EQ(0, boutp1->m.mNumbers[0]);
+        EXPECT_EQ(0, boutp2->m.mNumbers[0]);
+
+        EXPECT_TRUE(*inp1 != *outp1);
+        EXPECT_TRUE(*inp1 == *inp2);
+        EXPECT_TRUE(*outp1 == *outp2);
+        EXPECT_TRUE(*binp1 == *boutp1);
+        EXPECT_TRUE(*binp2 != *boutp2);
+
+        EXPECT_TRUE(*inp1 != *binp1);
+        binp1->m.mNumbers[0] = 100;
+        EXPECT_TRUE(*inp1 != *binp1);
+        binp1->setPort(false /* output */);
+        EXPECT_TRUE((bool)*binp1);
+        EXPECT_FALSE(!*binp1);
+        EXPECT_TRUE(*inp1 == *binp1);
+
+        EXPECT_TRUE(*inp2 != *binp2);
+        binp2->m.mNumbers[0] = 100;
+        EXPECT_TRUE(*inp2 == *binp2);
+
+        binp1->setPort(true /* output */);
+        EXPECT_TRUE(*outp1 == *binp1);
+
+        EXPECT_TRUE(*outp1 != *boutp1);
+        boutp1->m.mNumbers[0] = 100;
+        EXPECT_TRUE(*outp1 != *boutp1);
+        boutp1->setPort(true /* output */);
+        EXPECT_TRUE((bool)*boutp1);
+        EXPECT_FALSE(!*boutp1);
+        EXPECT_TRUE(*outp1 == *boutp1);
+
+        EXPECT_TRUE(*outp2 != *boutp2);
+        boutp2->m.mNumbers[0] = 100;
+        EXPECT_TRUE(*outp2 == *boutp2);
+
+        boutp1->setPort(false /* output */);
+        EXPECT_TRUE(*inp1 == *boutp1);
+
+        // index
+        EXPECT_EQ(C2Param::Type(inp1->type()).baseIndex(), C2NumbersStruct::baseIndex);
+        EXPECT_EQ(C2Param::Type(inp1->type()).paramIndex(), kParamIndexNumbers);
+        EXPECT_EQ(inp1->type(), C2NumbersPortTuning::input::typeIndex);
+        EXPECT_EQ(inp1->stream(), ~0u);
+
+        EXPECT_EQ(C2Param::Type(inp2->type()).baseIndex(), C2NumbersStruct::baseIndex);
+        EXPECT_EQ(C2Param::Type(inp2->type()).paramIndex(), kParamIndexNumbers);
+        EXPECT_EQ(inp2->type(), C2NumbersPortTuning::input::typeIndex);
+        EXPECT_EQ(inp2->stream(), ~0u);
+
+        EXPECT_EQ(C2Param::Type(outp1->type()).baseIndex(), C2NumbersStruct::baseIndex);
+        EXPECT_EQ(C2Param::Type(outp1->type()).paramIndex(), kParamIndexNumbers);
+        EXPECT_EQ(outp1->type(), C2NumbersPortTuning::output::typeIndex);
+        EXPECT_EQ(outp1->stream(), ~0u);
+
+        EXPECT_EQ(C2Param::Type(outp2->type()).baseIndex(), C2NumbersStruct::baseIndex);
+        EXPECT_EQ(C2Param::Type(outp2->type()).paramIndex(), kParamIndexNumbers);
+        EXPECT_EQ(outp2->type(), C2NumbersPortTuning::output::typeIndex);
+        EXPECT_EQ(outp2->stream(), ~0u);
+
+        C2Param::BaseIndex index = C2NumbersPortTuning::input::typeIndex;
+        EXPECT_FALSE(index.isVendor());
+        EXPECT_TRUE(index.isFlexible());
+        EXPECT_EQ(index.baseIndex(), kParamIndexNumbers | C2Param::BaseIndex::_kFlexibleFlag);
+        EXPECT_EQ(index.paramIndex(), kParamIndexNumbers);
+
+        index = C2NumbersPortTuning::output::typeIndex;
+        EXPECT_FALSE(index.isVendor());
+        EXPECT_TRUE(index.isFlexible());
+        EXPECT_EQ(index.baseIndex(), kParamIndexNumbers | C2Param::BaseIndex::_kFlexibleFlag);
+        EXPECT_EQ(index.paramIndex(), kParamIndexNumbers);
+
+        C2Param::Type type = C2NumbersPortTuning::input::typeIndex;
+        EXPECT_FALSE(type.isVendor());
+        EXPECT_TRUE(type.isFlexible());
+        EXPECT_FALSE(type.isGlobal());
+        EXPECT_TRUE(type.forInput());
+        EXPECT_FALSE(type.forOutput());
+        EXPECT_FALSE(type.forStream());
+        EXPECT_TRUE(type.forPort());
+
+        type = C2NumbersPortTuning::output::typeIndex;
+        EXPECT_FALSE(type.isVendor());
+        EXPECT_TRUE(type.isFlexible());
+        EXPECT_FALSE(type.isGlobal());
+        EXPECT_FALSE(type.forInput());
+        EXPECT_TRUE(type.forOutput());
+        EXPECT_FALSE(type.forStream());
+        EXPECT_TRUE(type.forPort());
+
+        EXPECT_EQ(C2NumbersPortTuning::From(nullptr), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::input::From(nullptr), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::output::From(nullptr), nullptr);
+        EXPECT_EQ(C2NumbersTuning::From(inp1.get()), nullptr);
+        EXPECT_EQ(C2NumbersTuning::From(inp2.get()), nullptr);
+        EXPECT_EQ(C2NumbersTuning::From(outp1.get()), nullptr);
+        EXPECT_EQ(C2NumbersTuning::From(outp2.get()), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::From(inp1.get()), inp1.get());
+        EXPECT_EQ(C2NumbersPortTuning::From(inp2.get()), (C2NumbersPortTuning*)inp2.get());
+        EXPECT_EQ(C2NumbersPortTuning::From(outp1.get()), outp1.get());
+        EXPECT_EQ(C2NumbersPortTuning::From(outp2.get()), (C2NumbersPortTuning*)outp2.get());
+        EXPECT_EQ(C2NumbersPortTuning::input::From(inp1.get()), (C2NumbersPortTuning::input*)inp1.get());
+        EXPECT_EQ(C2NumbersPortTuning::input::From(inp2.get()), inp2.get());
+        EXPECT_EQ(C2NumbersPortTuning::input::From(outp1.get()), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::input::From(outp2.get()), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::output::From(inp1.get()), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::output::From(inp2.get()), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::output::From(outp1.get()), (C2NumbersPortTuning::output*)outp1.get());
+        EXPECT_EQ(C2NumbersPortTuning::output::From(outp2.get()), outp2.get());
+        EXPECT_EQ(C2NumbersStreamTuning::From(inp1.get()), nullptr);
+        EXPECT_EQ(C2NumbersStreamTuning::From(inp2.get()), nullptr);
+        EXPECT_EQ(C2NumbersStreamTuning::From(outp1.get()), nullptr);
+        EXPECT_EQ(C2NumbersStreamTuning::From(outp2.get()), nullptr);
+        EXPECT_EQ(C2NumbersStreamTuning::input::From(inp1.get()), nullptr);
+        EXPECT_EQ(C2NumbersStreamTuning::input::From(inp2.get()), nullptr);
+        EXPECT_EQ(C2NumbersStreamTuning::input::From(outp1.get()), nullptr);
+        EXPECT_EQ(C2NumbersStreamTuning::input::From(outp2.get()), nullptr);
+        EXPECT_EQ(C2NumbersStreamTuning::output::From(inp1.get()), nullptr);
+        EXPECT_EQ(C2NumbersStreamTuning::output::From(inp2.get()), nullptr);
+        EXPECT_EQ(C2NumbersStreamTuning::output::From(outp1.get()), nullptr);
+        EXPECT_EQ(C2NumbersStreamTuning::output::From(outp2.get()), nullptr);
+
+    }
+
+    std::unique_ptr<C2NumbersStreamTuning> outs1_(C2NumbersStreamTuning::alloc_unique(1, true, 1u));
+    outs1_->m.mNumbers[0] = 100;
+    std::unique_ptr<const C2NumbersStreamTuning> outs1 = std::move(outs1_);
+    std::unique_ptr<C2NumbersStreamTuning> ins1_(C2NumbersStreamTuning::alloc_unique(1, false, 1u));
+    ins1_->m.mNumbers[0] = 100;
+    std::unique_ptr<const C2NumbersStreamTuning> ins1 = std::move(ins1_);
+    std::shared_ptr<C2NumbersStreamTuning> bouts1(C2NumbersStreamTuning::alloc_shared(1));
+    std::shared_ptr<C2NumbersStreamTuning> bins1(C2NumbersStreamTuning::alloc_shared(1));
+    std::shared_ptr<C2NumbersStreamTuning> bins3(C2NumbersStreamTuning::alloc_shared(1, false, 1u));
+    bins3->m.mNumbers[0] = 100;
+    std::unique_ptr<C2NumbersStreamTuning::input> ins2_(C2NumbersStreamTuning::input::alloc_unique(1, 1u));
+    ins2_->m.mNumbers[0] = 100;
+    std::unique_ptr<const C2NumbersStreamTuning::input> ins2 = std::move(ins2_);
+    std::shared_ptr<C2NumbersStreamTuning::input> bins2(C2NumbersStreamTuning::input::alloc_shared(1));
+    std::unique_ptr<C2NumbersStreamTuning::output> outs2_(C2NumbersStreamTuning::output::alloc_unique(1, 1u));
+    outs2_->m.mNumbers[0] = 100;
+    std::unique_ptr<const C2NumbersStreamTuning::output> outs2 = std::move(outs2_);
+    std::shared_ptr<C2NumbersStreamTuning::output> bouts2(C2NumbersStreamTuning::output::alloc_shared(1));
+
+    {
+        static_assert(canCallSetPort(*bins3), "should be able to");
+        static_assert(canCallSetPort(*bins1), "should be able to");
+        static_assert(!canCallSetPort(*ins1), "should not be able to (const)");
+        static_assert(!canCallSetPort(*ins2), "should not be able to (const & type)");
+        static_assert(!canCallSetPort(*bins2), "should not be able to (type)");
+
+        // flags & invariables
+        const C2NumbersStreamTuning *S[] = { outs1.get(), ins1.get(), bouts1.get() };
+        for (const auto p : S) {
+            EXPECT_EQ(12u, p->size());
+            EXPECT_FALSE(p->isVendor());
+            EXPECT_TRUE(p->isFlexible());
+            EXPECT_FALSE(p->isGlobal());
+            EXPECT_TRUE(p->forStream());
+            EXPECT_FALSE(p->forPort());
+        }
+        const C2NumbersStreamTuning::input *SI[] = { ins2.get(), bins2.get() };
+        for (const auto p : SI) {
+            EXPECT_EQ(12u, p->size());
+            EXPECT_FALSE(p->isVendor());
+            EXPECT_TRUE(p->isFlexible());
+            EXPECT_FALSE(p->isGlobal());
+            EXPECT_TRUE(p->forStream());
+            EXPECT_FALSE(p->forPort());
+        }
+        const C2NumbersStreamTuning::output *SO[] = { outs2.get(), bouts2.get() };
+        for (const auto p : SO) {
+            EXPECT_EQ(12u, p->size());
+            EXPECT_FALSE(p->isVendor());
+            EXPECT_TRUE(p->isFlexible());
+            EXPECT_FALSE(p->isGlobal());
+            EXPECT_TRUE(p->forStream());
+            EXPECT_FALSE(p->forPort());
+        }
+
+        // port specific flags & invariables
+        EXPECT_FALSE(outs1->forInput());
+        EXPECT_TRUE(outs1->forOutput());
+
+        EXPECT_TRUE(ins1->forInput());
+        EXPECT_FALSE(ins1->forOutput());
+
+        const C2NumbersStreamTuning *S2[] = { outs1.get(), ins1.get() };
+        for (const auto p : S2) {
+            EXPECT_TRUE((bool)(*p));
+            EXPECT_FALSE(!(*p));
+            EXPECT_EQ(100, p->m.mNumbers[0]);
+            EXPECT_EQ(1u, p->stream());
+        }
+        for (const auto p : SO) {
+            EXPECT_TRUE((bool)(*p));
+            EXPECT_FALSE(!(*p));
+
+            EXPECT_FALSE(p->forInput());
+            EXPECT_TRUE(p->forOutput());
+        }
+        for (const auto p : SI) {
+            EXPECT_TRUE((bool)(*p));
+            EXPECT_FALSE(!(*p));
+
+            EXPECT_TRUE(p->forInput());
+            EXPECT_FALSE(p->forOutput());
+        }
+        const C2NumbersStreamTuning *S3[] = { bouts1.get() };
+        for (const auto p : S3) {
+            EXPECT_FALSE((bool)(*p));
+            EXPECT_TRUE(!(*p));
+
+            EXPECT_FALSE(p->forInput());
+            EXPECT_FALSE(p->forOutput());
+            EXPECT_EQ(0, p->m.mNumbers[0]);
+        }
+
+        // values
+        EXPECT_EQ(100, ins2->m.mNumbers[0]);
+        EXPECT_EQ(100, outs2->m.mNumbers[0]);
+        EXPECT_EQ(0, bins1->m.mNumbers[0]);
+        EXPECT_EQ(0, bins2->m.mNumbers[0]);
+        EXPECT_EQ(0, bouts1->m.mNumbers[0]);
+        EXPECT_EQ(0, bouts2->m.mNumbers[0]);
+
+        EXPECT_EQ(1u, ins2->stream());
+        EXPECT_EQ(1u, outs2->stream());
+        EXPECT_EQ(0u, bins1->stream());
+        EXPECT_EQ(0u, bins2->stream());
+        EXPECT_EQ(0u, bouts1->stream());
+        EXPECT_EQ(0u, bouts2->stream());
+
+        EXPECT_TRUE(*ins1 != *outs1);
+        EXPECT_TRUE(*ins1 == *ins2);
+        EXPECT_TRUE(*outs1 == *outs2);
+        EXPECT_TRUE(*bins1 == *bouts1);
+        EXPECT_TRUE(*bins2 != *bouts2);
+
+        EXPECT_TRUE(*ins1 != *bins1);
+        bins1->m.mNumbers[0] = 100;
+        EXPECT_TRUE(*ins1 != *bins1);
+        bins1->setPort(false /* output */);
+        EXPECT_TRUE(*ins1 != *bins1);
+        bins1->setStream(1u);
+        EXPECT_TRUE(*ins1 == *bins1);
+
+        EXPECT_TRUE(*ins2 != *bins2);
+        bins2->m.mNumbers[0] = 100;
+        EXPECT_TRUE(*ins2 != *bins2);
+        bins2->setStream(1u);
+        EXPECT_TRUE(*ins2 == *bins2);
+
+        bins1->setPort(true /* output */);
+        EXPECT_TRUE(*outs1 == *bins1);
+
+        EXPECT_TRUE(*outs1 != *bouts1);
+        bouts1->m.mNumbers[0] = 100;
+        EXPECT_TRUE(*outs1 != *bouts1);
+        bouts1->setPort(true /* output */);
+        EXPECT_TRUE(*outs1 != *bouts1);
+        bouts1->setStream(1u);
+        EXPECT_TRUE(*outs1 == *bouts1);
+
+        EXPECT_TRUE(*outs2 != *bouts2);
+        bouts2->m.mNumbers[0] = 100;
+        EXPECT_TRUE(*outs2 != *bouts2);
+        bouts2->setStream(1u);
+        EXPECT_TRUE(*outs2 == *bouts2);
+
+        bouts1->setPort(false /* output */);
+        EXPECT_TRUE(*ins1 == *bouts1);
+
+        // index
+        EXPECT_EQ(C2Param::Type(ins1->type()).baseIndex(), C2NumbersStruct::baseIndex);
+        EXPECT_EQ(C2Param::Type(ins1->type()).paramIndex(), kParamIndexNumbers);
+        EXPECT_EQ(ins1->type(), C2NumbersStreamTuning::input::typeIndex);
+
+        EXPECT_EQ(C2Param::Type(ins2->type()).baseIndex(), C2NumbersStruct::baseIndex);
+        EXPECT_EQ(C2Param::Type(ins2->type()).paramIndex(), kParamIndexNumbers);
+        EXPECT_EQ(ins2->type(), C2NumbersStreamTuning::input::typeIndex);
+
+        EXPECT_EQ(C2Param::Type(outs1->type()).baseIndex(), C2NumbersStruct::baseIndex);
+        EXPECT_EQ(C2Param::Type(outs1->type()).paramIndex(), kParamIndexNumbers);
+        EXPECT_EQ(outs1->type(), C2NumbersStreamTuning::output::typeIndex);
+
+        EXPECT_EQ(C2Param::Type(outs2->type()).baseIndex(), C2NumbersStruct::baseIndex);
+        EXPECT_EQ(C2Param::Type(outs2->type()).paramIndex(), kParamIndexNumbers);
+        EXPECT_EQ(outs2->type(), C2NumbersStreamTuning::output::typeIndex);
+
+        C2Param::BaseIndex index = C2NumbersStreamTuning::input::typeIndex;
+        EXPECT_FALSE(index.isVendor());
+        EXPECT_TRUE(index.isFlexible());
+        EXPECT_EQ(index.baseIndex(), kParamIndexNumbers | C2Param::BaseIndex::_kFlexibleFlag);
+        EXPECT_EQ(index.paramIndex(), kParamIndexNumbers);
+
+        index = C2NumbersStreamTuning::output::typeIndex;
+        EXPECT_FALSE(index.isVendor());
+        EXPECT_TRUE(index.isFlexible());
+        EXPECT_EQ(index.baseIndex(), kParamIndexNumbers | C2Param::BaseIndex::_kFlexibleFlag);
+        EXPECT_EQ(index.paramIndex(), kParamIndexNumbers);
+
+        C2Param::Type type = C2NumbersStreamTuning::input::typeIndex;
+        EXPECT_FALSE(type.isVendor());
+        EXPECT_TRUE(type.isFlexible());
+        EXPECT_FALSE(type.isGlobal());
+        EXPECT_TRUE(type.forInput());
+        EXPECT_FALSE(type.forOutput());
+        EXPECT_TRUE(type.forStream());
+        EXPECT_FALSE(type.forPort());
+
+        type = C2NumbersStreamTuning::output::typeIndex;
+        EXPECT_FALSE(type.isVendor());
+        EXPECT_TRUE(type.isFlexible());
+        EXPECT_FALSE(type.isGlobal());
+        EXPECT_FALSE(type.forInput());
+        EXPECT_TRUE(type.forOutput());
+        EXPECT_TRUE(type.forStream());
+        EXPECT_FALSE(type.forPort());
+
+        EXPECT_EQ(C2NumbersPortTuning::From(nullptr), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::input::From(nullptr), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::output::From(nullptr), nullptr);
+        EXPECT_EQ(C2NumbersTuning::From(ins1.get()), nullptr);
+        EXPECT_EQ(C2NumbersTuning::From(ins2.get()), nullptr);
+        EXPECT_EQ(C2NumbersTuning::From(outs1.get()), nullptr);
+        EXPECT_EQ(C2NumbersTuning::From(outs2.get()), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::From(ins1.get()), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::From(ins2.get()), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::From(outs1.get()), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::From(outs2.get()), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::input::From(ins1.get()), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::input::From(ins2.get()), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::input::From(outs1.get()), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::input::From(outs2.get()), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::output::From(ins1.get()), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::output::From(ins2.get()), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::output::From(outs1.get()), nullptr);
+        EXPECT_EQ(C2NumbersPortTuning::output::From(outs2.get()), nullptr);
+        EXPECT_EQ(C2NumbersStreamTuning::From(ins1.get()), ins1.get());
+        EXPECT_EQ(C2NumbersStreamTuning::From(ins2.get()), (C2NumbersStreamTuning*)ins2.get());
+        EXPECT_EQ(C2NumbersStreamTuning::From(outs1.get()), outs1.get());
+        EXPECT_EQ(C2NumbersStreamTuning::From(outs2.get()), (C2NumbersStreamTuning*)outs2.get());
+        EXPECT_EQ(C2NumbersStreamTuning::input::From(ins1.get()), (C2NumbersStreamTuning::input*)ins1.get());
+        EXPECT_EQ(C2NumbersStreamTuning::input::From(ins2.get()), ins2.get());
+        EXPECT_EQ(C2NumbersStreamTuning::input::From(outs1.get()), nullptr);
+        EXPECT_EQ(C2NumbersStreamTuning::input::From(outs2.get()), nullptr);
+        EXPECT_EQ(C2NumbersStreamTuning::output::From(ins1.get()), nullptr);
+        EXPECT_EQ(C2NumbersStreamTuning::output::From(ins2.get()), nullptr);
+        EXPECT_EQ(C2NumbersStreamTuning::output::From(outs1.get()), (C2NumbersStreamTuning::output*)outs1.get());
+        EXPECT_EQ(C2NumbersStreamTuning::output::From(outs2.get()), outs2.get());
+
+    }
+
+    {
+        C2Int32Value int32Value(INT32_MIN);
+        static_assert(std::is_same<decltype(int32Value.mValue), int32_t>::value, "should be int32_t");
+        EXPECT_EQ(INT32_MIN, int32Value.mValue);
+        std::list<const C2FieldDescriptor> fields = int32Value.fieldList;
+        EXPECT_EQ(1u, fields.size());
+        EXPECT_EQ(FD::INT32, fields.cbegin()->type());
+        EXPECT_EQ(1u, fields.cbegin()->length());
+        EXPECT_EQ(C2String("value"), fields.cbegin()->name());
+    }
+
+    {
+        C2Uint32Value uint32Value(UINT32_MAX);
+        static_assert(std::is_same<decltype(uint32Value.mValue), uint32_t>::value, "should be uint32_t");
+        EXPECT_EQ(UINT32_MAX, uint32Value.mValue);
+        std::list<const C2FieldDescriptor> fields = uint32Value.fieldList;
+        EXPECT_EQ(1u, fields.size());
+        EXPECT_EQ(FD::UINT32, fields.cbegin()->type());
+        EXPECT_EQ(1u, fields.cbegin()->length());
+        EXPECT_EQ(C2String("value"), fields.cbegin()->name());
+    }
+
+    {
+        C2Int64Value int64Value(INT64_MIN);
+        static_assert(std::is_same<decltype(int64Value.mValue), int64_t>::value, "should be int64_t");
+        EXPECT_EQ(INT64_MIN, int64Value.mValue);
+        std::list<const C2FieldDescriptor> fields = int64Value.fieldList;
+        EXPECT_EQ(1u, fields.size());
+        EXPECT_EQ(FD::INT64, fields.cbegin()->type());
+        EXPECT_EQ(1u, fields.cbegin()->length());
+        EXPECT_EQ(C2String("value"), fields.cbegin()->name());
+    }
+
+    {
+        C2Uint64Value uint64Value(UINT64_MAX);
+        static_assert(std::is_same<decltype(uint64Value.mValue), uint64_t>::value, "should be uint64_t");
+        EXPECT_EQ(UINT64_MAX, uint64Value.mValue);
+        std::list<const C2FieldDescriptor> fields = uint64Value.fieldList;
+        EXPECT_EQ(1u, fields.size());
+        EXPECT_EQ(FD::UINT64, fields.cbegin()->type());
+        EXPECT_EQ(1u, fields.cbegin()->length());
+        EXPECT_EQ(C2String("value"), fields.cbegin()->name());
+    }
+
+    {
+        C2FloatValue floatValue(123.4f);
+        static_assert(std::is_same<decltype(floatValue.mValue), float>::value, "should be float");
+        EXPECT_EQ(123.4f, floatValue.mValue);
+        std::list<const C2FieldDescriptor> fields = floatValue.fieldList;
+        EXPECT_EQ(1u, fields.size());
+        EXPECT_EQ(FD::FLOAT, fields.cbegin()->type());
+        EXPECT_EQ(1u, fields.cbegin()->length());
+        EXPECT_EQ(C2String("value"), fields.cbegin()->name());
+    }
+
+    {
+        uint8_t initValue[] = "ABCD";
+        typedef C2GlobalParam<C2Setting, C2BlobValue, 0> BlobSetting;
+        std::unique_ptr<BlobSetting> blobValue = BlobSetting::alloc_unique(6, C2ConstMemoryBlock<uint8_t>(initValue));
+        static_assert(std::is_same<decltype(blobValue->m.mValue), uint8_t[]>::value, "should be uint8_t[]");
+        EXPECT_EQ(0, memcmp(blobValue->m.mValue, "ABCD\0", 6));
+        EXPECT_EQ(6u, blobValue->flexCount());
+        std::list<const C2FieldDescriptor> fields = blobValue->fieldList;
+        EXPECT_EQ(1u, fields.size());
+        EXPECT_EQ(FD::BLOB, fields.cbegin()->type());
+        EXPECT_EQ(0u, fields.cbegin()->length());
+        EXPECT_EQ(C2String("value"), fields.cbegin()->name());
+
+        blobValue = BlobSetting::alloc_unique(3, C2ConstMemoryBlock<uint8_t>(initValue));
+        EXPECT_EQ(0, memcmp(blobValue->m.mValue, "ABC", 3));
+        EXPECT_EQ(3u, blobValue->flexCount());
+    }
+
+    {
+        constexpr char initValue[] = "ABCD";
+        typedef C2GlobalParam<C2Setting, C2StringValue, 0> StringSetting;
+        std::unique_ptr<StringSetting> stringValue = StringSetting::alloc_unique(6, C2ConstMemoryBlock<char>(initValue));
+        stringValue = StringSetting::alloc_unique(6, initValue);
+        static_assert(std::is_same<decltype(stringValue->m.mValue), char[]>::value, "should be char[]");
+        EXPECT_EQ(0, memcmp(stringValue->m.mValue, "ABCD\0", 6));
+        EXPECT_EQ(6u, stringValue->flexCount());
+        std::list<const C2FieldDescriptor> fields = stringValue->fieldList;
+        EXPECT_EQ(1u, fields.size());
+        EXPECT_EQ(FD::STRING, fields.cbegin()->type());
+        EXPECT_EQ(0u, fields.cbegin()->length());
+        EXPECT_EQ(C2String("value"), fields.cbegin()->name());
+
+        stringValue = StringSetting::alloc_unique(3, C2ConstMemoryBlock<char>(initValue));
+        EXPECT_EQ(0, memcmp(stringValue->m.mValue, "AB", 3));
+        EXPECT_EQ(3u, stringValue->flexCount());
+
+        stringValue = StringSetting::alloc_unique(11, "initValue");
+        EXPECT_EQ(0, memcmp(stringValue->m.mValue, "initValue\0", 11));
+        EXPECT_EQ(11u, stringValue->flexCount());
+
+        stringValue = StringSetting::alloc_unique(initValue);
+        EXPECT_EQ(0, memcmp(stringValue->m.mValue, "ABCD", 5));
+        EXPECT_EQ(5u, stringValue->flexCount());
+
+        stringValue = StringSetting::alloc_unique({ 'A', 'B', 'C', 'D' });
+        EXPECT_EQ(0, memcmp(stringValue->m.mValue, "ABC", 4));
+        EXPECT_EQ(4u, stringValue->flexCount());
+    }
+
+    {
+        uint32_t videoWidth[] = { 12u, C2NumbersStreamTuning::output::typeIndex, 100 };
+        C2Param *p1 = C2Param::From(videoWidth, sizeof(videoWidth));
+        EXPECT_NE(nullptr, p1);
+        EXPECT_EQ(12u, p1->size());
+        EXPECT_EQ(C2NumbersStreamTuning::output::typeIndex, p1->type());
+
+        C2NumbersStreamTuning::output *vst = C2NumbersStreamTuning::output::From(p1);
+        EXPECT_NE(nullptr, vst);
+        if (vst) {
+            EXPECT_EQ(1u, vst->flexCount());
+            EXPECT_EQ(100, vst->m.mNumbers[0]);
+        }
+
+        p1 = C2Param::From(videoWidth, sizeof(videoWidth) + 2);
+        EXPECT_EQ(nullptr, p1);
+
+        p1 = C2Param::From(videoWidth, sizeof(videoWidth) - 2);
+        EXPECT_EQ(nullptr, p1);
+
+        p1 = C2Param::From(videoWidth, 3);
+        EXPECT_EQ(nullptr, p1);
+
+        p1 = C2Param::From(videoWidth, 0);
+        EXPECT_EQ(nullptr, p1);
+    }
+
+    {
+        uint32_t videoWidth[] = { 16u, C2NumbersPortTuning::input::typeIndex, 101, 102 };
+
+        C2Param *p1 = C2Param::From(videoWidth, sizeof(videoWidth));
+        EXPECT_NE(nullptr, p1);
+        EXPECT_EQ(16u, p1->size());
+        EXPECT_EQ(C2NumbersPortTuning::input::typeIndex, p1->type());
+
+        C2NumbersPortTuning::input *vpt = C2NumbersPortTuning::input::From(p1);
+        EXPECT_NE(nullptr, vpt);
+        if (vpt) {
+            EXPECT_EQ(2u, vpt->flexCount());
+            EXPECT_EQ(101, vpt->m.mNumbers[0]);
+            EXPECT_EQ(102, vpt->m.mNumbers[1]);
+        }
+
+        p1 = C2Param::From(videoWidth, sizeof(videoWidth) + 2);
+        EXPECT_EQ(nullptr, p1);
+
+        p1 = C2Param::From(videoWidth, sizeof(videoWidth) - 2);
+        EXPECT_EQ(nullptr, p1);
+
+        p1 = C2Param::From(videoWidth, 3);
+        EXPECT_EQ(nullptr, p1);
+
+        p1 = C2Param::From(videoWidth, 0);
+        EXPECT_EQ(nullptr, p1);
+    }
+}
+
+// ***********************
+
+}
+
+#include <util/C2ParamUtils.h>
+#include <C2Config.h>
+#include <C2Component.h>
+#include <unordered_map>
+
+namespace android {
+
+C2ENUM(
+    MetadataType, int32_t,
+    kInvalid = -1,
+    kNone = 0,
+    kGralloc,
+    kNativeHandle,
+    kANativeWindow,
+    kCamera,
+)
+
+enum {
+    kParamIndexVideoConfig = 0x1234,
+};
+
+struct C2VideoConfigStruct {
+    int32_t mWidth;
+    uint32_t mHeight;
+    MetadataType mMetadataType;
+    int32_t mSupportedFormats[];
+
+    C2VideoConfigStruct() {}
+
+    DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(VideoConfig, mSupportedFormats)
+    C2FIELD(mWidth, "width")
+    C2FIELD(mHeight, "height")
+    C2FIELD(mMetadataType, "metadata-type")
+    C2FIELD(mSupportedFormats, "formats")
+};
+
+typedef C2PortParam<C2Tuning, C2VideoConfigStruct> C2VideoConfigPortTuning;
+
+class MyReflector : public C2ParamReflector {
+private:
+    std::unique_ptr<C2VideoConfigPortTuning::input> inputVideoConfigTuning;
+    std::unique_ptr<C2VideoConfigPortTuning::output> outputVideoConfigTuning;
+
+public:
+    void describeSupportedValues() {
+        C2TypedFieldSupportedValues<int32_t> supportedWidths(16, 1920, 8);
+        C2FieldSupportedValues supportedWidths2(16, 1920, 8);
+
+
+        std::list<C2FieldSupportedValues> supported;
+        //supported.emplace_push(inputVideoConfigTuning->mNumber, range(16, 1920, 8));
+        //supported.emplace_push(inputVideoConfigTuning->mHeight, range(16, 1088, 8));
+        //supported.emplace_push(inputVideoConfigTuning->mMetadataType, all_enums);
+        //supported.emplace_push(inputVideoConfigTuning->mSupportedFormats, { 0, 1, 5, 7 });
+    }
+
+    virtual std::unique_ptr<android::C2StructDescriptor> describe(C2Param::BaseIndex paramType) {
+        switch (paramType.baseIndex()) {
+        case C2VideoConfigPortTuning::baseIndex:
+            return std::unique_ptr<C2StructDescriptor>(new C2StructDescriptor{
+                paramType.baseIndex(),
+                C2VideoConfigPortTuning::fieldList,
+            });
+        }
+        return nullptr;
+    }
+};
+
+class MyComponentInstance : public C2ComponentInterface {
+public:
+    virtual C2String getName() const {
+        /// \todo this seems too specific
+        return "sample.interface";
+    };
+
+    virtual node_id getId() const {
+        /// \todo how are these shared?
+        return 0;
+    }
+
+    virtual status_t commit_sm(
+            const std::vector<C2Param* const> &params,
+            std::vector<std::unique_ptr<C2SettingResult>>* const failures) {
+        (void)params;
+        (void)failures;
+        return C2_UNSUPPORTED;
+    }
+
+    virtual status_t config_nb(
+            const std::vector<C2Param* const> &params,
+            std::vector<std::unique_ptr<C2SettingResult>>* const failures) {
+        (void)params;
+        (void)failures;
+        return C2_UNSUPPORTED;
+    }
+
+    virtual status_t createTunnel_sm(node_id targetComponent) {
+        (void)targetComponent;
+        return C2_UNSUPPORTED;
+    }
+
+    virtual status_t query_nb(
+            const std::vector<C2Param* const> &stackParams,
+            const std::vector<C2Param::Index> &heapParamIndices,
+            std::vector<std::unique_ptr<C2Param>>* const heapParams) const {
+        for (C2Param* const param : stackParams) {
+            if (!*param) { // param is already invalid - remember it
+                continue;
+            }
+
+            // note: this does not handle stream params (should use index...)
+            if (!mMyParams.count(param->type())) {
+                continue; // not my param
+            }
+
+            C2Param & myParam = mMyParams.find(param->type())->second;
+            if (myParam.size() != param->size()) { // incorrect size
+                param->invalidate();
+                continue;
+            }
+
+            param->updateFrom(myParam);
+        }
+
+        for (const C2Param::Index index : heapParamIndices) {
+            if (mMyParams.count(index)) {
+                C2Param & myParam = mMyParams.find(index)->second;
+                std::unique_ptr<C2Param> paramCopy(C2Param::From(&myParam, myParam.size()));
+                heapParams->push_back(std::move(paramCopy));
+            }
+        }
+
+        return C2_OK;
+    }
+
+    std::unordered_map<uint32_t, C2Param &> mMyParams;
+
+    C2ComponentDomainInfo mDomainInfo;
+
+    MyComponentInstance() {
+        mMyParams.insert({mDomainInfo.type(), mDomainInfo});
+    }
+
+    virtual status_t releaseTunnel_sm(node_id targetComponent) {
+        (void)targetComponent;
+        return C2_UNSUPPORTED;
+    }
+
+    class MyParamReflector : public C2ParamReflector {
+        const MyComponentInstance *instance;
+
+    public:
+        MyParamReflector(const MyComponentInstance *i) : instance(i) { }
+
+        virtual std::unique_ptr<C2StructDescriptor> describe(C2Param::BaseIndex paramIndex) {
+            switch (paramIndex.baseIndex()) {
+            case decltype(instance->mDomainInfo)::baseIndex:
+            default:
+                return std::unique_ptr<C2StructDescriptor>(new C2StructDescriptor{
+                    instance->mDomainInfo.type(),
+                    decltype(instance->mDomainInfo)::fieldList,
+                });
+            }
+            return nullptr;
+        }
+    };
+
+    virtual status_t getSupportedValues(
+            const std::vector<const C2ParamField> fields,
+            std::vector<C2FieldSupportedValues>* const values) const {
+        for (const C2ParamField &field : fields) {
+            if (field == C2ParamField(&mDomainInfo, &C2ComponentDomainInfo::mValue)) {
+                values->push_back(C2FieldSupportedValues(
+                    false /* flag */,
+                    &mDomainInfo.mValue
+                    //,
+                    //{(int32_t)C2DomainVideo}
+                ));
+            }
+        }
+        return C2_OK;
+    }
+
+    virtual std::shared_ptr<C2ParamReflector> getParamReflector() const {
+        return std::shared_ptr<C2ParamReflector>(new MyParamReflector(this));
+    }
+
+    virtual status_t getSupportedParams(std::vector<std::shared_ptr<C2ParamDescriptor>> * const params) const {
+        params->push_back(std::make_shared<C2ParamDescriptor>(
+                true /* required */, "_domain", &mDomainInfo));
+        return C2_OK;
+    }
+
+    status_t getSupportedParams2(std::vector<std::shared_ptr<C2ParamDescriptor>> * const params) {
+        params->push_back(std::shared_ptr<C2ParamDescriptor>(
+                new C2ParamDescriptor(true /* required */, "_domain", &mDomainInfo)));
+        return C2_OK;
+    }
+
+};
+
+template<typename E, bool S=std::is_enum<E>::value>
+struct getter {
+    int32_t get(const C2FieldSupportedValues::Primitive &p, int32_t*) {
+        return p.i32;
+    }
+    int64_t get(const C2FieldSupportedValues::Primitive &p, int64_t*) {
+        return p.i64;
+    }
+    uint32_t get(const C2FieldSupportedValues::Primitive &p, uint32_t*) {
+        return p.u32;
+    }
+    uint64_t get(const C2FieldSupportedValues::Primitive &p, uint64_t*) {
+        return p.u64;
+    }
+    float get(const C2FieldSupportedValues::Primitive &p, float*) {
+        return p.fp;
+    }
+};
+
+template<typename E>
+struct getter<E, true> {
+     typename std::underlying_type<E>::type get(const C2FieldSupportedValues::Primitive &p, E*) {
+         using u=typename std::underlying_type<E>::type;
+         return getter<u>().get(p, (u*)0);
+     }
+};
+
+template<typename T, bool E=std::is_enum<T>::value>
+struct lax_underlying_type {
+    typedef typename std::underlying_type<T>::type type;
+};
+
+template<typename T>
+struct lax_underlying_type<T, false> {
+    typedef T type;
+};
+
+template<typename E>
+typename lax_underlying_type<E>::type get(
+        const C2FieldSupportedValues::Primitive &p, E*) {
+    return getter<E>().get(p, (E*)0);
+}
+
+template<typename T>
+void dumpFSV(const C2FieldSupportedValues &sv, T*t) {
+    using namespace std;
+    cout << (std::is_enum<T>::value ? (std::is_signed<typename std::underlying_type<T>::type>::value ? "i" : "u")
+             : std::is_integral<T>::value ? std::is_signed<T>::value ? "i" : "u" : "f")
+        << (8 * sizeof(T));
+    if (sv.type == sv.RANGE) {
+        cout << ".range(" << get(sv.range.min, t);
+        if (get(sv.range.step, t) != std::is_integral<T>::value) {
+            cout << ":" << get(sv.range.step, t);
+        }
+        if (get(sv.range.nom, t) != 1 || get(sv.range.denom, t) != 1) {
+            cout << ":" << get(sv.range.nom, t) << "/" << get(sv.range.denom, t);
+        }
+        cout << " " << get(sv.range.max, t) << ")";
+    }
+    if (sv.values.size()) {
+        cout << (sv.type == sv.FLAGS ? ".flags(" : ".list(");
+        const char *sep = "";
+        for (const C2FieldSupportedValues::Primitive &p : sv.values) {
+            cout << sep << get(p, t);
+            sep = ",";
+        }
+        cout << ")";
+    }
+    cout << endl;
+}
+
+void dumpType(C2Param::Type type) {
+    using namespace std;
+    cout << (type.isVendor() ? "Vendor" : "C2");
+    if (type.forInput()) {
+        cout << "Input";
+    } else if (type.forOutput()) {
+        cout << "Output";
+    } else if (type.forPort() && !type.forStream()) {
+        cout << "Port";
+    }
+    if (type.forStream()) {
+        cout << "Stream";
+    }
+
+    if (type.isFlexible()) {
+        cout << "Flex";
+    }
+
+    cout << type.paramIndex();
+
+    switch (type.kind()) {
+    case C2Param::INFO: cout << "Info"; break;
+    case C2Param::SETTING: cout << "Setting"; break;
+    case C2Param::TUNING: cout << "Tuning"; break;
+    case C2Param::STRUCT: cout << "Struct"; break;
+    default: cout << "Kind" << (int32_t)type.kind(); break;
+    }
+}
+
+void dumpType(C2Param::BaseIndex type) {
+    using namespace std;
+    cout << (type.isVendor() ? "Vendor" : "C2");
+    if (type.isFlexible()) {
+        cout << "Flex";
+    }
+
+    cout << type.paramIndex() << "Struct";
+}
+
+void dumpType(FD::Type type) {
+    using namespace std;
+    switch (type) {
+    case FD::BLOB: cout << "blob "; break;
+    case FD::FLOAT: cout << "float "; break;
+    case FD::INT32: cout << "int32_t "; break;
+    case FD::INT64: cout << "int64_t "; break;
+    case FD::UINT32: cout << "uint32_t "; break;
+    case FD::UINT64: cout << "uint64_t "; break;
+    case FD::STRING: cout << "char "; break;
+    default:
+        cout << "struct ";
+        dumpType((C2Param::Type)type);
+        break;
+    }
+}
+
+void dumpStruct(const C2StructDescriptor &sd) {
+    using namespace std;
+    cout << "struct ";
+    dumpType(sd.baseIndex());
+    cout << " {" << endl;
+    //C2FieldDescriptor &f;
+    for (const C2FieldDescriptor &f : sd) {
+        PrintTo(f, &cout);
+        cout << endl;
+
+        if (f.namedValues().size()) {
+            cout << ".named(";
+            const char *sep = "";
+            for (const FD::named_value_type &p : f.namedValues()) {
+                cout << sep << p.first << "=";
+                switch (f.type()) {
+                case C2Value::INT32: cout << get(p.second, (int32_t *)0); break;
+                case C2Value::INT64: cout << get(p.second, (int64_t *)0); break;
+                case C2Value::UINT32: cout << get(p.second, (uint32_t *)0); break;
+                case C2Value::UINT64: cout << get(p.second, (uint64_t *)0); break;
+                case C2Value::FLOAT: cout << get(p.second, (float *)0); break;
+                default: cout << "???"; break;
+                }
+                sep = ",";
+            }
+            cout << ")";
+        }
+    }
+
+    cout << "};" << endl;
+}
+
+void dumpDesc(const C2ParamDescriptor &pd) {
+    using namespace std;
+    if (pd.isRequired()) {
+        cout << "required ";
+    }
+    if (pd.isPersistent()) {
+        cout << "persistent ";
+    }
+    cout << "struct ";
+    dumpType(pd.type());
+    cout << " " << pd.name() << ";" << endl;
+}
+
+TEST_F(C2ParamTest, ReflectorTest) {
+    C2ComponentDomainInfo domainInfo;
+    std::shared_ptr<C2ComponentInterface> comp(new MyComponentInstance);
+    std::vector<C2FieldSupportedValues> values;
+
+    std::unique_ptr<C2StructDescriptor> desc{
+        comp->getParamReflector()->describe(C2ComponentDomainInfo::indexFlags)};
+    dumpStruct(*desc);
+
+    EXPECT_EQ(
+        C2_OK,
+        comp->getSupportedValues(
+            { C2ParamField(&domainInfo, &C2ComponentDomainInfo::mValue) },
+            &values)
+    );
+
+    for (const C2FieldSupportedValues &sv : values) {
+        dumpFSV(sv, &domainInfo.mValue);
+    }
+}
+
+C2ENUM(Enum1, uint32_t,
+    Enum1Value1,
+    Enum1Value2,
+    Enum1Value4 = Enum1Value2 + 2,
+);
+
+C2ENUM_CUSTOM_PREFIX(Enum2, uint32_t, "Enum",
+    Enum2Value1,
+    Enum2Value2,
+    Enum2Value4 = Enum1Value2 + 2,
+);
+
+C2ENUM_CUSTOM_NAMES(Enum3, uint8_t,
+    ({ { "value1", Enum3Value1 },
+       { "value2", Enum3Value2 },
+       { "value4", Enum3Value4 },
+       { "invalid", Invalid } }),
+    Enum3Value1,
+    Enum3Value2,
+    Enum3Value4 = Enum3Value2 + 2,
+    Invalid,
+);
+
+TEST_F(C2ParamTest, EnumUtilsTest) {
+    std::vector<std::pair<C2String, Enum3>> pairs ( { { "value1", Enum3Value1 },
+      { "value2", Enum3Value2 },
+      { "value4", Enum3Value4 },
+      { "invalid", Invalid } });
+    Enum3 e3;
+    FD::namedValuesFor(e3);
+}
+
+TEST_F(C2ParamTest, ParamUtilsTest) {
+    // upper case
+    EXPECT_EQ("yes", C2ParamUtils::camelCaseToDashed("YES"));
+    EXPECT_EQ("no", C2ParamUtils::camelCaseToDashed("NO"));
+    EXPECT_EQ("yes-no", C2ParamUtils::camelCaseToDashed("YES_NO"));
+    EXPECT_EQ("yes-no", C2ParamUtils::camelCaseToDashed("YES__NO"));
+    EXPECT_EQ("a2dp", C2ParamUtils::camelCaseToDashed("A2DP"));
+    EXPECT_EQ("mp2-ts", C2ParamUtils::camelCaseToDashed("MP2_TS"));
+    EXPECT_EQ("block-2d", C2ParamUtils::camelCaseToDashed("BLOCK_2D"));
+    EXPECT_EQ("mpeg-2-ts", C2ParamUtils::camelCaseToDashed("MPEG_2_TS"));
+    EXPECT_EQ("_hidden-value", C2ParamUtils::camelCaseToDashed("_HIDDEN_VALUE"));
+    EXPECT_EQ("__hidden-value2", C2ParamUtils::camelCaseToDashed("__HIDDEN_VALUE2"));
+    EXPECT_EQ("__hidden-value-2", C2ParamUtils::camelCaseToDashed("__HIDDEN_VALUE_2"));
+
+    // camel case
+    EXPECT_EQ("yes", C2ParamUtils::camelCaseToDashed("Yes"));
+    EXPECT_EQ("no", C2ParamUtils::camelCaseToDashed("No"));
+    EXPECT_EQ("yes-no", C2ParamUtils::camelCaseToDashed("YesNo"));
+    EXPECT_EQ("yes-no", C2ParamUtils::camelCaseToDashed("Yes_No"));
+    EXPECT_EQ("mp2-ts", C2ParamUtils::camelCaseToDashed("MP2Ts"));
+    EXPECT_EQ("block-2d", C2ParamUtils::camelCaseToDashed("Block2D"));
+    EXPECT_EQ("mpeg-2-ts", C2ParamUtils::camelCaseToDashed("Mpeg2ts"));
+    EXPECT_EQ("_hidden-value", C2ParamUtils::camelCaseToDashed("_HiddenValue"));
+    EXPECT_EQ("__hidden-value-2", C2ParamUtils::camelCaseToDashed("__HiddenValue2"));
+
+    // mixed case
+    EXPECT_EQ("mp2t-s", C2ParamUtils::camelCaseToDashed("MP2T_s"));
+    EXPECT_EQ("block-2d", C2ParamUtils::camelCaseToDashed("Block_2D"));
+    EXPECT_EQ("block-2-d", C2ParamUtils::camelCaseToDashed("Block2_D"));
+    EXPECT_EQ("mpeg-2-ts", C2ParamUtils::camelCaseToDashed("Mpeg_2ts"));
+    EXPECT_EQ("mpeg-2-ts", C2ParamUtils::camelCaseToDashed("Mpeg_2_TS"));
+    EXPECT_EQ("_hidden-value", C2ParamUtils::camelCaseToDashed("_Hidden__VALUE"));
+    EXPECT_EQ("__hidden-value-2", C2ParamUtils::camelCaseToDashed("__HiddenValue_2"));
+    EXPECT_EQ("_2", C2ParamUtils::camelCaseToDashed("_2"));
+    EXPECT_EQ("__23", C2ParamUtils::camelCaseToDashed("__23"));
+}
+
+TEST_F(C2ParamTest, C2ValueTest) {
+    C2Value val;
+    int32_t i32 = -32;
+    int64_t i64 = -64;
+    uint32_t u32 = 32;
+    uint64_t u64 = 64;
+    float fp = 1.5f;
+
+    EXPECT_EQ(C2Value::NO_INIT, val.type());
+    EXPECT_EQ(false, val.get(&i32));
+    EXPECT_EQ(-32, i32);
+    EXPECT_EQ(false, val.get(&i64));
+    EXPECT_EQ(-64, i64);
+    EXPECT_EQ(false, val.get(&u32));
+    EXPECT_EQ(32u, u32);
+    EXPECT_EQ(false, val.get(&u64));
+    EXPECT_EQ(64u, u64);
+    EXPECT_EQ(false, val.get(&fp));
+    EXPECT_EQ(1.5f, fp);
+
+    val = int32_t(-3216);
+    EXPECT_EQ(C2Value::INT32, val.type());
+    EXPECT_EQ(true, val.get(&i32));
+    EXPECT_EQ(-3216, i32);
+    EXPECT_EQ(false, val.get(&i64));
+    EXPECT_EQ(-64, i64);
+    EXPECT_EQ(false, val.get(&u32));
+    EXPECT_EQ(32u, u32);
+    EXPECT_EQ(false, val.get(&u64));
+    EXPECT_EQ(64u, u64);
+    EXPECT_EQ(false, val.get(&fp));
+    EXPECT_EQ(1.5f, fp);
+
+    val = uint32_t(3216);
+    EXPECT_EQ(C2Value::UINT32, val.type());
+    EXPECT_EQ(false, val.get(&i32));
+    EXPECT_EQ(-3216, i32);
+    EXPECT_EQ(false, val.get(&i64));
+    EXPECT_EQ(-64, i64);
+    EXPECT_EQ(true, val.get(&u32));
+    EXPECT_EQ(3216u, u32);
+    EXPECT_EQ(false, val.get(&u64));
+    EXPECT_EQ(64u, u64);
+    EXPECT_EQ(false, val.get(&fp));
+    EXPECT_EQ(1.5f, fp);
+
+    val = int64_t(-6432);
+    EXPECT_EQ(C2Value::INT64, val.type());
+    EXPECT_EQ(false, val.get(&i32));
+    EXPECT_EQ(-3216, i32);
+    EXPECT_EQ(true, val.get(&i64));
+    EXPECT_EQ(-6432, i64);
+    EXPECT_EQ(false, val.get(&u32));
+    EXPECT_EQ(3216u, u32);
+    EXPECT_EQ(false, val.get(&u64));
+    EXPECT_EQ(64u, u64);
+    EXPECT_EQ(false, val.get(&fp));
+    EXPECT_EQ(1.5f, fp);
+
+    val = uint64_t(6432);
+    EXPECT_EQ(C2Value::UINT64, val.type());
+    EXPECT_EQ(false, val.get(&i32));
+    EXPECT_EQ(-3216, i32);
+    EXPECT_EQ(false, val.get(&i64));
+    EXPECT_EQ(-6432, i64);
+    EXPECT_EQ(false, val.get(&u32));
+    EXPECT_EQ(3216u, u32);
+    EXPECT_EQ(true, val.get(&u64));
+    EXPECT_EQ(6432u, u64);
+    EXPECT_EQ(false, val.get(&fp));
+    EXPECT_EQ(1.5f, fp);
+
+    val = 15.25f;
+    EXPECT_EQ(C2Value::FLOAT, val.type());
+    EXPECT_EQ(false, val.get(&i32));
+    EXPECT_EQ(-3216, i32);
+    EXPECT_EQ(false, val.get(&i64));
+    EXPECT_EQ(-6432, i64);
+    EXPECT_EQ(false, val.get(&u32));
+    EXPECT_EQ(3216u, u32);
+    EXPECT_EQ(false, val.get(&u64));
+    EXPECT_EQ(6432u, u64);
+    EXPECT_EQ(true, val.get(&fp));
+    EXPECT_EQ(15.25f, fp);
+}
+
+} // namespace android
diff --git a/media/libstagefright/codec2/tests/C2_test.cpp b/media/libstagefright/codec2/tests/C2_test.cpp
new file mode 100644
index 0000000..92a3d91
--- /dev/null
+++ b/media/libstagefright/codec2/tests/C2_test.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2_test"
+
+#include <gtest/gtest.h>
+
+#include <C2.h>
+
+namespace android {
+
+/* ======================================= STATIC TESTS ======================================= */
+
+template<int N>
+struct c2_const_checker
+{
+    inline constexpr static int num() { return N; }
+};
+
+constexpr auto min_i32_i32 = c2_min(int32_t(1), int32_t(2));
+static_assert(std::is_same<decltype(min_i32_i32), const int32_t>::value, "should be int32_t");
+constexpr auto min_i32_i64 = c2_min(int32_t(3), int64_t(2));
+static_assert(std::is_same<decltype(min_i32_i64), const int64_t>::value, "should be int64_t");
+constexpr auto min_i8_i32 = c2_min(int8_t(0xff), int32_t(0xffffffff));
+static_assert(std::is_same<decltype(min_i8_i32), const int32_t>::value, "should be int32_t");
+
+static_assert(c2_const_checker<min_i32_i32>::num() == 1, "should be 1");
+static_assert(c2_const_checker<min_i32_i64>::num() == 2, "should be 2");
+static_assert(c2_const_checker<min_i8_i32>::num() == 0xffffffff, "should be 0xffffffff");
+
+constexpr auto min_u32_u32 = c2_min(uint32_t(1), uint32_t(2));
+static_assert(std::is_same<decltype(min_u32_u32), const uint32_t>::value, "should be uint32_t");
+constexpr auto min_u32_u64 = c2_min(uint32_t(3), uint64_t(2));
+static_assert(std::is_same<decltype(min_u32_u64), const uint32_t>::value, "should be uint32_t");
+constexpr auto min_u32_u8 = c2_min(uint32_t(0xffffffff), uint8_t(0xff));
+static_assert(std::is_same<decltype(min_u32_u8), const uint8_t>::value, "should be uint8_t");
+
+static_assert(c2_const_checker<min_u32_u32>::num() == 1, "should be 1");
+static_assert(c2_const_checker<min_u32_u64>::num() == 2, "should be 2");
+static_assert(c2_const_checker<min_u32_u8>::num() == 0xff, "should be 0xff");
+
+constexpr auto max_i32_i32 = c2_max(int32_t(1), int32_t(2));
+static_assert(std::is_same<decltype(max_i32_i32), const int32_t>::value, "should be int32_t");
+constexpr auto max_i32_i64 = c2_max(int32_t(3), int64_t(2));
+static_assert(std::is_same<decltype(max_i32_i64), const int64_t>::value, "should be int64_t");
+constexpr auto max_i8_i32 = c2_max(int8_t(0xff), int32_t(0xffffffff));
+static_assert(std::is_same<decltype(max_i8_i32), const int32_t>::value, "should be int32_t");
+
+static_assert(c2_const_checker<max_i32_i32>::num() == 2, "should be 2");
+static_assert(c2_const_checker<max_i32_i64>::num() == 3, "should be 3");
+static_assert(c2_const_checker<max_i8_i32>::num() == 0xffffffff, "should be 0xffffffff");
+
+constexpr auto max_u32_u32 = c2_max(uint32_t(1), uint32_t(2));
+static_assert(std::is_same<decltype(max_u32_u32), const uint32_t>::value, "should be uint32_t");
+constexpr auto max_u32_u64 = c2_max(uint32_t(3), uint64_t(2));
+static_assert(std::is_same<decltype(max_u32_u64), const uint64_t>::value, "should be uint64_t");
+constexpr auto max_u32_u8 = c2_max(uint32_t(0x7fffffff), uint8_t(0xff));
+static_assert(std::is_same<decltype(max_u32_u8), const uint32_t>::value, "should be uint32_t");
+
+static_assert(c2_const_checker<max_u32_u32>::num() == 2, "should be 2");
+static_assert(c2_const_checker<max_u32_u64>::num() == 3, "should be 3");
+static_assert(c2_const_checker<max_u32_u8>::num() == 0x7fffffff, "should be 0x7fffffff");
+
+} // namespace android
diff --git a/media/libstagefright/codec2/tests/vndk/C2UtilTest.cpp b/media/libstagefright/codec2/tests/vndk/C2UtilTest.cpp
new file mode 100644
index 0000000..7a1374b
--- /dev/null
+++ b/media/libstagefright/codec2/tests/vndk/C2UtilTest.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <util/_C2MacroUtils.h>
+
+/** \file
+ * Tests for vndk/util.
+ */
+
+/* --------------------------------------- _C2MacroUtils --------------------------------------- */
+
+static_assert(0 == _C2_ARGC(), "should be 0");
+static_assert(1 == _C2_ARGC(1), "should be 1");
+static_assert(2 == _C2_ARGC(1, 2), "should be 2");
+static_assert(64 == _C2_ARGC(
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+        26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+        49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64), "should be 64");
+
+static_assert(0 == _C2_ARGC(,), "should be 0");
+static_assert(1 == _C2_ARGC(1,), "should be 1");
+static_assert(2 == _C2_ARGC(1, 2,), "should be 2");
+static_assert(64 == _C2_ARGC(
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+        26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+        49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,), "should be 64");
+
diff --git a/media/libstagefright/codec2/vndk/include/util/C2ParamUtils.h b/media/libstagefright/codec2/vndk/include/util/C2ParamUtils.h
new file mode 100644
index 0000000..edae303
--- /dev/null
+++ b/media/libstagefright/codec2/vndk/include/util/C2ParamUtils.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef C2UTILS_PARAM_UTILS_H_
+#define C2UTILS_PARAM_UTILS_H_
+
+#include <C2Param.h>
+#include <util/_C2MacroUtils.h>
+
+#include <iostream>
+
+/** \file
+ * Utilities for parameter handling to be used by Codec2 implementations.
+ */
+
+namespace android {
+
+/// \cond INTERNAL
+
+/* ---------------------------- UTILITIES FOR ENUMERATION REFLECTION ---------------------------- */
+
+/**
+ * Utility class that allows ignoring enum value assignment (e.g. both '(_C2EnumConst)kValue = x'
+ * and '(_C2EnumConst)kValue' will evaluate to kValue).
+ */
+template<typename T>
+class _C2EnumConst {
+public:
+    // implicit conversion from T
+    inline _C2EnumConst(T value) : _mValue(value) {}
+    // implicit conversion to T
+    inline operator T() { return _mValue; }
+    // implicit conversion to C2Value::Primitive
+    inline operator C2Value::Primitive() { return (T)_mValue; }
+    // ignore assignment and return T here to avoid implicit conversion to T later
+    inline T &operator =(T value __unused) { return _mValue; }
+private:
+    T _mValue;
+};
+
+/// mapper to get name of enum
+/// \note this will contain any initialization, which we will remove when converting to lower-case
+#define _C2_GET_ENUM_NAME(x, y) #x
+/// mapper to get value of enum
+#define _C2_GET_ENUM_VALUE(x, type) (_C2EnumConst<type>)x
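+
+// Illustration (not part of the API): mapping the enumerator expression 'kValue = 5' through
+// _C2_GET_ENUM_VALUE yields '(_C2EnumConst<type>)kValue = 5'; the class's operator= ignores the
+// right-hand side, so the expression still carries kValue's own value, while _C2_GET_ENUM_NAME
+// yields the literal string "kValue = 5" to be sanitized later.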
+
+/// \endcond
+
+#define DEFINE_C2_ENUM_VALUE_AUTO_HELPER(name, type, prefix, ...) \
+template<> C2FieldDescriptor::named_values_type C2FieldDescriptor::namedValuesFor(const name &r __unused) { \
+    return C2ParamUtils::sanitizeEnumValues( \
+            std::vector<C2Value::Primitive> { _C2_MAP(_C2_GET_ENUM_VALUE, type, __VA_ARGS__) }, \
+            { _C2_MAP(_C2_GET_ENUM_NAME, type, __VA_ARGS__) }, \
+            prefix); \
+}
+
+#define DEFINE_C2_ENUM_VALUE_CUSTOM_HELPER(name, type, names, ...) \
+template<> C2FieldDescriptor::named_values_type C2FieldDescriptor::namedValuesFor(const name &r __unused) { \
+    return C2ParamUtils::customEnumValues( \
+            std::vector<std::pair<C2StringLiteral, name>> names); \
+}
+
+
+class C2ParamUtils {
+private:
+    static size_t countLeadingUnderscores(C2StringLiteral a) {
+        size_t i = 0;
+        while (a[i] == '_') {
+            ++i;
+        }
+        return i;
+    }
+
+    static size_t countMatching(C2StringLiteral a, const C2String &b) {
+        for (size_t i = 0; i < b.size(); ++i) {
+            if (!a[i] || a[i] != b[i]) {
+                return i;
+            }
+        }
+        return b.size();
+    }
+
+    // ABCDef => abc-def
+    // ABCD2ef => abcd2-ef // 0
+    // ABCD2Ef => abcd2-ef // -1
+    // AbcDef => abc-def // -1
+    // Abc2Def => abc-2def
+    // Abc2def => abc-2-def
+    // _Yo => _yo
+    // _yo => _yo
+    // C2_yo => c2-yo
+    // C2__yo => c2-yo
+
+    static C2String camelCaseToDashed(C2String name) {
+        enum {
+            kNone = '.',
+            kLower = 'a',
+            kUpper = 'A',
+            kDigit = '1',
+            kDash = '-',
+            kUnderscore = '_',
+        } type = kNone;
+        size_t word_start = 0;
+        for (size_t ix = 0; ix < name.size(); ++ix) {
+            /* std::cout << name.substr(0, word_start) << "|"
+                    << name.substr(word_start, ix - word_start) << "["
+                    << name.substr(ix, 1) << "]" << name.substr(ix + 1)
+                    << ": " << (char)type << std::endl; */
+            if (isupper(name[ix])) {
+                if (type == kLower) {
+                    name.insert(ix++, 1, '-');
+                    word_start = ix;
+                }
+                name[ix] = tolower(name[ix]);
+                type = kUpper;
+            } else if (islower(name[ix])) {
+                if (type == kDigit && ix > 0) {
+                    name.insert(ix++, 1, '-');
+                    word_start = ix;
+                } else if (type == kUpper && ix > word_start + 1) {
+                    name.insert(ix++ - 1, 1, '-');
+                    word_start = ix - 1;
+                }
+                type = kLower;
+            } else if (isdigit(name[ix])) {
+                if (type == kLower) {
+                    name.insert(ix++, 1, '-');
+                    word_start = ix;
+                }
+                type = kDigit;
+            } else if (name[ix] == '_') {
+                if (type == kDash) {
+                    name.erase(ix--, 1);
+                } else if (type != kNone && type != kUnderscore) {
+                    name[ix] = '-';
+                    type = kDash;
+                    word_start = ix + 1;
+                } else {
+                    type = kUnderscore;
+                    word_start = ix + 1;
+                }
+            } else {
+                name.resize(ix);
+            }
+        }
+        // std::cout << "=> " << name << std::endl;
+        return name;
+    }
+
+    static std::vector<C2String> sanitizeEnumValueNames(
+            const std::vector<C2StringLiteral> names,
+            C2StringLiteral _prefix = NULL) {
+        std::vector<C2String> sanitizedNames;
+        C2String prefix;
+        size_t extraUnderscores = 0;
+        bool first = true;
+        if (_prefix) {
+            extraUnderscores = countLeadingUnderscores(_prefix);
+            prefix = _prefix + extraUnderscores;
+            first = false;
+            // std::cout << "prefix:" << prefix << ", underscores:" << extraUnderscores << std::endl;
+        }
+
+        // calculate prefix and minimum leading underscores
+        for (C2StringLiteral s : names) {
+            // std::cout << s << std::endl;
+            size_t underscores = countLeadingUnderscores(s);
+            if (first) {
+                extraUnderscores = underscores;
+                prefix = s + underscores;
+                first = false;
+            } else {
+                size_t matching = countMatching(
+                    s + underscores,
+                    prefix);
+                prefix.resize(matching);
+                extraUnderscores = std::min(underscores, extraUnderscores);
+            }
+            // std::cout << "prefix:" << prefix << ", underscores:" << extraUnderscores << std::endl;
+            if (prefix.size() == 0 && extraUnderscores == 0) {
+                break;
+            }
+        }
+
+        // we swallow the first underscore after upper case prefixes
+        bool upperCasePrefix = true;
+        for (size_t i = 0; i < prefix.size(); ++i) {
+            if (islower(prefix[i])) {
+                upperCasePrefix = false;
+                break;
+            }
+        }
+
+        for (C2StringLiteral s : names) {
+            size_t underscores = countLeadingUnderscores(s);
+            C2String sanitized = C2String(s, underscores - extraUnderscores);
+            sanitized.append(s + prefix.size() + underscores +
+                        (upperCasePrefix && s[prefix.size() + underscores] == '_'));
+            sanitizedNames.push_back(camelCaseToDashed(sanitized));
+        }
+
+        for (C2String s : sanitizedNames) {
+            std::cout << s << std::endl;
+        }
+
+        return sanitizedNames;
+    }
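+
+    // Illustration (hypothetical input): for the names { "PREFIX_ONE", "PREFIX_TWO" } the loop
+    // above computes the common prefix "PREFIX_" and strips it from each name, and
+    // camelCaseToDashed() then produces the sanitized names { "one", "two" }.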
+
+    friend class C2ParamTest_ParamUtilsTest_Test;
+
+public:
+    static std::vector<C2String> getEnumValuesFromString(C2StringLiteral value) {
+        std::vector<C2String> foundNames;
+        size_t pos = 0, len = strlen(value);
+        do {
+            size_t endPos = strcspn(value + pos, " ,=") + pos;
+            if (endPos > pos) {
+                foundNames.emplace_back(value + pos, endPos - pos);
+            }
+            if (value[endPos] && value[endPos] != ',') {
+                endPos += strcspn(value + endPos, ",");
+            }
+            pos = strspn(value + endPos, " ,") + endPos;
+        } while (pos < len);
+        return foundNames;
+    }
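+
+    // Illustration (hypothetical input): getEnumValuesFromString("kA, kB = 1, kC") returns
+    // { "kA", "kB", "kC" }; anything after an '=' up to the next ',' is skipped.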
+
+    template<typename T>
+    static C2FieldDescriptor::named_values_type sanitizeEnumValues(
+            std::vector<T> values,
+            std::vector<C2StringLiteral> names,
+            C2StringLiteral prefix = NULL) {
+        C2FieldDescriptor::named_values_type namedValues;
+        std::vector<C2String> sanitizedNames = sanitizeEnumValueNames(names, prefix);
+        for (size_t i = 0; i < values.size() && i < sanitizedNames.size(); ++i) {
+            namedValues.emplace_back(sanitizedNames[i], values[i]);
+        }
+        return namedValues;
+    }
+
+    template<typename E>
+    static C2FieldDescriptor::named_values_type customEnumValues(
+            std::vector<std::pair<C2StringLiteral, E>> items) {
+        C2FieldDescriptor::named_values_type namedValues;
+        for (auto &item : items) {
+            namedValues.emplace_back(item.first, item.second);
+        }
+        return namedValues;
+    }
+};
+
+/* ---------------------------- UTILITIES FOR PARAMETER REFLECTION ---------------------------- */
+
+/* ======================== UTILITY TEMPLATES FOR PARAMETER REFLECTION ======================== */
+
+#if 1
+template<typename... Params>
+class C2_HIDE _C2Tuple { };
+
+C2_HIDE
+void addC2Params(std::list<const C2FieldDescriptor> &, _C2Tuple<> *) {
+}
+
+template<typename T, typename... Params>
+C2_HIDE
+void addC2Params(std::list<const C2FieldDescriptor> &fields, _C2Tuple<T, Params...> *)
+{
+    //C2Param::index_t index = T::baseIndex;
+    //(void)index;
+    fields.insert(fields.end(), T::fieldList);
+    addC2Params(fields, (_C2Tuple<Params...> *)nullptr);
+}
+
+template<typename... Params>
+C2_HIDE
+std::list<const C2FieldDescriptor> describeC2Params() {
+    std::list<const C2FieldDescriptor> fields;
+    addC2Params(fields, (_C2Tuple<Params...> *)nullptr);
+    return fields;
+}
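+
+// Illustrative (hypothetical) use: describeC2Params<SomeParamSetting, OtherParamTuning>() would
+// collect the fieldList of each listed parameter type into a single list of field descriptors;
+// SomeParamSetting and OtherParamTuning stand in for real C2 parameter types.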
+
+#endif
+
+/* ---------------------------- UTILITIES FOR ENUMERATION REFLECTION ---------------------------- */
+
+}  // namespace android
+
+#endif  // C2UTILS_PARAM_UTILS_H_
+
diff --git a/media/libstagefright/codec2/vndk/include/util/_C2MacroUtils.h b/media/libstagefright/codec2/vndk/include/util/_C2MacroUtils.h
new file mode 100644
index 0000000..04e9ba5
--- /dev/null
+++ b/media/libstagefright/codec2/vndk/include/util/_C2MacroUtils.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef C2UTILS_MACRO_UTILS_H_
+#define C2UTILS_MACRO_UTILS_H_
+
+/** \file
+ * Macro utilities for the utils library used by Codec2 implementations.
+ */
+
+/// \if 0
+
+/* --------------------------------- VARIABLE ARGUMENT COUNTING --------------------------------- */
+
+// remove empty arguments - _C2_ARG() expands to '', while _C2_ARG(x) expands to ', x'
+// _C2_ARGn(...) does the same for n arguments
+#define _C2_ARG(...) , ##__VA_ARGS__
+#define _C2_ARG2(_1, _2) _C2_ARG(_1) _C2_ARG(_2)
+#define _C2_ARG4(_1, _2, _3, _4) _C2_ARG2(_1, _2) _C2_ARG2(_3, _4)
+#define _C2_ARG8(_1, _2, _3, _4, _5, _6, _7, _8) _C2_ARG4(_1, _2, _3, _4) _C2_ARG4(_5, _6, _7, _8)
+#define _C2_ARG16(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16) \
+        _C2_ARG8(_1, _2, _3, _4, _5, _6, _7, _8) _C2_ARG8(_9, _10, _11, _12, _13, _14, _15, _16)
+
+// return the 65th argument
+#define _C2_ARGC_3(_, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, \
+        _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, \
+        _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, \
+        _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, ...) _64
+
+/// \endif
+
+/**
+ * Returns the number of arguments.
+ */
+// We do this by prepending a sentinel and appending 65 values counting down from 64, so that
+// the value picked out by the 65th-argument selector (_C2_ARGC_3) equals the argument count.
+#define _C2_ARGC(...) _C2_ARGC_1(0, ##__VA_ARGS__, \
+        64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, \
+        42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, \
+        20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
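+
+// Illustrative expansion (sketch): _C2_ARGC(a, b, c) pastes (a, b, c) in front of the descending
+// list 64..0, shifting it by three positions, so the value that ends up in the slot selected by
+// _C2_ARGC_3 is 3.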
+
+/// \if 0
+
+// step 1. remove empty arguments - this is needed to allow trailing comma in enum definitions
+// (NOTE: we don't know which argument will have this trailing comma so we have to try all)
+#define _C2_ARGC_1(_, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, \
+        _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, \
+        _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, \
+        _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, ...) \
+    _C2_ARGC_2(_ _C2_ARG(_0) \
+    _C2_ARG16(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16) \
+    _C2_ARG16(_17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32) \
+    _C2_ARG16(_33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48) \
+    _C2_ARG16(_49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64), \
+    ##__VA_ARGS__)
+
+// step 2. this is needed as removed arguments cannot be passed directly as empty into a macro
+#define _C2_ARGC_2(...) _C2_ARGC_3(__VA_ARGS__)
+
+/// \endif
+
+/* -------------------------------- VARIABLE ARGUMENT CONVERSION -------------------------------- */
+
+/// \if 0
+
+// macros that convert _1, _2, _3, ... to fn(_1, arg), fn(_2, arg), fn(_3, arg), ...
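+// For illustration: each level peels off one argument, e.g. _C2_MAP_64(fn, arg, a, b, ...)
+// expands to fn(a, arg), _C2_MAP_63(fn, arg, b, ...), and so on down the chain.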
+#define _C2_MAP_64(fn, arg, head, ...) fn(head, arg), _C2_MAP_63(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_63(fn, arg, head, ...) fn(head, arg), _C2_MAP_62(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_62(fn, arg, head, ...) fn(head, arg), _C2_MAP_61(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_61(fn, arg, head, ...) fn(head, arg), _C2_MAP_60(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_60(fn, arg, head, ...) fn(head, arg), _C2_MAP_59(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_59(fn, arg, head, ...) fn(head, arg), _C2_MAP_58(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_58(fn, arg, head, ...) fn(head, arg), _C2_MAP_57(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_57(fn, arg, head, ...) fn(head, arg), _C2_MAP_56(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_56(fn, arg, head, ...) fn(head, arg), _C2_MAP_55(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_55(fn, arg, head, ...) fn(head, arg), _C2_MAP_54(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_54(fn, arg, head, ...) fn(head, arg), _C2_MAP_53(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_53(fn, arg, head, ...) fn(head, arg), _C2_MAP_52(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_52(fn, arg, head, ...) fn(head, arg), _C2_MAP_51(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_51(fn, arg, head, ...) fn(head, arg), _C2_MAP_50(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_50(fn, arg, head, ...) fn(head, arg), _C2_MAP_49(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_49(fn, arg, head, ...) fn(head, arg), _C2_MAP_48(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_48(fn, arg, head, ...) fn(head, arg), _C2_MAP_47(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_47(fn, arg, head, ...) fn(head, arg), _C2_MAP_46(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_46(fn, arg, head, ...) fn(head, arg), _C2_MAP_45(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_45(fn, arg, head, ...) fn(head, arg), _C2_MAP_44(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_44(fn, arg, head, ...) fn(head, arg), _C2_MAP_43(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_43(fn, arg, head, ...) fn(head, arg), _C2_MAP_42(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_42(fn, arg, head, ...) fn(head, arg), _C2_MAP_41(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_41(fn, arg, head, ...) fn(head, arg), _C2_MAP_40(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_40(fn, arg, head, ...) fn(head, arg), _C2_MAP_39(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_39(fn, arg, head, ...) fn(head, arg), _C2_MAP_38(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_38(fn, arg, head, ...) fn(head, arg), _C2_MAP_37(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_37(fn, arg, head, ...) fn(head, arg), _C2_MAP_36(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_36(fn, arg, head, ...) fn(head, arg), _C2_MAP_35(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_35(fn, arg, head, ...) fn(head, arg), _C2_MAP_34(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_34(fn, arg, head, ...) fn(head, arg), _C2_MAP_33(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_33(fn, arg, head, ...) fn(head, arg), _C2_MAP_32(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_32(fn, arg, head, ...) fn(head, arg), _C2_MAP_31(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_31(fn, arg, head, ...) fn(head, arg), _C2_MAP_30(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_30(fn, arg, head, ...) fn(head, arg), _C2_MAP_29(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_29(fn, arg, head, ...) fn(head, arg), _C2_MAP_28(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_28(fn, arg, head, ...) fn(head, arg), _C2_MAP_27(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_27(fn, arg, head, ...) fn(head, arg), _C2_MAP_26(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_26(fn, arg, head, ...) fn(head, arg), _C2_MAP_25(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_25(fn, arg, head, ...) fn(head, arg), _C2_MAP_24(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_24(fn, arg, head, ...) fn(head, arg), _C2_MAP_23(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_23(fn, arg, head, ...) fn(head, arg), _C2_MAP_22(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_22(fn, arg, head, ...) fn(head, arg), _C2_MAP_21(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_21(fn, arg, head, ...) fn(head, arg), _C2_MAP_20(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_20(fn, arg, head, ...) fn(head, arg), _C2_MAP_19(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_19(fn, arg, head, ...) fn(head, arg), _C2_MAP_18(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_18(fn, arg, head, ...) fn(head, arg), _C2_MAP_17(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_17(fn, arg, head, ...) fn(head, arg), _C2_MAP_16(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_16(fn, arg, head, ...) fn(head, arg), _C2_MAP_15(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_15(fn, arg, head, ...) fn(head, arg), _C2_MAP_14(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_14(fn, arg, head, ...) fn(head, arg), _C2_MAP_13(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_13(fn, arg, head, ...) fn(head, arg), _C2_MAP_12(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_12(fn, arg, head, ...) fn(head, arg), _C2_MAP_11(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_11(fn, arg, head, ...) fn(head, arg), _C2_MAP_10(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_10(fn, arg, head, ...) fn(head, arg), _C2_MAP_9(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_9(fn, arg, head, ...) fn(head, arg), _C2_MAP_8(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_8(fn, arg, head, ...) fn(head, arg), _C2_MAP_7(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_7(fn, arg, head, ...) fn(head, arg), _C2_MAP_6(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_6(fn, arg, head, ...) fn(head, arg), _C2_MAP_5(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_5(fn, arg, head, ...) fn(head, arg), _C2_MAP_4(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_4(fn, arg, head, ...) fn(head, arg), _C2_MAP_3(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_3(fn, arg, head, ...) fn(head, arg), _C2_MAP_2(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_2(fn, arg, head, ...) fn(head, arg), _C2_MAP_1(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_1(fn, arg, head, ...) fn(head, arg)
+
+/// \endif
+
+/**
+ * Maps each argument using another macro x -> fn(x, arg)
+ */
+// use wrapper to call the proper mapper based on the number of arguments
+#define _C2_MAP(fn, arg, ...) _C2_MAP__(_C2_ARGC(__VA_ARGS__), fn, arg, ##__VA_ARGS__)
+
+/// \if 0
+
+// evaluate _n so it becomes a number
+#define _C2_MAP__(_n, fn, arg, ...) _C2_MAP_(_n, fn, arg, __VA_ARGS__)
+// call the proper mapper
+#define _C2_MAP_(_n, fn, arg, ...) _C2_MAP_##_n (fn, arg, __VA_ARGS__)
+
+/// \endif
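+
+// Illustrative expansion (a sketch; _C2_SHOW is a hypothetical mapper macro, not part of
+// this header):
+//   #define _C2_SHOW(x, prefix) prefix##x
+//   _C2_MAP(_C2_SHOW, k, Red, Green, Blue)  // -> kRed, kGreen, kBlue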
+
+#endif  // C2UTILS_MACRO_UTILS_H_
diff --git a/media/libstagefright/codecs/aacdec/Android.mk b/media/libstagefright/codecs/aacdec/Android.mk
index 84ea708..6490f8f 100644
--- a/media/libstagefright/codecs/aacdec/Android.mk
+++ b/media/libstagefright/codecs/aacdec/Android.mk
@@ -19,7 +19,6 @@
 LOCAL_CFLAGS :=
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
 LOCAL_STATIC_LIBRARIES := libFraunhoferAAC
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index 1c5e3c6..9fbdb72 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -62,6 +62,7 @@
     OMX_AUDIO_AACObjectHE_PS,
     OMX_AUDIO_AACObjectLD,
     OMX_AUDIO_AACObjectELD,
+    OMX_AUDIO_AACObjectER_Scalable,
 };
 
 SoftAAC2::SoftAAC2(
diff --git a/media/libstagefright/codecs/aacenc/Android.mk b/media/libstagefright/codecs/aacenc/Android.mk
index 71c374b..2f34e83 100644
--- a/media/libstagefright/codecs/aacenc/Android.mk
+++ b/media/libstagefright/codecs/aacenc/Android.mk
@@ -78,7 +78,6 @@
 	$(LOCAL_PATH)/basic_op
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
 include $(BUILD_STATIC_LIBRARY)
@@ -106,7 +105,6 @@
   LOCAL_CFLAGS :=
 
   LOCAL_CFLAGS += -Werror
-  LOCAL_CLANG := true
   LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
   LOCAL_STATIC_LIBRARIES := libFraunhoferAAC
@@ -132,7 +130,6 @@
   LOCAL_CFLAGS := -DOSCL_IMPORT_REF=
 
   LOCAL_CFLAGS += -Werror
-  LOCAL_CLANG := true
   LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
   LOCAL_STATIC_LIBRARIES := \
diff --git a/media/libstagefright/codecs/amrnb/common/Android.mk b/media/libstagefright/codecs/amrnb/common/Android.mk
index 15220a4..0bb5724 100644
--- a/media/libstagefright/codecs/amrnb/common/Android.mk
+++ b/media/libstagefright/codecs/amrnb/common/Android.mk
@@ -68,7 +68,6 @@
         -D"OSCL_UNUSED_ARG(x)=(void)(x)" -DOSCL_IMPORT_REF= -DOSCL_EXPORT_REF=
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 #addressing b/25409744
 #LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
diff --git a/media/libstagefright/codecs/amrnb/dec/Android.mk b/media/libstagefright/codecs/amrnb/dec/Android.mk
index 7967ec3..3959b80 100644
--- a/media/libstagefright/codecs/amrnb/dec/Android.mk
+++ b/media/libstagefright/codecs/amrnb/dec/Android.mk
@@ -48,7 +48,6 @@
         -D"OSCL_UNUSED_ARG(x)=(void)(x)" -DOSCL_IMPORT_REF=
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 #LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_MODULE := libstagefright_amrnbdec
@@ -73,7 +72,6 @@
 LOCAL_CFLAGS := -DOSCL_IMPORT_REF=
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 #LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_STATIC_LIBRARIES := \
@@ -104,7 +102,6 @@
 LOCAL_SHARED_LIBRARIES := \
         libstagefright_amrnb_common libaudioutils liblog
 
-LOCAL_CLANG := true
 #LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_MODULE := libstagefright_amrnbdec_test
diff --git a/media/libstagefright/codecs/amrnb/enc/Android.mk b/media/libstagefright/codecs/amrnb/enc/Android.mk
index f8a41af..af1efb9 100644
--- a/media/libstagefright/codecs/amrnb/enc/Android.mk
+++ b/media/libstagefright/codecs/amrnb/enc/Android.mk
@@ -70,7 +70,6 @@
         -D"OSCL_UNUSED_ARG(x)=(void)(x)"
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 #addressing b/25409744
 #LOCAL_SANITIZE := signed-integer-overflow
 
@@ -94,7 +93,6 @@
         $(LOCAL_PATH)/../common
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 #addressing b/25409744
 #LOCAL_SANITIZE := signed-integer-overflow
 
@@ -127,7 +125,6 @@
 LOCAL_SHARED_LIBRARIES := \
     libstagefright_amrnb_common
 
-LOCAL_CLANG := true
 #addressing b/25409744
 #LOCAL_SANITIZE := signed-integer-overflow
 
diff --git a/media/libstagefright/codecs/amrwb/Android.mk b/media/libstagefright/codecs/amrwb/Android.mk
index 1649c4a..73a1751 100644
--- a/media/libstagefright/codecs/amrwb/Android.mk
+++ b/media/libstagefright/codecs/amrwb/Android.mk
@@ -51,7 +51,6 @@
         -D"OSCL_UNUSED_ARG(x)=(void)(x)" -DOSCL_IMPORT_REF=
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_MODULE := libstagefright_amrwbdec
@@ -74,7 +73,6 @@
 LOCAL_SHARED_LIBRARIES := \
         libaudioutils
 
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_MODULE := libstagefright_amrwbdec_test
diff --git a/media/libstagefright/codecs/amrwbenc/Android.mk b/media/libstagefright/codecs/amrwbenc/Android.mk
index 026006e..3395fc1 100644
--- a/media/libstagefright/codecs/amrwbenc/Android.mk
+++ b/media/libstagefright/codecs/amrwbenc/Android.mk
@@ -102,7 +102,6 @@
 	$(LOCAL_PATH)/inc
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 #LOCAL_SANITIZE := signed-integer-overflow
 
 include $(BUILD_STATIC_LIBRARY)
@@ -120,7 +119,6 @@
 	frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_STATIC_LIBRARIES := \
diff --git a/media/libstagefright/codecs/amrwbenc/SampleCode/AMRWB_E_SAMPLE.c b/media/libstagefright/codecs/amrwbenc/SampleCode/AMRWB_E_SAMPLE.c
index 0cb0097..7c094f3 100644
--- a/media/libstagefright/codecs/amrwbenc/SampleCode/AMRWB_E_SAMPLE.c
+++ b/media/libstagefright/codecs/amrwbenc/SampleCode/AMRWB_E_SAMPLE.c
@@ -90,7 +90,7 @@
 	VO_AUDIO_CODECAPI       AudioAPI;
 	VO_MEM_OPERATOR         moper;
 	VO_CODEC_INIT_USERDATA  useData;
-	VO_HANDLE               hCodec;
+	VO_HANDLE               hCodec = NULL;
 	VO_CODECBUFFER          inData;
 	VO_CODECBUFFER          outData;
 	VO_AUDIO_OUTPUTINFO     outFormat;
@@ -211,7 +211,7 @@
 			if(returnCode == 0)
 			{
 				framenum++;
-				printf(" Frames processed: %hd\r", framenum);
+				printf(" Frames processed: %d\r", framenum);
 				if(framenum == 1)
 				{
 					fwrite(OutputBuf, 1, outData.Length + size1, fdst);
diff --git a/media/libstagefright/codecs/avc/common/Android.mk b/media/libstagefright/codecs/avc/common/Android.mk
index ed0f8ca..ecf0884 100644
--- a/media/libstagefright/codecs/avc/common/Android.mk
+++ b/media/libstagefright/codecs/avc/common/Android.mk
@@ -17,7 +17,6 @@
  	$(LOCAL_PATH)/include
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/avc/enc/Android.mk b/media/libstagefright/codecs/avc/enc/Android.mk
index 511c9f2..cfca608 100644
--- a/media/libstagefright/codecs/avc/enc/Android.mk
+++ b/media/libstagefright/codecs/avc/enc/Android.mk
@@ -31,7 +31,6 @@
     -DOSCL_IMPORT_REF= -D"OSCL_UNUSED_ARG(x)=(void)(x)" -DOSCL_EXPORT_REF=
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 include $(BUILD_STATIC_LIBRARY)
@@ -60,7 +59,7 @@
         libstagefright_avcenc
 
 LOCAL_SHARED_LIBRARIES := \
-        libstagefright \
+        libmedia \
         libstagefright_avc_common \
         libstagefright_foundation \
         libstagefright_omx \
@@ -72,7 +71,6 @@
 LOCAL_MODULE_TAGS := optional
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
@@ -92,7 +90,6 @@
 
 LOCAL_CFLAGS := \
     -DOSCL_IMPORT_REF= -DOSCL_UNUSED_ARG= -DOSCL_EXPORT_REF=
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_STATIC_LIBRARIES := \
diff --git a/media/libstagefright/codecs/avcdec/Android.mk b/media/libstagefright/codecs/avcdec/Android.mk
index ef0dbfd..9da8a6f 100644
--- a/media/libstagefright/codecs/avcdec/Android.mk
+++ b/media/libstagefright/codecs/avcdec/Android.mk
@@ -14,13 +14,12 @@
 LOCAL_C_INCLUDES += $(TOP)/frameworks/av/media/libstagefright/include
 LOCAL_C_INCLUDES += $(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_SHARED_LIBRARIES  := libstagefright
+LOCAL_SHARED_LIBRARIES  := libmedia
 LOCAL_SHARED_LIBRARIES  += libstagefright_omx
 LOCAL_SHARED_LIBRARIES  += libstagefright_foundation
 LOCAL_SHARED_LIBRARIES  += libutils
 LOCAL_SHARED_LIBRARIES  += liblog
 
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_LDFLAGS := -Wl,-Bsymbolic
diff --git a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
index 8694c73..78436f6 100644
--- a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
+++ b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
@@ -27,11 +27,10 @@
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/MediaDefs.h>
 #include <OMX_VideoExt.h>
+#include <inttypes.h>
 
 namespace android {
 
-#define PRINT_TIME  ALOGV
-
 #define componentName                   "video_decoder.avc"
 #define codingType                      OMX_VIDEO_CodingAVC
 #define CODEC_MIME_TYPE                 MEDIA_MIMETYPE_VIDEO_AVC
@@ -49,58 +48,10 @@
         (IVD_CONTROL_API_COMMAND_TYPE_T)IH264D_CMD_CTL_SET_NUM_CORES
 
 static const CodecProfileLevel kProfileLevels[] = {
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel1  },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel1b },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel11 },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel12 },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel13 },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel2  },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel21 },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel22 },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel3  },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel31 },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel32 },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel4  },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel41 },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel42 },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel5  },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel51 },
     { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel52 },
 
-    { OMX_VIDEO_AVCProfileMain,     OMX_VIDEO_AVCLevel1  },
-    { OMX_VIDEO_AVCProfileMain,     OMX_VIDEO_AVCLevel1b },
-    { OMX_VIDEO_AVCProfileMain,     OMX_VIDEO_AVCLevel11 },
-    { OMX_VIDEO_AVCProfileMain,     OMX_VIDEO_AVCLevel12 },
-    { OMX_VIDEO_AVCProfileMain,     OMX_VIDEO_AVCLevel13 },
-    { OMX_VIDEO_AVCProfileMain,     OMX_VIDEO_AVCLevel2  },
-    { OMX_VIDEO_AVCProfileMain,     OMX_VIDEO_AVCLevel21 },
-    { OMX_VIDEO_AVCProfileMain,     OMX_VIDEO_AVCLevel22 },
-    { OMX_VIDEO_AVCProfileMain,     OMX_VIDEO_AVCLevel3  },
-    { OMX_VIDEO_AVCProfileMain,     OMX_VIDEO_AVCLevel31 },
-    { OMX_VIDEO_AVCProfileMain,     OMX_VIDEO_AVCLevel32 },
-    { OMX_VIDEO_AVCProfileMain,     OMX_VIDEO_AVCLevel4  },
-    { OMX_VIDEO_AVCProfileMain,     OMX_VIDEO_AVCLevel41 },
-    { OMX_VIDEO_AVCProfileMain,     OMX_VIDEO_AVCLevel42 },
-    { OMX_VIDEO_AVCProfileMain,     OMX_VIDEO_AVCLevel5  },
-    { OMX_VIDEO_AVCProfileMain,     OMX_VIDEO_AVCLevel51 },
     { OMX_VIDEO_AVCProfileMain,     OMX_VIDEO_AVCLevel52 },
 
-    { OMX_VIDEO_AVCProfileHigh,     OMX_VIDEO_AVCLevel1  },
-    { OMX_VIDEO_AVCProfileHigh,     OMX_VIDEO_AVCLevel1b },
-    { OMX_VIDEO_AVCProfileHigh,     OMX_VIDEO_AVCLevel11 },
-    { OMX_VIDEO_AVCProfileHigh,     OMX_VIDEO_AVCLevel12 },
-    { OMX_VIDEO_AVCProfileHigh,     OMX_VIDEO_AVCLevel13 },
-    { OMX_VIDEO_AVCProfileHigh,     OMX_VIDEO_AVCLevel2  },
-    { OMX_VIDEO_AVCProfileHigh,     OMX_VIDEO_AVCLevel21 },
-    { OMX_VIDEO_AVCProfileHigh,     OMX_VIDEO_AVCLevel22 },
-    { OMX_VIDEO_AVCProfileHigh,     OMX_VIDEO_AVCLevel3  },
-    { OMX_VIDEO_AVCProfileHigh,     OMX_VIDEO_AVCLevel31 },
-    { OMX_VIDEO_AVCProfileHigh,     OMX_VIDEO_AVCLevel32 },
-    { OMX_VIDEO_AVCProfileHigh,     OMX_VIDEO_AVCLevel4  },
-    { OMX_VIDEO_AVCProfileHigh,     OMX_VIDEO_AVCLevel41 },
-    { OMX_VIDEO_AVCProfileHigh,     OMX_VIDEO_AVCLevel42 },
-    { OMX_VIDEO_AVCProfileHigh,     OMX_VIDEO_AVCLevel5  },
-    { OMX_VIDEO_AVCProfileHigh,     OMX_VIDEO_AVCLevel51 },
     { OMX_VIDEO_AVCProfileHigh,     OMX_VIDEO_AVCLevel52 },
 };
 
@@ -126,7 +77,7 @@
             1 /* numMinInputBuffers */, kNumBuffers, INPUT_BUF_SIZE,
             1 /* numMinOutputBuffers */, kNumBuffers, CODEC_MIME_TYPE);
 
-    GETTIME(&mTimeStart, NULL);
+    mTimeStart = mTimeEnd = systemTime();
 
     // If input dump is enabled, then open/create an empty file
     GENERATE_FILE_NAMES();
@@ -221,8 +172,7 @@
     memset(mTimeStampsValid, 0, sizeof(mTimeStampsValid));
 
     /* Initialize both start and end times */
-    gettimeofday(&mTimeStart, NULL);
-    gettimeofday(&mTimeEnd, NULL);
+    mTimeStart = mTimeEnd = systemTime();
 
     return OK;
 }
@@ -606,7 +556,7 @@
         {
             ivd_video_decode_ip_t s_dec_ip;
             ivd_video_decode_op_t s_dec_op;
-            WORD32 timeDelay, timeTaken;
+            nsecs_t timeDelay, timeTaken;
             size_t sizeY, sizeUV;
 
             if (!setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx)) {
@@ -618,10 +568,10 @@
             // If input dump is enabled, then write to file
             DUMP_TO_FILE(mInFile, s_dec_ip.pv_stream_buffer, s_dec_ip.u4_num_Bytes, mInputOffset);
 
-            GETTIME(&mTimeStart, NULL);
+            mTimeStart = systemTime();
             /* Compute time elapsed between end of previous decode()
              * to start of current decode() */
-            TIME_DIFF(mTimeEnd, mTimeStart, timeDelay);
+            timeDelay = mTimeStart - mTimeEnd;
 
             IV_API_CALL_STATUS_T status;
             status = ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op);
@@ -649,11 +599,12 @@
 
             getVUIParams();
 
-            GETTIME(&mTimeEnd, NULL);
+            mTimeEnd = systemTime();
             /* Compute time taken for decode() */
-            TIME_DIFF(mTimeStart, mTimeEnd, timeTaken);
+            timeTaken = mTimeEnd - mTimeStart;
 
-            PRINT_TIME("timeTaken=%6d delay=%6d numBytes=%6d", timeTaken, timeDelay,
+            ALOGV("timeTaken=%6lldus delay=%6lldus numBytes=%6d",
+                    (long long) (timeTaken / 1000ll), (long long) (timeDelay / 1000ll),
                    s_dec_op.u4_num_bytes_consumed);
             if (s_dec_op.u4_frame_decoded_flag && !mFlushNeeded) {
                 mFlushNeeded = true;
diff --git a/media/libstagefright/codecs/avcdec/SoftAVCDec.h b/media/libstagefright/codecs/avcdec/SoftAVCDec.h
index 2a71188..18b7556 100644
--- a/media/libstagefright/codecs/avcdec/SoftAVCDec.h
+++ b/media/libstagefright/codecs/avcdec/SoftAVCDec.h
@@ -41,14 +41,6 @@
 /** Used to remove warnings about unused parameters */
 #define UNUSED(x) ((void)(x))
 
-/** Get time */
-#define GETTIME(a, b) gettimeofday(a, b);
-
-/** Compute difference between start and end */
-#define TIME_DIFF(start, end, diff) \
-    diff = (((end).tv_sec - (start).tv_sec) * 1000000) + \
-            ((end).tv_usec - (start).tv_usec);
-
 struct SoftAVC : public SoftVideoDecoderOMXComponent {
     SoftAVC(const char *name, const OMX_CALLBACKTYPE *callbacks,
             OMX_PTR appData, OMX_COMPONENTTYPE **component);
@@ -70,8 +62,8 @@
 
     size_t mNumCores;            // Number of cores to be used by the codec
 
-    struct timeval mTimeStart;   // Time at the start of decode()
-    struct timeval mTimeEnd;     // Time at the end of decode()
+    nsecs_t mTimeStart;   // Time at the start of decode()
+    nsecs_t mTimeEnd;     // Time at the end of decode()
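+    // (nsecs_t is a signed 64-bit nanosecond count; both fields are filled with
+    // systemTime() from utils/Timers.h, so a plain subtraction yields the elapsed time.)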
 
     // Internal buffer to be used to flush out the buffers from decoder
     uint8_t *mFlushOutBuffer;
@@ -129,10 +121,9 @@
 #define INPUT_DUMP_EXT      "h264"
 
 #define GENERATE_FILE_NAMES() {                         \
-    GETTIME(&mTimeStart, NULL);                         \
     strcpy(mInFile, "");                                \
-    sprintf(mInFile, "%s_%ld.%ld.%s", INPUT_DUMP_PATH,  \
-            mTimeStart.tv_sec, mTimeStart.tv_usec,      \
+    sprintf(mInFile, "%s_%lld.%s", INPUT_DUMP_PATH,     \
+            (long long) mTimeStart,                     \
             INPUT_DUMP_EXT);                            \
 }
 
diff --git a/media/libstagefright/codecs/avcenc/Android.mk b/media/libstagefright/codecs/avcenc/Android.mk
index 523036a..1b1a1a0 100644
--- a/media/libstagefright/codecs/avcenc/Android.mk
+++ b/media/libstagefright/codecs/avcenc/Android.mk
@@ -12,17 +12,14 @@
 LOCAL_C_INCLUDES := $(TOP)/external/libavc/encoder
 LOCAL_C_INCLUDES += $(TOP)/external/libavc/common
 LOCAL_C_INCLUDES += $(TOP)/frameworks/av/media/libstagefright/include
-LOCAL_C_INCLUDES += $(TOP)/frameworks/native/include/media/openmax
-LOCAL_C_INCLUDES += $(TOP)/frameworks/av/media/libstagefright/include
 LOCAL_C_INCLUDES += $(TOP)/frameworks/native/include/media/hardware
 LOCAL_C_INCLUDES += $(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_SHARED_LIBRARIES  := libstagefright
+LOCAL_SHARED_LIBRARIES  := libmedia
 LOCAL_SHARED_LIBRARIES  += libstagefright_omx
 LOCAL_SHARED_LIBRARIES  += libutils
 LOCAL_SHARED_LIBRARIES  += liblog
 
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_LDFLAGS := -Wl,-Bsymbolic
diff --git a/media/libstagefright/codecs/flac/enc/Android.mk b/media/libstagefright/codecs/flac/enc/Android.mk
index 7e6e015..a3c37fb 100644
--- a/media/libstagefright/codecs/flac/enc/Android.mk
+++ b/media/libstagefright/codecs/flac/enc/Android.mk
@@ -10,11 +10,10 @@
         external/flac/include
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
 LOCAL_SHARED_LIBRARIES := \
-        libstagefright libstagefright_omx libstagefright_foundation libutils liblog
+        libmedia libstagefright_omx libstagefright_foundation libutils liblog
 
 LOCAL_STATIC_LIBRARIES := \
         libFLAC \
diff --git a/media/libstagefright/codecs/g711/dec/Android.mk b/media/libstagefright/codecs/g711/dec/Android.mk
index 8e5a2ff..11978a1 100644
--- a/media/libstagefright/codecs/g711/dec/Android.mk
+++ b/media/libstagefright/codecs/g711/dec/Android.mk
@@ -9,13 +9,12 @@
         frameworks/native/include/media/openmax
 
 LOCAL_SHARED_LIBRARIES := \
-        libstagefright libstagefright_omx libutils liblog
+        libmedia libstagefright_omx libutils liblog
 
 LOCAL_MODULE := libstagefright_soft_g711dec
 LOCAL_MODULE_TAGS := optional
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/gsm/dec/Android.mk b/media/libstagefright/codecs/gsm/dec/Android.mk
index a225c31..eed1348 100644
--- a/media/libstagefright/codecs/gsm/dec/Android.mk
+++ b/media/libstagefright/codecs/gsm/dec/Android.mk
@@ -10,11 +10,10 @@
         external/libgsm/inc
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
 LOCAL_SHARED_LIBRARIES := \
-        libstagefright libstagefright_omx libutils liblog
+        libmedia libstagefright_omx libutils liblog
 
 LOCAL_STATIC_LIBRARIES := \
         libgsm
diff --git a/media/libstagefright/codecs/hevcdec/Android.mk b/media/libstagefright/codecs/hevcdec/Android.mk
index 78c4637..83e377c 100644
--- a/media/libstagefright/codecs/hevcdec/Android.mk
+++ b/media/libstagefright/codecs/hevcdec/Android.mk
@@ -13,10 +13,9 @@
 LOCAL_C_INCLUDES += $(TOP)/external/libhevc/common
 LOCAL_C_INCLUDES += $(TOP)/frameworks/av/media/libstagefright/include
 LOCAL_C_INCLUDES += $(TOP)/frameworks/native/include/media/openmax
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
-LOCAL_SHARED_LIBRARIES  := libstagefright
+LOCAL_SHARED_LIBRARIES  := libmedia
 LOCAL_SHARED_LIBRARIES  += libstagefright_omx
 LOCAL_SHARED_LIBRARIES  += libstagefright_foundation
 LOCAL_SHARED_LIBRARIES  += libutils
diff --git a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
index 5c70387..a3fd336 100644
--- a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
+++ b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
@@ -48,14 +48,6 @@
         (IVD_CONTROL_API_COMMAND_TYPE_T)IHEVCD_CXA_CMD_CTL_SET_NUM_CORES
 
 static const CodecProfileLevel kProfileLevels[] = {
-    { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel1  },
-    { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel2  },
-    { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel21 },
-    { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel3  },
-    { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel31 },
-    { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel4  },
-    { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel41 },
-    { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel5  },
     { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel51 },
 };
 
diff --git a/media/libstagefright/codecs/m4v_h263/dec/Android.mk b/media/libstagefright/codecs/m4v_h263/dec/Android.mk
index eb39b44..e83d24d 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/Android.mk
+++ b/media/libstagefright/codecs/m4v_h263/dec/Android.mk
@@ -47,7 +47,6 @@
 LOCAL_CFLAGS := -DOSCL_EXPORT_REF= -DOSCL_IMPORT_REF=
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 include $(BUILD_STATIC_LIBRARY)
@@ -71,13 +70,12 @@
         libstagefright_m4vh263dec
 
 LOCAL_SHARED_LIBRARIES := \
-        libstagefright libstagefright_omx libstagefright_foundation libutils liblog
+        libmedia libstagefright_omx libstagefright_foundation libutils liblog
 
 LOCAL_MODULE := libstagefright_soft_mpeg4dec
 LOCAL_MODULE_TAGS := optional
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/m4v_h263/enc/Android.mk b/media/libstagefright/codecs/m4v_h263/enc/Android.mk
index 9f52538..7b706fe 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/Android.mk
+++ b/media/libstagefright/codecs/m4v_h263/enc/Android.mk
@@ -34,7 +34,6 @@
     $(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 include $(BUILD_STATIC_LIBRARY)
@@ -64,7 +63,7 @@
         libstagefright_m4vh263enc
 
 LOCAL_SHARED_LIBRARIES := \
-        libstagefright \
+        libmedia \
         libstagefright_omx \
         libutils \
         liblog \
@@ -73,7 +72,6 @@
 LOCAL_MODULE_TAGS := optional
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
@@ -89,7 +87,6 @@
         $(LOCAL_PATH)/include
 
 LOCAL_CFLAGS := -DOSCL_EXPORT_REF= -DOSCL_IMPORT_REF= -DBX_RC
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_STATIC_LIBRARIES := \
diff --git a/media/libstagefright/codecs/mp3dec/Android.mk b/media/libstagefright/codecs/mp3dec/Android.mk
index 11581c1..62dce35 100644
--- a/media/libstagefright/codecs/mp3dec/Android.mk
+++ b/media/libstagefright/codecs/mp3dec/Android.mk
@@ -54,7 +54,6 @@
         -D"OSCL_UNUSED_ARG(x)=(void)(x)"
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_MODULE := libstagefright_mp3dec
@@ -77,11 +76,10 @@
         $(LOCAL_PATH)/include
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_SHARED_LIBRARIES := \
-        libstagefright libstagefright_omx libstagefright_foundation libutils liblog
+        libmedia libstagefright_omx libstagefright_foundation libutils liblog
 
 LOCAL_STATIC_LIBRARIES := \
         libstagefright_mp3dec
@@ -103,7 +101,6 @@
         $(LOCAL_PATH)/test/include \
         $(call include-path-for, audio-utils)
 
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 LOCAL_STATIC_LIBRARIES := \
         libstagefright_mp3dec libsndfile
diff --git a/media/libstagefright/codecs/mpeg2dec/Android.mk b/media/libstagefright/codecs/mpeg2dec/Android.mk
index f1c1719..65a081e 100644
--- a/media/libstagefright/codecs/mpeg2dec/Android.mk
+++ b/media/libstagefright/codecs/mpeg2dec/Android.mk
@@ -14,14 +14,13 @@
 LOCAL_C_INCLUDES += $(TOP)/frameworks/av/media/libstagefright/include
 LOCAL_C_INCLUDES += $(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_SHARED_LIBRARIES  := libstagefright
+LOCAL_SHARED_LIBRARIES  := libmedia
 LOCAL_SHARED_LIBRARIES  += libstagefright_omx
 LOCAL_SHARED_LIBRARIES  += libstagefright_foundation
 LOCAL_SHARED_LIBRARIES  += libutils
 LOCAL_SHARED_LIBRARIES  += liblog
 
 LOCAL_LDFLAGS := -Wl,-Bsymbolic
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
index 5ed037a..ce28faf 100644
--- a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
+++ b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
@@ -47,14 +47,8 @@
         (IVD_CONTROL_API_COMMAND_TYPE_T)IMPEG2D_CMD_CTL_SET_NUM_CORES
 
 static const CodecProfileLevel kProfileLevels[] = {
-    { OMX_VIDEO_MPEG2ProfileSimple, OMX_VIDEO_MPEG2LevelLL  },
-    { OMX_VIDEO_MPEG2ProfileSimple, OMX_VIDEO_MPEG2LevelML  },
-    { OMX_VIDEO_MPEG2ProfileSimple, OMX_VIDEO_MPEG2LevelH14 },
     { OMX_VIDEO_MPEG2ProfileSimple, OMX_VIDEO_MPEG2LevelHL  },
 
-    { OMX_VIDEO_MPEG2ProfileMain  , OMX_VIDEO_MPEG2LevelLL  },
-    { OMX_VIDEO_MPEG2ProfileMain  , OMX_VIDEO_MPEG2LevelML  },
-    { OMX_VIDEO_MPEG2ProfileMain  , OMX_VIDEO_MPEG2LevelH14 },
     { OMX_VIDEO_MPEG2ProfileMain  , OMX_VIDEO_MPEG2LevelHL  },
 };
 
diff --git a/media/libstagefright/codecs/on2/dec/Android.mk b/media/libstagefright/codecs/on2/dec/Android.mk
index 76f7600..de1e937 100644
--- a/media/libstagefright/codecs/on2/dec/Android.mk
+++ b/media/libstagefright/codecs/on2/dec/Android.mk
@@ -15,13 +15,12 @@
         libvpx
 
 LOCAL_SHARED_LIBRARIES := \
-        libstagefright libstagefright_omx libstagefright_foundation libutils liblog
+        libmedia libstagefright_omx libstagefright_foundation libutils liblog
 
 LOCAL_MODULE := libstagefright_soft_vpxdec
 LOCAL_MODULE_TAGS := optional
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/on2/enc/Android.mk b/media/libstagefright/codecs/on2/enc/Android.mk
index 522672b..5c85bbd 100644
--- a/media/libstagefright/codecs/on2/enc/Android.mk
+++ b/media/libstagefright/codecs/on2/enc/Android.mk
@@ -2,7 +2,9 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES := \
-        SoftVPXEncoder.cpp
+        SoftVPXEncoder.cpp \
+        SoftVP8Encoder.cpp \
+        SoftVP9Encoder.cpp
 
 LOCAL_C_INCLUDES := \
         $(TOP)/external/libvpx/libvpx \
@@ -11,14 +13,13 @@
         frameworks/av/media/libstagefright/include \
         frameworks/native/include/media/openmax \
 
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
 LOCAL_STATIC_LIBRARIES := \
         libvpx
 
 LOCAL_SHARED_LIBRARIES := \
-        libstagefright libstagefright_omx libstagefright_foundation libutils liblog \
+        libmedia libstagefright_omx libstagefright_foundation libutils liblog \
 
 LOCAL_MODULE := libstagefright_soft_vpxenc
 LOCAL_MODULE_TAGS := optional
diff --git a/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.cpp
new file mode 100644
index 0000000..04737a9
--- /dev/null
+++ b/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.cpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "SoftVP8Encoder"
+#include "SoftVP8Encoder.h"
+
+#include <utils/Log.h>
+#include <utils/misc.h>
+
+#include <media/hardware/HardwareAPI.h>
+#include <media/hardware/MetadataBufferType.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaDefs.h>
+
+#ifndef INT32_MAX
+#define INT32_MAX   2147483647
+#endif
+
+namespace android {
+
+static const CodecProfileLevel kVp8ProfileLevels[] = {
+    { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version0 },
+    { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version1 },
+    { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version2 },
+    { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version3 },
+};
+
+SoftVP8Encoder::SoftVP8Encoder(const char *name,
+                               const OMX_CALLBACKTYPE *callbacks,
+                               OMX_PTR appData,
+                               OMX_COMPONENTTYPE **component)
+    : SoftVPXEncoder(
+            name, callbacks, appData, component, "video_encoder.vp8",
+            OMX_VIDEO_CodingVP8, MEDIA_MIMETYPE_VIDEO_VP8, 2,
+            kVp8ProfileLevels, NELEM(kVp8ProfileLevels)),
+      mDCTPartitions(0),
+      mLevel(OMX_VIDEO_VP8Level_Version0) {
+}
+
+void SoftVP8Encoder::setCodecSpecificInterface() {
+    mCodecInterface = vpx_codec_vp8_cx();
+}
+
+void SoftVP8Encoder::setCodecSpecificConfiguration() {
+    switch (mLevel) {
+        case OMX_VIDEO_VP8Level_Version0:
+            mCodecConfiguration->g_profile = 0;
+            break;
+
+        case OMX_VIDEO_VP8Level_Version1:
+            mCodecConfiguration->g_profile = 1;
+            break;
+
+        case OMX_VIDEO_VP8Level_Version2:
+            mCodecConfiguration->g_profile = 2;
+            break;
+
+        case OMX_VIDEO_VP8Level_Version3:
+            mCodecConfiguration->g_profile = 3;
+            break;
+
+        default:
+            mCodecConfiguration->g_profile = 0;
+    }
+}
+
+vpx_codec_err_t SoftVP8Encoder::setCodecSpecificControls() {
+    vpx_codec_err_t codec_return = vpx_codec_control(mCodecContext,
+                                                     VP8E_SET_TOKEN_PARTITIONS,
+                                                     mDCTPartitions);
+    if (codec_return != VPX_CODEC_OK) {
+        ALOGE("Error setting dct partitions for vpx encoder.");
+    }
+    return codec_return;
+}
+
+OMX_ERRORTYPE SoftVP8Encoder::internalGetParameter(OMX_INDEXTYPE index,
+                                                   OMX_PTR param) {
+    // can include extension index OMX_INDEXEXTTYPE
+    const int32_t indexFull = index;
+
+    switch (indexFull) {
+        case OMX_IndexParamVideoVp8:
+            return internalGetVp8Params(
+                (OMX_VIDEO_PARAM_VP8TYPE *)param);
+
+        default:
+            return SoftVPXEncoder::internalGetParameter(index, param);
+    }
+}
+
+OMX_ERRORTYPE SoftVP8Encoder::internalSetParameter(OMX_INDEXTYPE index,
+                                                   const OMX_PTR param) {
+    // can include extension index OMX_INDEXEXTTYPE
+    const int32_t indexFull = index;
+
+    switch (indexFull) {
+        case OMX_IndexParamVideoVp8:
+            return internalSetVp8Params(
+                (const OMX_VIDEO_PARAM_VP8TYPE *)param);
+
+        default:
+            return SoftVPXEncoder::internalSetParameter(index, param);
+    }
+}
+
+OMX_ERRORTYPE SoftVP8Encoder::internalGetVp8Params(
+        OMX_VIDEO_PARAM_VP8TYPE* vp8Params) {
+    if (vp8Params->nPortIndex != kOutputPortIndex) {
+        return OMX_ErrorUnsupportedIndex;
+    }
+
+    vp8Params->eProfile = OMX_VIDEO_VP8ProfileMain;
+    vp8Params->eLevel = mLevel;
+    vp8Params->bErrorResilientMode = mErrorResilience;
+    vp8Params->nDCTPartitions = mDCTPartitions;
+    return OMX_ErrorNone;
+}
+
+OMX_ERRORTYPE SoftVP8Encoder::internalSetVp8Params(
+        const OMX_VIDEO_PARAM_VP8TYPE* vp8Params) {
+    if (vp8Params->nPortIndex != kOutputPortIndex) {
+        return OMX_ErrorUnsupportedIndex;
+    }
+
+    if (vp8Params->eProfile != OMX_VIDEO_VP8ProfileMain) {
+        return OMX_ErrorBadParameter;
+    }
+
+    if (vp8Params->eLevel == OMX_VIDEO_VP8Level_Version0 ||
+        vp8Params->eLevel == OMX_VIDEO_VP8Level_Version1 ||
+        vp8Params->eLevel == OMX_VIDEO_VP8Level_Version2 ||
+        vp8Params->eLevel == OMX_VIDEO_VP8Level_Version3) {
+        mLevel = vp8Params->eLevel;
+    } else {
+        return OMX_ErrorBadParameter;
+    }
+
+    mErrorResilience = vp8Params->bErrorResilientMode;
+    if (vp8Params->nDCTPartitions <= kMaxDCTPartitions) {
+        mDCTPartitions = vp8Params->nDCTPartitions;
+    } else {
+        return OMX_ErrorBadParameter;
+    }
+    return OMX_ErrorNone;
+}
+
+}  // namespace android
diff --git a/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.h b/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.h
new file mode 100644
index 0000000..b4904bf
--- /dev/null
+++ b/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFT_VP8_ENCODER_H_
+
+#define SOFT_VP8_ENCODER_H_
+
+#include "SoftVPXEncoder.h"
+
+#include <OMX_VideoExt.h>
+#include <OMX_IndexExt.h>
+
+#include <hardware/gralloc.h>
+
+#include "vpx/vpx_encoder.h"
+#include "vpx/vpx_codec.h"
+#include "vpx/vp8cx.h"
+
+namespace android {
+
+// Exposes a vp8 encoder as an OMX Component
+//
+// In addition to the base class settings, only the following encoder settings are
+// available:
+//    - token partitioning
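+//
+// A minimal client-side configuration sketch (hypothetical values; the OMX handle and the
+// InitOMXParams() helper are assumptions, not defined in this header):
+//
+//   OMX_VIDEO_PARAM_VP8TYPE params;
+//   InitOMXParams(&params);                      // fill nSize / nVersion
+//   params.nPortIndex = 1;                       // output port of this component
+//   params.eProfile = OMX_VIDEO_VP8ProfileMain;  // the only profile accepted by internalSetVp8Params()
+//   params.eLevel = OMX_VIDEO_VP8Level_Version0;
+//   params.nDCTPartitions = 2;                   // must not exceed kMaxDCTPartitions (3)
+//   params.bErrorResilientMode = OMX_FALSE;
+//   OMX_SetParameter(hComponent, OMX_IndexParamVideoVp8, &params);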
+struct SoftVP8Encoder : public SoftVPXEncoder {
+    SoftVP8Encoder(const char *name,
+                   const OMX_CALLBACKTYPE *callbacks,
+                   OMX_PTR appData,
+                   OMX_COMPONENTTYPE **component);
+
+protected:
+    // Returns current values for requested OMX
+    // parameters
+    virtual OMX_ERRORTYPE internalGetParameter(
+            OMX_INDEXTYPE index, OMX_PTR param);
+
+    // Validates, extracts and stores relevant OMX
+    // parameters
+    virtual OMX_ERRORTYPE internalSetParameter(
+            OMX_INDEXTYPE index, const OMX_PTR param);
+
+    // Populates |mCodecInterface| with codec specific settings.
+    virtual void setCodecSpecificInterface();
+
+    // Sets codec specific configuration.
+    virtual void setCodecSpecificConfiguration();
+
+    // Initializes codec specific encoder settings.
+    virtual vpx_codec_err_t setCodecSpecificControls();
+
+    // Gets vp8 specific parameters.
+    OMX_ERRORTYPE internalGetVp8Params(
+        OMX_VIDEO_PARAM_VP8TYPE* vp8Params);
+
+    // Handles vp8 specific parameters.
+    OMX_ERRORTYPE internalSetVp8Params(
+        const OMX_VIDEO_PARAM_VP8TYPE* vp8Params);
+
+private:
+    // Max value supported for DCT partitions
+    static const uint32_t kMaxDCTPartitions = 3;
+
+    // vp8 specific configuration parameter
+    // that enables token partitioning of
+    // the stream into substreams
+    int32_t mDCTPartitions;
+
+    // Encoder profile corresponding to OMX level parameter
+    //
+    // The naming inconsistency comes from the OMX spec, which refers to
+    // vpx profiles (g_profile) as "levels" while using the name "profile"
+    // for something else.
+    OMX_VIDEO_VP8LEVELTYPE mLevel;
+
+    DISALLOW_EVIL_CONSTRUCTORS(SoftVP8Encoder);
+};
+
+}  // namespace android
+
+#endif  // SOFT_VP8_ENCODER_H_
diff --git a/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.cpp
new file mode 100644
index 0000000..4c7290d
--- /dev/null
+++ b/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.cpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "SoftVP9Encoder"
+#include "SoftVP9Encoder.h"
+
+#include <utils/Log.h>
+#include <utils/misc.h>
+
+#include <media/hardware/HardwareAPI.h>
+#include <media/hardware/MetadataBufferType.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaDefs.h>
+
+namespace android {
+
+static const CodecProfileLevel kVp9ProfileLevels[] = {
+    { OMX_VIDEO_VP9Profile0, OMX_VIDEO_VP9Level41 },
+};
+
+SoftVP9Encoder::SoftVP9Encoder(
+        const char *name, const OMX_CALLBACKTYPE *callbacks, OMX_PTR appData,
+        OMX_COMPONENTTYPE **component)
+    : SoftVPXEncoder(
+            name, callbacks, appData, component, "video_encoder.vp9",
+            OMX_VIDEO_CodingVP9, MEDIA_MIMETYPE_VIDEO_VP9, 4,
+            kVp9ProfileLevels, NELEM(kVp9ProfileLevels)),
+      mLevel(OMX_VIDEO_VP9Level1),
+      mTileColumns(0),
+      mFrameParallelDecoding(OMX_FALSE) {
+}
+
+void SoftVP9Encoder::setCodecSpecificInterface() {
+    mCodecInterface = vpx_codec_vp9_cx();
+}
+
+void SoftVP9Encoder::setCodecSpecificConfiguration() {
+    mCodecConfiguration->g_profile = 0;
+}
+
+vpx_codec_err_t SoftVP9Encoder::setCodecSpecificControls() {
+    vpx_codec_err_t codecReturn = vpx_codec_control(
+            mCodecContext, VP9E_SET_TILE_COLUMNS, mTileColumns);
+    if (codecReturn != VPX_CODEC_OK) {
+        ALOGE("Error setting VP9E_SET_TILE_COLUMNS to %d. vpx_codec_control() "
+              "returned %d", mTileColumns, codecReturn);
+        return codecReturn;
+    }
+    codecReturn = vpx_codec_control(
+            mCodecContext, VP9E_SET_FRAME_PARALLEL_DECODING,
+            mFrameParallelDecoding);
+    if (codecReturn != VPX_CODEC_OK) {
+        ALOGE("Error setting VP9E_SET_FRAME_PARALLEL_DECODING to %d."
+              "vpx_codec_control() returned %d", mFrameParallelDecoding,
+              codecReturn);
+        return codecReturn;
+    }
+    // For VP9, we always set CPU_USED to 8 (because the realtime default is 0
+    // which is too slow).
+    codecReturn = vpx_codec_control(mCodecContext, VP8E_SET_CPUUSED, 8);
+    if (codecReturn != VPX_CODEC_OK) {
+        ALOGE("Error setting VP8E_SET_CPUUSED to 8. vpx_codec_control() "
+              "returned %d", codecReturn);
+        return codecReturn;
+    }
+    return codecReturn;
+}
+
+OMX_ERRORTYPE SoftVP9Encoder::internalGetParameter(
+        OMX_INDEXTYPE index, OMX_PTR param) {
+    // can include extension index OMX_INDEXEXTTYPE
+    const int32_t indexFull = index;
+
+    switch (indexFull) {
+        case OMX_IndexParamVideoVp9:
+            return internalGetVp9Params(
+                    (OMX_VIDEO_PARAM_VP9TYPE *)param);
+
+        default:
+            return SoftVPXEncoder::internalGetParameter(index, param);
+    }
+}
+
+OMX_ERRORTYPE SoftVP9Encoder::internalSetParameter(
+        OMX_INDEXTYPE index, const OMX_PTR param) {
+    // can include extension index OMX_INDEXEXTTYPE
+    const int32_t indexFull = index;
+
+    switch (indexFull) {
+        case OMX_IndexParamVideoVp9:
+            return internalSetVp9Params(
+                    (const OMX_VIDEO_PARAM_VP9TYPE *)param);
+
+        default:
+            return SoftVPXEncoder::internalSetParameter(index, param);
+    }
+}
+
+OMX_ERRORTYPE SoftVP9Encoder::internalGetVp9Params(
+        OMX_VIDEO_PARAM_VP9TYPE *vp9Params) {
+    if (vp9Params->nPortIndex != kOutputPortIndex) {
+        return OMX_ErrorUnsupportedIndex;
+    }
+
+    vp9Params->eProfile = OMX_VIDEO_VP9Profile0;
+    vp9Params->eLevel = mLevel;
+    vp9Params->bErrorResilientMode = mErrorResilience;
+    vp9Params->nTileColumns = mTileColumns;
+    vp9Params->bEnableFrameParallelDecoding = mFrameParallelDecoding;
+    return OMX_ErrorNone;
+}
+
+OMX_ERRORTYPE SoftVP9Encoder::internalSetVp9Params(
+        const OMX_VIDEO_PARAM_VP9TYPE *vp9Params) {
+    if (vp9Params->nPortIndex != kOutputPortIndex) {
+        return OMX_ErrorUnsupportedIndex;
+    }
+
+    if (vp9Params->eProfile != OMX_VIDEO_VP9Profile0) {
+        return OMX_ErrorBadParameter;
+    }
+
+    if (vp9Params->eLevel == OMX_VIDEO_VP9Level1 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level11 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level2 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level21 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level3 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level31 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level4 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level41 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level5 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level51 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level52 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level6 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level61 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level62) {
+        mLevel = vp9Params->eLevel;
+    } else {
+        return OMX_ErrorBadParameter;
+    }
+
+    mErrorResilience = vp9Params->bErrorResilientMode;
+    mTileColumns = vp9Params->nTileColumns;
+    mFrameParallelDecoding = vp9Params->bEnableFrameParallelDecoding;
+    return OMX_ErrorNone;
+}
+
+}  // namespace android
diff --git a/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.h b/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.h
new file mode 100644
index 0000000..85df69a
--- /dev/null
+++ b/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFT_VP9_ENCODER_H_
+
+#define SOFT_VP9_ENCODER_H_
+
+#include "SoftVPXEncoder.h"
+
+#include <OMX_VideoExt.h>
+#include <OMX_IndexExt.h>
+
+#include <hardware/gralloc.h>
+
+#include "vpx/vpx_encoder.h"
+#include "vpx/vpx_codec.h"
+#include "vpx/vp8cx.h"
+
+namespace android {
+
+// Exposes a VP9 encoder as an OMX Component
+//
+// In addition to the base class settings, only the following encoder settings are
+// available:
+//    - tile rows
+//    - tile columns
+//    - frame parallel mode
+struct SoftVP9Encoder : public SoftVPXEncoder {
+    SoftVP9Encoder(const char *name,
+                   const OMX_CALLBACKTYPE *callbacks,
+                   OMX_PTR appData,
+                   OMX_COMPONENTTYPE **component);
+
+protected:
+    // Returns current values for requested OMX
+    // parameters
+    virtual OMX_ERRORTYPE internalGetParameter(
+            OMX_INDEXTYPE index, OMX_PTR param);
+
+    // Validates, extracts and stores relevant OMX
+    // parameters
+    virtual OMX_ERRORTYPE internalSetParameter(
+            OMX_INDEXTYPE index, const OMX_PTR param);
+
+    // Populates |mCodecInterface| with codec specific settings.
+    virtual void setCodecSpecificInterface();
+
+    // Sets codec specific configuration.
+    virtual void setCodecSpecificConfiguration();
+
+    // Initializes codec specific encoder settings.
+    virtual vpx_codec_err_t setCodecSpecificControls();
+
+    // Gets vp9 specific parameters.
+    OMX_ERRORTYPE internalGetVp9Params(
+        OMX_VIDEO_PARAM_VP9TYPE* vp9Params);
+
+    // Handles vp9 specific parameters.
+    OMX_ERRORTYPE internalSetVp9Params(
+        const OMX_VIDEO_PARAM_VP9TYPE* vp9Params);
+
+private:
+    // Encoder profile corresponding to OMX level parameter
+    //
+    // The naming inconsistency comes from the OMX spec, which refers to
+    // vpx profiles (g_profile) as "levels" while using the name "profile"
+    // for something else.
+    OMX_VIDEO_VP9LEVELTYPE mLevel;
+
+    int32_t mTileColumns;
+
+    OMX_BOOL mFrameParallelDecoding;
+
+    DISALLOW_EVIL_CONSTRUCTORS(SoftVP9Encoder);
+};
+
+}  // namespace android
+
+#endif  // SOFT_VP9_ENCODER_H_
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index 5edfbb5..5609032 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -18,6 +18,9 @@
 #define LOG_TAG "SoftVPXEncoder"
 #include "SoftVPXEncoder.h"
 
+#include "SoftVP8Encoder.h"
+#include "SoftVP9Encoder.h"
+
 #include <utils/Log.h>
 #include <utils/misc.h>
 
@@ -42,7 +45,6 @@
     params->nVersion.s.nStep = 0;
 }
 
-
 static int GetCPUCoreCount() {
     int cpuCoreCount = 1;
 #if defined(_SC_NPROCESSORS_ONLN)
@@ -55,30 +57,26 @@
     return cpuCoreCount;
 }
 
-static const CodecProfileLevel kProfileLevels[] = {
-    { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version0 },
-    { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version1 },
-    { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version2 },
-    { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version3 },
-};
-
 SoftVPXEncoder::SoftVPXEncoder(const char *name,
                                const OMX_CALLBACKTYPE *callbacks,
                                OMX_PTR appData,
-                               OMX_COMPONENTTYPE **component)
+                               OMX_COMPONENTTYPE **component,
+                               const char* role,
+                               OMX_VIDEO_CODINGTYPE codingType,
+                               const char* mimeType,
+                               int32_t minCompressionRatio,
+                               const CodecProfileLevel *profileLevels,
+                               size_t numProfileLevels)
     : SoftVideoEncoderOMXComponent(
-            name, "video_encoder.vp8", OMX_VIDEO_CodingVP8,
-            kProfileLevels, NELEM(kProfileLevels),
+            name, role, codingType, profileLevels, numProfileLevels,
             176 /* width */, 144 /* height */,
             callbacks, appData, component),
       mCodecContext(NULL),
       mCodecConfiguration(NULL),
       mCodecInterface(NULL),
       mBitrateUpdated(false),
-      mBitrateControlMode(VPX_VBR),  // variable bitrate
-      mDCTPartitions(0),
+      mBitrateControlMode(VPX_VBR),
       mErrorResilience(OMX_FALSE),
-      mLevel(OMX_VIDEO_VP8Level_Version0),
       mKeyFrameInterval(0),
       mMinQuantizer(0),
       mMaxQuantizer(0),
@@ -96,10 +94,9 @@
 
     initPorts(
             kNumBuffers, kNumBuffers, kMinOutputBufferSize,
-            MEDIA_MIMETYPE_VIDEO_VP8, 2 /* minCompressionRatio */);
+            mimeType, minCompressionRatio);
 }
 
-
 SoftVPXEncoder::~SoftVPXEncoder() {
     releaseEncoder();
 }
@@ -108,18 +105,18 @@
     vpx_codec_err_t codec_return;
     status_t result = UNKNOWN_ERROR;
 
-    mCodecInterface = vpx_codec_vp8_cx();
+    setCodecSpecificInterface();
     if (mCodecInterface == NULL) {
         goto CLEAN_UP;
     }
-    ALOGD("VP8: initEncoder. BRMode: %u. TSLayers: %zu. KF: %u. QP: %u - %u",
+    ALOGD("VPx: initEncoder. BRMode: %u. TSLayers: %zu. KF: %u. QP: %u - %u",
           (uint32_t)mBitrateControlMode, mTemporalLayers, mKeyFrameInterval,
           mMinQuantizer, mMaxQuantizer);
 
     mCodecConfiguration = new vpx_codec_enc_cfg_t;
     codec_return = vpx_codec_enc_config_default(mCodecInterface,
                                                 mCodecConfiguration,
-                                                0);  // Codec specific flags
+                                                0);
 
     if (codec_return != VPX_CODEC_OK) {
         ALOGE("Error populating default configuration for vpx encoder.");
@@ -131,27 +128,6 @@
     mCodecConfiguration->g_threads = GetCPUCoreCount();
     mCodecConfiguration->g_error_resilient = mErrorResilience;
 
-    switch (mLevel) {
-        case OMX_VIDEO_VP8Level_Version0:
-            mCodecConfiguration->g_profile = 0;
-            break;
-
-        case OMX_VIDEO_VP8Level_Version1:
-            mCodecConfiguration->g_profile = 1;
-            break;
-
-        case OMX_VIDEO_VP8Level_Version2:
-            mCodecConfiguration->g_profile = 2;
-            break;
-
-        case OMX_VIDEO_VP8Level_Version3:
-            mCodecConfiguration->g_profile = 3;
-            break;
-
-        default:
-            mCodecConfiguration->g_profile = 0;
-    }
-
     // OMX timebase unit is microsecond
     // g_timebase is in seconds (i.e. 1/1000000 seconds)
     mCodecConfiguration->g_timebase.num = 1;
@@ -253,7 +229,6 @@
             goto CLEAN_UP;
         }
     }
-
     // Set bitrate values for each layer
     for (size_t i = 0; i < mCodecConfiguration->ts_number_layers; i++) {
         mCodecConfiguration->ts_target_bitrate[i] =
@@ -271,7 +246,7 @@
     if (mMaxQuantizer > 0) {
         mCodecConfiguration->rc_max_quantizer = mMaxQuantizer;
     }
-
+    setCodecSpecificConfiguration();
     mCodecContext = new vpx_codec_ctx_t;
     codec_return = vpx_codec_enc_init(mCodecContext,
                                       mCodecInterface,
@@ -283,14 +258,6 @@
         goto CLEAN_UP;
     }
 
-    codec_return = vpx_codec_control(mCodecContext,
-                                     VP8E_SET_TOKEN_PARTITIONS,
-                                     mDCTPartitions);
-    if (codec_return != VPX_CODEC_OK) {
-        ALOGE("Error setting dct partitions for vpx encoder.");
-        goto CLEAN_UP;
-    }
-
     // Extra CBR settings
     if (mBitrateControlMode == VPX_CBR) {
         codec_return = vpx_codec_control(mCodecContext,
@@ -318,6 +285,13 @@
         }
     }
 
+    codec_return = setCodecSpecificControls();
+
+    if (codec_return != VPX_CODEC_OK) {
+        // The codec specific method would have logged the error.
+        goto CLEAN_UP;
+    }
+
     if (mColorFormat != OMX_COLOR_FormatYUV420Planar || mInputDataIsMeta) {
         free(mConversionBuffer);
         mConversionBuffer = NULL;
@@ -338,7 +312,6 @@
     return result;
 }
 
-
 status_t SoftVPXEncoder::releaseEncoder() {
     if (mCodecContext != NULL) {
         vpx_codec_destroy(mCodecContext);
@@ -362,7 +335,6 @@
     return OK;
 }
 
-
 OMX_ERRORTYPE SoftVPXEncoder::internalGetParameter(OMX_INDEXTYPE index,
                                                    OMX_PTR param) {
     // can include extension index OMX_INDEXEXTTYPE
@@ -393,54 +365,15 @@
             return OMX_ErrorNone;
         }
 
-        // VP8 specific parameters that use extension headers
-        case OMX_IndexParamVideoVp8: {
-            OMX_VIDEO_PARAM_VP8TYPE *vp8Params =
-                (OMX_VIDEO_PARAM_VP8TYPE *)param;
-
-            if (!isValidOMXParam(vp8Params)) {
-                return OMX_ErrorBadParameter;
-            }
-
-            if (vp8Params->nPortIndex != kOutputPortIndex) {
-                return OMX_ErrorUnsupportedIndex;
-            }
-
-            vp8Params->eProfile = OMX_VIDEO_VP8ProfileMain;
-            vp8Params->eLevel = mLevel;
-            vp8Params->nDCTPartitions = mDCTPartitions;
-            vp8Params->bErrorResilientMode = mErrorResilience;
-            return OMX_ErrorNone;
-        }
-
-        case OMX_IndexParamVideoAndroidVp8Encoder: {
-            OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vp8AndroidParams =
-                (OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *)param;
-
-            if (!isValidOMXParam(vp8AndroidParams)) {
-                return OMX_ErrorBadParameter;
-            }
-
-            if (vp8AndroidParams->nPortIndex != kOutputPortIndex) {
-                return OMX_ErrorUnsupportedIndex;
-            }
-
-            vp8AndroidParams->nKeyFrameInterval = mKeyFrameInterval;
-            vp8AndroidParams->eTemporalPattern = mTemporalPatternType;
-            vp8AndroidParams->nTemporalLayerCount = mTemporalLayers;
-            vp8AndroidParams->nMinQuantizer = mMinQuantizer;
-            vp8AndroidParams->nMaxQuantizer = mMaxQuantizer;
-            memcpy(vp8AndroidParams->nTemporalLayerBitrateRatio,
-                   mTemporalLayerBitrateRatio, sizeof(mTemporalLayerBitrateRatio));
-            return OMX_ErrorNone;
-        }
+        case OMX_IndexParamVideoAndroidVp8Encoder:
+            return internalGetAndroidVpxParams(
+                (OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *)param);
 
         default:
             return SoftVideoEncoderOMXComponent::internalGetParameter(index, param);
     }
 }
 
-
 OMX_ERRORTYPE SoftVPXEncoder::internalSetParameter(OMX_INDEXTYPE index,
                                                    const OMX_PTR param) {
     // can include extension index OMX_INDEXEXTTYPE
@@ -458,27 +391,9 @@
             return internalSetBitrateParams(bitRate);
         }
 
-        case OMX_IndexParamVideoVp8: {
-            const OMX_VIDEO_PARAM_VP8TYPE *vp8Params =
-                (const OMX_VIDEO_PARAM_VP8TYPE*) param;
-
-            if (!isValidOMXParam(vp8Params)) {
-                return OMX_ErrorBadParameter;
-            }
-
-            return internalSetVp8Params(vp8Params);
-        }
-
-        case OMX_IndexParamVideoAndroidVp8Encoder: {
-            const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vp8AndroidParams =
-                (const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE*) param;
-
-            if (!isValidOMXParam(vp8AndroidParams)) {
-                return OMX_ErrorBadParameter;
-            }
-
-            return internalSetAndroidVp8Params(vp8AndroidParams);
-        }
+        case OMX_IndexParamVideoAndroidVp8Encoder:
+            return internalSetAndroidVpxParams(
+                (const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *)param);
 
         default:
             return SoftVideoEncoderOMXComponent::internalSetParameter(index, param);
@@ -530,77 +445,21 @@
     }
 }
 
-OMX_ERRORTYPE SoftVPXEncoder::internalSetVp8Params(
-        const OMX_VIDEO_PARAM_VP8TYPE* vp8Params) {
-    if (vp8Params->nPortIndex != kOutputPortIndex) {
+OMX_ERRORTYPE SoftVPXEncoder::internalGetBitrateParams(
+        OMX_VIDEO_PARAM_BITRATETYPE* bitrate) {
+    if (bitrate->nPortIndex != kOutputPortIndex) {
         return OMX_ErrorUnsupportedIndex;
     }
 
-    if (vp8Params->eProfile != OMX_VIDEO_VP8ProfileMain) {
-        return OMX_ErrorBadParameter;
-    }
+    bitrate->nTargetBitrate = mBitrate;
 
-    if (vp8Params->eLevel == OMX_VIDEO_VP8Level_Version0 ||
-        vp8Params->eLevel == OMX_VIDEO_VP8Level_Version1 ||
-        vp8Params->eLevel == OMX_VIDEO_VP8Level_Version2 ||
-        vp8Params->eLevel == OMX_VIDEO_VP8Level_Version3) {
-        mLevel = vp8Params->eLevel;
+    if (mBitrateControlMode == VPX_VBR) {
+        bitrate->eControlRate = OMX_Video_ControlRateVariable;
+    } else if (mBitrateControlMode == VPX_CBR) {
+        bitrate->eControlRate = OMX_Video_ControlRateConstant;
     } else {
-        return OMX_ErrorBadParameter;
+        return OMX_ErrorUnsupportedSetting;
     }
-
-    if (vp8Params->nDCTPartitions <= kMaxDCTPartitions) {
-        mDCTPartitions = vp8Params->nDCTPartitions;
-    } else {
-        return OMX_ErrorBadParameter;
-    }
-
-    mErrorResilience = vp8Params->bErrorResilientMode;
-    return OMX_ErrorNone;
-}
-
-OMX_ERRORTYPE SoftVPXEncoder::internalSetAndroidVp8Params(
-        const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE* vp8AndroidParams) {
-    if (vp8AndroidParams->nPortIndex != kOutputPortIndex) {
-        return OMX_ErrorUnsupportedIndex;
-    }
-    if (vp8AndroidParams->eTemporalPattern != OMX_VIDEO_VPXTemporalLayerPatternNone &&
-        vp8AndroidParams->eTemporalPattern != OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
-        return OMX_ErrorBadParameter;
-    }
-    if (vp8AndroidParams->nTemporalLayerCount > OMX_VIDEO_ANDROID_MAXVP8TEMPORALLAYERS) {
-        return OMX_ErrorBadParameter;
-    }
-    if (vp8AndroidParams->nMinQuantizer > vp8AndroidParams->nMaxQuantizer) {
-        return OMX_ErrorBadParameter;
-    }
-
-    mTemporalPatternType = vp8AndroidParams->eTemporalPattern;
-    if (vp8AndroidParams->eTemporalPattern == OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
-        mTemporalLayers = vp8AndroidParams->nTemporalLayerCount;
-    } else if (vp8AndroidParams->eTemporalPattern == OMX_VIDEO_VPXTemporalLayerPatternNone) {
-        mTemporalLayers = 0;
-    }
-    // Check the bitrate distribution between layers is in increasing order
-    if (mTemporalLayers > 1) {
-        for (size_t i = 0; i < mTemporalLayers - 1; i++) {
-            if (vp8AndroidParams->nTemporalLayerBitrateRatio[i + 1] <=
-                    vp8AndroidParams->nTemporalLayerBitrateRatio[i]) {
-                ALOGE("Wrong bitrate ratio - should be in increasing order.");
-                return OMX_ErrorBadParameter;
-            }
-        }
-    }
-    mKeyFrameInterval = vp8AndroidParams->nKeyFrameInterval;
-    mMinQuantizer = vp8AndroidParams->nMinQuantizer;
-    mMaxQuantizer = vp8AndroidParams->nMaxQuantizer;
-    memcpy(mTemporalLayerBitrateRatio, vp8AndroidParams->nTemporalLayerBitrateRatio,
-            sizeof(mTemporalLayerBitrateRatio));
-    ALOGD("VP8: internalSetAndroidVp8Params. BRMode: %u. TS: %zu. KF: %u."
-          " QP: %u - %u BR0: %u. BR1: %u. BR2: %u",
-          (uint32_t)mBitrateControlMode, mTemporalLayers, mKeyFrameInterval,
-          mMinQuantizer, mMaxQuantizer, mTemporalLayerBitrateRatio[0],
-          mTemporalLayerBitrateRatio[1], mTemporalLayerBitrateRatio[2]);
     return OMX_ErrorNone;
 }
 
@@ -623,71 +482,134 @@
     return OMX_ErrorNone;
 }
 
+OMX_ERRORTYPE SoftVPXEncoder::internalGetAndroidVpxParams(
+        OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vpxAndroidParams) {
+    if (vpxAndroidParams->nPortIndex != kOutputPortIndex) {
+        return OMX_ErrorUnsupportedIndex;
+    }
+
+    vpxAndroidParams->nKeyFrameInterval = mKeyFrameInterval;
+    vpxAndroidParams->eTemporalPattern = mTemporalPatternType;
+    vpxAndroidParams->nTemporalLayerCount = mTemporalLayers;
+    vpxAndroidParams->nMinQuantizer = mMinQuantizer;
+    vpxAndroidParams->nMaxQuantizer = mMaxQuantizer;
+    memcpy(vpxAndroidParams->nTemporalLayerBitrateRatio,
+           mTemporalLayerBitrateRatio, sizeof(mTemporalLayerBitrateRatio));
+    return OMX_ErrorNone;
+}
+
+OMX_ERRORTYPE SoftVPXEncoder::internalSetAndroidVpxParams(
+        const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vpxAndroidParams) {
+    if (vpxAndroidParams->nPortIndex != kOutputPortIndex) {
+        return OMX_ErrorUnsupportedIndex;
+    }
+    if (vpxAndroidParams->eTemporalPattern != OMX_VIDEO_VPXTemporalLayerPatternNone &&
+            vpxAndroidParams->eTemporalPattern != OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
+        return OMX_ErrorBadParameter;
+    }
+    if (vpxAndroidParams->nTemporalLayerCount > OMX_VIDEO_ANDROID_MAXVP8TEMPORALLAYERS) {
+        return OMX_ErrorBadParameter;
+    }
+    if (vpxAndroidParams->nMinQuantizer > vpxAndroidParams->nMaxQuantizer) {
+        return OMX_ErrorBadParameter;
+    }
+
+    mTemporalPatternType = vpxAndroidParams->eTemporalPattern;
+    if (vpxAndroidParams->eTemporalPattern == OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
+        mTemporalLayers = vpxAndroidParams->nTemporalLayerCount;
+    } else if (vpxAndroidParams->eTemporalPattern == OMX_VIDEO_VPXTemporalLayerPatternNone) {
+        mTemporalLayers = 0;
+    }
+    // Check that the bitrate distribution between layers is in increasing order
+    if (mTemporalLayers > 1) {
+        for (size_t i = 0; i < mTemporalLayers - 1; i++) {
+            if (vpxAndroidParams->nTemporalLayerBitrateRatio[i + 1] <=
+                    vpxAndroidParams->nTemporalLayerBitrateRatio[i]) {
+                ALOGE("Wrong bitrate ratio - should be in increasing order.");
+                return OMX_ErrorBadParameter;
+            }
+        }
+    }
+    mKeyFrameInterval = vpxAndroidParams->nKeyFrameInterval;
+    mMinQuantizer = vpxAndroidParams->nMinQuantizer;
+    mMaxQuantizer = vpxAndroidParams->nMaxQuantizer;
+    memcpy(mTemporalLayerBitrateRatio, vpxAndroidParams->nTemporalLayerBitrateRatio,
+            sizeof(mTemporalLayerBitrateRatio));
+    ALOGD("VPx: internalSetAndroidVpxParams. BRMode: %u. TS: %zu. KF: %u."
+            " QP: %u - %u BR0: %u. BR1: %u. BR2: %u",
+            (uint32_t)mBitrateControlMode, mTemporalLayers, mKeyFrameInterval,
+            mMinQuantizer, mMaxQuantizer, mTemporalLayerBitrateRatio[0],
+            mTemporalLayerBitrateRatio[1], mTemporalLayerBitrateRatio[2]);
+    return OMX_ErrorNone;
+}
+
 vpx_enc_frame_flags_t SoftVPXEncoder::getEncodeFlags() {
     vpx_enc_frame_flags_t flags = 0;
-    int patternIdx = mTemporalPatternIdx % mTemporalPatternLength;
-    mTemporalPatternIdx++;
-    switch (mTemporalPattern[patternIdx]) {
-        case kTemporalUpdateLast:
-            flags |= VP8_EFLAG_NO_UPD_GF;
-            flags |= VP8_EFLAG_NO_UPD_ARF;
-            flags |= VP8_EFLAG_NO_REF_GF;
-            flags |= VP8_EFLAG_NO_REF_ARF;
-            break;
-        case kTemporalUpdateGoldenWithoutDependency:
-            flags |= VP8_EFLAG_NO_REF_GF;
-            // Deliberately no break here.
-        case kTemporalUpdateGolden:
-            flags |= VP8_EFLAG_NO_REF_ARF;
-            flags |= VP8_EFLAG_NO_UPD_ARF;
-            flags |= VP8_EFLAG_NO_UPD_LAST;
-            break;
-        case kTemporalUpdateAltrefWithoutDependency:
-            flags |= VP8_EFLAG_NO_REF_ARF;
-            flags |= VP8_EFLAG_NO_REF_GF;
-            // Deliberately no break here.
-        case kTemporalUpdateAltref:
-            flags |= VP8_EFLAG_NO_UPD_GF;
-            flags |= VP8_EFLAG_NO_UPD_LAST;
-            break;
-        case kTemporalUpdateNoneNoRefAltref:
-            flags |= VP8_EFLAG_NO_REF_ARF;
-            // Deliberately no break here.
-        case kTemporalUpdateNone:
-            flags |= VP8_EFLAG_NO_UPD_GF;
-            flags |= VP8_EFLAG_NO_UPD_ARF;
-            flags |= VP8_EFLAG_NO_UPD_LAST;
-            flags |= VP8_EFLAG_NO_UPD_ENTROPY;
-            break;
-        case kTemporalUpdateNoneNoRefGoldenRefAltRef:
-            flags |= VP8_EFLAG_NO_REF_GF;
-            flags |= VP8_EFLAG_NO_UPD_GF;
-            flags |= VP8_EFLAG_NO_UPD_ARF;
-            flags |= VP8_EFLAG_NO_UPD_LAST;
-            flags |= VP8_EFLAG_NO_UPD_ENTROPY;
-            break;
-        case kTemporalUpdateGoldenWithoutDependencyRefAltRef:
-            flags |= VP8_EFLAG_NO_REF_GF;
-            flags |= VP8_EFLAG_NO_UPD_ARF;
-            flags |= VP8_EFLAG_NO_UPD_LAST;
-            break;
-        case kTemporalUpdateLastRefAltRef:
-            flags |= VP8_EFLAG_NO_UPD_GF;
-            flags |= VP8_EFLAG_NO_UPD_ARF;
-            flags |= VP8_EFLAG_NO_REF_GF;
-            break;
-        case kTemporalUpdateGoldenRefAltRef:
-            flags |= VP8_EFLAG_NO_UPD_ARF;
-            flags |= VP8_EFLAG_NO_UPD_LAST;
-            break;
-        case kTemporalUpdateLastAndGoldenRefAltRef:
-            flags |= VP8_EFLAG_NO_UPD_ARF;
-            flags |= VP8_EFLAG_NO_REF_GF;
-            break;
-        case kTemporalUpdateLastRefAll:
-            flags |= VP8_EFLAG_NO_UPD_ARF;
-            flags |= VP8_EFLAG_NO_UPD_GF;
-            break;
+    if (mTemporalPatternLength > 0) {
+      int patternIdx = mTemporalPatternIdx % mTemporalPatternLength;
+      mTemporalPatternIdx++;
+      switch (mTemporalPattern[patternIdx]) {
+          case kTemporalUpdateLast:
+              flags |= VP8_EFLAG_NO_UPD_GF;
+              flags |= VP8_EFLAG_NO_UPD_ARF;
+              flags |= VP8_EFLAG_NO_REF_GF;
+              flags |= VP8_EFLAG_NO_REF_ARF;
+              break;
+          case kTemporalUpdateGoldenWithoutDependency:
+              flags |= VP8_EFLAG_NO_REF_GF;
+              // Deliberately no break here.
+          case kTemporalUpdateGolden:
+              flags |= VP8_EFLAG_NO_REF_ARF;
+              flags |= VP8_EFLAG_NO_UPD_ARF;
+              flags |= VP8_EFLAG_NO_UPD_LAST;
+              break;
+          case kTemporalUpdateAltrefWithoutDependency:
+              flags |= VP8_EFLAG_NO_REF_ARF;
+              flags |= VP8_EFLAG_NO_REF_GF;
+              // Deliberately no break here.
+          case kTemporalUpdateAltref:
+              flags |= VP8_EFLAG_NO_UPD_GF;
+              flags |= VP8_EFLAG_NO_UPD_LAST;
+              break;
+          case kTemporalUpdateNoneNoRefAltref:
+              flags |= VP8_EFLAG_NO_REF_ARF;
+              // Deliberately no break here.
+          case kTemporalUpdateNone:
+              flags |= VP8_EFLAG_NO_UPD_GF;
+              flags |= VP8_EFLAG_NO_UPD_ARF;
+              flags |= VP8_EFLAG_NO_UPD_LAST;
+              flags |= VP8_EFLAG_NO_UPD_ENTROPY;
+              break;
+          case kTemporalUpdateNoneNoRefGoldenRefAltRef:
+              flags |= VP8_EFLAG_NO_REF_GF;
+              flags |= VP8_EFLAG_NO_UPD_GF;
+              flags |= VP8_EFLAG_NO_UPD_ARF;
+              flags |= VP8_EFLAG_NO_UPD_LAST;
+              flags |= VP8_EFLAG_NO_UPD_ENTROPY;
+              break;
+          case kTemporalUpdateGoldenWithoutDependencyRefAltRef:
+              flags |= VP8_EFLAG_NO_REF_GF;
+              flags |= VP8_EFLAG_NO_UPD_ARF;
+              flags |= VP8_EFLAG_NO_UPD_LAST;
+              break;
+          case kTemporalUpdateLastRefAltRef:
+              flags |= VP8_EFLAG_NO_UPD_GF;
+              flags |= VP8_EFLAG_NO_UPD_ARF;
+              flags |= VP8_EFLAG_NO_REF_GF;
+              break;
+          case kTemporalUpdateGoldenRefAltRef:
+              flags |= VP8_EFLAG_NO_UPD_ARF;
+              flags |= VP8_EFLAG_NO_UPD_LAST;
+              break;
+          case kTemporalUpdateLastAndGoldenRefAltRef:
+              flags |= VP8_EFLAG_NO_UPD_ARF;
+              flags |= VP8_EFLAG_NO_REF_GF;
+              break;
+          case kTemporalUpdateLastRefAll:
+              flags |= VP8_EFLAG_NO_UPD_ARF;
+              flags |= VP8_EFLAG_NO_UPD_GF;
+              break;
+      }
     }
     return flags;
 }
@@ -765,10 +687,7 @@
         vpx_img_wrap(&raw_frame, VPX_IMG_FMT_I420, mWidth, mHeight,
                      kInputBufferAlignment, (uint8_t *)source);
 
-        vpx_enc_frame_flags_t flags = 0;
-        if (mTemporalPatternLength > 0) {
-            flags = getEncodeFlags();
-        }
+        vpx_enc_frame_flags_t flags = getEncodeFlags();
         if (mKeyFrameRequested) {
             flags |= VPX_EFLAG_FORCE_KF;
             mKeyFrameRequested = false;
@@ -779,7 +698,7 @@
             vpx_codec_err_t res = vpx_codec_enc_config_set(mCodecContext,
                                                            mCodecConfiguration);
             if (res != VPX_CODEC_OK) {
-                ALOGE("vp8 encoder failed to update bitrate: %s",
+                ALOGE("vpx encoder failed to update bitrate: %s",
                       vpx_codec_err_to_string(res));
                 notify(OMX_EventError,
                        OMX_ErrorUndefined,
@@ -849,9 +768,15 @@
 
 }  // namespace android
 
-
 android::SoftOMXComponent *createSoftOMXComponent(
         const char *name, const OMX_CALLBACKTYPE *callbacks,
         OMX_PTR appData, OMX_COMPONENTTYPE **component) {
-    return new android::SoftVPXEncoder(name, callbacks, appData, component);
+  if (!strcmp(name, "OMX.google.vp8.encoder")) {
+      return new android::SoftVP8Encoder(name, callbacks, appData, component);
+  } else if (!strcmp(name, "OMX.google.vp9.encoder")) {
+      return new android::SoftVP9Encoder(name, callbacks, appData, component);
+  } else {
+      CHECK(!"Unknown component");
+  }
+  return NULL;
 }
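Note: the SoftVP8Encoder and SoftVP9Encoder classes referenced by the new createSoftOMXComponent() dispatch are not part of this hunk. As a rough, non-authoritative sketch of how the VP8 side could plug into the hooks introduced above (reconstructed from the VP8-only code removed in this file, and assuming mLevel and mDCTPartitions move down into the subclass), the overrides might look like this:

// Sketch only: mirrors the removed inline VP8 code; member names not declared
// in SoftVPXEncoder.h are assumptions about the subclass.
void SoftVP8Encoder::setCodecSpecificInterface() {
    mCodecInterface = vpx_codec_vp8_cx();
}

void SoftVP8Encoder::setCodecSpecificConfiguration() {
    // g_profile is what the OMX VP8 extension calls a "level".
    switch (mLevel) {
        case OMX_VIDEO_VP8Level_Version0: mCodecConfiguration->g_profile = 0; break;
        case OMX_VIDEO_VP8Level_Version1: mCodecConfiguration->g_profile = 1; break;
        case OMX_VIDEO_VP8Level_Version2: mCodecConfiguration->g_profile = 2; break;
        case OMX_VIDEO_VP8Level_Version3: mCodecConfiguration->g_profile = 3; break;
        default:                          mCodecConfiguration->g_profile = 0; break;
    }
}

vpx_codec_err_t SoftVP8Encoder::setCodecSpecificControls() {
    // Token partitioning was the only per-codec control previously set inline
    // in initEncoder().
    vpx_codec_err_t res = vpx_codec_control(
            mCodecContext, VP8E_SET_TOKEN_PARTITIONS, mDCTPartitions);
    if (res != VPX_CODEC_OK) {
        ALOGE("Error setting dct partitions for vpx encoder.");
    }
    return res;
}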
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
index cd0a0cf..86e71da 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
@@ -31,18 +31,18 @@
 
 namespace android {
 
-// Exposes a vpx encoder as an OMX Component
+// Base class for a VPX Encoder OMX Component
 //
 // Boilerplate for callback bindings are taken care
 // by the base class SimpleSoftOMXComponent and its
 // parent SoftOMXComponent.
 //
-// Only following encoder settings are available
+// Only the following encoder settings are available (codec specific settings might
+// be available in the sub-classes):
 //    - target bitrate
 //    - rate control (constant / variable)
 //    - frame rate
 //    - error resilience
-//    - token partitioning
 //    - reconstruction & loop filters (g_profile)
 //
 // Only following color formats are recognized
@@ -54,7 +54,7 @@
 //    - encoding deadline is realtime
 //    - multithreaded encoding utilizes a number of threads equal
 // to online cpu's available
-//    - the algorithm interface for encoder is vp8
+//    - the algorithm interface for encoder is decided by the sub-class in use
 //    - fractional bits of frame rate is discarded
 //    - OMX timestamps are in microseconds, therefore
 // encoder timebase is fixed to 1/1000000
@@ -63,7 +63,13 @@
     SoftVPXEncoder(const char *name,
                    const OMX_CALLBACKTYPE *callbacks,
                    OMX_PTR appData,
-                   OMX_COMPONENTTYPE **component);
+                   OMX_COMPONENTTYPE **component,
+                   const char* role,
+                   OMX_VIDEO_CODINGTYPE codingType,
+                   const char* mimeType,
+                   int32_t minCompressionRatio,
+                   const CodecProfileLevel *profileLevels,
+                   size_t numProfileLevels);
 
 protected:
     virtual ~SoftVPXEncoder();
@@ -87,7 +93,44 @@
     // encoding of the frame
     virtual void onQueueFilled(OMX_U32 portIndex);
 
-private:
+    // Initializes vpx encoder with available settings.
+    status_t initEncoder();
+
+    // Populates mCodecInterface with the codec-specific encoder interface.
+    virtual void setCodecSpecificInterface() = 0;
+
+    // Sets codec specific configuration.
+    virtual void setCodecSpecificConfiguration() = 0;
+
+    // Sets codec specific encoder controls.
+    virtual vpx_codec_err_t setCodecSpecificControls() = 0;
+
+    // Get current encode flags.
+    virtual vpx_enc_frame_flags_t getEncodeFlags();
+
+    // Releases vpx encoder instance, with its associated
+    // data structures.
+    //
+    // Unless called earlier, this is handled by the
+    // dtor.
+    status_t releaseEncoder();
+
+    // Get bitrate parameters.
+    virtual OMX_ERRORTYPE internalGetBitrateParams(
+        OMX_VIDEO_PARAM_BITRATETYPE* bitrate);
+
+    // Updates bitrate to reflect port settings.
+    virtual OMX_ERRORTYPE internalSetBitrateParams(
+        const OMX_VIDEO_PARAM_BITRATETYPE* bitrate);
+
+    // Gets Android vpx specific parameters.
+    OMX_ERRORTYPE internalGetAndroidVpxParams(
+            OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vpxAndroidParams);
+
+    // Handles Android vpx specific parameters.
+    OMX_ERRORTYPE internalSetAndroidVpxParams(
+            const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vpxAndroidParams);
+
     enum TemporalReferences {
         // For 1 layer case: reference all (last, golden, and alt ref), but only
         // update last.
@@ -137,9 +180,6 @@
     static const uint32_t kInputBufferAlignment = 1;
     static const uint32_t kOutputBufferAlignment = 2;
 
-    // Max value supported for DCT partitions
-    static const uint32_t kMaxDCTPartitions = 3;
-
     // Number of supported input color formats
     static const uint32_t kNumberOfSupportedColorFormats = 3;
 
@@ -161,23 +201,10 @@
     // Bitrate control mode, either constant or variable
     vpx_rc_mode mBitrateControlMode;
 
-    // vp8 specific configuration parameter
-    // that enables token partitioning of
-    // the stream into substreams
-    int32_t mDCTPartitions;
-
     // Parameter that denotes whether error resilience
     // is enabled in encoder
     OMX_BOOL mErrorResilience;
 
-    // Encoder profile corresponding to OMX level parameter
-    //
-    // The inconsistency in the naming is caused by
-    // OMX spec referring vpx profiles (g_profile)
-    // as "levels" whereas using the name "profile" for
-    // something else.
-    OMX_VIDEO_VP8LEVELTYPE mLevel;
-
     // Key frame interval in frames
     uint32_t mKeyFrameInterval;
 
@@ -216,31 +243,6 @@
 
     bool mKeyFrameRequested;
 
-    // Initializes vpx encoder with available settings.
-    status_t initEncoder();
-
-    // Releases vpx encoder instance, with it's associated
-    // data structures.
-    //
-    // Unless called earlier, this is handled by the
-    // dtor.
-    status_t releaseEncoder();
-
-    // Get current encode flags
-    vpx_enc_frame_flags_t getEncodeFlags();
-
-    // Updates bitrate to reflect port settings.
-    OMX_ERRORTYPE internalSetBitrateParams(
-        const OMX_VIDEO_PARAM_BITRATETYPE* bitrate);
-
-    // Handles vp8 specific parameters.
-    OMX_ERRORTYPE internalSetVp8Params(
-        const OMX_VIDEO_PARAM_VP8TYPE* vp8Params);
-
-    // Handles Android vp8 specific parameters.
-    OMX_ERRORTYPE internalSetAndroidVp8Params(
-        const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE* vp8AndroidParams);
-
     DISALLOW_EVIL_CONSTRUCTORS(SoftVPXEncoder);
 };
 
diff --git a/media/libstagefright/codecs/on2/h264dec/Android.mk b/media/libstagefright/codecs/on2/h264dec/Android.mk
index 7159674..f28e17b 100644
--- a/media/libstagefright/codecs/on2/h264dec/Android.mk
+++ b/media/libstagefright/codecs/on2/h264dec/Android.mk
@@ -95,11 +95,10 @@
                         $(LOCAL_PATH)/./omxdl/arm_neon/vc/m4p10/api
 endif
 
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_SHARED_LIBRARIES := \
-	libstagefright libstagefright_omx libstagefright_foundation libutils liblog \
+	libmedia libstagefright_omx libstagefright_foundation libutils liblog \
 
 LOCAL_MODULE := libstagefright_soft_h264dec
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/ARM_DELIVERY.TXT b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/ARM_DELIVERY.TXT
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/ARM_MANIFEST.TXT b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/ARM_MANIFEST.TXT
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/filelist_vc.txt b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/filelist_vc.txt
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/comm/src/omxVCCOMM_Copy16x16_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/comm/src/omxVCCOMM_Copy16x16_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/comm/src/omxVCCOMM_Copy8x8_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/comm/src/omxVCCOMM_Copy8x8_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/comm/src/omxVCCOMM_ExpandFrame_I_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/comm/src/omxVCCOMM_ExpandFrame_I_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_Average_4x_Align_unsafe_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_Average_4x_Align_unsafe_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_DeblockingChroma_unsafe_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_DeblockingChroma_unsafe_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_DeblockingLuma_unsafe_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_DeblockingLuma_unsafe_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_DecodeCoeffsToPair_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_DecodeCoeffsToPair_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_DequantTables_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_DequantTables_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_Align_unsafe_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_Align_unsafe_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_Copy_unsafe_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_Copy_unsafe_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_DiagCopy_unsafe_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_DiagCopy_unsafe_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_Interpolate_Chroma_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_Interpolate_Chroma_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_QuantTables_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_QuantTables_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_TransformResidual4x4_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_TransformResidual4x4_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_UnpackBlock4x4_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_UnpackBlock4x4_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_DequantTransformResidualFromPairAndAdd_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_DequantTransformResidualFromPairAndAdd_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_FilterDeblockingLuma_HorEdge_I_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_FilterDeblockingLuma_HorEdge_I_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_FilterDeblockingLuma_VerEdge_I_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_FilterDeblockingLuma_VerEdge_I_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_InterpolateLuma_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_InterpolateLuma_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_PredictIntraChroma_8x8_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_PredictIntraChroma_8x8_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_PredictIntra_16x16_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_PredictIntra_16x16_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_PredictIntra_4x4_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_PredictIntra_4x4_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_TransformDequantChromaDCFromPair_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_TransformDequantChromaDCFromPair_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_TransformDequantLumaDCFromPair_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_TransformDequantLumaDCFromPair_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/armVCM4P2_Clip8_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/armVCM4P2_Clip8_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/armVCM4P2_DecodeVLCZigzag_AC_unsafe_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/armVCM4P2_DecodeVLCZigzag_AC_unsafe_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/armVCM4P2_SetPredDir_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/armVCM4P2_SetPredDir_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_DecodePadMV_PVOP_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_DecodePadMV_PVOP_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_DecodeVLCZigzag_Inter_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_DecodeVLCZigzag_Inter_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_DecodeVLCZigzag_IntraACVLC_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_DecodeVLCZigzag_IntraACVLC_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_DecodeVLCZigzag_IntraDCVLC_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_DecodeVLCZigzag_IntraDCVLC_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_FindMVpred_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_FindMVpred_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_IDCT8x8blk_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_IDCT8x8blk_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_MCReconBlock_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_MCReconBlock_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_PredictReconCoefIntra_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_PredictReconCoefIntra_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_QuantInvInter_I_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_QuantInvInter_I_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_QuantInvIntra_I_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_QuantInvIntra_I_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_chroma_hor.s b/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_chroma_hor.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_chroma_hor_ver.s b/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_chroma_hor_ver.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_chroma_ver.s b/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_chroma_ver.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_hor_half.s b/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_hor_half.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_hor_quarter.s b/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_hor_quarter.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_hor_ver_quarter.s b/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_hor_ver_quarter.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_mid_hor.s b/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_mid_hor.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_ver_half.s b/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_ver_half.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_ver_quarter.s b/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_ver_quarter.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/opus/dec/Android.mk b/media/libstagefright/codecs/opus/dec/Android.mk
index f272763..7b0ad2c 100644
--- a/media/libstagefright/codecs/opus/dec/Android.mk
+++ b/media/libstagefright/codecs/opus/dec/Android.mk
@@ -10,10 +10,9 @@
         frameworks/native/include/media/openmax \
 
 LOCAL_SHARED_LIBRARIES := \
-        libopus libstagefright libstagefright_omx \
+        libopus libmedia libstagefright_omx \
         libstagefright_foundation libutils liblog
 
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
 LOCAL_MODULE := libstagefright_soft_opusdec
diff --git a/media/libstagefright/codecs/raw/Android.mk b/media/libstagefright/codecs/raw/Android.mk
index e454c84..caed2cc 100644
--- a/media/libstagefright/codecs/raw/Android.mk
+++ b/media/libstagefright/codecs/raw/Android.mk
@@ -9,7 +9,6 @@
         frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
 LOCAL_SHARED_LIBRARIES := \
diff --git a/media/libstagefright/codecs/vorbis/dec/Android.mk b/media/libstagefright/codecs/vorbis/dec/Android.mk
index 039be6f..3d72d3a 100644
--- a/media/libstagefright/codecs/vorbis/dec/Android.mk
+++ b/media/libstagefright/codecs/vorbis/dec/Android.mk
@@ -10,14 +10,13 @@
         frameworks/native/include/media/openmax \
 
 LOCAL_SHARED_LIBRARIES := \
-        libvorbisidec libstagefright libstagefright_omx \
+        libvorbisidec libmedia libstagefright_omx \
         libstagefright_foundation libutils liblog
 
 LOCAL_MODULE := libstagefright_soft_vorbisdec
 LOCAL_MODULE_TAGS := optional
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/colorconversion/Android.mk b/media/libstagefright/colorconversion/Android.mk
index 0bf9701..ba74740 100644
--- a/media/libstagefright/colorconversion/Android.mk
+++ b/media/libstagefright/colorconversion/Android.mk
@@ -14,7 +14,6 @@
         libyuv_static \
 
 LOCAL_CFLAGS += -Werror
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_MODULE:= libstagefright_color_conversion
diff --git a/media/libstagefright/data/media_codecs_google_video.xml b/media/libstagefright/data/media_codecs_google_video.xml
index b03c769..ce164a2 100644
--- a/media/libstagefright/data/media_codecs_google_video.xml
+++ b/media/libstagefright/data/media_codecs_google_video.xml
@@ -101,5 +101,12 @@
             <Limit name="bitrate" range="1-40000000" />
             <Feature name="bitrate-modes" value="VBR,CBR" />
         </MediaCodec>
+        <MediaCodec name="OMX.google.vp9.encoder" type="video/x-vnd.on2.vp9">
+            <!-- profiles and levels:  ProfileMain : Level_Version0-3 -->
+            <Limit name="size" min="2x2" max="2048x2048" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="bitrate" range="1-40000000" />
+            <Feature name="bitrate-modes" value="VBR,CBR" />
+        </MediaCodec>
     </Encoders>
 </Included>
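The XML entry above only advertises the new component. As a hedged illustration (standard NDK media API, not part of this patch; only the codec name and MIME type come from the entry above), a client could check that the software VP9 encoder is reachable and configurable like so:

#include <media/NdkMediaCodec.h>
#include <media/NdkMediaFormat.h>

// Sketch only: returns true if the advertised encoder can be configured.
bool canEncodeVp9Software() {
    AMediaCodec *codec = AMediaCodec_createCodecByName("OMX.google.vp9.encoder");
    if (codec == nullptr) {
        return false;
    }
    AMediaFormat *format = AMediaFormat_new();
    AMediaFormat_setString(format, AMEDIAFORMAT_KEY_MIME, "video/x-vnd.on2.vp9");
    AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_WIDTH, 640);
    AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_HEIGHT, 480);
    AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_BIT_RATE, 1000000);
    AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_FRAME_RATE, 30);
    AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_COLOR_FORMAT, 19 /* YUV420Planar */);
    AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_I_FRAME_INTERVAL, 1);
    media_status_t err = AMediaCodec_configure(
            codec, format, nullptr /* surface */, nullptr /* crypto */,
            AMEDIACODEC_CONFIGURE_FLAG_ENCODE);
    AMediaFormat_delete(format);
    AMediaCodec_delete(codec);
    return err == AMEDIA_OK;
}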
diff --git a/media/libstagefright/filters/Android.mk b/media/libstagefright/filters/Android.mk
index bd75a88..f8e8352 100644
--- a/media/libstagefright/filters/Android.mk
+++ b/media/libstagefright/filters/Android.mk
@@ -21,7 +21,6 @@
 LOCAL_C_INCLUDES += $(intermediates)
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-LOCAL_CLANG := true
 
 LOCAL_SHARED_LIBRARIES := libmedia
 
diff --git a/media/libstagefright/filters/GraphicBufferListener.cpp b/media/libstagefright/filters/GraphicBufferListener.cpp
index c1aaa17..db061c1 100644
--- a/media/libstagefright/filters/GraphicBufferListener.cpp
+++ b/media/libstagefright/filters/GraphicBufferListener.cpp
@@ -22,6 +22,7 @@
 #include <media/stagefright/MediaErrors.h>
 
 #include <gui/BufferItem.h>
+#include <utils/String8.h>
 
 #include "GraphicBufferListener.h"
 
diff --git a/media/libstagefright/filters/IntrinsicBlurFilter.cpp b/media/libstagefright/filters/IntrinsicBlurFilter.cpp
index cbcf699..e00afd9 100644
--- a/media/libstagefright/filters/IntrinsicBlurFilter.cpp
+++ b/media/libstagefright/filters/IntrinsicBlurFilter.cpp
@@ -19,7 +19,7 @@
 
 #include <utils/Log.h>
 
-#include <media/stagefright/foundation/ABuffer.h>
+#include <media/MediaCodecBuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 
@@ -88,7 +88,7 @@
 }
 
 status_t IntrinsicBlurFilter::processBuffers(
-        const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer) {
+        const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer) {
     mAllocIn->copy1DRangeFrom(0, mWidth * mHeight, srcBuffer->data());
     mBlur->forEach(mAllocOut);
     mAllocOut->copy1DRangeTo(0, mWidth * mHeight, outBuffer->data());
diff --git a/media/libstagefright/filters/IntrinsicBlurFilter.h b/media/libstagefright/filters/IntrinsicBlurFilter.h
index 4707ab7..a2aabfa 100644
--- a/media/libstagefright/filters/IntrinsicBlurFilter.h
+++ b/media/libstagefright/filters/IntrinsicBlurFilter.h
@@ -31,7 +31,7 @@
     virtual void reset();
     virtual status_t setParameters(const sp<AMessage> &msg);
     virtual status_t processBuffers(
-            const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer);
+            const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer);
 
 protected:
     virtual ~IntrinsicBlurFilter() {};
diff --git a/media/libstagefright/filters/MediaFilter.cpp b/media/libstagefright/filters/MediaFilter.cpp
index cd69418..777ab5b 100644
--- a/media/libstagefright/filters/MediaFilter.cpp
+++ b/media/libstagefright/filters/MediaFilter.cpp
@@ -31,6 +31,8 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaFilter.h>
 
+#include <media/MediaCodecBuffer.h>
+
 #include <gui/BufferItem.h>
 
 #include "ColorConvert.h"
@@ -40,6 +42,9 @@
 #include "SaturationFilter.h"
 #include "ZeroFilter.h"
 
+#include "../include/ACodecBufferChannel.h"
+#include "../include/SharedMemoryBuffer.h"
+
 namespace android {
 
 // parameter: number of input and output buffers
@@ -49,6 +54,9 @@
     : mState(UNINITIALIZED),
       mGeneration(0),
       mGraphicBufferListener(NULL) {
+    mBufferChannel = std::make_shared<ACodecBufferChannel>(
+            new AMessage(kWhatInputBufferFilled, this),
+            new AMessage(kWhatOutputBufferDrained, this));
 }
 
 MediaFilter::~MediaFilter() {
@@ -56,8 +64,8 @@
 
 //////////////////// PUBLIC FUNCTIONS //////////////////////////////////////////
 
-void MediaFilter::setNotificationMessage(const sp<AMessage> &msg) {
-    mNotify = msg;
+std::shared_ptr<BufferChannelBase> MediaFilter::getBufferChannel() {
+    return mBufferChannel;
 }
 
 void MediaFilter::initiateAllocateComponent(const sp<AMessage> &msg) {
@@ -189,29 +197,6 @@
     }
 }
 
-//////////////////// PORT DESCRIPTION //////////////////////////////////////////
-
-MediaFilter::PortDescription::PortDescription() {
-}
-
-void MediaFilter::PortDescription::addBuffer(
-        IOMX::buffer_id id, const sp<ABuffer> &buffer) {
-    mBufferIDs.push_back(id);
-    mBuffers.push_back(buffer);
-}
-
-size_t MediaFilter::PortDescription::countBuffers() {
-    return mBufferIDs.size();
-}
-
-IOMX::buffer_id MediaFilter::PortDescription::bufferIDAt(size_t index) const {
-    return mBufferIDs.itemAt(index);
-}
-
-sp<ABuffer> MediaFilter::PortDescription::bufferAt(size_t index) const {
-    return mBuffers.itemAt(index);
-}
-
 //////////////////// HELPER FUNCTIONS //////////////////////////////////////////
 
 void MediaFilter::signalProcessBuffers() {
@@ -219,10 +204,7 @@
 }
 
 void MediaFilter::signalError(status_t error) {
-    sp<AMessage> notify = mNotify->dup();
-    notify->setInt32("what", CodecBase::kWhatError);
-    notify->setInt32("err", error);
-    notify->post();
+    mCallback->onError(error, ACTION_CODE_FATAL);
 }
 
 status_t MediaFilter::allocateBuffersOnPort(OMX_U32 portIndex) {
@@ -250,7 +232,8 @@
         info.mBufferID = i;
         info.mGeneration = mGeneration;
         info.mOutputFlags = 0;
-        info.mData = new ABuffer(mem->pointer(), bufferSize);
+        info.mData = new SharedMemoryBuffer(
+                isInput ? mInputFormat : mOutputFormat, mem);
         info.mData->meta()->setInt64("timeUs", 0);
 
         mBuffers[portIndex].push_back(info);
@@ -261,21 +244,15 @@
         }
     }
 
-    sp<AMessage> notify = mNotify->dup();
-    notify->setInt32("what", CodecBase::kWhatBuffersAllocated);
-
-    notify->setInt32("portIndex", portIndex);
-
-    sp<PortDescription> desc = new PortDescription;
-
+    std::vector<ACodecBufferChannel::BufferAndId> array(mBuffers[portIndex].size());
     for (size_t i = 0; i < mBuffers[portIndex].size(); ++i) {
-        const BufferInfo &info = mBuffers[portIndex][i];
-
-        desc->addBuffer(info.mBufferID, info.mData);
+        array[i] = {mBuffers[portIndex][i].mData, mBuffers[portIndex][i].mBufferID};
     }
-
-    notify->setObject("portDesc", desc);
-    notify->post();
+    if (portIndex == kPortIndexInput) {
+        mBufferChannel->setInputBufferArray(array);
+    } else {
+        mBufferChannel->setOutputBufferArray(array);
+    }
 
     return OK;
 }
@@ -309,20 +286,14 @@
 
     info->mGeneration = mGeneration;
 
-    sp<AMessage> notify = mNotify->dup();
-    notify->setInt32("what", CodecBase::kWhatFillThisBuffer);
-    notify->setInt32("buffer-id", info->mBufferID);
-
     info->mData->meta()->clear();
-    notify->setBuffer("buffer", info->mData);
 
     sp<AMessage> reply = new AMessage(kWhatInputBufferFilled, this);
     reply->setInt32("buffer-id", info->mBufferID);
 
-    notify->setMessage("reply", reply);
-
     info->mStatus = BufferInfo::OWNED_BY_UPSTREAM;
-    notify->post();
+
+    mBufferChannel->fillThisBuffer(info->mBufferID);
 }
 
 void MediaFilter::postDrainThisBuffer(BufferInfo *info) {
@@ -330,50 +301,20 @@
 
     info->mGeneration = mGeneration;
 
-    sp<AMessage> notify = mNotify->dup();
-    notify->setInt32("what", CodecBase::kWhatDrainThisBuffer);
-    notify->setInt32("buffer-id", info->mBufferID);
-    notify->setInt32("flags", info->mOutputFlags);
-    notify->setBuffer("buffer", info->mData);
-
     sp<AMessage> reply = new AMessage(kWhatOutputBufferDrained, this);
     reply->setInt32("buffer-id", info->mBufferID);
 
-    notify->setMessage("reply", reply);
-
-    notify->post();
+    mBufferChannel->drainThisBuffer(info->mBufferID, info->mOutputFlags);
 
     info->mStatus = BufferInfo::OWNED_BY_UPSTREAM;
 }
 
 void MediaFilter::postEOS() {
-    sp<AMessage> notify = mNotify->dup();
-    notify->setInt32("what", CodecBase::kWhatEOS);
-    notify->setInt32("err", ERROR_END_OF_STREAM);
-    notify->post();
+    mCallback->onEos(ERROR_END_OF_STREAM);
 
     ALOGV("Sent kWhatEOS.");
 }
 
-void MediaFilter::sendFormatChange() {
-    sp<AMessage> notify = mNotify->dup();
-
-    notify->setInt32("what", kWhatOutputFormatChanged);
-
-    AString mime;
-    CHECK(mOutputFormat->findString("mime", &mime));
-    notify->setString("mime", mime.c_str());
-
-    notify->setInt32("stride", mStride);
-    notify->setInt32("slice-height", mSliceHeight);
-    notify->setInt32("color-format", mColorFormatOut);
-    notify->setRect("crop", 0, 0, mStride - 1, mSliceHeight - 1);
-    notify->setInt32("width", mWidth);
-    notify->setInt32("height", mHeight);
-
-    notify->post();
-}
-
 void MediaFilter::requestFillEmptyInput() {
     if (mPortEOS[kPortIndexInput]) {
         return;
@@ -459,11 +400,8 @@
         return;
     }
 
-    sp<AMessage> notify = mNotify->dup();
-    notify->setInt32("what", kWhatComponentAllocated);
     // HACK - need "OMX.google" to use MediaCodec's software renderer
-    notify->setString("componentName", "OMX.google.MediaFilter");
-    notify->post();
+    mCallback->onComponentAllocated("OMX.google.MediaFilter");
     mState = INITIALIZED;
     ALOGV("Handled kWhatAllocateComponent.");
 }
@@ -540,16 +478,9 @@
     mOutputFormat->setInt32("width", mWidth);
     mOutputFormat->setInt32("height", mHeight);
 
-    sp<AMessage> notify = mNotify->dup();
-    notify->setInt32("what", kWhatComponentConfigured);
-    notify->setString("componentName", "MediaFilter");
-    notify->setMessage("input-format", mInputFormat);
-    notify->setMessage("output-format", mOutputFormat);
-    notify->post();
+    mCallback->onComponentConfigured(mInputFormat, mOutputFormat);
     mState = CONFIGURED;
     ALOGV("Handled kWhatConfigureComponent.");
-
-    sendFormatChange();
 }
 
 void MediaFilter::onStart() {
@@ -559,6 +490,8 @@
 
     allocateBuffersOnPort(kPortIndexOutput);
 
+    mCallback->onStartCompleted();
+
     status_t err = mFilter->start();
     if (err != (status_t)OK) {
         ALOGE("Failed to start filter component, err %d", err);
@@ -597,11 +530,12 @@
     CHECK_EQ(info->mStatus, BufferInfo::OWNED_BY_UPSTREAM);
     info->mStatus = BufferInfo::OWNED_BY_US;
 
-    sp<ABuffer> buffer;
+    sp<MediaCodecBuffer> buffer;
     int32_t err = OK;
     bool eos = false;
 
-    if (!msg->findBuffer("buffer", &buffer)) {
+    sp<RefBase> obj;
+    if (!msg->findObject("buffer", &obj)) {
         // these are unfilled buffers returned by client
         CHECK(msg->findInt32("err", &err));
 
@@ -616,6 +550,8 @@
         }
 
         buffer.clear();
+    } else {
+        buffer = static_cast<MediaCodecBuffer *>(obj.get());
     }
 
     int32_t isCSD;
@@ -688,9 +624,11 @@
         mState = INITIALIZED;
     }
 
-    sp<AMessage> notify = mNotify->dup();
-    notify->setInt32("what", CodecBase::kWhatShutdownCompleted);
-    notify->post();
+    if (keepComponentAllocated) {
+        mCallback->onStopCompleted();
+    } else {
+        mCallback->onReleaseCompleted();
+    }
 }
 
 void MediaFilter::onFlush() {
@@ -712,9 +650,7 @@
     mPortEOS[kPortIndexOutput] = false;
     mInputEOSResult = OK;
 
-    sp<AMessage> notify = mNotify->dup();
-    notify->setInt32("what", CodecBase::kWhatFlushCompleted);
-    notify->post();
+    mCallback->onFlushCompleted();
     ALOGV("Posted kWhatFlushCompleted");
 
     // MediaCodec returns all input buffers after flush, so in
@@ -746,13 +682,10 @@
         return;
     }
 
-    sp<AMessage> reply = mNotify->dup();
-    reply->setInt32("what", CodecBase::kWhatInputSurfaceCreated);
-    reply->setObject(
-            "input-surface",
+    mCallback->onInputSurfaceCreated(
+            nullptr, nullptr,
             new BufferProducerWrapper(
                     mGraphicBufferListener->getIGraphicBufferProducer()));
-    reply->post();
 }
 
 void MediaFilter::onInputFrameAvailable() {
@@ -768,7 +701,8 @@
     // TODO: check input format and convert only if necessary
     // copy RGBA graphic buffer into temporary ARGB input buffer
     BufferInfo *inputInfo = new BufferInfo;
-    inputInfo->mData = new ABuffer(buf->getWidth() * buf->getHeight() * 4);
+    inputInfo->mData = new MediaCodecBuffer(
+            mInputFormat, new ABuffer(buf->getWidth() * buf->getHeight() * 4));
     ALOGV("Copying surface data into temp buffer.");
     convertRGBAToARGB(
             (uint8_t*)bufPtr, buf->getWidth(), buf->getHeight(),
@@ -813,9 +747,7 @@
     }
 
     mPortEOS[kPortIndexOutput] = true;
-    sp<AMessage> notify = mNotify->dup();
-    notify->setInt32("what", CodecBase::kWhatSignaledInputEOS);
-    notify->post();
+    mCallback->onSignaledInputEOS(OK);
 
     ALOGV("Output stream saw EOS.");
 }
diff --git a/media/libstagefright/filters/RSFilter.cpp b/media/libstagefright/filters/RSFilter.cpp
index b569945..225a375 100644
--- a/media/libstagefright/filters/RSFilter.cpp
+++ b/media/libstagefright/filters/RSFilter.cpp
@@ -19,7 +19,7 @@
 
 #include <utils/Log.h>
 
-#include <media/stagefright/foundation/ABuffer.h>
+#include <media/MediaCodecBuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 
@@ -85,7 +85,7 @@
 }
 
 status_t RSFilter::processBuffers(
-        const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer) {
+        const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer) {
     mAllocIn->copy1DRangeFrom(0, mWidth * mHeight, srcBuffer->data());
     mCallback->processBuffers(mAllocIn.get(), mAllocOut.get());
     mAllocOut->copy1DRangeTo(0, mWidth * mHeight, outBuffer->data());
diff --git a/media/libstagefright/filters/RSFilter.h b/media/libstagefright/filters/RSFilter.h
index c5b5074..3326284 100644
--- a/media/libstagefright/filters/RSFilter.h
+++ b/media/libstagefright/filters/RSFilter.h
@@ -35,7 +35,7 @@
     virtual void reset();
     virtual status_t setParameters(const sp<AMessage> &msg);
     virtual status_t processBuffers(
-            const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer);
+            const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer);
 
 protected:
     virtual ~RSFilter();
diff --git a/media/libstagefright/filters/SaturationFilter.cpp b/media/libstagefright/filters/SaturationFilter.cpp
index ba5f75a..0a1df05 100644
--- a/media/libstagefright/filters/SaturationFilter.cpp
+++ b/media/libstagefright/filters/SaturationFilter.cpp
@@ -19,7 +19,7 @@
 
 #include <utils/Log.h>
 
-#include <media/stagefright/foundation/ABuffer.h>
+#include <media/MediaCodecBuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 
@@ -88,7 +88,7 @@
 }
 
 status_t SaturationFilter::processBuffers(
-        const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer) {
+        const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer) {
     mAllocIn->copy1DRangeFrom(0, mWidth * mHeight, srcBuffer->data());
     mScript->forEach_root(mAllocIn, mAllocOut);
     mAllocOut->copy1DRangeTo(0, mWidth * mHeight, outBuffer->data());
diff --git a/media/libstagefright/filters/SaturationFilter.h b/media/libstagefright/filters/SaturationFilter.h
index 0545021..317e469 100644
--- a/media/libstagefright/filters/SaturationFilter.h
+++ b/media/libstagefright/filters/SaturationFilter.h
@@ -33,7 +33,7 @@
     virtual void reset();
     virtual status_t setParameters(const sp<AMessage> &msg);
     virtual status_t processBuffers(
-            const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer);
+            const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer);
 
 protected:
     virtual ~SaturationFilter() {};
diff --git a/media/libstagefright/filters/SimpleFilter.h b/media/libstagefright/filters/SimpleFilter.h
index 4cd37ef..a3c2d76 100644
--- a/media/libstagefright/filters/SimpleFilter.h
+++ b/media/libstagefright/filters/SimpleFilter.h
@@ -21,11 +21,11 @@
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
 
-struct ABuffer;
-struct AMessage;
-
 namespace android {
 
+struct AMessage;
+class MediaCodecBuffer;
+
 struct SimpleFilter : public RefBase {
 public:
     SimpleFilter() : mWidth(0), mHeight(0), mStride(0), mSliceHeight(0),
@@ -37,7 +37,7 @@
     virtual void reset() = 0;
     virtual status_t setParameters(const sp<AMessage> &msg) = 0;
     virtual status_t processBuffers(
-            const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer) = 0;
+            const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer) = 0;
 
 protected:
     int32_t mWidth, mHeight;
diff --git a/media/libstagefright/filters/ZeroFilter.cpp b/media/libstagefright/filters/ZeroFilter.cpp
index 3f1243c..74b94b7 100644
--- a/media/libstagefright/filters/ZeroFilter.cpp
+++ b/media/libstagefright/filters/ZeroFilter.cpp
@@ -17,7 +17,7 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "ZeroFilter"
 
-#include <media/stagefright/foundation/ABuffer.h>
+#include <media/MediaCodecBuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 
@@ -38,7 +38,7 @@
 }
 
 status_t ZeroFilter::processBuffers(
-        const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer) {
+        const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer) {
     // assuming identical input & output buffers, since we're a copy filter
     if (mInvertData) {
         uint32_t* src = (uint32_t*)srcBuffer->data();
diff --git a/media/libstagefright/filters/ZeroFilter.h b/media/libstagefright/filters/ZeroFilter.h
index bd34dfb..f941cc8 100644
--- a/media/libstagefright/filters/ZeroFilter.h
+++ b/media/libstagefright/filters/ZeroFilter.h
@@ -29,7 +29,7 @@
     virtual void reset() {};
     virtual status_t setParameters(const sp<AMessage> &msg);
     virtual status_t processBuffers(
-            const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer);
+            const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer);
 
 protected:
     virtual ~ZeroFilter() {};
diff --git a/media/libstagefright/foundation/ALooperRoster.cpp b/media/libstagefright/foundation/ALooperRoster.cpp
index 5f11fb6..8a7c3eb 100644
--- a/media/libstagefright/foundation/ALooperRoster.cpp
+++ b/media/libstagefright/foundation/ALooperRoster.cpp
@@ -100,7 +100,7 @@
     }
 }
 
-static void makeFourCC(uint32_t fourcc, char *s) {
+static void makeFourCC(uint32_t fourcc, char *s, size_t bufsz) {
     s[0] = (fourcc >> 24) & 0xff;
     if (s[0]) {
         s[1] = (fourcc >> 16) & 0xff;
@@ -108,7 +108,7 @@
         s[3] = fourcc & 0xff;
         s[4] = 0;
     } else {
-        sprintf(s, "%u", fourcc);
+        snprintf(s, bufsz, "%u", fourcc);
     }
 }
 
@@ -146,7 +146,7 @@
                 if (verboseStats) {
                     for (size_t j = 0; j < handler->mMessages.size(); j++) {
                         char fourcc[15];
-                        makeFourCC(handler->mMessages.keyAt(j), fourcc);
+                        makeFourCC(handler->mMessages.keyAt(j), fourcc, sizeof(fourcc));
                         s.appendFormat("\n    %s: %u",
                                 fourcc,
                                 handler->mMessages.valueAt(j));
diff --git a/media/libstagefright/foundation/AString.cpp b/media/libstagefright/foundation/AString.cpp
index b167543..04fac19 100644
--- a/media/libstagefright/foundation/AString.cpp
+++ b/media/libstagefright/foundation/AString.cpp
@@ -14,6 +14,9 @@
  * limitations under the License.
  */
 
+#define LOG_TAG "AString"
+#include <utils/Log.h>
+
 #include <ctype.h>
 #include <stdarg.h>
 #include <stdio.h>
@@ -40,14 +43,24 @@
     : mData(NULL),
       mSize(0),
       mAllocSize(1) {
-    setTo(s);
+    if (!s) {
+        ALOGW("ctor got NULL, using empty string instead");
+        clear();
+    } else {
+        setTo(s);
+    }
 }
 
 AString::AString(const char *s, size_t size)
     : mData(NULL),
       mSize(0),
       mAllocSize(1) {
-    setTo(s, size);
+    if (!s) {
+        ALOGW("ctor got NULL, using empty string instead");
+        clear();
+    } else {
+        setTo(s, size);
+    }
 }
 
 AString::AString(const String8 &from)
diff --git a/media/libstagefright/foundation/ColorUtils.cpp b/media/libstagefright/foundation/ColorUtils.cpp
index d7439b2..88a8351 100644
--- a/media/libstagefright/foundation/ColorUtils.cpp
+++ b/media/libstagefright/foundation/ColorUtils.cpp
@@ -343,6 +343,23 @@
 }
 
 // static
+ColorAspects ColorUtils::unpackToColorAspects(uint32_t packed) {
+    ColorAspects aspects;
+    aspects.mRange        = (ColorAspects::Range)((packed >> 24) & 0xFF);
+    aspects.mPrimaries    = (ColorAspects::Primaries)((packed >> 16) & 0xFF);
+    aspects.mMatrixCoeffs = (ColorAspects::MatrixCoeffs)((packed >> 8) & 0xFF);
+    aspects.mTransfer     = (ColorAspects::Transfer)(packed & 0xFF);
+
+    return aspects;
+}
+
+// static
+uint32_t ColorUtils::packToU32(const ColorAspects &aspects) {
+    return (aspects.mRange << 24) | (aspects.mPrimaries << 16)
+            | (aspects.mMatrixCoeffs << 8) | aspects.mTransfer;
+}
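+// Bit layout shared by unpackToColorAspects() and packToU32() above: range occupies
+// bits 31..24, primaries bits 23..16, matrix coefficients bits 15..8, transfer bits 7..0.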
+
+// static
 void ColorUtils::setDefaultCodecColorAspectsIfNeeded(
         ColorAspects &aspects, int32_t width, int32_t height) {
     ColorAspects::MatrixCoeffs coeffs;
diff --git a/media/libstagefright/foundation/hexdump.cpp b/media/libstagefright/foundation/hexdump.cpp
index a44d832..872c5f3 100644
--- a/media/libstagefright/foundation/hexdump.cpp
+++ b/media/libstagefright/foundation/hexdump.cpp
@@ -49,7 +49,7 @@
         appendIndent(&line, indent);
 
         char tmp[32];
-        sprintf(tmp, "%08lx:  ", (unsigned long)offset);
+        snprintf(tmp, sizeof(tmp), "%08lx:  ", (unsigned long)offset);
 
         line.append(tmp);
 
@@ -60,7 +60,7 @@
             if (offset + i >= size) {
                 line.append("   ");
             } else {
-                sprintf(tmp, "%02x ", data[offset + i]);
+                snprintf(tmp, sizeof(tmp), "%02x ", data[offset + i]);
                 line.append(tmp);
             }
         }
diff --git a/media/libstagefright/foundation/tests/AData_test.cpp b/media/libstagefright/foundation/tests/AData_test.cpp
new file mode 100644
index 0000000..f014c25
--- /dev/null
+++ b/media/libstagefright/foundation/tests/AData_test.cpp
@@ -0,0 +1,981 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AData_test"
+
+#include <gtest/gtest.h>
+#include <utils/RefBase.h>
+//#include <utils/StrongPointer.h>
+
+#include <media/stagefright/foundation/AData.h>
+#include <media/stagefright/foundation/ABuffer.h>
+
+namespace android {
+
+class ADataTest : public ::testing::Test {
+};
+
+// ============ AUnion
+
+struct Events {
+    int dtor;
+    int ctor_empty;
+    int ctor_copy;
+};
+
+struct EventCounter : public RefBase {
+    EventCounter(int *counter, int magic=1234) : mCounter(counter), mMagic(magic) { }
+    virtual ~EventCounter() { ++*mCounter; mMagic = 0; }
+    int magic() const { return mMagic; }
+private:
+    int *mCounter;
+    int mMagic;
+};
+
+struct DerivedCounter : public EventCounter {
+    DerivedCounter(int *counter, int magic=1234) : EventCounter(counter, magic) { }
+};
+
+TEST_F(ADataTest, AUnion_Test) {
+    AUnion<int, const char *, char> u;
+    u.emplace<int>(4);
+    u.del<int>();
+    EXPECT_EQ(4, u.get<int>()); // verify that del<> is a no-op for trivial types, such as int.
+                                // specifically, verify that it does not clear the object memory
+
+    u.emplace<const char *>("hello");
+    EXPECT_STREQ("hello", u.get<const char *>());
+    u.del<const char *>();
+
+    // u.del<char *>();
+    // u.emplace<const int>(4);
+    u.emplace<void>();
+    u.del<void>();
+
+    u.emplace<int>(~0);
+    u.del<int>();
+    EXPECT_EQ(~0, u.get<int>());
+    u.emplace<char>(0x15);
+    // verify that the rest of the memory after the char is cleared upon construction
+    EXPECT_EQ(0, memcmp((char *)(&u) + sizeof(char), "\0\0\0", 3));
+    EXPECT_EQ(0x15, u.get<char>());
+    u.del<char>();
+
+    AUnion<EventCounter, EventCounter *> d;
+    int destructions = 0;
+
+    d.emplace<EventCounter>(&destructions);
+    d.del<EventCounter>();
+    EXPECT_EQ(1, destructions);
+
+    EventCounter *ctr = new EventCounter(&destructions);
+    d.emplace<EventCounter *>(ctr);
+    d.del<EventCounter *>();
+    EXPECT_EQ(1, destructions);
+
+    delete ctr;
+    EXPECT_EQ(2, destructions);
+
+    AUnion<std::shared_ptr<EventCounter>, std::unique_ptr<EventCounter>> md;
+    md.emplace<std::shared_ptr<EventCounter>>(new EventCounter(&destructions));
+    std::shared_ptr<EventCounter> copy(md.get<std::shared_ptr<EventCounter>>());
+    std::weak_ptr<EventCounter> weak(copy);
+    EXPECT_EQ(2, destructions);
+
+    copy.reset();
+    EXPECT_EQ(2, destructions);
+    md.del<std::shared_ptr<EventCounter>>();
+    EXPECT_EQ(3, destructions);
+    EXPECT_TRUE(weak.expired());
+
+    md.emplace<std::unique_ptr<EventCounter>>(new EventCounter(&destructions));
+    EXPECT_EQ(3, destructions);
+
+    std::unique_ptr<EventCounter> unique = std::move(md.get<std::unique_ptr<EventCounter>>());
+    EXPECT_EQ(3, destructions);
+    EXPECT_FALSE((bool)md.get<std::unique_ptr<EventCounter>>());
+
+    md.del<std::unique_ptr<EventCounter>>();
+    EXPECT_EQ(3, destructions);
+    md.emplace<std::unique_ptr<EventCounter>>(std::move(unique));
+    EXPECT_TRUE((bool)md.get<std::unique_ptr<EventCounter>>());
+    EXPECT_EQ(3, destructions);
+
+    md.del<std::unique_ptr<EventCounter>>();
+    EXPECT_EQ(4, destructions);
+}
+
+TEST_F(ADataTest, AData_StaticTest) {
+    using namespace std;
+
+    static_assert(is_copy_assignable<shared_ptr<EventCounter>>::value, "");
+    static_assert(is_copy_constructible<shared_ptr<EventCounter>>::value, "");
+    static_assert(is_default_constructible<shared_ptr<EventCounter>>::value, "");
+
+    static_assert(is_copy_assignable<weak_ptr<DerivedCounter>>::value, "");
+    static_assert(is_copy_constructible<weak_ptr<DerivedCounter>>::value, "");
+    static_assert(is_default_constructible<weak_ptr<DerivedCounter>>::value, "");
+
+    static_assert(!is_copy_assignable<unique_ptr<DerivedCounter>>::value, "");
+    static_assert(!is_copy_constructible<unique_ptr<DerivedCounter>>::value, "");
+    static_assert(is_default_constructible<unique_ptr<DerivedCounter>>::value, "");
+
+    static_assert(is_copy_assignable<sp<EventCounter>>::value, "");
+    static_assert(is_copy_constructible<sp<EventCounter>>::value, "");
+    static_assert(is_default_constructible<sp<EventCounter>>::value, "");
+
+    static_assert(is_copy_assignable<wp<EventCounter>>::value, "");
+    static_assert(is_copy_constructible<wp<EventCounter>>::value, "");
+    static_assert(is_default_constructible<wp<EventCounter>>::value, "");
+
+    static_assert(is_convertible<shared_ptr<DerivedCounter>, shared_ptr<EventCounter>>::value, "");
+    static_assert(!is_convertible<shared_ptr<EventCounter>, shared_ptr<DerivedCounter>>::value, "");
+
+    static_assert(is_convertible<unique_ptr<DerivedCounter>, unique_ptr<EventCounter>>::value, "");
+    static_assert(!is_convertible<unique_ptr<EventCounter>, unique_ptr<DerivedCounter>>::value, "");
+
+    static_assert(is_convertible<unique_ptr<DerivedCounter>, shared_ptr<EventCounter>>::value, "");
+    static_assert(!is_convertible<shared_ptr<DerivedCounter>, unique_ptr<EventCounter>>::value, "");
+
+    static_assert(is_convertible<weak_ptr<DerivedCounter>, weak_ptr<EventCounter>>::value, "");
+    static_assert(!is_convertible<weak_ptr<EventCounter>, weak_ptr<DerivedCounter>>::value, "");
+
+    static_assert(is_convertible<shared_ptr<DerivedCounter>, weak_ptr<EventCounter>>::value, "");
+    static_assert(!is_convertible<weak_ptr<DerivedCounter>, shared_ptr<EventCounter>>::value, "");
+
+    static_assert(is_convertible<sp<EventCounter>, sp<RefBase>>::value, "");
+    static_assert(is_convertible<sp<RefBase>, sp<EventCounter>>::value, "YES");
+
+    static_assert(is_convertible<wp<EventCounter>, wp<RefBase>>::value, "");
+    static_assert(is_convertible<wp<RefBase>, wp<EventCounter>>::value, "YES");
+
+    static_assert(is_convertible<sp<EventCounter>, wp<RefBase>>::value, "");
+    static_assert(!is_convertible<wp<EventCounter>, sp<RefBase>>::value, "");
+}
+
+TEST_F(ADataTest, AData_SampleTest) {
+    AData<int, float>::Basic data;
+    int i = 1;
+    float f = 7.0f;
+
+    data.set(5);
+    EXPECT_TRUE(data.find(&i));
+    EXPECT_FALSE(data.find(&f));
+    EXPECT_EQ(i, 5);
+
+    data.set(6.0f);
+    EXPECT_FALSE(data.find(&i));
+    EXPECT_TRUE(data.find(&f));
+    EXPECT_EQ(f, 6.0f);
+
+    AData<int, sp<RefBase>>::RelaxedBasic objdata; // relaxed type support
+    sp<ABuffer> buf = new ABuffer(16), buf2;
+    sp<RefBase> obj;
+
+    objdata.set(buf);
+    EXPECT_TRUE(objdata.find(&buf2));
+    EXPECT_EQ(buf, buf2);
+    EXPECT_FALSE(objdata.find(&i));
+    EXPECT_TRUE(objdata.find(&obj));
+    EXPECT_TRUE(obj == buf);
+
+    obj = buf;
+    objdata.set(obj); // storing as sp<RefBase>
+    EXPECT_FALSE(objdata.find(&buf2));  // not stored as ABuffer(!)
+    EXPECT_TRUE(objdata.find(&obj));
+}
+
+struct SampleTypeFlagger {
+    typedef unsigned type;
+    enum Flags : type {
+        kEmpty = 100,
+        kInt,
+        kConstCharPtr,
+        kEventCounter,
+        kEventCounterPointer,
+        kEventCounterSharedPointer,
+        kEventCounterUniquePointer,
+        kEventCounterWeakPointer,
+        kEventCounterSP,
+        kEventCounterWP,
+    };
+    constexpr static type mask = ~Flags(0);
+    constexpr static type flagFor(void*) { return kEmpty; }
+    constexpr static type flagFor(int*) { return kInt; }
+    constexpr static type flagFor(const char**) { return kConstCharPtr; }
+    constexpr static type flagFor(EventCounter*) { return kEventCounter; }
+    constexpr static type flagFor(EventCounter**) { return kEventCounterPointer; }
+    constexpr static
+    type flagFor(std::shared_ptr<EventCounter>*) { return kEventCounterSharedPointer; }
+    constexpr static
+    type flagFor(std::unique_ptr<EventCounter>*) { return kEventCounterUniquePointer; }
+    constexpr static type flagFor(std::weak_ptr<EventCounter>*) { return kEventCounterWeakPointer; }
+    constexpr static type flagFor(sp<EventCounter>*) { return kEventCounterSP; }
+    constexpr static type flagFor(wp<EventCounter>*) { return kEventCounterWP; }
+    constexpr static bool canDeleteAs(type object, type del) { return del == object; }
+    template <typename T> struct store { typedef T as_type; };
+};
+
+TEST_F(ADataTest, AData_SimpleTest) {
+    int _int = 0;
+    const char *_constCharPtr = NULL;
+    AData<int, const char *>::Custom<SampleTypeFlagger> u;
+    EXPECT_FALSE(u.used());
+    EXPECT_FALSE(u.find<int>(&_int));
+    EXPECT_FALSE(u.find<const char *>(&_constCharPtr));
+
+    EXPECT_TRUE(u.set<int>(4));
+    EXPECT_TRUE(u.used());
+    EXPECT_TRUE(u.find<int>(&_int));
+    EXPECT_EQ(4, _int);
+    EXPECT_FALSE(u.find<const char *>(&_constCharPtr));
+    EXPECT_EQ(NULL, _constCharPtr);
+
+    EXPECT_TRUE(u.clear());
+    EXPECT_FALSE(u.used());
+    EXPECT_FALSE(u.find<int>(&_int));
+    EXPECT_FALSE(u.find<const char *>(&_constCharPtr));
+
+    EXPECT_TRUE(u.set<int>(5));
+    EXPECT_TRUE(u.set<int>(6));
+    EXPECT_TRUE(u.find<int>(&_int));
+    EXPECT_EQ(6, _int);
+
+    EXPECT_TRUE(u.set<const char *>("hello"));
+    EXPECT_TRUE(u.used());
+    EXPECT_FALSE(u.find<int>(&_int));
+    EXPECT_TRUE(u.find<const char *>(&_constCharPtr));
+    EXPECT_STREQ("hello", _constCharPtr);
+
+    EXPECT_TRUE(u.clear());
+    EXPECT_FALSE(u.used());
+    EXPECT_FALSE(u.find<int>(&_int));
+    EXPECT_FALSE(u.find<const char *>(&_constCharPtr));
+
+    EXPECT_TRUE(u.set<const char *>("world"));
+    EXPECT_TRUE(u.set<const char *>("!!"));
+    EXPECT_TRUE(u.used());
+    EXPECT_FALSE(u.find<int>(&_int));
+    EXPECT_TRUE(u.find<const char *>(&_constCharPtr));
+    EXPECT_STREQ("!!", _constCharPtr);
+
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_TRUE(u.find(&_constCharPtr));
+}
+
+void set(std::unique_ptr<int> &dst, std::unique_ptr<int> &&src) {
+    dst = std::move(src);
+}
+
+void set(std::unique_ptr<int> &dst, std::unique_ptr<int> &src) {
+    dst = std::move(src);
+}
+
+TEST_F(ADataTest, AData_CopyMoveTest) {
+    int destructions = 0;
+    int _int = 0;
+    std::shared_ptr<EventCounter> _shared;
+    std::unique_ptr<EventCounter> _unique;
+    std::weak_ptr<EventCounter> _weak;
+    const std::shared_ptr<EventCounter> _constShared(new EventCounter(&destructions));
+    const std::unique_ptr<EventCounter> _constUnique = nullptr;
+
+    AData<int, std::weak_ptr<EventCounter>, std::shared_ptr<EventCounter>,
+            std::unique_ptr<EventCounter>>::Basic u;
+
+    // test that data is empty
+    EXPECT_FALSE(u.used());
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_FALSE(u.remove(&_unique));
+    EXPECT_FALSE(u.find(&_weak));
+
+    // test that integer can be stored and read
+    EXPECT_TRUE(u.set<int>(1));
+    EXPECT_TRUE(u.used());
+    EXPECT_TRUE(u.find(&_int));
+    EXPECT_EQ(1, _int);
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_FALSE(u.remove(&_unique));
+    EXPECT_FALSE(u.find(&_weak));
+
+    // test that movable type (unique_ptr) can be moved in and read out, and it moves
+    _unique = std::unique_ptr<EventCounter>(new EventCounter(&destructions, 123));
+    EXPECT_TRUE(u.set(std::move(_unique)));
+    EXPECT_FALSE((bool)_unique);
+    EXPECT_TRUE(u.used());
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_FALSE(u.find(&_weak));
+    EXPECT_TRUE(u.remove(&_unique));
+    EXPECT_TRUE((bool)_unique);
+    if (_unique) {
+        EXPECT_EQ(123, _unique->magic());
+    }
+
+    // the unique value should have been removed but still accessible as nullptr
+    EXPECT_TRUE(u.remove(&_unique));
+    EXPECT_FALSE((bool)_unique);
+    EXPECT_EQ(1, destructions);
+
+    // test that movable-only type (unique_ptr) can be stored without moving (and is still
+    // moved)
+    _unique = std::unique_ptr<EventCounter>(new EventCounter(&destructions, 321));
+    EXPECT_TRUE(u.set(std::move(_unique)));
+    EXPECT_FALSE((bool)_unique);
+    EXPECT_TRUE(u.set(std::unique_ptr<EventCounter>(new EventCounter(&destructions, 1234))));
+    EXPECT_EQ(2, destructions);
+    EXPECT_TRUE(u.remove(&_unique));
+    EXPECT_TRUE((bool)_unique);
+    if (_unique) {
+        EXPECT_EQ(1234, _unique->magic());
+    }
+    EXPECT_TRUE(u.set(std::move(_unique)));
+    EXPECT_EQ(2, destructions);
+    EXPECT_TRUE(u.clear());
+    EXPECT_EQ(3, destructions);
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_FALSE(u.remove(&_unique));
+    EXPECT_FALSE(u.find(&_weak));
+
+    // u.set(_constUnique);
+
+    // test that copiable & movable type (shared_ptr) is copied unless explicitly moved.
+    _shared = std::make_shared<EventCounter>(&destructions, 234);
+    EXPECT_EQ(1L, _shared.use_count());
+    EXPECT_TRUE(u.set(_shared));
+    EXPECT_TRUE((bool)_shared);
+    if (_shared) {
+        EXPECT_EQ(234, _shared->magic());
+    }
+
+    EXPECT_EQ(2L, _shared.use_count());
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.remove(&_unique));
+    EXPECT_FALSE(u.find(&_weak));
+    EXPECT_TRUE(u.find(&_shared));
+    EXPECT_EQ(2L, _shared.use_count());
+    EXPECT_TRUE((bool)_shared);
+    if (_shared) {
+        EXPECT_EQ(234, _shared->magic());
+    }
+
+    // explicitly move in shared_ptr
+    EXPECT_TRUE(u.set(std::move(_shared)));
+    EXPECT_EQ(0, _shared.use_count()); // shared should be nullptr
+    EXPECT_FALSE((bool)_shared);
+    EXPECT_TRUE(u.find(&_shared));
+    EXPECT_EQ(2L, _shared.use_count()); // now both u and _shared contain the object
+    EXPECT_TRUE((bool)_shared);
+    if (_shared) {
+        EXPECT_EQ(234, _shared->magic());
+    }
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.remove(&_unique));
+    EXPECT_FALSE(u.find(&_weak));
+    EXPECT_TRUE(u.find(&_shared));
+    EXPECT_EQ(2L, _shared.use_count()); // still both u and _shared contain the object
+
+    EXPECT_TRUE(u.clear());
+    EXPECT_TRUE(_shared.unique()); // now only _shared contains the object
+
+    EXPECT_TRUE(u.set(_constShared));
+    EXPECT_EQ(2L, _constShared.use_count()); // even though it is const, we can add a use count
+    EXPECT_TRUE(u.find(&_shared));
+    EXPECT_EQ(3L, _shared.use_count()); // now u, _shared and _constShared contain the const object
+    EXPECT_TRUE((bool)_shared);
+    if (_shared) {
+        EXPECT_EQ(1234, _shared->magic());
+    }
+
+    // test that weak pointer can be copied in (support for moving is from C++14 only)
+    _weak = _shared;
+    EXPECT_EQ(_weak.use_count(), _shared.use_count());
+    EXPECT_TRUE(u.set(_weak));
+
+    _weak.reset();
+    EXPECT_EQ(_weak.use_count(), 0);
+
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.remove(&_unique));
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_TRUE(u.find(&_weak));
+    EXPECT_EQ(_weak.use_count(), _shared.use_count());
+    EXPECT_EQ(_weak.lock(), _shared);
+
+    // we can remove a weak pointer multiple times
+    _weak.reset();
+    EXPECT_TRUE(u.find(&_weak));
+    EXPECT_EQ(_weak.use_count(), _shared.use_count());
+    EXPECT_EQ(_weak.lock(), _shared);
+    EXPECT_TRUE(u.clear());
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.remove(&_unique));
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_FALSE(u.find(&_weak));
+};
+
+TEST_F(ADataTest, AData_RelaxedCopyMoveTest) {
+    int destructions = 0;
+    int _int = 0;
+    std::shared_ptr<DerivedCounter> _shared;
+    std::unique_ptr<DerivedCounter> _unique, _unique2;
+    std::weak_ptr<DerivedCounter> _weak;
+    std::shared_ptr<EventCounter> _shared_base;
+    std::unique_ptr<EventCounter> _unique_base;
+    std::weak_ptr<EventCounter> _weak_base;
+    const std::shared_ptr<DerivedCounter> _constShared(new DerivedCounter(&destructions));
+    const std::unique_ptr<DerivedCounter> _constUnique = nullptr;
+
+    AData<int, std::unique_ptr<EventCounter>, std::shared_ptr<EventCounter>,
+            std::weak_ptr<EventCounter>>::RelaxedBasic u;
+
+    // test that data is empty
+    EXPECT_FALSE(u.used());
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_FALSE(u.remove(&_unique));
+    EXPECT_FALSE(u.find(&_weak));
+    EXPECT_FALSE(u.find(&_shared_base));
+    EXPECT_FALSE(u.remove(&_unique_base));
+    EXPECT_FALSE(u.find(&_weak_base));
+
+    // test that integer can be stored and read
+    EXPECT_TRUE(u.set<int>(1));
+    EXPECT_TRUE(u.used());
+    EXPECT_TRUE(u.find(&_int));
+    EXPECT_EQ(1, _int);
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_FALSE(u.remove(&_unique));
+    EXPECT_FALSE(u.find(&_weak));
+    EXPECT_FALSE(u.find(&_shared_base));
+    EXPECT_FALSE(u.remove(&_unique_base));
+    EXPECT_FALSE(u.find(&_weak_base));
+
+    // test that movable type (unique_ptr) can be moved in and read out, and it moves
+    _unique = std::unique_ptr<DerivedCounter>(new DerivedCounter(&destructions, 123));
+    EXPECT_TRUE(u.set(std::move(_unique)));
+    EXPECT_FALSE((bool)_unique);
+    EXPECT_TRUE(u.used());
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_FALSE(u.find(&_weak));
+    EXPECT_TRUE(u.remove(&_unique));
+    EXPECT_TRUE((bool)_unique);
+    if (_unique) {
+        EXPECT_EQ(123, _unique->magic());
+    }
+
+    // the unique value should have been removed but still accessible as nullptr
+    EXPECT_TRUE(u.remove(&_unique));
+    EXPECT_FALSE((bool)_unique);
+    EXPECT_EQ(1, destructions);
+
+    EXPECT_FALSE(u.find(&_shared_base));
+    EXPECT_TRUE(u.remove(&_unique_base));
+    EXPECT_FALSE((bool)_unique_base);
+    EXPECT_FALSE(u.find(&_weak_base));
+
+    // test that movable-only type (unique_ptr) can be stored without moving (and is still
+    // moved)
+    _unique = std::unique_ptr<DerivedCounter>(new DerivedCounter(&destructions, 321));
+    EXPECT_TRUE(u.set(std::move(_unique)));
+    EXPECT_FALSE((bool)_unique);
+    EXPECT_TRUE(u.set(std::unique_ptr<DerivedCounter>(new DerivedCounter(&destructions, 1234))));
+    EXPECT_EQ(2, destructions);
+    EXPECT_TRUE(u.remove(&_unique));
+    EXPECT_TRUE((bool)_unique);
+    if (_unique) {
+        EXPECT_EQ(1234, _unique->magic());
+    }
+    EXPECT_TRUE(u.set(std::move(_unique)));
+    EXPECT_EQ(2, destructions);
+    EXPECT_TRUE(u.clear());
+    EXPECT_EQ(3, destructions);
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_FALSE(u.remove(&_unique));
+    EXPECT_FALSE(u.find(&_weak));
+    EXPECT_FALSE(u.find(&_shared_base));
+    EXPECT_FALSE(u.remove(&_unique_base));
+    EXPECT_FALSE(u.find(&_weak_base));
+
+    // test that unique pointer can be set and removed as base type (but removed as derived only
+    // if it was set as derived type)
+    _unique = std::unique_ptr<DerivedCounter>(new DerivedCounter(&destructions, 321));
+    EXPECT_TRUE(u.set(std::move(_unique)));
+    EXPECT_FALSE((bool)_unique);
+    EXPECT_TRUE(u.remove(&_unique_base));
+    EXPECT_TRUE((bool)_unique_base);
+    if (_unique_base) {
+        EXPECT_EQ(321, _unique_base->magic());
+    }
+    EXPECT_TRUE(u.remove(&_unique));
+    EXPECT_FALSE((bool)_unique);
+
+    EXPECT_TRUE(u.set(std::move(_unique_base)));
+    EXPECT_FALSE((bool)_unique_base);
+    EXPECT_FALSE(u.remove(&_unique));
+    EXPECT_FALSE((bool)_unique);
+    EXPECT_TRUE(u.remove(&_unique_base));
+    EXPECT_TRUE((bool)_unique_base);
+    if (_unique_base) {
+        EXPECT_EQ(321, _unique_base->magic());
+    }
+
+    EXPECT_EQ(3, destructions);
+    EXPECT_TRUE(u.remove(&_unique_base));
+    EXPECT_EQ(4, destructions);
+    EXPECT_FALSE((bool)_unique_base);
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_FALSE(u.find(&_shared_base));
+    EXPECT_FALSE(u.find(&_weak));
+    EXPECT_FALSE(u.find(&_weak_base));
+
+    // u.set(_constUnique);
+
+    // test that copiable & movable type (shared_ptr) is copied unless explicitly moved.
+    _shared = std::make_shared<DerivedCounter>(&destructions, 234);
+    EXPECT_EQ(1L, _shared.use_count());
+    EXPECT_TRUE(u.set(_shared));
+    EXPECT_TRUE((bool)_shared);
+    if (_shared) {
+        EXPECT_EQ(234, _shared->magic());
+    }
+
+    EXPECT_EQ(2L, _shared.use_count());
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.remove(&_unique));
+    EXPECT_FALSE(u.find(&_weak));
+    EXPECT_TRUE(u.find(&_shared));
+    EXPECT_FALSE(u.remove(&_unique_base));
+    EXPECT_FALSE(u.find(&_weak_base));
+    EXPECT_EQ(2L, _shared.use_count());
+    EXPECT_TRUE((bool)_shared);
+    if (_shared) {
+        EXPECT_EQ(234, _shared->magic());
+    }
+
+    // explicitly move in shared_ptr
+    EXPECT_TRUE(u.set(std::move(_shared)));
+    EXPECT_EQ(0, _shared.use_count()); // shared should be nullptr
+    EXPECT_FALSE((bool)_shared);
+    EXPECT_TRUE(u.find(&_shared));
+    EXPECT_EQ(2L, _shared.use_count()); // now both u and _shared contain the object
+    EXPECT_TRUE((bool)_shared);
+    if (_shared) {
+        EXPECT_EQ(234, _shared->magic());
+    }
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.remove(&_unique));
+    EXPECT_FALSE(u.find(&_weak));
+    EXPECT_FALSE(u.remove(&_unique_base));
+    EXPECT_FALSE(u.find(&_weak_base));
+    EXPECT_TRUE(u.find(&_shared));
+    EXPECT_EQ(2L, _shared.use_count()); // still both u and _shared contain the object
+
+    EXPECT_TRUE(u.clear());
+    EXPECT_TRUE(_shared.unique()); // now only _shared contains the object
+
+    EXPECT_TRUE(u.set(_constShared));
+    EXPECT_EQ(2L, _constShared.use_count()); // even though it is const, we can add a use count
+    EXPECT_TRUE(u.find(&_shared));
+    EXPECT_EQ(3L, _shared.use_count()); // now u, _shared and _constShared contain the const object
+    EXPECT_TRUE((bool)_shared);
+    if (_shared) {
+        EXPECT_EQ(1234, _shared->magic());
+    }
+
+    // test that shared pointer can be set and removed as base type (but removed as derived only
+    // if it was set as derived type)
+    EXPECT_TRUE(u.find(&_shared_base));
+    EXPECT_TRUE((bool)_shared_base);
+    if (_shared_base) {
+        EXPECT_EQ(1234, _shared_base->magic());
+    }
+    EXPECT_EQ(4L, _shared.use_count()); // now u, _shared, _constShared and _shared_base contain
+                                        // the const object
+    _shared.reset();
+    EXPECT_EQ(3L, _shared_base.use_count()); // now u, _constShared and _shared_base contain it
+    EXPECT_TRUE(u.clear());
+    EXPECT_EQ(2L, _shared_base.use_count()); // now _constShared and _shared_base contain it
+
+    EXPECT_TRUE(u.set(_shared_base));        // now u also contains it as base class
+    EXPECT_EQ(3L, _shared_base.use_count());
+    EXPECT_FALSE(u.find(&_shared)); // cannot get it as derived type
+    EXPECT_FALSE((bool)_shared);
+    _shared_base.reset();
+    EXPECT_TRUE(u.find(&_shared_base)); // can still get it as base type
+    EXPECT_TRUE((bool)_shared_base);
+    if (_shared_base) {
+        EXPECT_EQ(1234, _shared_base->magic());
+    }
+    _shared = std::static_pointer_cast<DerivedCounter>(_shared_base);
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.remove(&_unique));
+    EXPECT_FALSE(u.remove(&_unique_base));
+    EXPECT_FALSE(u.find(&_weak));
+    EXPECT_FALSE(u.find(&_weak_base));
+
+    // test that weak pointer can be copied in (support for moving is from C++14 only)
+    _weak = _shared;
+    EXPECT_EQ(_weak.use_count(), _shared.use_count());
+    EXPECT_TRUE(u.set(_weak));
+
+    _weak.reset();
+    EXPECT_EQ(_weak.use_count(), 0);
+
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.remove(&_unique));
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_FALSE(u.remove(&_unique_base));
+    EXPECT_FALSE(u.find(&_shared_base));
+    EXPECT_TRUE(u.find(&_weak));
+    EXPECT_EQ(_weak.use_count(), _shared.use_count());
+    EXPECT_EQ(_weak.lock(), _shared);
+
+    // we can remove a weak pointer multiple times
+    _weak.reset();
+    EXPECT_TRUE(u.find(&_weak));
+    EXPECT_EQ(_weak.use_count(), _shared.use_count());
+    EXPECT_EQ(_weak.lock(), _shared);
+    EXPECT_TRUE(u.clear());
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.remove(&_unique));
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_FALSE(u.find(&_weak));
+    EXPECT_FALSE(u.remove(&_unique_base));
+    EXPECT_FALSE(u.find(&_shared_base));
+    EXPECT_FALSE(u.find(&_weak_base));
+
+    // test that weak pointer can be set and removed as base type (but removed as derived only
+    // if it was set as derived type)
+    _weak = _shared;
+    EXPECT_TRUE(u.set(_weak));
+    EXPECT_TRUE(u.find(&_weak_base));
+    EXPECT_FALSE(_weak_base.expired());
+    if (!_weak_base.expired()) {
+        EXPECT_EQ(1234, _weak_base.lock()->magic());
+    }
+    // now _shared, _constShared and _shared_base contain the const object
+    EXPECT_EQ(3L, _weak.use_count());
+    _weak.reset();
+    EXPECT_EQ(3L, _weak_base.use_count()); // _weak did not hold a reference
+    _shared.reset();
+    EXPECT_EQ(2L, _weak_base.use_count()); // now u, _constShared and _shared_base contain it
+    EXPECT_TRUE(u.clear());
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.remove(&_unique));
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_FALSE(u.find(&_weak));
+    EXPECT_FALSE(u.remove(&_unique_base));
+    EXPECT_FALSE(u.find(&_shared_base));
+    EXPECT_FALSE(u.find(&_weak_base));
+
+    EXPECT_TRUE(u.set(_weak_base)); // now u also contains it as base class
+    EXPECT_FALSE(u.find(&_weak));   // cannot get it as derived type
+    EXPECT_TRUE(_weak.expired());
+    _weak_base.reset();
+    EXPECT_TRUE(u.find(&_weak_base)); // can still get it as base type
+    EXPECT_FALSE(_weak_base.expired());
+    if (!_weak_base.expired()) {
+        EXPECT_EQ(1234, _weak_base.lock()->magic());
+    }
+};
+
+TEST_F(ADataTest, AData_AndroidSpTest) {
+    int destructions = 0;
+    int _int = 0;
+    sp<EventCounter> _shared;
+    wp<EventCounter> _weak;
+    const sp<EventCounter> _constShared(new EventCounter(&destructions));
+
+    AData<int, sp<EventCounter>, wp<EventCounter>>::Strict<uint8_t> u;
+
+    // test that data is empty
+    EXPECT_FALSE(u.used());
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_FALSE(u.find(&_weak));
+
+    // test that integer can be stored and read
+    EXPECT_TRUE(u.set<int>(1));
+    EXPECT_TRUE(u.used());
+    EXPECT_TRUE(u.find(&_int));
+    EXPECT_EQ(1, _int);
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_FALSE(u.find(&_weak));
+
+    // test that copiable & movable type (shared_ptr) is copied unless explicitly moved.
+    _shared = new EventCounter(&destructions, 234);
+    _weak = _shared; // used for tracking #234
+
+    EXPECT_TRUE(u.set(_shared));
+    EXPECT_TRUE((bool)_shared.get());
+    if (_shared.get()) {
+        EXPECT_EQ(234, _shared->magic());
+    }
+
+    _shared.clear();
+    EXPECT_EQ(NULL, _shared.get());
+    EXPECT_NE(nullptr, _weak.promote().get()); // u still holds object
+
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.find(&_weak));
+    EXPECT_TRUE(u.find(&_shared)); // now u and _shared both hold object
+    EXPECT_TRUE((bool)_shared.get());
+    if (_shared.get()) {
+        EXPECT_EQ(234, _shared->magic());
+    }
+    // verify the find did not move out object
+    _shared.clear();
+    EXPECT_EQ(NULL, _shared.get());
+    EXPECT_NE(nullptr, _weak.promote().get()); // u still holds object
+    EXPECT_TRUE(u.find(&_shared)); // now u and _shared both hold object
+    if (_shared.get()) {
+        EXPECT_EQ(234, _shared->magic());
+    }
+
+    // verify that we can set object multiple times
+    EXPECT_TRUE(u.set(_shared));
+
+    // explicitly move in sp
+    EXPECT_TRUE(u.set(std::move(_shared)));
+    EXPECT_FALSE((bool)_shared.get()); // android also clears sp<> on move...
+    EXPECT_TRUE(u.find(&_shared)); // still can get it back
+    EXPECT_TRUE((bool)_shared.get());
+    if (_shared.get()) {
+        EXPECT_EQ(234, _shared->magic());
+    }
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.find(&_weak));
+
+    EXPECT_TRUE(u.used());
+    EXPECT_TRUE(u.clear()); // now only _shared contains the object
+    EXPECT_FALSE(u.used());
+
+    // we still hold a copy
+    EXPECT_TRUE((bool)_shared.get());
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.find(&_shared)); // _shared still contains the object
+
+    EXPECT_TRUE(u.set(_constShared));
+    EXPECT_TRUE(u.find(&_shared)); // now _shared contains _constShared
+    EXPECT_EQ(NULL, _weak.promote().get()); // original _shared is now lost
+
+    EXPECT_TRUE((bool)_shared.get());
+    if (_shared.get()) {
+        EXPECT_EQ(1234, _shared->magic());
+    }
+    EXPECT_TRUE(u.clear());
+
+    // test that wp can be copied in
+    _weak = _shared;
+    EXPECT_TRUE(u.set(_weak));
+
+    _weak.clear();
+
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_TRUE(u.find(&_weak));
+    EXPECT_EQ(_weak.promote(), _shared);
+
+    // we can remove a weak pointer multiple times
+    _weak.clear();
+    EXPECT_TRUE(u.find(&_weak));
+    EXPECT_EQ(_weak.promote(), _shared);
+    EXPECT_TRUE(u.clear());
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_FALSE(u.find(&_weak));
+};
+
+TEST_F(ADataTest, AData_RelaxedAndroidSpTest) {
+    int destructions = 0;
+    int _int = 0;
+    sp<EventCounter> _shared;
+    wp<EventCounter> _weak;
+    sp<RefBase> _shared_base;
+    wp<RefBase> _weak_base;
+    const sp<EventCounter> _constShared(new EventCounter(&destructions));
+
+    AData<int, sp<RefBase>, wp<RefBase>>::Relaxed<uint16_t> u;
+
+    // test that data is empty
+    EXPECT_FALSE(u.used());
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_FALSE(u.find(&_weak));
+    EXPECT_FALSE(u.find(&_shared_base));
+    EXPECT_FALSE(u.find(&_weak_base));
+
+    // test that integer can be stored and read
+    EXPECT_TRUE(u.set<int>(1));
+    EXPECT_TRUE(u.used());
+    EXPECT_TRUE(u.find(&_int));
+    EXPECT_EQ(1, _int);
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_FALSE(u.find(&_weak));
+    EXPECT_FALSE(u.find(&_shared_base));
+    EXPECT_FALSE(u.find(&_weak_base));
+
+    // test that copiable & movable type (shared_ptr) is copied unless explicitly moved.
+    _shared = new EventCounter(&destructions, 234);
+    _weak = _shared; // used for tracking #234
+
+    EXPECT_TRUE(u.set(_shared));
+    EXPECT_TRUE((bool)_shared.get());
+    if (_shared.get()) {
+        EXPECT_EQ(234, _shared->magic());
+    }
+
+    _shared.clear();
+    EXPECT_EQ(NULL, _shared.get());
+    EXPECT_NE(nullptr, _weak.promote().get()); // u still holds object
+
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_TRUE(u.find(&_shared)); // now u and _shared both hold object
+    EXPECT_TRUE((bool)_shared.get());
+    if (_shared.get()) {
+        EXPECT_EQ(234, _shared->magic());
+    }
+    // verify the find did not move out object
+    _shared.clear();
+    EXPECT_EQ(NULL, _shared.get());
+    EXPECT_NE(nullptr, _weak.promote().get()); // u still holds object
+    EXPECT_TRUE(u.find(&_shared)); // now u and _shared both hold object
+    if (_shared.get()) {
+        EXPECT_EQ(234, _shared->magic());
+    }
+
+    // verify that we can set object multiple times
+    EXPECT_TRUE(u.set(_shared));
+
+    // explicitly move in sp
+    EXPECT_TRUE(u.set(std::move(_shared)));
+    EXPECT_FALSE((bool)_shared.get()); // android also clears sp<> on move...
+    EXPECT_TRUE(u.find(&_shared)); // still can get it back
+    EXPECT_TRUE((bool)_shared.get());
+    if (_shared.get()) {
+        EXPECT_EQ(234, _shared->magic());
+    }
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.find(&_weak));
+    EXPECT_FALSE(u.find(&_weak_base));
+
+    EXPECT_TRUE(u.used());
+    EXPECT_TRUE(u.clear()); // now only _shared contains the object
+    EXPECT_FALSE(u.used());
+
+    // we still hold a copy
+    EXPECT_TRUE((bool)_shared.get());
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.find(&_shared)); // _shared still contains the object
+
+    EXPECT_TRUE(u.set(_constShared));
+    EXPECT_TRUE(u.find(&_shared)); // now _shared contains _constShared
+    EXPECT_EQ(NULL, _weak.promote().get()); // original _shared is now lost
+
+    EXPECT_TRUE((bool)_shared.get());
+    if (_shared.get()) {
+        EXPECT_EQ(1234, _shared->magic());
+    }
+    EXPECT_TRUE(u.clear());
+
+    // test that shared pointer can be set and removed as base type (but removed as derived only
+    // if it was set as derived type)
+    EXPECT_TRUE(u.set(_constShared));
+    EXPECT_TRUE(u.find(&_shared_base));
+    EXPECT_TRUE((bool)_shared_base.get());
+    if (_shared_base.get()) {
+        EXPECT_EQ(1234, static_cast<EventCounter*>(_shared_base.get())->magic());
+    }
+    _shared.clear();
+    EXPECT_TRUE(u.clear());
+    EXPECT_TRUE((bool)_shared_base.get());
+    if (_shared_base.get()) {
+        EXPECT_EQ(1234, static_cast<EventCounter*>(_shared_base.get())->magic());
+    }
+
+    EXPECT_TRUE(u.set(_shared_base)); // now u contains it as base class
+    EXPECT_TRUE((bool)_shared_base.get());
+    EXPECT_FALSE(u.find(&_shared)); // cannot get it as derived type
+    EXPECT_FALSE((bool)_shared.get());
+    _shared_base.clear();
+    EXPECT_TRUE(u.find(&_shared_base)); // can still get it as base type
+    EXPECT_TRUE((bool)_shared_base.get());
+    if (_shared_base.get()) {
+        EXPECT_EQ(1234, static_cast<EventCounter*>(_shared_base.get())->magic());
+    }
+    _shared = static_cast<EventCounter*>(_shared_base.get());
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.find(&_weak));
+    EXPECT_FALSE(u.find(&_weak_base));
+
+    // test that wp can be copied in
+    _weak = _shared;
+    EXPECT_TRUE(u.set(_weak));
+
+    _weak.clear();
+
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_FALSE(u.find(&_shared_base));
+    EXPECT_TRUE(u.find(&_weak));
+    EXPECT_EQ(_weak.promote(), _shared);
+
+    // we can remove a weak pointer multiple times
+    _weak.clear();
+    EXPECT_TRUE(u.find(&_weak));
+    EXPECT_EQ(_weak.promote(), _shared);
+    EXPECT_TRUE(u.clear());
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_FALSE(u.find(&_weak));
+    EXPECT_FALSE(u.find(&_shared_base));
+    EXPECT_FALSE(u.find(&_weak_base));
+
+    // test that weak pointer can be set and removed as base type (but removed as derived only
+    // if it was set as derived type)
+    _weak = _shared;
+    EXPECT_TRUE(u.set(_weak));
+    EXPECT_TRUE(u.find(&_weak_base));
+    EXPECT_TRUE(_weak_base.promote().get() == _shared.get());
+
+    _weak.clear();
+    _shared.clear();
+    EXPECT_TRUE(u.clear());
+    EXPECT_FALSE(u.find(&_int));
+    EXPECT_FALSE(u.find(&_shared));
+    EXPECT_FALSE(u.find(&_weak));
+    EXPECT_FALSE(u.find(&_shared_base));
+    EXPECT_FALSE(u.find(&_weak_base));
+
+    EXPECT_TRUE(u.set(_weak_base)); // now u also contains it as base class
+    EXPECT_FALSE(u.find(&_weak));   // cannot get it as derived type
+    EXPECT_FALSE(_weak.promote().get());
+    _weak_base.clear();
+    EXPECT_TRUE(u.find(&_weak_base)); // can still get it as base type
+    EXPECT_TRUE(_weak_base.promote().get());
+    if (_weak_base.promote().get()) {
+        EXPECT_EQ(1234, static_cast<EventCounter*>(_weak_base.promote().get())->magic());
+    }
+};
+
+} // namespace android
diff --git a/media/libstagefright/foundation/tests/Android.mk b/media/libstagefright/foundation/tests/Android.mk
new file mode 100644
index 0000000..d741c6f
--- /dev/null
+++ b/media/libstagefright/foundation/tests/Android.mk
@@ -0,0 +1,35 @@
+# Build the unit tests.
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+
+LOCAL_MODULE := sf_foundation_test
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_SRC_FILES := \
+	AData_test.cpp \
+	Flagged_test.cpp \
+	TypeTraits_test.cpp \
+	Utils_test.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+	libstagefright_foundation \
+	libutils \
+
+LOCAL_C_INCLUDES := \
+	frameworks/av/include \
+
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
+
+include $(BUILD_NATIVE_TEST)
+
+# Include subdirectory makefiles
+# ============================================================
+
+# If we're building with ONE_SHOT_MAKEFILE (mm, mmm), then what the framework
+# team really wants is to build the stuff defined by this makefile.
+ifeq (,$(ONE_SHOT_MAKEFILE))
+include $(call first-makefiles-under,$(LOCAL_PATH))
+endif
diff --git a/media/libstagefright/foundation/tests/Flagged_test.cpp b/media/libstagefright/foundation/tests/Flagged_test.cpp
new file mode 100644
index 0000000..3c90699
--- /dev/null
+++ b/media/libstagefright/foundation/tests/Flagged_test.cpp
@@ -0,0 +1,639 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Flagged_test"
+
+#include <gtest/gtest.h>
+
+#include <media/stagefright/foundation/Flagged.h>
+
+namespace android {
+
+/**
+ * Helper template that can be used to print values in static_assert error messages.
+ *
+ * Use integers here.
+ */
+template<bool, int ...N>
+struct _print_as_warning { };
+
+template<int ...N>
+struct _print_as_warning<true, N...> : std::true_type { };
+
+#define static_assert_equals(a, b, msg) \
+static_assert(_print_as_warning<(a) == (b), a, b>::value, msg)
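+// When (a) != (b) the primary _print_as_warning template is selected; it has no ::value
+// member, so the resulting compile error names _print_as_warning<false, a, b> and thereby
+// shows both integer values.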
+
+class FlaggedTest : public ::testing::Test {
+protected:
+    // empty structs
+    struct A0 { };
+    struct A1 { };
+    struct A_A0 : public A0 { };
+
+    // simple struct
+    struct BB {
+        int32_t i;
+        uint32_t u;
+    };
+
+    // struct inheriting from A0
+    struct BB_A0 : public A0 {
+        int32_t i;
+        uint32_t u;
+    };
+
+    // struct inheriting from struct inheriting A0
+    struct BB_AA0 : public A_A0 {
+        int32_t i;
+        uint32_t u;
+    };
+
+    // struct that wraps
+    struct WBBA0 {
+        BB_A0 b;
+    };
+
+    struct WBBA0_A1 : public A1 {
+        BB_A0 b;
+    };
+
+    struct WBBA0_A0 : public A0 {
+        BB_A0 b;
+    };
+
+    struct WBB_A0 : public A0 {
+        BB b;
+    };
+
+    struct WBBA0_AA0 : public A_A0 {
+        BB_A0 b;
+    };
+
+    struct WBBAA0_A0 : public A0 {
+        BB_AA0 b;
+    };
+
+    struct WWBBA0_A0 : public A0 {
+        WBBA0 b;
+    };
+};
+
+/**
+ * This test is here to confirm the handling of wrapping classes that inherit from an interface
+ * while also inheriting from that same interface. While we no longer use this construct, we want
+ * to track if this defect is ever fixed.
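+ * (The size grows in that case because the two subobjects of the same empty base type
+ * must have distinct addresses, which defeats the empty-base optimization.)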
+ */
+TEST_F(FlaggedTest, StaticSanityTests) {
+    static_assert(sizeof(A0) == 1, "");
+    static_assert(sizeof(A1) == 1, "");
+    static_assert(sizeof(A_A0) == 1, "");
+
+    static constexpr size_t size = sizeof(BB); // original [pair]
+
+    // inheriting from A0 does not increase size
+    static_assert(sizeof(BB_A0) == size, ""); // [pair]:A0
+    static_assert(sizeof(BB_AA0) == size, ""); // [pair]:[:A0]
+
+     // wrapping a class that inherits from A0 does not increase size
+    static_assert(sizeof(WBBA0) == size, ""); // [ [pair]:[:A0] ]
+
+    // wrapping a class that inherits from A0 while also inheriting from A1 does not increase size
+    static_assert(sizeof(WBBA0_A1) == size, ""); // [ [pair]:A0 ]:A1
+
+    // wrapping a class that inherits from A0 while also inheriting from A0 DOES increase size
+    EXPECT_GT(sizeof(WBBA0_A0), size); // [ [pair]:A0 ]:A0
+
+    // wrapping a class that does not inherit from A0 while inheriting from A0 does not increase
+    // size
+    static_assert(sizeof(WBB_A0) == size, ""); // [[pair]]:A0
+
+    // wrapping a class that inherits from A0 while also inheriting from a class that inherits
+    // from A0 does increase size
+    EXPECT_GT(sizeof(WBBA0_AA0), size); // [ [pair]:A0 ]:[:A0]
+
+    // wrapping a class that indirectly inherits from A0 while also inheriting from A0 does
+    // increase size
+    EXPECT_GT(sizeof(WBBAA0_A0), size); // [ [pair]:[:A0] ]:A0
+
+    // wrapping a class that inherits from A0 while also inheriting A0 does increase size
+    EXPECT_GT(sizeof(WWBBA0_A0), size); //  [ [pair]:A0 ]:A0
+}
+
+enum FLAG : int32_t {
+    kMask0 = 0x0FF,
+    kFlag0_A = 0x0AA,
+    kFlag0_B = 0x0BB,
+    kFlag0_C = 0x0CC,
+    kMask1 = 0xFF0,
+    kFlag1_A = 0xAA0,
+    kFlag1_B = 0xBB0,
+    kFlag1_C = 0xCC0,
+    kMaskCommon = 0x0F0,
+};
+
+TEST_F(FlaggedTest, BasicExample) {
+    enum SafeFlags : uint32_t {
+      kUnsafe,
+      kSafe,
+      kSafeMask = _Flagged_helper::minMask(kSafe),
+    };
+    typedef Flagged<int32_t, SafeFlags, kSafeMask> safeInt32;
+
+    safeInt32 a(kUnsafe);
+    a.setFlags(kSafe);
+    a.get() = 15;
+    EXPECT_EQ(a.flags(), kSafe);
+    EXPECT_EQ(a.get(), 15);
+
+    enum OriginFlags : uint32_t {
+      kUnknown,
+      kConst,
+      kCalculated,
+      kComponent,
+      kApplication,
+      kFile,
+      kBinder,
+      kOriginMask = _Flagged_helper::minMask(kBinder),
+    };
+    typedef Flagged<safeInt32, OriginFlags, kOriginMask>
+             trackedSafeInt32;
+
+    static_assert(sizeof(trackedSafeInt32) == sizeof(safeInt32), "");
+
+    trackedSafeInt32 b(kConst, kSafe, 1);
+    EXPECT_EQ(b.flags(), kConst);
+    EXPECT_EQ(b.get().flags(), kSafe);
+    EXPECT_EQ(b.get().get(), 1);
+    b.setFlags(kCalculated);
+    volatile bool overflow = true;
+    b.get().setFlags(overflow ? kUnsafe : kSafe);
+
+    enum ValidatedFlags : uint32_t {
+      kUnsafeV = kUnsafe,
+      kSafeV = kSafe,
+      kValidated = kSafe | 2,
+      kSharedMaskV = kSafeMask,
+      kValidatedMask = _Flagged_helper::minMask(kValidated),
+    };
+    typedef Flagged<safeInt32, ValidatedFlags, kValidatedMask, kSharedMaskV> validatedInt32;
+
+    validatedInt32 v(kUnsafeV, kSafe, 10);
+    EXPECT_EQ(v.flags(), kUnsafeV);
+    EXPECT_EQ(v.get().flags(), kUnsafe);  // !kUnsafeV overrides kSafe
+    EXPECT_EQ(v.get().get(), 10);
+    v.setFlags(kValidated);
+    EXPECT_EQ(v.flags(), kValidated);
+    EXPECT_EQ(v.get().flags(), kSafe);
+    v.get().setFlags(kUnsafe);
+    EXPECT_EQ(v.flags(), 2);  // NOTE: sharing masks with enums allows strange situations to occur
+}
+
+TEST_F(FlaggedTest, _Flagged_helper_Test) {
+    using helper = _Flagged_helper;
+
+    using i32 = int32_t;
+    using u32 = uint32_t;
+    using u8 = uint8_t;
+
+    // base2
+    static_assert(Flagged<i32, u32, 0u, 0u, 0>::sFlagMask == 0u, "");
+    static_assert(Flagged<i32, u32, 0u, 0u, 0>::sFlagShift == 0, "");
+    static_assert(Flagged<i32, u32, 0u, 0u, 0>::sEffectiveMask == 0u, "");
+
+    static_assert(Flagged<i32, u32, 0u, 0u, 10>::sFlagMask == 0u, "");
+    static_assert(Flagged<i32, u32, 0u, 0u, 10>::sFlagShift == 10, "");
+    static_assert(Flagged<i32, u32, 0u, 0u, 10>::sEffectiveMask == 0u, "");
+
+    static_assert(Flagged<i32, u32, 0u, 0u, -1>::sFlagMask == 0u, "");
+    static_assert(Flagged<i32, u32, 0u, 0u, -1>::sFlagShift == 0, "");
+    static_assert(Flagged<i32, u32, 0u, 0u, -1>::sEffectiveMask == 0u, "");
+
+    static_assert(Flagged<i32, u32, 99u, 0u, 0>::sFlagMask == 99u, "");
+    static_assert(Flagged<i32, u32, 99u, 0u, 0>::sFlagShift == 0, "");
+    static_assert(Flagged<i32, u32, 99u, 0u, 0>::sEffectiveMask == 99u, "");
+
+    static_assert(Flagged<i32, u32, 0x99u, 0u, 12>::sFlagMask == 0x99u, "");
+    static_assert(Flagged<i32, u32, 0x99u, 0u, 12>::sFlagShift == 12, "");
+    static_assert(Flagged<i32, u32, 0x99u, 0u, 12>::sEffectiveMask == 0x99000u, "");
+
+    static_assert(Flagged<i32, u32, 99u, 0u, -1>::sFlagMask == 99u, "");
+    static_assert(Flagged<i32, u32, 99u, 0u, -1>::sFlagShift == 0, "");
+    static_assert(Flagged<i32, u32, 99u, 0u, -1>::sEffectiveMask == 99u, "");
+
+    // mask_of<T, Flag>
+    // also Flagged<> no default
+    typedef Flagged<i32, u32, 0x800F /* mask */, 0 /* shared mask */, 0 /* shift */> i32_800f_0;
+    typedef Flagged<i32, u32, 0x800F /* mask */, 0 /* shared mask */, 4 /* shift */> i32_800f_4;
+    // this also tests that these types can be instantiated
+    static_assert(sizeof(i32_800f_0) >= sizeof(i32) + sizeof(u32),
+                  "should be at least size of component types");
+    static_assert(sizeof(i32_800f_4) == sizeof(i32_800f_0), "regardless of shift");
+    static_assert(!i32_800f_0::sFlagCombined, "");
+    static_assert(!i32_800f_4::sFlagCombined, "");
+
+    static_assert(helper::mask_of<i32_800f_0, u32>::value == 0x800F, "incorrect mask");
+    static_assert(helper::mask_of<i32_800f_0, i32>::value == 0,
+                  "mask should be 0 when types mismatch");
+    static_assert(helper::mask_of<i32_800f_0, u32>::effective_value == 0x800F, "incorrect mask");
+    static_assert(helper::mask_of<i32_800f_0, i32>::effective_value == 0,
+                  "mask should be 0 when types mismatch");
+    static_assert(helper::mask_of<i32_800f_0, u32>::shift == 0, "incorrect shift");
+    static_assert(helper::mask_of<i32_800f_0, i32>::shift == 0,
+                  "shift should be 0 when types mismatch");
+
+    static_assert(helper::mask_of<i32_800f_4, u32>::value == 0x800F, "incorrect mask");
+    static_assert(helper::mask_of<i32_800f_4, i32>::value == 0,
+                  "mask should be 0 when types mismatch");
+    static_assert(helper::mask_of<i32_800f_4, u32>::effective_value == 0x800F0, "incorrect mask");
+    static_assert(helper::mask_of<i32_800f_4, i32>::effective_value == 0,
+                  "mask should be 0 when types mismatch");
+    static_assert(helper::mask_of<i32_800f_4, u32>::shift == 4, "incorrect shift");
+    static_assert(helper::mask_of<i32_800f_4, i32>::shift == 0,
+                  "shift should be 0 when types mismatch");
+    static_assert(helper::mask_of<i32, u32>::value == 0, "mask should be 0 if not masked");
+    static_assert(helper::mask_of<i32, i32>::value == 0, "mask should be 0 if not masked");
+
+    // lshift(value, n)
+    static_assert(helper::lshift(0U, 0) == 0U, "");
+    static_assert(helper::lshift(0U, 30) == 0U, "");
+    static_assert(helper::lshift(1U, 0) == 1U, "");
+    static_assert(helper::lshift(1U, 10) == 1024U, "");
+    static_assert(helper::lshift(10U, 10) == 10240U, "");
+    static_assert(helper::lshift(10, 10) == 10240, "");
+    static_assert(helper::lshift(-10, 0) == -10, "");
+    // static_assert(helper::lshift(-10, 10) == -10240, ""); // error: left shift of negative value
+
+    // minMask(maxValue)
+    static_assert(helper::minMask(0U) == 0U, "lowest 0 bits");
+    static_assert(helper::minMask(1U) == 1U, "lowest 1 bit");
+    static_assert(helper::minMask(2U) == 3U, "lowest 2 bits");
+    static_assert(helper::minMask(3U) == 3U, "lowest 2 bits");
+    static_assert(helper::minMask(4U) == 7U, "lowest 3 bits");
+    static_assert(helper::minMask(~0U) == ~0U, "all bits");
+    // static_assert(helper::minMask(10) == 0xF, "all bits"); // error: must be unsigned
+
+    // topBits(n)
+    static_assert(helper::topBits<u32>(0) == 0U, "top 0 bit");
+    static_assert(helper::topBits<u32>(1) == 0x80000000U, "top 1 bit");
+    static_assert(helper::topBits<u32>(2) == 0xC0000000U, "top 2 bits");
+    static_assert(helper::topBits<u32>(12) == 0xFFF00000U, "top 12 bits");
+    static_assert(helper::topBits<u32>(32) == 0xFFFFFFFFU, "all bits");
+    // static_assert(helper::topBits<u32>(33) == 0xFFFFFFFFU, ""); // should OVERFLOW
+
+    static_assert(helper::topBits<u8>(0) == 0U, "top 0 bit");
+    static_assert(helper::topBits<u8>(1) == 0x80U, "top 1 bit");
+    static_assert(helper::topBits<u8>(2) == 0xC0U, "top 2 bit");
+    static_assert(helper::topBits<u8>(8) == 0xFFU, "all bits");
+    // static_assert(helper::topBits<u8>(9) == 0xFFU, ""); // should OVERFLOW
+
+    // getShift(mask, base, shared, base-shift, base-effective)
+    static_assert(helper::getShift(0u, 0u, 0u, 0, 0u) == 0, "no flag require no shift");
+    static_assert(helper::getShift(0u, 0u, 1u, 0, 0u) == -1,
+                  "shared must be within mask and base mask");
+    static_assert(helper::getShift(0u, 1u, 1u, 0, 1u) == -1, "shared must be within mask");
+    static_assert(helper::getShift(0u, 1u, 0u, 0, 1u) == 0,
+                  "no flags require no shift even with base mask");
+    static_assert(helper::getShift(0u, 1u, 0u, 1, 2u) == 0,
+                  "no flags require no shift even with shifted base mask");
+    static_assert(helper::getShift(1u, 0u, 0u, 0, 0u) == 0, "no base mask requires no shift");
+    static_assert(helper::getShift(1u, 1u, 0u, 0, 1u) == 1,
+                  "overlapping mask and basemask requires shift");
+    static_assert(helper::getShift(1u, 1u, 0u, 0, 1u) == 1,
+                  "overlapping mask and basemask requires shift");
+    static_assert(helper::getShift(1u, 1u, 1u, 0, 1u) == 0,
+                  "shared mask requires using base shift");
+    static_assert(helper::getShift(1u, 1u, 1u, 1, 2u) == 1,
+                  "shared mask requires using base shift");
+    static_assert(helper::getShift(3u, 5u, 1u, 0, 5u) == 0,
+                  "mask and basemask that overlap only in shared region requires no shift");
+    static_assert(helper::getShift(3u, 7u, 1u, 0, 7u) == -1,
+                  "mask and basemask must not overlap in more than shared region");
+    static_assert(helper::getShift(1u, 0u, 1u, 0, 0u) == -1, "shared must be within base mask");
+
+    static_assert(helper::getShift(0u, 1u, 0u, 1, 1u) == -2, "effective mask must cover base mask");
+    static_assert(helper::getShift(0u, 5u, 0u, 1, 2u) == -2, "effective mask must cover base mask");
+    static_assert(helper::getShift(0u, 5u, 0u, 1, 10u) == 0, "");
+    static_assert(helper::getShift(0u, 5u, 0u, 1, 31u) == 0,
+                  "effective mask can be larger than base mask");
+
+    static_assert(helper::getShift(0x800Fu, 0x800Fu, 0x800Fu, 0, 0x800Fu) == 0,
+                  "(0x800F << 0) & 0x800F == 0x800F");
+    static_assert(helper::getShift(0x800Fu, 0x800Fu, 0x800Fu, 16, 0x800F0000u) == 16,
+                  "(0x800F << 0) & 0x800F == 0x800F");
+    static_assert(helper::getShift(0x1800Fu, 0x800Fu, 0x800Fu, 0, 0x800Fu) == 0,
+                  "(0x1800F << 0) & 0x800F == 0x800F");
+    static_assert(helper::getShift(0x1800Fu, 0x800Fu, 0x800Fu, 16, 0x800F0000u) == -1,
+                  "(0x1800F << 16) overflows");
+
+    // verify that when not sharing masks, effective mask makes the difference
+    static_assert(helper::getShift(0x800Fu, 0u, 0u, 0, 0x800Fu) == 4,
+                  "(0x800F << 4) & 0x800F == 0");
+    static_assert(helper::getShift(0x800Fu, 0x2u, 0u, 0, 0x8002u) == 2,
+                  "(0x800F << 2) & 0x8002 == 0");
+    static_assert(helper::getShift(0x800Fu, 0x1u, 0u, 15, 0x8001u) == 1,
+                  "(0x800F << 1) & 0x8001 == 0");
+    static_assert(helper::getShift(0x800Fu, 0x800Fu, 0u, 16, 0x800F0000u) == 0,
+                  "0x800F & 0x800F0000 == 0");
+    static_assert(helper::getShift(0x800Fu, 0x800F8000u, 0u, 0, 0x800F8000u) == 5,
+                  "(0x800F << 5) & 0x800F8000 == 0");
+    static_assert(helper::getShift(0x800Fu, 0xF0000u, 0u, 0, 0x800F8000u) == 5,
+                  "(0x800F << 5) & 0x800F8000 == 0");
+    static_assert(helper::getShift(0x800Fu, 0x1Fu, 0u, 15, 0x800F8000u) == 5,
+                  "(0x800F << 5) & 0x800F8000 == 0");
+    static_assert(helper::getShift(0xFFu, 0x80808080u, 0u, 0, 0x80808080u) == -1,
+                  "0xFF always overlaps with 0x80808080");
+    static_assert(helper::getShift(0xFFu, 0x10001000u, 0u, 3, 0x80808080u) == -1,
+                  "0xFF always overlaps with 0x80808080");
+    static_assert(helper::getShift(0xFFu, 0x80808040u, 0u, 0, 0x80808040u) == 7,
+                  "(0xFF << 7) & 0x 80808040 == 0");
+
+    // verify getShift (mask must be positive or no shift can be required)
+    static_assert(helper::getShift(0xFF, 0x40808040, 0, 0, 0x40808040) == 7, "");
+    static_assert(helper::getShift((i32)0x800000FF, 0x40808040, 0, 0, 0x40808040) == -1, "");
+    static_assert(helper::getShift(0x100000FF, 0x40808040, 0, 0, 0x40808040) == -1, "");
+    static_assert(helper::getShift(0xFF, (i32)0x80808040, 0, 0, (i32)0x80808040) == 7, "");
+    static_assert(helper::getShift((i32)0x80007F80, 0x40808040, 0, 0, 0x40808040) == 0, "");
+
+    // shared mask can also be negative (but no shift can be required)
+    static_assert(helper::getShift((i32)0x80007F80, (i32)0xC0808040, (i32)0x80000000,
+                                   0, (i32)0xC0808040) == 0, "");
+    static_assert(helper::getShift((i32)0x80007F80, (i32)0xC0808040, (i32)0xC0000000,
+                                   0, (i32)0xC0808040) == -1, "");
+    static_assert(helper::getShift((i32)0x80007F80, (i32)0x60404020, (i32)0x60000000,
+                                   1, (i32)0xC0808040) == -1, "");
+
+    // min_shift
+    typedef Flagged<i32, u32, 0u> i32_0_0;
+    typedef Flagged<i32, u32, 1u> i32_1_0;
+    typedef Flagged<i32, u32, 1u, 0u, 1> i32_1_1;
+
+    // this is a wrapper over getShift, so same test cases apply when T is flagged
+    static_assert(helper::min_shift<i32_0_0, u32, 0u, 0u>::value == 0, "");
+    static_assert(helper::min_shift<i32_0_0, u32, 0u, 1u>::value == -1, "");
+    static_assert(helper::min_shift<i32_1_0, u32, 0u, 1u>::value == -1, "");
+    static_assert(helper::min_shift<i32_1_0, u32, 0u, 0u>::value == 0, "");
+    static_assert(helper::min_shift<i32_0_0, u32, 1u, 0u>::value == 0, "");
+    static_assert(helper::min_shift<i32_1_0, u32, 1u, 0u>::value == 1, "");
+    static_assert(helper::min_shift<i32_1_0, u32, 1u, 1u>::value == 0, "");
+    static_assert(helper::min_shift<i32_1_1, u32, 1u, 1u>::value == 1, "");
+    static_assert(helper::min_shift<i32_1_1, u32, 3u, 0u>::value == 2, "");
+    static_assert(helper::min_shift<Flagged<i32, u32, 5u>, u32, 3u, 1u>::value == 0, "");
+    static_assert(helper::min_shift<Flagged<i32, u32, 7u>, u32, 3u, 1u>::value == -1, "");
+    static_assert(helper::min_shift<i32_0_0, u32, 1u, 1u>::value == -1, "");
+
+    static_assert(helper::min_shift<i32_800f_0, u32, 0x800Fu, 0u>::value == 4, "");
+    static_assert(helper::min_shift<i32_800f_4, u32, 0x1800Fu, 0x800Fu>::value == 4, "");
+    static_assert(helper::min_shift<i32_800f_4, u32, 0x800Fu, 0u>::value == 0, "");
+    static_assert(helper::min_shift<Flagged<i32, u32, 0x8002u>, u32, 0x800Fu, 0u>::value == 2, "");
+    static_assert(helper::min_shift<Flagged<i32, u32, 0x8001u>, u32, 0x800Fu, 0u>::value == 1, "");
+    static_assert(
+            helper::min_shift<Flagged<i32, u32, 0x800Fu, 0u, 16>, u32, 0x800Fu, 0u>::value == 0, "");
+    static_assert(
+            helper::min_shift<Flagged<i32, u32, 0x800F8000u>, u32, 0x800Fu, 0u>::value == 5, "");
+    static_assert(
+            helper::min_shift<Flagged<i32, u32, 0x80808080u>, u32, 0xFFu, 0u>::value == -1, "");
+    static_assert(
+            helper::min_shift<Flagged<i32, u32, 0x80808040u>, u32, 0xFFu, 0u>::value == 7, "");
+
+    // for min_shift, non-tagged type behaves as if having base mask of 0
+    static_assert(helper::min_shift<i32, u32, 0u, 0u>::value == 0, "");
+    static_assert(helper::min_shift<u32, u32, 0u, 0u>::value == 0, "");
+    static_assert(helper::min_shift<i32, u32, 0u, 0u>::value == 0, "");
+    static_assert(helper::min_shift<i32, u32, 0u, 1u>::value == -1, "");
+    static_assert(helper::min_shift<i32, u32, 1u, 0u>::value == 0, "");
+    static_assert(helper::min_shift<i32, u32, 1u, 1u>::value == -1, "");
+
+    // verify min_shift (mask must be positive or no shift can be required)
+    static_assert(helper::min_shift<Flagged<i32, i32, 0x40808040>, i32, 0xFF, 0>::value == 7, "");
+    static_assert(helper::min_shift<Flagged<i32, i32, 0x40808040>,
+                                    i32, (i32)0x800000FF, 0>::value == -1, "");
+    static_assert(helper::min_shift<Flagged<i32, i32, 0x40808040>,
+                                    i32, 0x100000FF, 0>::value == -1, "");
+    static_assert(helper::min_shift<Flagged<i32, i32, (i32)0x80808040>,
+                                    i32, 0xFF, 0>::value == 7, "");
+    static_assert(helper::min_shift<Flagged<i32, i32, 0x40808040>,
+                                    i32, (i32)0x80007F80, 0>::value == 0, "");
+
+    static_assert(helper::min_shift<Flagged<i32, i32, (i32)0x80808040>,
+                                    i32, (i32)0x80007F80, (i32)0x80000000>::value == 0, "");
+    static_assert(helper::min_shift<Flagged<i32, i32, (i32)0xC0808040>,
+                                    i32, (i32)0x80007F80, (i32)0xC0000000>::value == -1, "");
+    // note: cannot create a flagged type with signed flag and shift
+    // static_assert(helper::min_shift<Flagged<i32, i32, (i32)0x60404020, 0, 1>,
+    //                                i32, (i32)0x40003FC0, (i32)0x40000000>::value == -1, "");
+
+    typedef Flagged<i32, u32, 0x800F /* mask */, 0 /* shared mask */, 16 /* shift */> i32_800f_16;
+    static_assert_equals(sizeof(i32_800f_16), sizeof(i32_800f_0), "");
+    // shifted mask overflows!
+    // typedef Flagged<i32, u32, 0x800F /* mask */, 0 /* shared mask */, 17 /* shift */> i32_800f_17;
+    // static_assert(sizeof(i32_800f_17) == sizeof(i32_800f_0), "");
+    typedef Flagged<i32, i32, 0x800F /* mask */, 0 /* shared mask */, 15 /* shift */> i32_800f_15i;
+    static_assert_equals(sizeof(i32_800f_15i), sizeof(i32_800f_0), "");
+    // shifted mask overflows!
+    // typedef Flagged<i32, i32, 0x800F /* mask */, 0 /* shared mask */, 16 /* shift */> i32_800f_16i;
+    // static_assert(sizeof(i32_800f_16i) == sizeof(i32_800f_0), "");
+
+    // canCombine(mask, base, shared, shift, base-shift, base-effective)
+    static_assert(helper::canCombine(0u, 0u, 0u, 0, 0, 0u), "using no mask is valid");
+    static_assert(helper::canCombine(0u, 0u, 0u, 0, 0, 0u), "");
+    static_assert(helper::canCombine(0u, 0u, 0u, 4, 0, 0u), "");
+    static_assert(!helper::canCombine(0u, 0u, 1u, 0, 0, 0u),
+                  "shared mask must be the overlap of masks");
+    static_assert(helper::canCombine(1u, 0u, 0u, 0, 0, 0u), "");
+    static_assert(helper::canCombine(1u, 0u, 0u, 4, 0, 0u), "");
+    static_assert(helper::canCombine(3u, 5u, 1u, 0, 0, 5u), "");
+    static_assert(!helper::canCombine(3u, 3u, 3u, 1, 0, 3u), "shift must match when sharing mask");
+    static_assert(helper::canCombine(3u, 3u, 3u, 1, 1, 6u), "");
+    static_assert(!helper::canCombine(3u, 3u, 3u, 1, 2, 12u), "shift must match when sharing mask");
+    static_assert(!helper::canCombine(3u, 7u, 1u, 0, 0, 7u), "");
+    static_assert(!helper::canCombine(1u, 0u, 1u, 0, 0, 0u), "");
+
+    static_assert(!helper::canCombine(0u, 1u, 1u, 0, 0, 1u),
+                  "shared mask must be the overlap of masks");
+    static_assert(helper::canCombine(0u, 1u, 0u, 0, 0, 1u), "");
+    static_assert(helper::canCombine(0u, 1u, 0u, 4, 0, 1u), "");
+    static_assert(helper::canCombine(1u, 1u, 0u, 1, 0, 1u), "");
+    static_assert(!helper::canCombine(1u, 1u, 0u, 0, 0, 1u), "");
+    static_assert(helper::canCombine(1u, 1u, 0u, 1, 0, 1u), "");
+    static_assert(helper::canCombine(1u, 1u, 1u, 0, 0, 1u), "");
+    static_assert(!helper::canCombine(1u, 1u, 1u, 1, 0, 1u), "shift must match when sharing mask");
+
+    static_assert(helper::canCombine(0x800Fu, 0x800Fu, 0u, 4, 0, 0x800Fu), "");
+    static_assert(!helper::canCombine(0x800Fu, 0x800Fu, 0u, 1, 0, 0x800Fu), "");
+    static_assert(helper::canCombine(0x800Fu, 0x8002u, 0u, 2, 0, 0x8002u), "");
+    static_assert(helper::canCombine(0x800Fu, 0x8001u, 0u, 1, 0, 0x8001u), "");
+    static_assert(helper::canCombine(0x800Fu, 0x800Fu, 0u, 0, 16, 0x800F0000u), "");
+    static_assert(helper::canCombine(0x800Fu, 0x800Fu, 0x800Fu, 16, 16, 0x800F0000u), "");
+    static_assert(!helper::canCombine(0x1800Fu, 0x800Fu, 0u, 0, 16, 0x800F0000u), "");
+    static_assert(!helper::canCombine(0x1800Fu, 0x800Fu, 0x800Fu, 16, 16, 0x800F0000u), "");
+    static_assert(helper::canCombine(0x800Fu, 0x800F8000u, 0u, 8, 0, 0x800F8000u), "");
+    static_assert(!helper::canCombine(0xFFu, 0x80808080u, 0u, -1, 0, 0x80808080u), "");
+    static_assert(helper::canCombine(0xFFu, 0x80808040u, 0u, 7, 0, 0x80808040u), "");
+    static_assert(helper::canCombine(0xFFu, 0x8000u, 0u, 7, 0, 0x80808040u), "");
+    static_assert(helper::canCombine(0xFFu, 0x101u, 0u, 7, 15, 0x80808040u), "");
+
+    // can combine signed-flagged types only if mask is positive or no shift is required
+    static_assert(!helper::canCombine(0xFF, 0x40808040, 0, 0, 0, 0x40808040), "");
+    static_assert(helper::canCombine(0xFF, 0x40808040, 0, 7, 0, 0x40808040), "");
+    static_assert(!helper::canCombine((i32)0x800000FF, 0x40808040, 0, 0, 0, 0x40808040), "");
+    static_assert(!helper::canCombine((i32)0x800000FF, 0x40808040, 0, 7, 0, 0x40808040), "");
+    static_assert(!helper::canCombine(0x100000FF, 0x40808040, 0, 0, 0, 0x40808040), "");
+    static_assert(!helper::canCombine(0x100000FF, 0x40808040, 0, 7, 0, 0x40808040), "");
+    static_assert(!helper::canCombine(0xFF, (i32)0x80808040, 0, 0, 0, (i32)0x80808040), "");
+    static_assert(helper::canCombine(0xFF, (i32)0x80808040, 0, 7, 0, (i32)0x80808040), "");
+    static_assert(helper::canCombine((i32)0x80007F80, 0x40808040, 0, 0, 0, 0x40808040), "");
+
+    static_assert(helper::canCombine((i32)0x80007F80, (i32)0x80808040, (i32)0x80000000, 0, 0, (i32)0x80808040), "");
+    static_assert(!helper::canCombine((i32)0xC0007F80, (i32)0x80808040, (i32)0xC0000000, 0, 0, (i32)0x80808040), "");
+    static_assert(!helper::canCombine((i32)0x80007F80, (i32)0x80808040, (i32)0x80000000, 1, 0, (i32)0x80808040), "");
+    static_assert(!helper::canCombine((i32)0xC0007F80, (i32)0x80808040, (i32)0xC0000000, 1, 0, (i32)0x80808040), "");
+
+    // can_combine<T, Flag, MASK, [SHARED_MASK], [SHIFT]>
+    static_assert(helper::can_combine<i32_0_0, u32, 0u>::value, "");
+    static_assert(helper::can_combine<i32_0_0, u32, 0u, 0u>::value, "");
+    static_assert(helper::can_combine<i32_0_0, u32, 0u, 0u, 4>::value, "");
+    static_assert(!helper::can_combine<i32_0_0, u32, 0u, 1u>::value, "");
+    static_assert(helper::can_combine<i32_0_0, u32, 1u, 0u>::value, "");
+    static_assert(helper::can_combine<i32_0_0, u32, 1u, 0u, 4>::value, "");
+    static_assert(!helper::can_combine<i32_0_0, u32, 1u, 1u>::value, "");
+
+    static_assert(!helper::can_combine<i32_1_0, u32, 0u, 1u>::value, "");
+    static_assert(helper::can_combine<i32_1_0, u32, 0u, 0u>::value, "");
+    static_assert(helper::can_combine<i32_1_0, u32, 0u, 0u, 4>::value, "");
+    static_assert(helper::can_combine<i32_1_0, u32, 1u, 0u>::value, "");
+    static_assert(!helper::can_combine<i32_1_0, u32, 1u, 0u, 0>::value, "");
+    static_assert(helper::can_combine<i32_1_0, u32, 1u, 0u, 1>::value, "");
+    static_assert(helper::can_combine<i32_1_0, u32, 1u, 1u>::value, "");
+    static_assert(helper::can_combine<i32_1_0, u32, 1u, 1u, 0>::value, "");
+    static_assert(!helper::can_combine<i32_1_0, u32, 1u, 1u, 1>::value,
+                  "shouldn't be able to use SHIFT with SHARED_MASK");
+
+    static_assert(helper::can_combine<i32_800f_0, u32, 0x800Fu, 0u, 4>::value, "");
+    static_assert(!helper::can_combine<i32_800f_0, u32, 0x800Fu, 0u, 1>::value, "");
+    static_assert(helper::can_combine<i32_800f_0, u32, 0x800Fu, 0u>::value, "");
+    static_assert(helper::can_combine<Flagged<i32, u32, 0x8002u>, u32, 0x800Fu, 0u>::value, "");
+    static_assert(helper::can_combine<Flagged<i32, u32, 0x8001u>, u32, 0x800Fu, 0u>::value, "");
+    static_assert(helper::can_combine<Flagged<i32, u32, 0x800F0000u>, u32, 0x800Fu, 0u>::value, "");
+    static_assert(helper::can_combine<Flagged<i32, u32, 0x800F8000u>, u32, 0x800Fu, 0u>::value, "");
+    static_assert(!helper::can_combine<Flagged<i32, u32, 0x80808080u>, u32, 0xFFu, 0u>::value, "");
+    static_assert(helper::can_combine<Flagged<i32, u32, 0x80808040u>, u32, 0xFFu, 0u>::value, "");
+
+    // can combine signed-flagged types only if mask is positive or no shift is required
+    static_assert(helper::can_combine<Flagged<i32, i32, 0x40808040>, i32, 0xFF, 0>::value, "");
+    static_assert(!helper::can_combine<Flagged<i32, i32, 0x40808040>,
+                                       i32, (i32)0x800000FF, 0>::value, "");
+    static_assert(!helper::can_combine<Flagged<i32, i32, 0x40808040>,
+                                       i32, 0x100000FF, 0>::value, "");
+    static_assert(helper::can_combine<Flagged<i32, i32, (i32)0x80808040>, i32, 0xFF, 0>::value, "");
+    static_assert(helper::can_combine<Flagged<i32, i32, 0x40808040>,
+                                      i32, (i32)0x80007F80, 0>::value, "");
+
+    static_assert(helper::can_combine<Flagged<i32, i32, (i32)0x80808040>,
+                                      i32, (i32)0x80007F80, (i32)0x80000000>::value, "");
+    static_assert(!helper::can_combine<Flagged<i32, i32, (i32)0xC0808040>,
+                                       i32, (i32)0x80007F80, (i32)0xC0000000>::value, "");
+
+    static_assert(helper::min_shift<Flagged<i32, FLAG, (FLAG)0x80808040>,
+                                    FLAG, (FLAG)0x80007F80, (FLAG)0x80000000>::value == 0, "");
+    static_assert(helper::can_combine<Flagged<i32, FLAG, (FLAG)0x80808040>,
+                                      FLAG, (FLAG)0x80007F80, (FLAG)0x80000000>::value, "");
+
+    // cannot combine non-tagged types
+    static_assert(!helper::can_combine<i32, u32, 0u, 0u>::value, "");
+    static_assert(!helper::can_combine<u32, u32, 0u, 0u>::value, "");
+    static_assert(!helper::can_combine<i32, u32, 0u, 0u>::value, "");
+    static_assert(!helper::can_combine<i32, u32, 0u, 1u>::value, "");
+    static_assert(!helper::can_combine<i32, u32, 1u, 0u>::value, "");
+    static_assert(!helper::can_combine<i32, u32, 1u, 1u>::value, "");
+
+    typedef Flagged<i32_800f_0, u32, 0x800F /* mask */, 0 /* shared mask */> i32_800f_800f;
+    static_assert(i32_800f_800f::sFlagMask == 0x800F, "");
+    static_assert(i32_800f_800f::sFlagShift == 4, "");
+    static_assert(i32_800f_800f::sEffectiveMask == 0x880FF, "");
+    static_assert(!i32_800f_0::sFlagCombined, "");
+    static_assert(!i32_800f_4::sFlagCombined, "");
+
+    static_assert(i32_800f_800f::sFlagCombined, "");
+    static_assert_equals(sizeof(i32_800f_800f), sizeof(i32_800f_0), "");
+
+    typedef Flagged<i32_800f_0, u32, 0x1FFFF /* mask */> i32_800f_1ffff;
+    static_assert(i32_800f_1ffff::sFlagMask == 0x1FFFF, "");
+    static_assert(i32_800f_1ffff::sFlagShift == 0, "");
+    static_assert(i32_800f_1ffff::sEffectiveMask == 0x1FFFF, "");
+    static_assert(!i32_800f_1ffff::sFlagCombined, "");
+
+    // operational tests
+    i32_800f_800f val(0x8000, 0x1234, 56);
+    EXPECT_EQ(val.get().get(), 56);
+    EXPECT_EQ(val.flags(), 0x8000u);
+    EXPECT_EQ(val.get().flags(), 0x1234u & 0x800F);
+    val.setFlags(0x12345);
+    EXPECT_EQ(val.flags(), 0x12345u & 0x800F);
+    EXPECT_EQ(val.get().flags(), 0x1234u & 0x800F);
+    val.get().setFlags(0x54321);
+    EXPECT_EQ(val.flags(), 0x12345u & 0x800F);
+    EXPECT_EQ(val.get().flags(), 0x54321u & 0x800F);
+    EXPECT_EQ(val.get().get(), 56);
+
+    typedef Flagged<i32_800f_4, u32, 0x800F /* mask */, 0 /* shared mask */> i32_800f_800f_B;
+    static_assert(i32_800f_800f_B::sFlagMask == 0x800F, "");
+    static_assert(i32_800f_800f_B::sFlagShift == 0, "");
+    static_assert(i32_800f_800f_B::sEffectiveMask == 0x880FF, "");
+
+    i32_800f_800f_B valB(0x8000, 0x1234, -987);
+    EXPECT_EQ(valB.get().get(), -987);
+    EXPECT_EQ(valB.flags(), 0x8000u);
+    EXPECT_EQ(valB.get().flags(), 0x1234u & 0x800F);
+    valB.setFlags(0x12345);
+    EXPECT_EQ(valB.flags(), 0x12345u & 0x800F);
+    EXPECT_EQ(valB.get().flags(), 0x1234u & 0x800F);
+    valB.get().setFlags(0x5C321);
+    EXPECT_EQ(valB.flags(), 0x12345u & 0x800F);
+    EXPECT_EQ(valB.get().flags(), 0x5C321u & 0x800F);
+    EXPECT_EQ(valB.get().get(), -987);
+
+    typedef Flagged<Flagged<i32, u32, 0xFF>, u32, 0xFF0, 0xF0> i32_ff_ff0;
+    i32_ff_ff0 valC(0xABCD, 0x1234, 101);
+    EXPECT_EQ(valC.get().get(), 101);
+    EXPECT_EQ(valC.flags(), 0xBC0u);
+    EXPECT_EQ(valC.get().flags(), 0xC4u);
+    valC.setFlags(0x12345);
+    EXPECT_EQ(valC.flags(), 0x340u);
+    EXPECT_EQ(valC.get().flags(), 0x44u);
+    valC.get().setFlags(0x54321);
+    EXPECT_EQ(valC.flags(), 0x320u);
+    EXPECT_EQ(valC.get().flags(), 0x21u);
+    EXPECT_EQ(valC.get().get(), 101);
+
+    // when combining flags (with no shift), it should work with signed flags
+    typedef Flagged<Flagged<i32, FLAG, kMask0>, FLAG, kMask1, kMaskCommon> i32_F_ff_ff0;
+    static_assert(i32_F_ff_ff0::sFlagCombined, "flags should be combined");
+
+    i32_F_ff_ff0 valD(kFlag1_A, kFlag0_A, 1023);
+    EXPECT_EQ(valD.get().get(), 1023);
+    EXPECT_EQ(valD.flags(), kFlag1_A);
+    EXPECT_EQ(valD.get().flags(), kFlag0_A);
+    valD.setFlags(kFlag1_B);
+    EXPECT_EQ(valD.flags(), kFlag1_B);
+    EXPECT_EQ(valD.get().flags(), FLAG(0x0BA));
+    valD.get().setFlags(kFlag0_C);
+    EXPECT_EQ(valD.flags(), FLAG(0xBC0));
+    EXPECT_EQ(valD.get().flags(), kFlag0_C);
+    EXPECT_EQ(valD.get().get(), 1023);
+}
+
+} // namespace android
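For reference, the combined-mask values asserted for i32_800f_800f above follow from plain bit arithmetic: the outer 0x800F mask must be shifted clear of the inner layer's effective mask 0x800F, and 4 is the smallest shift that achieves this. A minimal standalone sketch, using only the constants from the tests above (it does not touch the Flagged template itself):

// Standalone check of the combined-mask arithmetic asserted for i32_800f_800f.
// This only restates the constants from the tests above; it does not use Flagged.
#include <cstdint>

constexpr uint32_t kInnerEffective = 0x800Fu;   // effective mask of i32_800f_0
constexpr uint32_t kOuterMask      = 0x800Fu;   // mask added by the outer layer
constexpr int      kShift          = 4;         // smallest shift avoiding overlap

static_assert(((kOuterMask << kShift) & kInnerEffective) == 0,
              "shifted outer mask must not overlap the inner effective mask");
static_assert(((kOuterMask << kShift) | kInnerEffective) == 0x880FFu,
              "combined effective mask matches the value asserted above");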
diff --git a/media/libstagefright/foundation/tests/TypeTraits_test.cpp b/media/libstagefright/foundation/tests/TypeTraits_test.cpp
new file mode 100644
index 0000000..1e2049d
--- /dev/null
+++ b/media/libstagefright/foundation/tests/TypeTraits_test.cpp
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TypeTraits_test"
+
+#include <gtest/gtest.h>
+
+#include <media/stagefright/foundation/TypeTraits.h>
+
+namespace android {
+
+class TypeTraitsTest : public ::testing::Test {
+protected:
+    enum A { };
+    enum UA : uint32_t { };
+    enum IA : int32_t { };
+};
+
+// =========== basic sanity tests for type-support templates
+TEST_F(TypeTraitsTest, StaticTests) {
+
+    // ============ is_integral_or_enum
+
+    static_assert(!std::is_integral<A>::value, "enums should not be integral");
+    static_assert(!std::is_integral<UA>::value, "enums should not be integral");
+    static_assert(!std::is_integral<IA>::value, "enums should not be integral");
+    static_assert(is_integral_or_enum<A>::value, "enums should be integral_or_enum");
+    static_assert(is_integral_or_enum<UA>::value, "enums should be integral_or_enum");
+    static_assert(is_integral_or_enum<IA>::value, "enums should be integral_or_enum");
+    static_assert(is_integral_or_enum<int>::value, "ints should be integral_or_enum");
+    static_assert(is_integral_or_enum<unsigned>::value, "unsigned ints should be integral_or_enum");
+    static_assert(!is_integral_or_enum<float>::value, "floats should not be integral_or_enum");
+
+    // ============ is_unsigned_integral
+
+    static_assert(!std::is_unsigned<UA>::value,
+                  "unsigned enums should not be unsigned");
+    static_assert(!std::is_unsigned<IA>::value,
+                  "signed enums should not be unsigned");
+    static_assert(std::is_unsigned<typename std::underlying_type<UA>::type>::value,
+                  "underlying type of unsigned enums should be unsigned");
+    static_assert(!std::is_unsigned<typename std::underlying_type<IA>::type>::value,
+                  "underlying type of signed enums should not be unsigned");
+    static_assert(is_unsigned_integral<UA>::value,
+                  "unsigned enums should be unsigned_integral");
+    static_assert(!is_unsigned_integral<IA>::value,
+                  "signed enums should not be unsigned_integral");
+    static_assert(is_unsigned_integral<unsigned>::value,
+                  "unsigned ints should be unsigned_integral");
+    static_assert(!is_unsigned_integral<int>::value,
+                  "ints should not be unsigned_integral");
+    static_assert(!is_unsigned_integral<float>::value,
+                  "floats should not be unsigned_integral");
+
+    // ============ is_signed_integral
+
+    static_assert(!std::is_signed<UA>::value,
+                  "unsigned enums should not be signed");
+    static_assert(!std::is_signed<IA>::value,
+                  "signed enums should not be signed");
+    static_assert(!std::is_signed<typename std::underlying_type<UA>::type>::value,
+                  "underlying type of unsigned enums should not be signed");
+    static_assert(std::is_signed<typename std::underlying_type<IA>::type>::value,
+                  "underlying type of signed enums should be signed");
+    static_assert(!is_signed_integral<UA>::value,
+                  "unsigned enums should not be signed_integral");
+    static_assert(is_signed_integral<IA>::value,
+                  "signed enums should be signed_integral");
+    static_assert(!is_signed_integral<unsigned>::value,
+                  "unsigned ints should not be signed_integral");
+    static_assert(is_signed_integral<int>::value,
+                  "ints should be signed_integral");
+    static_assert(!is_signed_integral<float>::value,
+                  "floats should not be signed_integral");
+
+    // ============ underlying_integral_type
+
+    static_assert(std::is_same<uint64_t, typename underlying_integral_type<uint64_t>::type>::value,
+                  "underlying integral type of uint64_t should be uint64_t");
+    static_assert(std::is_same<uint32_t, typename underlying_integral_type<UA>::type>::value,
+                  "underlying integral type of uint32_t based enums should be uint32_t");
+    static_assert(std::is_same<int64_t, typename underlying_integral_type<int64_t>::type>::value,
+                  "underlying integral type of int64_t should be int64_t");
+    static_assert(std::is_same<int32_t, typename underlying_integral_type<IA>::type>::value,
+                  "underlying integral type of int32_t based enums should be int32_t");
+    //typedef underlying_integral_type<float>::type no_type;
+    static_assert(std::is_same<void, typename underlying_integral_type<float, void>::type>::value,
+                  "underlying integral type of float cannot be specified");
+
+    // ============ is_one_of
+
+    static_assert(!is_one_of<int>::value, "int shouldn't be one of {}");
+    static_assert(!is_one_of<int, unsigned>::value, "int shouldn't be one of {unsigned}");
+    static_assert(!is_one_of<int, unsigned, float>::value,
+                  "int shouldn't be one of {unsigned, float}");
+    static_assert(is_one_of<int, int>::value, "int should be one of {int}");
+    static_assert(is_one_of<int, int, float>::value, "int should be one of {int, float}");
+    static_assert(is_one_of<int, float, int>::value, "int should be one of {float, int}");
+    static_assert(is_one_of<int, float, int, unsigned>::value,
+                  "int should be one of {float, int, unsigned}");
+    static_assert(is_one_of<int, float, unsigned, int>::value,
+                  "int should be one of {float, unsigned, int}");
+    static_assert(!is_one_of<int, int&>::value, "int shouldn't be one of {int&}");
+
+    // ============ are_unique
+
+    static_assert(are_unique<>::value, "{} should be unique");
+    static_assert(are_unique<int>::value, "{int} should be unique");
+    static_assert(are_unique<int, float>::value, "{int, float} should be unique");
+    static_assert(!are_unique<int, int>::value, "{int, int} shouldn't be unique");
+    static_assert(!are_unique<int, float, int>::value, "{int, float, int} shouldn't be unique");
+    static_assert(!are_unique<float, int, int>::value, "{float, int, int} shouldn't be unique");
+    static_assert(!are_unique<int, int, float>::value, "{int, int, float} shouldn't be unique");
+
+    // ============ find_first
+
+    static_assert(find_first<int>::index == 0, "int is not in {}");
+    static_assert(find_first<int, unsigned>::index == 0, "int is not in {unsigned}");
+    static_assert(find_first<int, unsigned, float>::index == 0, "int is not in {unsigned, float}");
+    static_assert(find_first<int, int>::index == 1, "int is 1st in {int}");
+    static_assert(find_first<int, int, float>::index == 1, "int is 1st in {int, float}");
+    static_assert(find_first<int, float, int>::index == 2, "int is 2nd in {float, int}");
+    static_assert(find_first<int, float, int, unsigned>::index == 2,
+                  "int is 2nd in {float, int, unsigned}");
+    static_assert(find_first<int, float, int, int, unsigned>::index == 2,
+                  "int is 2nd and 3rd in {float, int, int, unsigned}");
+    static_assert(find_first<int, float, unsigned, int>::index == 3,
+                  "int is 3rd in {float, unsigned, int}");
+    static_assert(find_first<int, int&>::index == 0, "int is not in {int&}");
+
+    // ============ find_first_convertible_to
+
+    static_assert(find_first_convertible_to<int>::index == 0, "int is not convertible to {}");
+    static_assert(find_first_convertible_to<int, unsigned*>::index == 0,
+                  "int is not convertible to {unsigned*}");
+    static_assert(find_first_convertible_to<int, unsigned*, float&>::index == 0,
+                  "int is not convertible to {unsigned*, float&}");
+    static_assert(find_first_convertible_to<int, int>::index == 1, "int is convertible to {int}");
+    static_assert(find_first_convertible_to<int, unsigned, int>::index == 1,
+                  "int is convertible to 1st of {unsigned, int}");
+    static_assert(find_first_convertible_to<int, int&, float>::index == 2,
+                  "int is convertible to 2nd of {int&, float}");
+    static_assert(find_first_convertible_to<float, float*, int, unsigned>::index == 2,
+                  "float is convertible to 2nd of {float*, int, unsigned}");
+    static_assert(find_first_convertible_to<float, void, float[1], int>::index == 3,
+                  "float is convertible to 3rd of {void, float[1], int}");
+    static_assert(find_first_convertible_to<int&, const int&>::index == 1,
+                  "int& is convertible to {const int&}");
+    static_assert(find_first_convertible_to<const int&, int&>::index == 0,
+                  "const int& is not convertible to {int&}");
+}
+
+} // namespace android
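The tests above only exercise the traits. As a rough illustration of what an is_one_of-style trait involves, a common variadic formulation looks like the sketch below; this is an assumption for illustration only, not the implementation in TypeTraits.h.

// Illustrative variadic sketch of an is_one_of-style trait (named one_of here to
// make clear it is not the actual TypeTraits.h implementation).
#include <type_traits>

template<typename T, typename... Us>
struct one_of : std::false_type { };            // empty list (or no match so far): false

template<typename T, typename U, typename... Us>
struct one_of<T, U, Us...>
    : std::conditional<std::is_same<T, U>::value,
                       std::true_type,
                       one_of<T, Us...>>::type { };   // match the head, or recurse on the tail

static_assert(one_of<int, float, int>::value, "int should be one of {float, int}");
static_assert(!one_of<int, int&>::value, "int shouldn't be one of {int&}");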
diff --git a/media/libstagefright/tests/Utils_test.cpp b/media/libstagefright/foundation/tests/Utils_test.cpp
similarity index 98%
rename from media/libstagefright/tests/Utils_test.cpp
rename to media/libstagefright/foundation/tests/Utils_test.cpp
index d736501..0439d5c 100644
--- a/media/libstagefright/tests/Utils_test.cpp
+++ b/media/libstagefright/foundation/tests/Utils_test.cpp
@@ -18,15 +18,11 @@
 #define LOG_TAG "Utils_test"
 
 #include <gtest/gtest.h>
-#include <utils/String8.h>
-#include <utils/Errors.h>
-#include <fcntl.h>
-#include <unistd.h>
 
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AStringUtils.h>
 #include <media/stagefright/foundation/AUtils.h>
-#include <media/stagefright/Utils.h>
+#include <media/stagefright/Utils.h> // for FOURCC
 
 namespace android {
 
diff --git a/media/libstagefright/http/Android.mk b/media/libstagefright/http/Android.mk
index 33b8361..a7bd6a2 100644
--- a/media/libstagefright/http/Android.mk
+++ b/media/libstagefright/http/Android.mk
@@ -22,7 +22,6 @@
 LOCAL_CFLAGS += -Wno-multichar
 
 LOCAL_CFLAGS += -Werror -Wall
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/httplive/Android.mk b/media/libstagefright/httplive/Android.mk
index 2c985fc..6a71b7c 100644
--- a/media/libstagefright/httplive/Android.mk
+++ b/media/libstagefright/httplive/Android.mk
@@ -14,7 +14,6 @@
 	$(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Werror -Wall
-LOCAL_CLANG := true
 LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
 
 LOCAL_SHARED_LIBRARIES := \
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index f673426..e144942 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -49,11 +49,6 @@
 const int64_t LiveSession::kUpSwitchMarginUs = 5000000ll;
 const int64_t LiveSession::kResumeThresholdUs = 100000ll;
 
-// Buffer Prepare/Ready/Underflow Marks
-const int64_t LiveSession::kReadyMarkUs = 5000000ll;
-const int64_t LiveSession::kPrepareMarkUs = 1500000ll;
-const int64_t LiveSession::kUnderflowMarkUs = 1000000ll;
-
 struct LiveSession::BandwidthEstimator : public RefBase {
     BandwidthEstimator();
 
@@ -495,6 +490,13 @@
     return new HTTPDownloader(mHTTPService, mExtraHeaders);
 }
 
+void LiveSession::setBufferingSettings(
+        const BufferingSettings &buffering) {
+    sp<AMessage> msg = new AMessage(kWhatSetBufferingSettings, this);
+    writeToAMessage(msg, buffering);
+    msg->post();
+}
+
 void LiveSession::connectAsync(
         const char *url, const KeyedVector<String8, String8> *headers) {
     sp<AMessage> msg = new AMessage(kWhatConnect, this);
@@ -518,9 +520,10 @@
     return err;
 }
 
-status_t LiveSession::seekTo(int64_t timeUs) {
+status_t LiveSession::seekTo(int64_t timeUs, MediaPlayerSeekMode mode) {
     sp<AMessage> msg = new AMessage(kWhatSeek, this);
     msg->setInt64("timeUs", timeUs);
+    msg->setInt32("mode", mode);
 
     sp<AMessage> response;
     status_t err = msg->postAndAwaitResponse(&response);
@@ -619,6 +622,12 @@
 
 void LiveSession::onMessageReceived(const sp<AMessage> &msg) {
     switch (msg->what()) {
+        case kWhatSetBufferingSettings:
+        {
+            readFromAMessage(msg, &mBufferingSettings);
+            break;
+        }
+
         case kWhatConnect:
         {
             onConnect(msg);
@@ -829,7 +838,10 @@
                     // If switching up, require a cushion bigger than kUnderflowMark
                     // to avoid buffering immediately after the switch.
                     // (If we don't have that cushion we'd rather cancel and try again.)
-                    int64_t delayUs = switchUp ? (kUnderflowMarkUs + 1000000ll) : 0;
+                    int64_t delayUs =
+                        switchUp ?
+                            (mBufferingSettings.mRebufferingWatermarkLowMs * 1000ll + 1000000ll)
+                            : 0;
                     bool needResumeUntil = false;
                     sp<AMessage> stopParams = msg;
                     if (checkSwitchProgress(stopParams, delayUs, &needResumeUntil)) {
@@ -1441,8 +1453,11 @@
 
 void LiveSession::onSeek(const sp<AMessage> &msg) {
     int64_t timeUs;
+    int32_t mode;
     CHECK(msg->findInt64("timeUs", &timeUs));
-    changeConfiguration(timeUs);
+    CHECK(msg->findInt32("mode", &mode));
+    // TODO: add "mode" to changeConfiguration.
+    changeConfiguration(timeUs/* , (MediaPlayerSeekMode)mode */);
 }
 
 status_t LiveSession::getDuration(int64_t *durationUs) const {
@@ -2185,13 +2200,16 @@
         }
 
         ++activeCount;
-        int64_t readyMark = mInPreparationPhase ? kPrepareMarkUs : kReadyMarkUs;
-        if (bufferedDurationUs > readyMark
+        int64_t readyMarkUs =
+            (mInPreparationPhase ?
+                mBufferingSettings.mInitialWatermarkMs :
+                mBufferingSettings.mRebufferingWatermarkHighMs) * 1000ll;
+        if (bufferedDurationUs > readyMarkUs
                 || mPacketSources[i]->isFinished(0)) {
             ++readyCount;
         }
         if (!mPacketSources[i]->isFinished(0)) {
-            if (bufferedDurationUs < kUnderflowMarkUs) {
+            if (bufferedDurationUs < mBufferingSettings.mRebufferingWatermarkLowMs * 1000ll) {
                 ++underflowCount;
             }
             if (bufferedDurationUs > mUpSwitchMark) {
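The BufferingSettings watermarks are expressed in milliseconds while LiveSession tracks buffered durations in microseconds, hence the * 1000ll conversions in the hunk above. A small sketch of which mark is consulted when, using hypothetical values that mirror the old kPrepareMarkUs / kReadyMarkUs / kUnderflowMarkUs constants removed by this patch (the real values come from the BufferingSettings supplied by the client):

// Sketch of the watermark arithmetic above; the values are hypothetical examples.
#include <cstdint>

constexpr int64_t kInitialWatermarkMs         = 1500;  // consulted while mInPreparationPhase
constexpr int64_t kRebufferingWatermarkHighMs = 5000;  // consulted once playback has started
constexpr int64_t kRebufferingWatermarkLowMs  = 1000;  // below this a stream counts as underflowed

constexpr bool    kInPreparationPhase = true;
constexpr int64_t kBufferedDurationUs = 1200 * 1000ll;  // 1.2 s currently buffered

constexpr int64_t kReadyMarkUs =
        (kInPreparationPhase ? kInitialWatermarkMs : kRebufferingWatermarkHighMs) * 1000ll;

static_assert(!(kBufferedDurationUs > kReadyMarkUs),
              "1.2 s is below the 1.5 s prepare mark, so the stream is not ready yet");
static_assert(!(kBufferedDurationUs < kRebufferingWatermarkLowMs * 1000ll),
              "1.2 s is above the 1.0 s underflow mark, so no underflow is counted");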
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index 65a824e..abf8cf0 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -18,6 +18,7 @@
 
 #define LIVE_SESSION_H_
 
+#include <media/BufferingSettings.h>
 #include <media/stagefright/foundation/AHandler.h>
 #include <media/mediaplayer.h>
 
@@ -72,6 +73,8 @@
             uint32_t flags,
             const sp<IMediaHTTPService> &httpService);
 
+    void setBufferingSettings(const BufferingSettings &buffering);
+
     int64_t calculateMediaTimeUs(int64_t firstTimeUs, int64_t timeUs, int32_t discontinuitySeq);
     status_t dequeueAccessUnit(StreamType stream, sp<ABuffer> *accessUnit);
 
@@ -86,7 +89,7 @@
     status_t disconnect();
 
     // Blocks until seek is complete.
-    status_t seekTo(int64_t timeUs);
+    status_t seekTo(int64_t timeUs, MediaPlayerSeekMode mode);
 
     status_t getDuration(int64_t *durationUs) const;
     size_t getTrackCount() const;
@@ -129,6 +132,7 @@
         kWhatChangeConfiguration2       = 'chC2',
         kWhatChangeConfiguration3       = 'chC3',
         kWhatPollBuffering              = 'poll',
+        kWhatSetBufferingSettings       = 'sBuS',
     };
 
     // Bandwidth Switch Mark Defaults
@@ -138,9 +142,7 @@
     static const int64_t kResumeThresholdUs;
 
     // Buffer Prepare/Ready/Underflow Marks
-    static const int64_t kReadyMarkUs;
-    static const int64_t kPrepareMarkUs;
-    static const int64_t kUnderflowMarkUs;
+    BufferingSettings mBufferingSettings;
 
     struct BandwidthEstimator;
     struct BandwidthItem {
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 7ad7fee..bbcea51 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -956,6 +956,38 @@
     return false;
 }
 
+void PlaylistFetcher::initSeqNumberForLiveStream(
+        int32_t &firstSeqNumberInPlaylist,
+        int32_t &lastSeqNumberInPlaylist) {
+    // start at least 3 target durations from the end.
+    int64_t timeFromEnd = 0;
+    size_t index = mPlaylist->size();
+    sp<AMessage> itemMeta;
+    int64_t itemDurationUs;
+    int32_t targetDuration;
+    if (mPlaylist->meta()->findInt32("target-duration", &targetDuration)) {
+        do {
+            --index;
+            if (!mPlaylist->itemAt(index, NULL /* uri */, &itemMeta)
+                    || !itemMeta->findInt64("durationUs", &itemDurationUs)) {
+                ALOGW("item or itemDurationUs missing");
+                mSeqNumber = lastSeqNumberInPlaylist - 3;
+                break;
+            }
+
+            timeFromEnd += itemDurationUs;
+            mSeqNumber = firstSeqNumberInPlaylist + index;
+        } while (timeFromEnd < targetDuration * 3E6 && index > 0);
+    } else {
+        ALOGW("target-duration missing");
+        mSeqNumber = lastSeqNumberInPlaylist - 3;
+    }
+
+    if (mSeqNumber < firstSeqNumberInPlaylist) {
+        mSeqNumber = firstSeqNumberInPlaylist;
+    }
+}
+
 bool PlaylistFetcher::initDownloadState(
         AString &uri,
         sp<AMessage> &itemMeta,
@@ -982,11 +1014,8 @@
 
         if (mSegmentStartTimeUs < 0) {
             if (!mPlaylist->isComplete() && !mPlaylist->isEvent()) {
-                // If this is a live session, start 3 segments from the end on connect
-                mSeqNumber = lastSeqNumberInPlaylist - 3;
-                if (mSeqNumber < firstSeqNumberInPlaylist) {
-                    mSeqNumber = firstSeqNumberInPlaylist;
-                }
+                // this is a live session
+                initSeqNumberForLiveStream(firstSeqNumberInPlaylist, lastSeqNumberInPlaylist);
             } else {
                 // When seeking mSegmentStartTimeUs is unavailable (< 0), we
                 // use mStartTimeUs (client supplied timestamp) to determine both start segment
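initSeqNumberForLiveStream above walks the playlist backwards, accumulating segment durations until at least three target durations are covered, then clamps the result to the first sequence number. A standalone sketch of the same walk follows; the duration vector is a hypothetical stand-in for mPlaylist and assumes a non-empty playlist:

// Standalone sketch of the "start at least three target durations from the live
// edge" walk performed by initSeqNumberForLiveStream; purely illustrative.
#include <cstdint>
#include <vector>

int32_t liveStartOffset(const std::vector<int64_t> &segmentDurationsUs,
                        int32_t targetDurationSecs) {
    int64_t timeFromEnd = 0;
    size_t index = segmentDurationsUs.size();
    int32_t offset = 0;
    do {
        --index;
        timeFromEnd += segmentDurationsUs[index];
        offset = static_cast<int32_t>(index);
    } while (timeFromEnd < targetDurationSecs * 3E6 && index > 0);
    return offset;  // the fetcher then uses firstSeqNumberInPlaylist + offset
}

// Example: target duration 6 s, trailing segments of 5 s, 6 s and 7 s.
// liveStartOffset({6000000, 6000000, 5000000, 6000000, 7000000}, 6) == 2,
// i.e. playback starts three segments from the end (5 + 6 + 7 = 18 s >= 3 * 6 s).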
diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h
index c8ca457..ee7d3a1 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.h
+++ b/media/libstagefright/httplive/PlaylistFetcher.h
@@ -218,6 +218,9 @@
     void onStop(const sp<AMessage> &msg);
     void onMonitorQueue();
     void onDownloadNext();
+    void initSeqNumberForLiveStream(
+            int32_t &firstSeqNumberInPlaylist,
+            int32_t &lastSeqNumberInPlaylist);
     bool initDownloadState(
             AString &uri,
             sp<AMessage> &itemMeta,
diff --git a/media/libstagefright/id3/Android.mk b/media/libstagefright/id3/Android.mk
index bd2e459..19ada73 100644
--- a/media/libstagefright/id3/Android.mk
+++ b/media/libstagefright/id3/Android.mk
@@ -5,7 +5,6 @@
 	ID3.cpp
 
 LOCAL_CFLAGS += -Werror -Wall
-LOCAL_CLANG := true
 LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
 
 LOCAL_SHARED_LIBRARIES := libmedia
@@ -22,7 +21,6 @@
 	testid3.cpp
 
 LOCAL_CFLAGS += -Werror -Wall
-LOCAL_CLANG := true
 
 LOCAL_SHARED_LIBRARIES := \
 	libstagefright libutils liblog libbinder libstagefright_foundation
diff --git a/media/libstagefright/id3/ID3.cpp b/media/libstagefright/id3/ID3.cpp
index a0eb630..9105084 100644
--- a/media/libstagefright/id3/ID3.cpp
+++ b/media/libstagefright/id3/ID3.cpp
@@ -517,7 +517,7 @@
         if (mOffset == 126 || mOffset == 127) {
             // Special treatment for the track number and genre.
             char tmp[16];
-            sprintf(tmp, "%d", (int)*frameData);
+            snprintf(tmp, sizeof(tmp), "%d", (int)*frameData);
 
             id->setTo(tmp);
             return;
diff --git a/media/libstagefright/id3/testid3.cpp b/media/libstagefright/id3/testid3.cpp
index b2f4188..442a3ff 100644
--- a/media/libstagefright/id3/testid3.cpp
+++ b/media/libstagefright/id3/testid3.cpp
@@ -154,8 +154,6 @@
 int main(int argc, char **argv) {
     android::ProcessState::self()->startThreadPool();
 
-    DataSource::RegisterDefaultSniffers();
-
     for (int i = 1; i < argc; ++i) {
         scan(argv[i]);
     }
diff --git a/media/libstagefright/include/ACodecBufferChannel.h b/media/libstagefright/include/ACodecBufferChannel.h
new file mode 100644
index 0000000..d52ce53
--- /dev/null
+++ b/media/libstagefright/include/ACodecBufferChannel.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef A_BUFFER_CHANNEL_H_
+
+#define A_BUFFER_CHANNEL_H_
+
+#include <map>
+#include <memory>
+#include <mutex>
+#include <vector>
+
+#include <media/openmax/OMX_Types.h>
+#include <media/stagefright/CodecBase.h>
+#include <media/ICrypto.h>
+#include <media/IOMX.h>
+
+namespace android {
+
+/**
+ * BufferChannelBase implementation for ACodec.
+ */
+class ACodecBufferChannel : public BufferChannelBase {
+public:
+    struct BufferAndId {
+        sp<MediaCodecBuffer> mBuffer;
+        IOMX::buffer_id mBufferId;
+    };
+
+    struct BufferInfo {
+        BufferInfo(
+                const sp<MediaCodecBuffer> &buffer,
+                IOMX::buffer_id bufferId,
+                const sp<IMemory> &sharedEncryptedBuffer);
+
+        BufferInfo() = delete;
+
+        // Buffer facing MediaCodec and its clients.
+        const sp<MediaCodecBuffer> mClientBuffer;
+        // Buffer facing CodecBase.
+        const sp<MediaCodecBuffer> mCodecBuffer;
+        // OMX buffer ID.
+        const IOMX::buffer_id mBufferId;
+        // Encrypted buffer in case of secure input.
+        const sp<IMemory> mSharedEncryptedBuffer;
+    };
+
+    ACodecBufferChannel(
+            const sp<AMessage> &inputBufferFilled, const sp<AMessage> &outputBufferDrained);
+    virtual ~ACodecBufferChannel() = default;
+
+    // BufferChannelBase interface
+    virtual status_t queueInputBuffer(const sp<MediaCodecBuffer> &buffer) override;
+    virtual status_t queueSecureInputBuffer(
+            const sp<MediaCodecBuffer> &buffer,
+            bool secure,
+            const uint8_t *key,
+            const uint8_t *iv,
+            CryptoPlugin::Mode mode,
+            CryptoPlugin::Pattern pattern,
+            const CryptoPlugin::SubSample *subSamples,
+            size_t numSubSamples,
+            AString *errorDetailMsg) override;
+    virtual status_t renderOutputBuffer(
+            const sp<MediaCodecBuffer> &buffer, int64_t timestampNs) override;
+    virtual status_t discardBuffer(const sp<MediaCodecBuffer> &buffer) override;
+    virtual void getInputBufferArray(Vector<sp<MediaCodecBuffer>> *array) override;
+    virtual void getOutputBufferArray(Vector<sp<MediaCodecBuffer>> *array) override;
+
+    // Methods below are interface for ACodec to use.
+
+    /**
+     * Set input buffer array.
+     *
+     * @param array     Newly allocated buffers. Empty if buffers are
+     *                  deallocated.
+     */
+    void setInputBufferArray(const std::vector<BufferAndId> &array);
+    /**
+     * Set output buffer array.
+     *
+     * @param array     Newly allocated buffers. Empty if buffers are
+     *                  deallocated.
+     */
+    void setOutputBufferArray(const std::vector<BufferAndId> &array);
+    /**
+     * Request MediaCodec to fill the specified input buffer.
+     *
+     * @param bufferID  ID of the buffer, assigned by the underlying component.
+     */
+    void fillThisBuffer(IOMX::buffer_id bufferID);
+    /**
+     * Request MediaCodec to drain the specified output buffer.
+     *
+     * @param bufferID  ID of the buffer, assigned by the underlying component.
+     * @param omxFlags  flags associated with this buffer (e.g. EOS).
+     */
+    void drainThisBuffer(IOMX::buffer_id bufferID, OMX_U32 omxFlags);
+
+private:
+    const sp<AMessage> mInputBufferFilled;
+    const sp<AMessage> mOutputBufferDrained;
+
+    sp<MemoryDealer> mDealer;
+
+    // These should only be accessed via std::atomic_* functions.
+    //
+    // Note on thread safety: since the vector and BufferInfo are const, it's
+    // safe to read them from any thread once the shared_ptr object has been
+    // atomically obtained. Inside BufferInfo, mBufferId and mSharedEncryptedBuffer
+    // are immutable objects. The internal state of mClientBuffer/mCodecBuffer is
+    // only written once the caller has given up its reference, so that access is
+    // also safe.
+    std::shared_ptr<const std::vector<const BufferInfo>> mInputBuffers;
+    std::shared_ptr<const std::vector<const BufferInfo>> mOutputBuffers;
+};
+
+}  // namespace android
+
+#endif  // A_BUFFER_CHANNEL_H_
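The thread-safety note in this header relies on the std::atomic_load / std::atomic_store overloads for std::shared_ptr: setInputBufferArray / setOutputBufferArray publish a freshly built, immutable array in one atomic step, and readers take an atomic snapshot that stays valid even if a new array is published afterwards. A minimal sketch of that access pattern, using hypothetical stand-in types rather than the actual BufferInfo:

// Minimal sketch of the atomic shared_ptr publish/snapshot pattern described in
// the comment above. Not the actual ACodecBufferChannel code; the types below are
// hypothetical stand-ins for the const BufferInfo vectors.
#include <memory>
#include <vector>

struct Info { int id; };                              // stand-in for BufferInfo

std::shared_ptr<const std::vector<Info>> gBuffers;    // stand-in for mInputBuffers

void publish(std::vector<Info> fresh) {
    // Build the replacement array off to the side, then publish it in one atomic step.
    auto next = std::make_shared<const std::vector<Info>>(std::move(fresh));
    std::atomic_store(&gBuffers, next);
}

int readFirstId() {
    // Atomically snapshot the current array; the snapshot remains valid and
    // immutable even if another thread publishes a replacement afterwards.
    auto snapshot = std::atomic_load(&gBuffers);
    return (snapshot && !snapshot->empty()) ? snapshot->front().id : -1;
}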
diff --git a/media/libstagefright/include/DRMExtractor.h b/media/libstagefright/include/DRMExtractor.h
deleted file mode 100644
index 3dc7df8..0000000
--- a/media/libstagefright/include/DRMExtractor.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef DRM_EXTRACTOR_H_
-
-#define DRM_EXTRACTOR_H_
-
-#include <media/IMediaSource.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <drm/DrmManagerClient.h>
-
-namespace android {
-
-struct AMessage;
-class DataSource;
-class SampleTable;
-class String8;
-class DecryptHandle;
-
-class DRMExtractor : public MediaExtractor {
-public:
-    DRMExtractor(const sp<DataSource> &source, const char *mime);
-
-    virtual size_t countTracks();
-    virtual sp<IMediaSource> getTrack(size_t index);
-    virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
-    virtual sp<MetaData> getMetaData();
-    virtual const char * name() { return "DRMExtractor"; }
-
-protected:
-    virtual ~DRMExtractor();
-
-private:
-    sp<DataSource> mDataSource;
-
-    sp<IMediaExtractor> mOriginalExtractor;
-    sp<DecryptHandle> mDecryptHandle;
-    DrmManagerClient* mDrmManagerClient;
-
-    DRMExtractor(const DRMExtractor &);
-    DRMExtractor &operator=(const DRMExtractor &);
-};
-
-bool SniffDRM(
-        const sp<DataSource> &source, String8 *mimeType, float *confidence,
-            sp<AMessage> *);
-
-}  // namespace android
-
-#endif  // DRM_EXTRACTOR_H_
-
diff --git a/media/libstagefright/include/DataConverter.h b/media/libstagefright/include/DataConverter.h
index 8d67921..60ebad1 100644
--- a/media/libstagefright/include/DataConverter.h
+++ b/media/libstagefright/include/DataConverter.h
@@ -24,18 +24,18 @@
 
 namespace android {
 
-struct ABuffer;
+class MediaCodecBuffer;
 
 // DataConverter base class, defaults to memcpy
 struct DataConverter : public RefBase {
     virtual size_t sourceSize(size_t targetSize); // will clamp to SIZE_MAX
     virtual size_t targetSize(size_t sourceSize); // will clamp to SIZE_MAX
 
-    status_t convert(const sp<ABuffer> &source, sp<ABuffer> &target);
+    status_t convert(const sp<MediaCodecBuffer> &source, sp<MediaCodecBuffer> &target);
     virtual ~DataConverter();
 
 protected:
-    virtual status_t safeConvert(const sp<ABuffer> &source, sp<ABuffer> &target);
+    virtual status_t safeConvert(const sp<MediaCodecBuffer> &source, sp<MediaCodecBuffer> &target);
 };
 
 // SampleConverterBase uses a ratio to calculate the source and target sizes
@@ -45,7 +45,7 @@
     virtual size_t targetSize(size_t sourceSize);
 
 protected:
-    virtual status_t safeConvert(const sp<ABuffer> &source, sp<ABuffer> &target) = 0;
+    virtual status_t safeConvert(const sp<MediaCodecBuffer> &source, sp<MediaCodecBuffer> &target) = 0;
 
     // sourceSize = sourceSampleSize / targetSampleSize * targetSize
     SampleConverterBase(uint32_t sourceSampleSize, uint32_t targetSampleSize)
@@ -61,7 +61,7 @@
     static AudioConverter *Create(AudioEncoding source, AudioEncoding target);
 
 protected:
-    virtual status_t safeConvert(const sp<ABuffer> &src, sp<ABuffer> &tgt);
+    virtual status_t safeConvert(const sp<MediaCodecBuffer> &source, sp<MediaCodecBuffer> &target);
 
 private:
     AudioConverter(
diff --git a/media/libstagefright/include/MPEG2TSExtractor.h b/media/libstagefright/include/MPEG2TSExtractor.h
index 93e9a4b..ef55620 100644
--- a/media/libstagefright/include/MPEG2TSExtractor.h
+++ b/media/libstagefright/include/MPEG2TSExtractor.h
@@ -89,6 +89,8 @@
     // Add a SynPoint derived from |event|.
     void addSyncPoint_l(const ATSParser::SyncEvent &event);
 
+    status_t  estimateDurationsFromTimesUsAtEnd();
+
     DISALLOW_EVIL_CONSTRUCTORS(MPEG2TSExtractor);
 };
 
diff --git a/media/libstagefright/include/MPEG4Extractor.h b/media/libstagefright/include/MPEG4Extractor.h
index 89ad137..fa05886 100644
--- a/media/libstagefright/include/MPEG4Extractor.h
+++ b/media/libstagefright/include/MPEG4Extractor.h
@@ -140,6 +140,9 @@
 
     Track *findTrackByMimePrefix(const char *mimePrefix);
 
+    status_t parseAC3SampleEntry(off64_t offset);
+    status_t parseAC3SpecificBox(off64_t offset, uint16_t sampleRate);
+
     MPEG4Extractor(const MPEG4Extractor &);
     MPEG4Extractor &operator=(const MPEG4Extractor &);
 };
diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/include/OMX.h
index 6c073f0..5b22a2f 100644
--- a/media/libstagefright/include/OMX.h
+++ b/media/libstagefright/include/OMX.h
@@ -21,168 +21,41 @@
 #include <utils/threads.h>
 #include <utils/KeyedVector.h>
 
+#include "OmxNodeOwner.h"
+
 namespace android {
 
 struct OMXMaster;
 struct OMXNodeInstance;
 
 class OMX : public BnOMX,
+            public OmxNodeOwner,
             public IBinder::DeathRecipient {
 public:
     OMX();
 
-    virtual bool livesLocally(node_id node, pid_t pid);
-
     virtual status_t listNodes(List<ComponentInfo> *list);
 
     virtual status_t allocateNode(
             const char *name, const sp<IOMXObserver> &observer,
-            sp<IBinder> *nodeBinder,
-            node_id *node);
-
-    virtual status_t freeNode(node_id node);
-
-    virtual status_t sendCommand(
-            node_id node, OMX_COMMANDTYPE cmd, OMX_S32 param);
-
-    virtual status_t getParameter(
-            node_id node, OMX_INDEXTYPE index,
-            void *params, size_t size);
-
-    virtual status_t setParameter(
-            node_id node, OMX_INDEXTYPE index,
-            const void *params, size_t size);
-
-    virtual status_t getConfig(
-            node_id node, OMX_INDEXTYPE index,
-            void *params, size_t size);
-
-    virtual status_t setConfig(
-            node_id node, OMX_INDEXTYPE index,
-            const void *params, size_t size);
-
-    virtual status_t getState(
-            node_id node, OMX_STATETYPE* state);
-
-    virtual status_t enableNativeBuffers(
-            node_id node, OMX_U32 port_index, OMX_BOOL graphic, OMX_BOOL enable);
-
-    virtual status_t getGraphicBufferUsage(
-            node_id node, OMX_U32 port_index, OMX_U32* usage);
-
-    virtual status_t storeMetaDataInBuffers(
-            node_id node, OMX_U32 port_index, OMX_BOOL enable, MetadataBufferType *type);
-
-    virtual status_t prepareForAdaptivePlayback(
-            node_id node, OMX_U32 portIndex, OMX_BOOL enable,
-            OMX_U32 max_frame_width, OMX_U32 max_frame_height);
-
-    virtual status_t configureVideoTunnelMode(
-            node_id node, OMX_U32 portIndex, OMX_BOOL tunneled,
-            OMX_U32 audioHwSync, native_handle_t **sidebandHandle);
-
-    virtual status_t useBuffer(
-            node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-            buffer_id *buffer, OMX_U32 allottedSize);
-
-    virtual status_t useGraphicBuffer(
-            node_id node, OMX_U32 port_index,
-            const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer);
-
-    virtual status_t updateGraphicBufferInMeta(
-            node_id node, OMX_U32 port_index,
-            const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer);
-
-    virtual status_t updateNativeHandleInMeta(
-            node_id node, OMX_U32 port_index,
-            const sp<NativeHandle> &nativeHandle, buffer_id buffer);
+            sp<IOMXNode> *omxNode);
 
     virtual status_t createInputSurface(
-            node_id node, OMX_U32 port_index, android_dataspace dataSpace,
             sp<IGraphicBufferProducer> *bufferProducer,
-            MetadataBufferType *type);
-
-    virtual status_t createPersistentInputSurface(
-            sp<IGraphicBufferProducer> *bufferProducer,
-            sp<IGraphicBufferConsumer> *bufferConsumer);
-
-    virtual status_t setInputSurface(
-            node_id node, OMX_U32 port_index,
-            const sp<IGraphicBufferConsumer> &bufferConsumer,
-            MetadataBufferType *type);
-
-    virtual status_t signalEndOfInputStream(node_id node);
-
-    virtual status_t allocateSecureBuffer(
-            node_id node, OMX_U32 port_index, size_t size,
-            buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle);
-
-    virtual status_t allocateBufferWithBackup(
-            node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-            buffer_id *buffer, OMX_U32 allottedSize);
-
-    virtual status_t freeBuffer(
-            node_id node, OMX_U32 port_index, buffer_id buffer);
-
-    virtual status_t fillBuffer(node_id node, buffer_id buffer, int fenceFd);
-
-    virtual status_t emptyBuffer(
-            node_id node,
-            buffer_id buffer,
-            OMX_U32 range_offset, OMX_U32 range_length,
-            OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
-
-    virtual status_t getExtensionIndex(
-            node_id node,
-            const char *parameter_name,
-            OMX_INDEXTYPE *index);
-
-    virtual status_t setInternalOption(
-            node_id node,
-            OMX_U32 port_index,
-            InternalOptionType type,
-            const void *data,
-            size_t size);
+            sp<IGraphicBufferSource> *bufferSource);
 
     virtual void binderDied(const wp<IBinder> &the_late_who);
 
-    virtual bool isSecure(IOMX::node_id node);
-
-    OMX_ERRORTYPE OnEvent(
-            node_id node,
-            OMX_IN OMX_EVENTTYPE eEvent,
-            OMX_IN OMX_U32 nData1,
-            OMX_IN OMX_U32 nData2,
-            OMX_IN OMX_PTR pEventData);
-
-    OMX_ERRORTYPE OnEmptyBufferDone(
-            node_id node, buffer_id buffer, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer, int fenceFd);
-
-    OMX_ERRORTYPE OnFillBufferDone(
-            node_id node, buffer_id buffer, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer, int fenceFd);
-
-    void invalidateNodeID(node_id node);
+    virtual status_t freeNode(const sp<OMXNodeInstance>& instance);
 
 protected:
     virtual ~OMX();
 
 private:
-    struct CallbackDispatcherThread;
-    struct CallbackDispatcher;
-
     Mutex mLock;
     OMXMaster *mMaster;
-    size_t mNodeCounter;
 
-    KeyedVector<wp<IBinder>, OMXNodeInstance *> mLiveNodes;
-    KeyedVector<node_id, OMXNodeInstance *> mNodeIDToInstance;
-    KeyedVector<node_id, sp<CallbackDispatcher> > mDispatchers;
-
-    node_id makeNodeID_l(OMXNodeInstance *instance);
-    OMXNodeInstance *findInstance(node_id node);
-    sp<CallbackDispatcher> findDispatcher(node_id node);
-
-    void invalidateNodeID_l(node_id node);
+    KeyedVector<wp<IBinder>, sp<OMXNodeInstance> > mLiveNodes;
 
     OMX(const OMX &);
     OMX &operator=(const OMX &);
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h
index 6411267..ca24c2f 100644
--- a/media/libstagefright/include/OMXNodeInstance.h
+++ b/media/libstagefright/include/OMXNodeInstance.h
@@ -18,29 +18,29 @@
 
 #define OMX_NODE_INSTANCE_H_
 
-#include "OMX.h"
-
+#include <media/IOMX.h>
 #include <utils/RefBase.h>
-#include <utils/SortedVector.h>
 #include <utils/threads.h>
+#include <utils/KeyedVector.h>
+#include <utils/SortedVector.h>
+#include "OmxNodeOwner.h"
 
 namespace android {
-
+class IOMXBufferSource;
 class IOMXObserver;
 struct OMXMaster;
-class GraphicBufferSource;
+class OMXBuffer;
 
-struct OMXNodeInstance {
+struct OMXNodeInstance : public BnOMXNode {
     OMXNodeInstance(
-            OMX *owner, const sp<IOMXObserver> &observer, const char *name);
+            OmxNodeOwner *owner, const sp<IOMXObserver> &observer, const char *name);
 
-    void setHandle(OMX::node_id node_id, OMX_HANDLETYPE handle);
+    void setHandle(OMX_HANDLETYPE handle);
 
-    OMX *owner();
+    OMX_HANDLETYPE handle();
     sp<IOMXObserver> observer();
-    OMX::node_id nodeID();
 
-    status_t freeNode(OMXMaster *master);
+    status_t freeNode() override;
 
     status_t sendCommand(OMX_COMMANDTYPE cmd, OMX_S32 param);
     status_t getParameter(OMX_INDEXTYPE index, void *params, size_t size);
@@ -51,15 +51,10 @@
     status_t getConfig(OMX_INDEXTYPE index, void *params, size_t size);
     status_t setConfig(OMX_INDEXTYPE index, const void *params, size_t size);
 
-    status_t getState(OMX_STATETYPE* state);
-
-    status_t enableNativeBuffers(OMX_U32 portIndex, OMX_BOOL graphic, OMX_BOOL enable);
+    status_t setPortMode(OMX_U32 port_index, IOMX::PortMode mode);
 
     status_t getGraphicBufferUsage(OMX_U32 portIndex, OMX_U32* usage);
 
-    status_t storeMetaDataInBuffers(
-            OMX_U32 portIndex, OMX_BOOL enable, MetadataBufferType *type);
-
     status_t prepareForAdaptivePlayback(
             OMX_U32 portIndex, OMX_BOOL enable,
             OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight);
@@ -68,113 +63,80 @@
             OMX_U32 portIndex, OMX_BOOL tunneled,
             OMX_U32 audioHwSync, native_handle_t **sidebandHandle);
 
-    status_t useBuffer(
-            OMX_U32 portIndex, const sp<IMemory> &params,
-            OMX::buffer_id *buffer, OMX_U32 allottedSize);
-
-    status_t useGraphicBuffer(
-            OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
-            OMX::buffer_id *buffer);
-
-    status_t updateGraphicBufferInMeta(
-            OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
-            OMX::buffer_id buffer);
-
-    status_t updateNativeHandleInMeta(
-            OMX_U32 portIndex, const sp<NativeHandle> &nativeHandle,
-            OMX::buffer_id buffer);
-
-    status_t createInputSurface(
-            OMX_U32 portIndex, android_dataspace dataSpace,
-            sp<IGraphicBufferProducer> *bufferProducer,
-            MetadataBufferType *type);
-
-    static status_t createPersistentInputSurface(
-            sp<IGraphicBufferProducer> *bufferProducer,
-            sp<IGraphicBufferConsumer> *bufferConsumer);
-
     status_t setInputSurface(
-            OMX_U32 portIndex, const sp<IGraphicBufferConsumer> &bufferConsumer,
-            MetadataBufferType *type);
-
-    status_t signalEndOfInputStream();
-
-    void signalEvent(OMX_EVENTTYPE event, OMX_U32 arg1, OMX_U32 arg2);
+            const sp<IOMXBufferSource> &bufferSource);
 
     status_t allocateSecureBuffer(
-            OMX_U32 portIndex, size_t size, OMX::buffer_id *buffer,
+            OMX_U32 portIndex, size_t size, IOMX::buffer_id *buffer,
             void **buffer_data, sp<NativeHandle> *native_handle);
 
-    status_t allocateBufferWithBackup(
-            OMX_U32 portIndex, const sp<IMemory> &params,
-            OMX::buffer_id *buffer, OMX_U32 allottedSize);
+    status_t useBuffer(
+            OMX_U32 portIndex, const OMXBuffer &omxBuf, buffer_id *buffer);
 
-    status_t freeBuffer(OMX_U32 portIndex, OMX::buffer_id buffer);
+    status_t freeBuffer(
+            OMX_U32 portIndex, buffer_id buffer);
 
-    status_t fillBuffer(OMX::buffer_id buffer, int fenceFd);
+    status_t fillBuffer(
+            buffer_id buffer, const OMXBuffer &omxBuf, int fenceFd = -1);
 
     status_t emptyBuffer(
-            OMX::buffer_id buffer,
-            OMX_U32 rangeOffset, OMX_U32 rangeLength,
-            OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
-
-    status_t emptyGraphicBuffer(
-            OMX_BUFFERHEADERTYPE *header, const sp<GraphicBuffer> &buffer,
-            OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
+            buffer_id buffer, const OMXBuffer &omxBuf,
+            OMX_U32 flags, OMX_TICKS timestamp, int fenceFd = -1);
 
     status_t getExtensionIndex(
             const char *parameterName, OMX_INDEXTYPE *index);
 
-    status_t setInternalOption(
-            OMX_U32 portIndex,
-            IOMX::InternalOptionType type,
-            const void *data,
-            size_t size);
+    status_t setQuirks(OMX_U32 quirks);
 
     bool isSecure() const {
         return mIsSecure;
     }
 
+    status_t dispatchMessage(const omx_message &msg) override;
+
     // handles messages and removes them from the list
     void onMessages(std::list<omx_message> &messages);
-    void onMessage(const omx_message &msg);
-    void onObserverDied(OMXMaster *master);
-    void onGetHandleFailed();
+    void onObserverDied();
     void onEvent(OMX_EVENTTYPE event, OMX_U32 arg1, OMX_U32 arg2);
 
     static OMX_CALLBACKTYPE kCallbacks;
 
 private:
+    struct CallbackDispatcherThread;
+    struct CallbackDispatcher;
+
     Mutex mLock;
 
-    OMX *mOwner;
-    OMX::node_id mNodeID;
+    OmxNodeOwner *mOwner;
     OMX_HANDLETYPE mHandle;
     sp<IOMXObserver> mObserver;
+    sp<CallbackDispatcher> mDispatcher;
     bool mDying;
     bool mSailed;  // configuration is set (no more meta-mode changes)
     bool mQueriedProhibitedExtensions;
     SortedVector<OMX_INDEXTYPE> mProhibitedExtensions;
     bool mIsSecure;
+    uint32_t mQuirks;
 
-    // Lock only covers mGraphicBufferSource.  We can't always use mLock
-    // because of rare instances where we'd end up locking it recursively.
-    Mutex mGraphicBufferSourceLock;
-    // Access this through getGraphicBufferSource().
-    sp<GraphicBufferSource> mGraphicBufferSource;
-
+    // Lock only covers mOMXBufferSource.  We can't always use mLock
+    // because of rare instances where we'd end up locking it recursively.
+    Mutex mOMXBufferSourceLock;
+    // Access this through getBufferSource().
+    sp<IOMXBufferSource> mOMXBufferSource;
 
     struct ActiveBuffer {
         OMX_U32 mPortIndex;
-        OMX::buffer_id mID;
+        IOMX::buffer_id mID;
     };
     Vector<ActiveBuffer> mActiveBuffers;
     // for buffer ptr to buffer id translation
     Mutex mBufferIDLock;
     uint32_t mBufferIDCount;
-    KeyedVector<OMX::buffer_id, OMX_BUFFERHEADERTYPE *> mBufferIDToBufferHeader;
-    KeyedVector<OMX_BUFFERHEADERTYPE *, OMX::buffer_id> mBufferHeaderToBufferID;
+    KeyedVector<IOMX::buffer_id, OMX_BUFFERHEADERTYPE *> mBufferIDToBufferHeader;
+    KeyedVector<OMX_BUFFERHEADERTYPE *, IOMX::buffer_id> mBufferHeaderToBufferID;
 
+    bool mLegacyAdaptiveExperiment;
+    IOMX::PortMode mPortMode[2];
     // metadata and secure buffer type tracking
     MetadataBufferType mMetadataType[2];
     enum SecureBufferType {
@@ -184,6 +146,14 @@
     };
     SecureBufferType mSecureBufferType[2];
 
+    // Following are OMX parameters managed by us (instead of the component)
+    // OMX_IndexParamMaxFrameDurationForBitrateControl
+    KeyedVector<int64_t, int64_t> mOriginalTimeUs;
+    bool mRestorePtsFailed;
+    int64_t mMaxTimestampGapUs;
+    int64_t mPrevOriginalTimeUs;
+    int64_t mPrevModifiedTimeUs;
+
     // For debug support
     char *mName;
     int DEBUG;
@@ -198,21 +168,51 @@
 
     ~OMXNodeInstance();
 
-    void addActiveBuffer(OMX_U32 portIndex, OMX::buffer_id id);
-    void removeActiveBuffer(OMX_U32 portIndex, OMX::buffer_id id);
+    void addActiveBuffer(OMX_U32 portIndex, IOMX::buffer_id id);
+    void removeActiveBuffer(OMX_U32 portIndex, IOMX::buffer_id id);
     void freeActiveBuffers();
 
     // For buffer id management
-    OMX::buffer_id makeBufferID(OMX_BUFFERHEADERTYPE *bufferHeader);
-    OMX_BUFFERHEADERTYPE *findBufferHeader(OMX::buffer_id buffer, OMX_U32 portIndex);
-    OMX::buffer_id findBufferID(OMX_BUFFERHEADERTYPE *bufferHeader);
-    void invalidateBufferID(OMX::buffer_id buffer);
+    IOMX::buffer_id makeBufferID(OMX_BUFFERHEADERTYPE *bufferHeader);
+    OMX_BUFFERHEADERTYPE *findBufferHeader(IOMX::buffer_id buffer, OMX_U32 portIndex);
+    IOMX::buffer_id findBufferID(OMX_BUFFERHEADERTYPE *bufferHeader);
+    void invalidateBufferID(IOMX::buffer_id buffer);
 
     bool isProhibitedIndex_l(OMX_INDEXTYPE index);
 
+    status_t useBuffer_l(
+            OMX_U32 portIndex, const sp<IMemory> &params,
+            IOMX::buffer_id *buffer);
+
+    status_t useGraphicBuffer_l(
+            OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
+            IOMX::buffer_id *buffer);
+
+    status_t useGraphicBufferWithMetadata_l(
+            OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
+            IOMX::buffer_id *buffer);
+
     status_t useGraphicBuffer2_l(
             OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
-            OMX::buffer_id *buffer);
+            IOMX::buffer_id *buffer);
+
+    status_t emptyBuffer_l(
+            IOMX::buffer_id buffer,
+            OMX_U32 rangeOffset, OMX_U32 rangeLength,
+            OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
+
+    status_t emptyGraphicBuffer_l(
+            IOMX::buffer_id buffer, const sp<GraphicBuffer> &graphicBuffer,
+            OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
+
+    status_t emptyNativeHandleBuffer_l(
+            IOMX::buffer_id buffer, const sp<NativeHandle> &nativeHandle,
+            OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
+
+    status_t emptyBuffer_l(
+            OMX_BUFFERHEADERTYPE *header,
+            OMX_U32 flags, OMX_TICKS timestamp, intptr_t debugAddr, int fenceFd);
+
     static OMX_ERRORTYPE OnEvent(
             OMX_IN OMX_HANDLETYPE hComponent,
             OMX_IN OMX_PTR pAppData,
@@ -231,6 +231,9 @@
             OMX_IN OMX_PTR pAppData,
             OMX_IN OMX_BUFFERHEADERTYPE *pBuffer);
 
+    status_t enableNativeBuffers_l(
+            OMX_U32 portIndex, OMX_BOOL graphic, OMX_BOOL enable);
+
     status_t storeMetaDataInBuffers_l(
             OMX_U32 portIndex, OMX_BOOL enable, MetadataBufferType *type);
 
@@ -243,10 +246,6 @@
     int retrieveFenceFromMeta_l(
             OMX_BUFFERHEADERTYPE *header, OMX_U32 portIndex);
 
-    status_t emptyBuffer_l(
-            OMX_BUFFERHEADERTYPE *header,
-            OMX_U32 flags, OMX_TICKS timestamp, intptr_t debugAddr, int fenceFd);
-
     // Updates the graphic buffer handle in the metadata buffer for |buffer| and |header| to
     // |graphicBuffer|'s handle. If |updateCodecBuffer| is true, the update will happen in
     // the actual codec buffer (use this if not using emptyBuffer (with no _l) later to
@@ -254,18 +253,27 @@
     // buffer.)
     status_t updateGraphicBufferInMeta_l(
             OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
-            OMX::buffer_id buffer, OMX_BUFFERHEADERTYPE *header, bool updateCodecBuffer);
+            IOMX::buffer_id buffer, OMX_BUFFERHEADERTYPE *header);
 
-    status_t createGraphicBufferSource(
-            OMX_U32 portIndex, const sp<IGraphicBufferConsumer> &consumer /* nullable */,
-            MetadataBufferType *type);
-    sp<GraphicBufferSource> getGraphicBufferSource();
-    void setGraphicBufferSource(const sp<GraphicBufferSource> &bufferSource);
+    status_t updateNativeHandleInMeta_l(
+            OMX_U32 portIndex, const sp<NativeHandle> &nativeHandle,
+            IOMX::buffer_id buffer, OMX_BUFFERHEADERTYPE *header);
+
+    sp<IOMXBufferSource> getBufferSource();
+    void setBufferSource(const sp<IOMXBufferSource> &bufferSource);
+    // Called when omx_message::FILL_BUFFER_DONE is received. (Currently the
+    // buffer source will fix timestamp in the header if needed.)
+    void codecBufferFilled(omx_message &msg);
 
     // Handles |msg|, and may modify it. Returns true iff completely handled it and
     // |msg| does not need to be sent to the event listener.
     bool handleMessage(omx_message &msg);
 
+    bool handleDataSpaceChanged(omx_message &msg);
+
+    status_t setMaxPtsGapUs(const void *params, size_t size);
+    int64_t getCodecTimestamp(OMX_TICKS timestamp);
+
     OMXNodeInstance(const OMXNodeInstance &);
     OMXNodeInstance &operator=(const OMXNodeInstance &);
 };
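The pair of KeyedVectors above (mBufferIDToBufferHeader / mBufferHeaderToBufferID) translates in both directions between the opaque IOMX::buffer_id handed to clients and the OMX_BUFFERHEADERTYPE* used with the component. A minimal sketch of such a two-way table, assuming simple sequential ids (the real makeBufferID also handles id overflow and holds mBufferIDLock):

    // Sketch only, not the actual OMXNodeInstance implementation.
    #include <OMX_Core.h>
    #include <utils/KeyedVector.h>

    struct BufferIdTable {
        uint32_t mCount = 0;
        android::KeyedVector<uint32_t, OMX_BUFFERHEADERTYPE *> mIdToHeader;
        android::KeyedVector<OMX_BUFFERHEADERTYPE *, uint32_t> mHeaderToId;

        uint32_t makeBufferID(OMX_BUFFERHEADERTYPE *header) {
            uint32_t id = ++mCount;          // sequential for illustration only
            mIdToHeader.add(id, header);
            mHeaderToId.add(header, id);
            return id;
        }
        OMX_BUFFERHEADERTYPE *findBufferHeader(uint32_t id) const {
            ssize_t i = mIdToHeader.indexOfKey(id);
            return i < 0 ? NULL : mIdToHeader.valueAt(i);
        }
        void invalidateBufferID(uint32_t id) {
            ssize_t i = mIdToHeader.indexOfKey(id);
            if (i < 0) return;
            mHeaderToId.removeItem(mIdToHeader.valueAt(i));
            mIdToHeader.removeItemsAt(i);
        }
    };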
diff --git a/media/libstagefright/include/OmxNodeOwner.h b/media/libstagefright/include/OmxNodeOwner.h
new file mode 100644
index 0000000..64ec7f7
--- /dev/null
+++ b/media/libstagefright/include/OmxNodeOwner.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OMX_NODE_OWNER_H_
+
+#define OMX_NODE_OWNER_H_
+
+namespace android {
+
+struct OMXNodeInstance;
+
+/**
+ * This struct is needed to separate OMX from OMXNodeInstance.
+ *
+ * TODO: This might not be needed after Treble transition is complete.
+ */
+struct OmxNodeOwner {
+    virtual status_t freeNode(const sp<OMXNodeInstance> &instance) = 0;
+    virtual ~OmxNodeOwner() {}
+};
+
+}
+
+#endif  // OMX_NODE_OWNER_H_
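A minimal hypothetical implementer of this interface, only to illustrate the ownership relationship (the real OMX owner additionally unlinks binder death recipients and releases the component handle through OMXMaster):

    // Sketch only: an owner that drops its strong reference when a node is freed.
    // Assumes it lives in namespace android; the container is illustrative.
    #include <utils/Errors.h>
    #include <utils/Vector.h>

    struct ExampleNodeOwner : public OmxNodeOwner {
        Vector<sp<OMXNodeInstance> > mNodes;   // live nodes kept alive by the owner

        virtual status_t freeNode(const sp<OMXNodeInstance> &instance) {
            for (size_t i = 0; i < mNodes.size(); ++i) {
                if (mNodes[i] == instance) {
                    mNodes.removeAt(i);        // component teardown itself is elided here
                    return OK;
                }
            }
            return NAME_NOT_FOUND;
        }
    };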
diff --git a/media/libstagefright/include/SecureBuffer.h b/media/libstagefright/include/SecureBuffer.h
new file mode 100644
index 0000000..cf7933a
--- /dev/null
+++ b/media/libstagefright/include/SecureBuffer.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SECURE_BUFFER_H_
+
+#define SECURE_BUFFER_H_
+
+#include <media/ICrypto.h>
+#include <media/MediaCodecBuffer.h>
+
+namespace android {
+
+class NativeHandle;
+
+/**
+ * Secure MediaCodecBuffer implementation.
+ *
+ * To classes outside of MediaCodec, this buffer is opaque and exposes only its
+ * size. For decryption, it exposes the underlying handle or pointer and its
+ * type, which can be fed to ICrypto::decrypt().
+ */
+class SecureBuffer : public MediaCodecBuffer {
+public:
+    SecureBuffer(const sp<AMessage> &format, const void *ptr, size_t size);
+    SecureBuffer(const sp<AMessage> &format, const sp<NativeHandle> &handle, size_t size);
+
+    virtual ~SecureBuffer() = default;
+
+    void *getDestinationPointer();
+    ICrypto::DestinationType getDestinationType();
+
+private:
+    SecureBuffer() = delete;
+
+    const void *mPointer;
+    const sp<NativeHandle> mHandle;
+};
+
+}  // namespace android
+
+#endif  // SECURE_BUFFER_H_
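A hedged usage sketch of the accessors declared above; 'format', 'secureHandle' and 'bufferSize' are placeholders, not part of this change:

    // Sketch: what a decrypt path can query before calling ICrypto::decrypt().
    sp<SecureBuffer> dst = new SecureBuffer(format, secureHandle, bufferSize);
    ICrypto::DestinationType dstType = dst->getDestinationType();
    void *dstPtr = dst->getDestinationPointer();  // native handle or opaque pointer,
                                                  // depending on dstType
    // dstType / dstPtr are what the caller forwards as the decrypt destination.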
diff --git a/media/libstagefright/include/SharedMemoryBuffer.h b/media/libstagefright/include/SharedMemoryBuffer.h
new file mode 100644
index 0000000..1d7f7a6
--- /dev/null
+++ b/media/libstagefright/include/SharedMemoryBuffer.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SHARED_MEMORY_BUFFER_H_
+
+#define SHARED_MEMORY_BUFFER_H_
+
+#include <media/MediaCodecBuffer.h>
+
+namespace android {
+
+struct AMessage;
+class IMemory;
+
+/**
+ * MediaCodecBuffer implementation based on IMemory.
+ */
+class SharedMemoryBuffer : public MediaCodecBuffer {
+public:
+    SharedMemoryBuffer(const sp<AMessage> &format, const sp<IMemory> &mem);
+
+    virtual ~SharedMemoryBuffer() = default;
+
+private:
+    SharedMemoryBuffer() = delete;
+
+    const sp<IMemory> mMemory;
+};
+
+}  // namespace android
+
+#endif  // SHARED_MEMORY_BUFFER_H_
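A hedged sketch of constructing one; the MemoryDealer allocation and the 'format' message are assumptions for illustration only:

    // Sketch: back a codec buffer with an ashmem region.
    sp<MemoryDealer> dealer = new MemoryDealer(bufferSize, "SharedMemoryBufferExample");
    sp<IMemory> mem = dealer->allocate(bufferSize);
    sp<SharedMemoryBuffer> buf = new SharedMemoryBuffer(format, mem);
    // data()/size() inherited from MediaCodecBuffer now refer to the IMemory region.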
diff --git a/media/libstagefright/include/SimpleSoftOMXComponent.h b/media/libstagefright/include/SimpleSoftOMXComponent.h
index 591b38e..1d1f2bd 100644
--- a/media/libstagefright/include/SimpleSoftOMXComponent.h
+++ b/media/libstagefright/include/SimpleSoftOMXComponent.h
@@ -29,6 +29,11 @@
 
 struct ALooper;
 
+struct CodecProfileLevel {
+    OMX_U32 mProfile;
+    OMX_U32 mLevel;
+};
+
 struct SimpleSoftOMXComponent : public SoftOMXComponent {
     SimpleSoftOMXComponent(
             const char *name,
diff --git a/media/libstagefright/include/WVMExtractor.h b/media/libstagefright/include/WVMExtractor.h
deleted file mode 100644
index 65cb99a..0000000
--- a/media/libstagefright/include/WVMExtractor.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef WVM_EXTRACTOR_H_
-
-#define WVM_EXTRACTOR_H_
-
-#include <media/stagefright/MediaExtractor.h>
-#include <utils/Errors.h>
-
-namespace android {
-
-struct AMessage;
-class String8;
-class DataSource;
-
-class WVMLoadableExtractor : public MediaExtractor {
-public:
-    WVMLoadableExtractor() {}
-    virtual ~WVMLoadableExtractor() {}
-
-    virtual int64_t getCachedDurationUs(status_t *finalStatus) = 0;
-    virtual status_t getError() = 0;
-    virtual status_t getEstimatedBandwidthKbps(int32_t *kbps) = 0;
-    virtual void setAdaptiveStreamingMode(bool adaptive) = 0;
-    virtual void setCryptoPluginMode(bool cryptoPluginMode) = 0;
-    virtual void setError(status_t err) = 0;
-    virtual void setUID(uid_t uid) = 0;
-};
-
-class WVMExtractor : public MediaExtractor {
-public:
-    explicit WVMExtractor(const sp<DataSource> &source);
-
-    virtual size_t countTracks();
-    virtual sp<IMediaSource> getTrack(size_t index);
-    virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
-    virtual sp<MetaData> getMetaData();
-    virtual void setUID(uid_t uid);
-
-    // Return the amount of data cached from the current
-    // playback position (in us).
-    // While more data is still being fetched *finalStatus == OK,
-    // Once fetching is completed (no more data available), *finalStatus != OK
-    // If fetching completed normally (i.e. reached EOS instead of IO error)
-    // *finalStatus == ERROR_END_OF_STREAM
-    int64_t getCachedDurationUs(status_t *finalStatus);
-
-    // Return the current estimated bandwidth
-    status_t getEstimatedBandwidthKbps(int32_t *kbps);
-
-    // Set to use adaptive streaming mode by the WV component.
-    // If adaptive == true, adaptive streaming mode will be used.
-    // Default mode is non-adaptive streaming mode.
-    // Should set to use adaptive streaming mode only if widevine:// protocol
-    // is used.
-    void setAdaptiveStreamingMode(bool adaptive);
-
-    // setCryptoPluginMode(true) to select crypto plugin mode.
-    // In this mode, the extractor returns encrypted data for use
-    // with the MediaCodec model, which handles the decryption in the
-    // codec.
-    void setCryptoPluginMode(bool cryptoPluginMode);
-
-    static bool getVendorLibHandle();
-
-    status_t getError();
-
-    void setError(status_t err);
-
-protected:
-    virtual ~WVMExtractor();
-
-private:
-    sp<DataSource> mDataSource;
-    sp<WVMLoadableExtractor> mImpl;
-
-    WVMExtractor(const WVMExtractor &);
-    WVMExtractor &operator=(const WVMExtractor &);
-};
-
-bool SniffWVM(
-        const sp<DataSource> &source, String8 *mimeType, float *confidence,
-        sp<AMessage> *);
-
-}  // namespace android
-
-#endif  // DRM_EXTRACTOR_H_
-
diff --git a/media/libstagefright/include/avc_utils.h b/media/libstagefright/include/avc_utils.h
index b2ef360..d05906a 100644
--- a/media/libstagefright/include/avc_utils.h
+++ b/media/libstagefright/include/avc_utils.h
@@ -18,6 +18,7 @@
 
 #define AVC_UTILS_H_
 
+#include <media/MediaCodecBuffer.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <utils/Errors.h>
 
@@ -84,6 +85,7 @@
 sp<MetaData> MakeAVCCodecSpecificData(const sp<ABuffer> &accessUnit);
 
 bool IsIDR(const sp<ABuffer> &accessUnit);
+bool IsIDR(const sp<MediaCodecBuffer> &accessUnit);
 bool IsAVCReferenceFrame(const sp<ABuffer> &accessUnit);
 uint32_t FindAVCLayerId(const uint8_t *data, size_t size);
 
diff --git a/media/libstagefright/matroska/Android.mk b/media/libstagefright/matroska/Android.mk
index 89d7ff2..7dd0863 100644
--- a/media/libstagefright/matroska/Android.mk
+++ b/media/libstagefright/matroska/Android.mk
@@ -10,7 +10,6 @@
         $(TOP)/frameworks/av/media/libstagefright/include \
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-LOCAL_CLANG := true
 LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
 
 LOCAL_SHARED_LIBRARIES := libmedia
diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp
index 8e82486..a974671 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.cpp
+++ b/media/libstagefright/matroska/MatroskaExtractor.cpp
@@ -377,6 +377,16 @@
 
     *actualFrameTimeUs = -1ll;
 
+    if (seekTimeUs > INT64_MAX / 1000ll ||
+            seekTimeUs < INT64_MIN / 1000ll ||
+            (mExtractor->mSeekPreRollNs > 0 &&
+                    (seekTimeUs * 1000ll) < INT64_MIN + mExtractor->mSeekPreRollNs) ||
+            (mExtractor->mSeekPreRollNs < 0 &&
+                    (seekTimeUs * 1000ll) > INT64_MAX + mExtractor->mSeekPreRollNs)) {
+        ALOGE("cannot seek to %lld", (long long) seekTimeUs);
+        return;
+    }
+
     const int64_t seekTimeNs = seekTimeUs * 1000ll - mExtractor->mSeekPreRollNs;
 
     mkvparser::Segment* const pSegment = mExtractor->mSegment;
@@ -605,16 +615,27 @@
     int64_t timeUs = mBlockIter.blockTimeUs();
 
     for (int i = 0; i < block->GetFrameCount(); ++i) {
+        MatroskaExtractor::TrackInfo *trackInfo = &mExtractor->mTracks.editItemAt(mTrackIndex);
         const mkvparser::Block::Frame &frame = block->GetFrame(i);
+        size_t len = frame.len;
+        if (SIZE_MAX - len < trackInfo->mHeaderLen) {
+            return ERROR_MALFORMED;
+        }
 
-        MediaBuffer *mbuf = new MediaBuffer(frame.len);
+        len += trackInfo->mHeaderLen;
+        MediaBuffer *mbuf = new MediaBuffer(len);
+        uint8_t *data = static_cast<uint8_t *>(mbuf->data());
+        if (trackInfo->mHeader) {
+            memcpy(data, trackInfo->mHeader, trackInfo->mHeaderLen);
+        }
+
         mbuf->meta_data()->setInt64(kKeyTime, timeUs);
         mbuf->meta_data()->setInt32(kKeyIsSyncFrame, block->IsKey());
 
-        status_t err = frame.Read(mExtractor->mReader, static_cast<uint8_t *>(mbuf->data()));
+        status_t err = frame.Read(mExtractor->mReader, data + trackInfo->mHeaderLen);
         if (err == OK
                 && mExtractor->mIsWebm
-                && mExtractor->mTracks.itemAt(mTrackIndex).mEncrypted) {
+                && trackInfo->mEncrypted) {
             err = setWebmBlockCryptoInfo(mbuf);
         }
 
@@ -1164,6 +1185,42 @@
     }
 }
 
+status_t MatroskaExtractor::initTrackInfo(
+        const mkvparser::Track *track, const sp<MetaData> &meta, TrackInfo *trackInfo) {
+    trackInfo->mTrackNum = track->GetNumber();
+    trackInfo->mMeta = meta;
+    trackInfo->mExtractor = this;
+    trackInfo->mEncrypted = false;
+    trackInfo->mHeader = NULL;
+    trackInfo->mHeaderLen = 0;
+
+    for(size_t i = 0; i < track->GetContentEncodingCount(); i++) {
+        const mkvparser::ContentEncoding *encoding = track->GetContentEncodingByIndex(i);
+        for(size_t j = 0; j < encoding->GetEncryptionCount(); j++) {
+            const mkvparser::ContentEncoding::ContentEncryption *encryption;
+            encryption = encoding->GetEncryptionByIndex(j);
+            trackInfo->mMeta->setData(kKeyCryptoKey, 0, encryption->key_id, encryption->key_id_len);
+            trackInfo->mEncrypted = true;
+            break;
+        }
+
+        for(size_t j = 0; j < encoding->GetCompressionCount(); j++) {
+            const mkvparser::ContentEncoding::ContentCompression *compression;
+            compression = encoding->GetCompressionByIndex(j);
+            ALOGV("compression algo %llu settings_len %lld",
+                compression->algo, compression->settings_len);
+            if (compression->algo == 3
+                    && compression->settings
+                    && compression->settings_len > 0) {
+                trackInfo->mHeader = compression->settings;
+                trackInfo->mHeaderLen = compression->settings_len;
+            }
+        }
+    }
+
+    return OK;
+}
+
 void MatroskaExtractor::addTracks() {
     const mkvparser::Tracks *tracks = mSegment->GetTracks();
 
@@ -1288,21 +1345,7 @@
         mTracks.push();
         size_t n = mTracks.size() - 1;
         TrackInfo *trackInfo = &mTracks.editItemAt(n);
-        trackInfo->mTrackNum = track->GetNumber();
-        trackInfo->mMeta = meta;
-        trackInfo->mExtractor = this;
-
-        trackInfo->mEncrypted = false;
-        for(size_t i = 0; i < track->GetContentEncodingCount() && !trackInfo->mEncrypted; i++) {
-            const mkvparser::ContentEncoding *encoding = track->GetContentEncodingByIndex(i);
-            for(size_t j = 0; j < encoding->GetEncryptionCount(); j++) {
-                const mkvparser::ContentEncoding::ContentEncryption *encryption;
-                encryption = encoding->GetEncryptionByIndex(j);
-                meta->setData(kKeyCryptoKey, 0, encryption->key_id, encryption->key_id_len);
-                trackInfo->mEncrypted = true;
-                break;
-            }
-        }
+        initTrackInfo(track, meta, trackInfo);
 
         if (!strcmp("V_MPEG4/ISO/AVC", codecID) && codecPrivateSize == 0) {
             // Attempt to recover from AVC track without codec private data
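For reference, ContentCompression algo 3 in Matroska is header stripping: the muxer removes a constant prefix from every frame and stores it once in the track's compression settings. The read path above restores it by prepending mHeader to each frame; roughly (names here are placeholders):

    // Illustration of the reconstruction done in the read loop above.
    MediaBuffer *mbuf = new MediaBuffer(headerLen + frame.len);
    uint8_t *out = static_cast<uint8_t *>(mbuf->data());
    memcpy(out, strippedHeader, headerLen);   // prefix stored in the compression settings
    frame.Read(reader, out + headerLen);      // payload bytes as stored in the file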
diff --git a/media/libstagefright/matroska/MatroskaExtractor.h b/media/libstagefright/matroska/MatroskaExtractor.h
index 588bd39..19775ce 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.h
+++ b/media/libstagefright/matroska/MatroskaExtractor.h
@@ -63,6 +63,12 @@
         const MatroskaExtractor *mExtractor;
         Vector<const mkvparser::CuePoint*> mCuePoints;
 
+        // mHeader points to memory managed by mkvparser;
+        // it is freed when mSegment is deleted in ~MatroskaExtractor,
+        // so it must not be freed separately here.
+        unsigned char *mHeader;
+        size_t mHeaderLen;
+
         const mkvparser::Track* getTrack() const;
         const mkvparser::CuePoint::TrackPosition *find(long long timeNs) const;
     };
@@ -79,6 +85,7 @@
     int64_t mSeekPreRollNs;
 
     status_t synthesizeAVCC(TrackInfo *trackInfo, size_t index);
+    status_t initTrackInfo(const mkvparser::Track *track, const sp<MetaData> &meta, TrackInfo *trackInfo);
     void addTracks();
     void findThumbnails();
     void getColorInformation(const mkvparser::VideoTrack *vtrack, sp<MetaData> &meta);
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 844479e..4975d9a 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -90,6 +90,10 @@
         return mParser->mFlags;
     }
 
+    uint64_t firstPTS() const {
+        return mFirstPTS;
+    }
+
 private:
     struct StreamInfo {
         unsigned mType;
@@ -135,6 +139,7 @@
 
     void signalEOS(status_t finalResult);
 
+    SourceType getSourceType();
     sp<MediaSource> getSource(SourceType type);
 
     bool isAudio() const;
@@ -208,11 +213,12 @@
     : mHasReturnedData(false), mOffset(offset), mTimeUs(0) {}
 
 void ATSParser::SyncEvent::init(off64_t offset, const sp<MediaSource> &source,
-        int64_t timeUs) {
+        int64_t timeUs, SourceType type) {
     mHasReturnedData = true;
     mOffset = offset;
     mMediaSource = source;
     mTimeUs = timeUs;
+    mType = type;
 }
 
 void ATSParser::SyncEvent::reset() {
@@ -1121,13 +1127,24 @@
                 int64_t timeUs;
                 if (accessUnit->meta()->findInt64("timeUs", &timeUs)) {
                     found = true;
-                    event->init(pesStartOffset, mSource, timeUs);
+                    event->init(pesStartOffset, mSource, timeUs, getSourceType());
                 }
             }
         }
     }
 }
 
+ATSParser::SourceType ATSParser::Stream::getSourceType() {
+    if (isVideo()) {
+        return VIDEO;
+    } else if (isAudio()) {
+        return AUDIO;
+    } else if (isMeta()) {
+        return META;
+    }
+    return NUM_SOURCE_TYPES;
+}
+
 sp<MediaSource> ATSParser::Stream::getSource(SourceType type) {
     switch (type) {
         case VIDEO:
@@ -1565,6 +1582,16 @@
     return mPrograms.editItemAt(0)->PTSTimeDeltaEstablished();
 }
 
+int64_t ATSParser::getFirstPTSTimeUs() {
+    for (size_t i = 0; i < mPrograms.size(); ++i) {
+        sp<ATSParser::Program> program = mPrograms.itemAt(i);
+        if (program->PTSTimeDeltaEstablished()) {
+            return (program->firstPTS() * 100) / 9;
+        }
+    }
+    return -1;
+}
+
 __attribute__((no_sanitize("integer")))
 void ATSParser::updatePCR(
         unsigned /* PID */, uint64_t PCR, uint64_t byteOffsetFromStart) {
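getFirstPTSTimeUs() above converts a 90 kHz PTS tick count to microseconds; (PTS * 100) / 9 is just PTS * 1,000,000 / 90,000 with the common factor removed:

    // 90 kHz PTS ticks -> microseconds
    uint64_t pts = 900000;             // 10 seconds worth of 90 kHz ticks
    int64_t timeUs = (pts * 100) / 9;  // = 10,000,000 us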
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index 2b166f0..faae6c9 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -62,18 +62,26 @@
         ALIGNED_VIDEO_DATA         = 2,
     };
 
+    enum SourceType {
+        VIDEO = 0,
+        AUDIO = 1,
+        META  = 2,
+        NUM_SOURCE_TYPES = 3
+    };
+
     // Event is used to signal sync point event at feedTSPacket().
     struct SyncEvent {
         explicit SyncEvent(off64_t offset);
 
         void init(off64_t offset, const sp<MediaSource> &source,
-                int64_t timeUs);
+                int64_t timeUs, SourceType type);
 
         bool hasReturnedData() const { return mHasReturnedData; }
         void reset();
         off64_t getOffset() const { return mOffset; }
         const sp<MediaSource> &getMediaSource() const { return mMediaSource; }
         int64_t getTimeUs() const { return mTimeUs; }
+        SourceType getType() const { return mType; }
 
     private:
         bool mHasReturnedData;
@@ -87,6 +95,7 @@
         sp<MediaSource> mMediaSource;
         /* The timestamp of the sync frame. */
         int64_t mTimeUs;
+        SourceType mType;
     };
 
     explicit ATSParser(uint32_t flags = 0);
@@ -107,17 +116,13 @@
 
     void signalEOS(status_t finalResult);
 
-    enum SourceType {
-        VIDEO = 0,
-        AUDIO = 1,
-        META  = 2,
-        NUM_SOURCE_TYPES = 3
-    };
     sp<MediaSource> getSource(SourceType type);
     bool hasSource(SourceType type) const;
 
     bool PTSTimeDeltaEstablished();
 
+    int64_t getFirstPTSTimeUs();
+
     enum {
         // From ISO/IEC 13818-1: 2000 (E), Table 2-29
         STREAMTYPE_RESERVED             = 0x00,
diff --git a/media/libstagefright/mpeg2ts/Android.mk b/media/libstagefright/mpeg2ts/Android.mk
index 66722a8..92c386c 100644
--- a/media/libstagefright/mpeg2ts/Android.mk
+++ b/media/libstagefright/mpeg2ts/Android.mk
@@ -14,7 +14,6 @@
 	$(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Werror -Wall
-LOCAL_CLANG := true
 LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
 
 LOCAL_SHARED_LIBRARIES := libmedia
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index 4fcf7b5..548f44e 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -44,6 +44,7 @@
       mEnabled(true),
       mFormat(NULL),
       mLastQueuedTimeUs(0),
+      mEstimatedBufferDurationUs(-1),
       mEOSResult(OK),
       mLatestEnqueuedMeta(NULL),
       mLatestDequeuedMeta(NULL) {
@@ -309,6 +310,8 @@
 
     mFormat = NULL;
     mLatestEnqueuedMeta = NULL;
+
+    mEstimatedBufferDurationUs = -1;
 }
 
 void AnotherPacketSource::queueDiscontinuity(
@@ -431,6 +434,31 @@
     return durationUs;
 }
 
+int64_t AnotherPacketSource::getEstimatedBufferDurationUs() {
+    Mutex::Autolock autoLock(mLock);
+    if (mEstimatedBufferDurationUs >= 0) {
+        return mEstimatedBufferDurationUs;
+    }
+
+    SortedVector<int64_t> maxTimesUs;
+    List<sp<ABuffer> >::iterator it;
+    int64_t t1 = 0, t2 = 0;
+    for (it = mBuffers.begin(); it != mBuffers.end(); ++it) {
+        int64_t timeUs = 0;
+        const sp<ABuffer> &buffer = *it;
+        if (!buffer->meta()->findInt64("timeUs", &timeUs)) {
+            continue;
+        }
+        maxTimesUs.add(timeUs);
+        while (maxTimesUs.size() > 2) {
+            maxTimesUs.removeAt(0);
+            t1 = maxTimesUs.itemAt(0);
+            t2 = maxTimesUs.itemAt(1);
+        }
+    }
+    return mEstimatedBufferDurationUs = t2 - t1;
+}
+
 status_t AnotherPacketSource::nextBufferTime(int64_t *timeUs) {
     *timeUs = 0;
 
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.h b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
index dd6849e..b0890d7 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.h
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
@@ -57,6 +57,9 @@
     // presentation timestamps since the last discontinuity (if any).
     int64_t getBufferedDurationUs(status_t *finalResult);
 
+    // Returns the difference between the two largest timestamps queued
+    int64_t getEstimatedBufferDurationUs();
+
     status_t nextBufferTime(int64_t *timeUs);
 
     void queueAccessUnit(const sp<ABuffer> &buffer);
@@ -113,6 +116,7 @@
     bool mEnabled;
     sp<MetaData> mFormat;
     int64_t mLastQueuedTimeUs;
+    int64_t mEstimatedBufferDurationUs;
     List<sp<ABuffer> > mBuffers;
     status_t mEOSResult;
     sp<AMessage> mLatestEnqueuedMeta;
diff --git a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
index fb5e079..bde33dc 100644
--- a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
+++ b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
@@ -26,6 +26,7 @@
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AUtils.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
@@ -40,6 +41,8 @@
 namespace android {
 
 static const size_t kTSPacketSize = 188;
+static const int kMaxDurationReadSize = 250000LL;
+static const int kMaxDurationRetry = 6;
 
 struct MPEG2TSSource : public MediaSource {
     MPEG2TSSource(
@@ -212,23 +215,22 @@
                         - mSeekSyncPoints->keyAt(0);
                 off64_t diffOffset = mSeekSyncPoints->valueAt(prevSyncSize - 1)
                         - mSeekSyncPoints->valueAt(0);
-                durationUs = size * diffUs / diffOffset;
-                durations.push_back(durationUs);
+                int64_t currentDurationUs = size * diffUs / diffOffset;
+                durations.push_back(currentDurationUs);
                 if (durations.size() > 5) {
                     durations.erase(durations.begin());
                     int64_t min = *durations.begin();
                     int64_t max = *durations.begin();
-                    for (List<int64_t>::iterator i = durations.begin();
-                            i != durations.end();
-                            ++i) {
-                        if (min > *i) {
-                            min = *i;
+                    for (auto duration : durations) {
+                        if (min > duration) {
+                            min = duration;
                         }
-                        if (max < *i) {
-                            max = *i;
+                        if (max < duration) {
+                            max = duration;
                         }
                     }
                     if (max - min < 500 * 1000) {
+                        durationUs = currentDurationUs;
                         break;
                     }
                 }
@@ -244,6 +246,8 @@
             const sp<MetaData> meta = impl->getFormat();
             meta->setInt64(kKeyDuration, durationUs);
             impl->setFormat(meta);
+        } else {
+            estimateDurationsFromTimesUsAtEnd();
         }
     }
 
@@ -302,6 +306,106 @@
     }
 }
 
+status_t MPEG2TSExtractor::estimateDurationsFromTimesUsAtEnd()  {
+    if (!(mDataSource->flags() & DataSource::kIsLocalFileSource)) {
+        return ERROR_UNSUPPORTED;
+    }
+
+    off64_t size = 0;
+    status_t err = mDataSource->getSize(&size);
+    if (err != OK) {
+        return err;
+    }
+
+    uint8_t packet[kTSPacketSize];
+    const off64_t zero = 0;
+    off64_t offset = max(zero, size - kMaxDurationReadSize);
+    if (mDataSource->readAt(offset, &packet, 0) < 0) {
+        return ERROR_IO;
+    }
+
+    int retry = 0;
+    bool allDurationsFound = false;
+    int64_t timeAnchorUs = mParser->getFirstPTSTimeUs();
+    do {
+        int bytesRead = 0;
+        sp<ATSParser> parser = new ATSParser(ATSParser::TS_TIMESTAMPS_ARE_ABSOLUTE);
+        ATSParser::SyncEvent ev(0);
+        offset = max(zero, size - (kMaxDurationReadSize << retry));
+        offset = (offset / kTSPacketSize) * kTSPacketSize;
+        for (;;) {
+            if (bytesRead >= kMaxDurationReadSize << max(0, retry - 1)) {
+                break;
+            }
+
+            ssize_t n = mDataSource->readAt(offset, packet, kTSPacketSize);
+            if (n < 0) {
+                return n;
+            } else if (n < (ssize_t)kTSPacketSize) {
+                break;
+            }
+
+            offset += kTSPacketSize;
+            bytesRead += kTSPacketSize;
+            err = parser->feedTSPacket(packet, kTSPacketSize, &ev);
+            if (err != OK) {
+                return err;
+            }
+
+            if (ev.hasReturnedData()) {
+                int64_t durationUs = ev.getTimeUs();
+                ATSParser::SourceType type = ev.getType();
+                ev.reset();
+
+                int64_t firstTimeUs;
+                sp<AnotherPacketSource> src =
+                    (AnotherPacketSource *)mParser->getSource(type).get();
+                if (src == NULL || src->nextBufferTime(&firstTimeUs) != OK) {
+                    continue;
+                }
+                durationUs += src->getEstimatedBufferDurationUs();
+                durationUs -= timeAnchorUs;
+                durationUs -= firstTimeUs;
+                if (durationUs > 0) {
+                    int64_t origDurationUs, lastDurationUs;
+                    const sp<MetaData> meta = src->getFormat();
+                    const uint32_t kKeyLastDuration = 'ldur';
+                    // Require two consecutive duration calculations to be within 60 sec
+                    // (60000000 us) of each other before updating; use MetaData to store
+                    // the previous duration estimate as per-stream context.
+                    if (!meta->findInt64(kKeyDuration, &origDurationUs)
+                            || !meta->findInt64(kKeyLastDuration, &lastDurationUs)
+                            || (origDurationUs < durationUs
+                             && abs(durationUs - lastDurationUs) < 60000000)) {
+                        meta->setInt64(kKeyDuration, durationUs);
+                    }
+                    meta->setInt64(kKeyLastDuration, durationUs);
+                }
+            }
+        }
+
+        if (!allDurationsFound) {
+            allDurationsFound = true;
+            for (auto t: {ATSParser::VIDEO, ATSParser::AUDIO}) {
+                sp<AnotherPacketSource> src = (AnotherPacketSource *)mParser->getSource(t).get();
+                if (src == NULL) {
+                    continue;
+                }
+                int64_t durationUs;
+                const sp<MetaData> meta = src->getFormat();
+                if (!meta->findInt64(kKeyDuration, &durationUs)) {
+                    allDurationsFound = false;
+                    break;
+                }
+            }
+        }
+
+        ++retry;
+    } while(!allDurationsFound && offset > 0 && retry <= kMaxDurationRetry);
+
+    return allDurationsFound? OK : ERROR_UNSUPPORTED;
+}
+
 uint32_t MPEG2TSExtractor::flags() const {
     return CAN_PAUSE | CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD;
 }
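For scale, estimateDurationsFromTimesUsAtEnd() above starts each pass (kMaxDurationReadSize << retry) bytes before end-of-file, packet-aligned, with retry running from 0 to kMaxDurationRetry (6):

    // Start offset of each pass, relative to EOF:
    // retry 0: 250000 << 0 =    250,000 bytes (~1330 TS packets of 188 bytes)
    // retry 1: 250000 << 1 =    500,000 bytes
    // retry 6: 250000 << 6 = 16,000,000 bytes (last pass before returning ERROR_UNSUPPORTED)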
diff --git a/media/libstagefright/omx/Android.mk b/media/libstagefright/omx/Android.mk
index 8cbfc0d..72f8043 100644
--- a/media/libstagefright/omx/Android.mk
+++ b/media/libstagefright/omx/Android.mk
@@ -32,7 +32,6 @@
 
 LOCAL_MODULE:= libstagefright_omx
 LOCAL_CFLAGS += -Werror -Wall
-LOCAL_CLANG := true
 LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index 267f24d..4909100 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -23,10 +23,6 @@
 #define STRINGIFY_ENUMS // for asString in HardwareAPI.h/VideoAPI.h
 
 #include "GraphicBufferSource.h"
-#include "OMXUtils.h"
-
-#include <OMX_Core.h>
-#include <OMX_IndexExt.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/foundation/ColorUtils.h>
@@ -35,108 +31,33 @@
 #include <ui/GraphicBuffer.h>
 #include <gui/BufferItem.h>
 #include <HardwareAPI.h>
+#include "omx/OMXUtils.h"
+#include <OMX_Component.h>
+#include <OMX_IndexExt.h>
+#include "OMXBuffer.h"
 
 #include <inttypes.h>
 #include "FrameDropper.h"
 
 namespace android {
 
-static const bool EXTRA_CHECK = true;
-
 static const OMX_U32 kPortIndexInput = 0;
 
-GraphicBufferSource::PersistentProxyListener::PersistentProxyListener(
-        const wp<IGraphicBufferConsumer> &consumer,
-        const wp<ConsumerListener>& consumerListener) :
-    mConsumerListener(consumerListener),
-    mConsumer(consumer) {}
-
-GraphicBufferSource::PersistentProxyListener::~PersistentProxyListener() {}
-
-void GraphicBufferSource::PersistentProxyListener::onFrameAvailable(
-        const BufferItem& item) {
-    sp<ConsumerListener> listener(mConsumerListener.promote());
-    if (listener != NULL) {
-        listener->onFrameAvailable(item);
-    } else {
-        sp<IGraphicBufferConsumer> consumer(mConsumer.promote());
-        if (consumer == NULL) {
-            return;
-        }
-        BufferItem bi;
-        status_t err = consumer->acquireBuffer(&bi, 0);
-        if (err != OK) {
-            ALOGE("PersistentProxyListener: acquireBuffer failed (%d)", err);
-            return;
-        }
-
-        err = consumer->detachBuffer(bi.mSlot);
-        if (err != OK) {
-            ALOGE("PersistentProxyListener: detachBuffer failed (%d)", err);
-            return;
-        }
-
-        err = consumer->attachBuffer(&bi.mSlot, bi.mGraphicBuffer);
-        if (err != OK) {
-            ALOGE("PersistentProxyListener: attachBuffer failed (%d)", err);
-            return;
-        }
-
-        err = consumer->releaseBuffer(bi.mSlot, 0,
-                EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, bi.mFence);
-        if (err != OK) {
-            ALOGE("PersistentProxyListener: releaseBuffer failed (%d)", err);
-        }
-    }
-}
-
-void GraphicBufferSource::PersistentProxyListener::onFrameReplaced(
-        const BufferItem& item) {
-    sp<ConsumerListener> listener(mConsumerListener.promote());
-    if (listener != NULL) {
-        listener->onFrameReplaced(item);
-    }
-}
-
-void GraphicBufferSource::PersistentProxyListener::onBuffersReleased() {
-    sp<ConsumerListener> listener(mConsumerListener.promote());
-    if (listener != NULL) {
-        listener->onBuffersReleased();
-    }
-}
-
-void GraphicBufferSource::PersistentProxyListener::onSidebandStreamChanged() {
-    sp<ConsumerListener> listener(mConsumerListener.promote());
-    if (listener != NULL) {
-        listener->onSidebandStreamChanged();
-    }
-}
-
-GraphicBufferSource::GraphicBufferSource(
-        OMXNodeInstance* nodeInstance,
-        uint32_t bufferWidth,
-        uint32_t bufferHeight,
-        uint32_t bufferCount,
-        uint32_t consumerUsage,
-        const sp<IGraphicBufferConsumer> &consumer) :
+GraphicBufferSource::GraphicBufferSource() :
     mInitCheck(UNKNOWN_ERROR),
-    mNodeInstance(nodeInstance),
     mExecuting(false),
     mSuspended(false),
     mLastDataSpace(HAL_DATASPACE_UNKNOWN),
-    mIsPersistent(false),
-    mConsumer(consumer),
     mNumFramesAvailable(0),
     mNumBufferAcquired(0),
     mEndOfStream(false),
     mEndOfStreamSent(false),
-    mMaxTimestampGapUs(-1ll),
     mPrevOriginalTimeUs(-1ll),
-    mPrevModifiedTimeUs(-1ll),
     mSkipFramesBeforeNs(-1ll),
     mRepeatAfterUs(-1ll),
     mRepeatLastFrameGeneration(0),
     mRepeatLastFrameTimestamp(-1ll),
+    mRepeatLastFrameCount(0),
     mLatestBufferId(-1),
     mLatestBufferFrameNum(0),
     mLatestBufferFence(Fence::NO_FENCE),
@@ -146,48 +67,21 @@
     mPrevCaptureUs(-1ll),
     mPrevFrameUs(-1ll),
     mInputBufferTimeOffsetUs(0ll) {
+    ALOGV("GraphicBufferSource");
 
-    ALOGV("GraphicBufferSource w=%u h=%u c=%u",
-            bufferWidth, bufferHeight, bufferCount);
+    String8 name("GraphicBufferSource");
 
-    if (bufferWidth == 0 || bufferHeight == 0) {
-        ALOGE("Invalid dimensions %ux%u", bufferWidth, bufferHeight);
-        mInitCheck = BAD_VALUE;
-        return;
-    }
+    BufferQueue::createBufferQueue(&mProducer, &mConsumer);
+    mConsumer->setConsumerName(name);
 
-    if (mConsumer == NULL) {
-        String8 name("GraphicBufferSource");
-
-        BufferQueue::createBufferQueue(&mProducer, &mConsumer);
-        mConsumer->setConsumerName(name);
-
-        // use consumer usage bits queried from encoder, but always add HW_VIDEO_ENCODER
-        // for backward compatibility.
-        consumerUsage |= GRALLOC_USAGE_HW_VIDEO_ENCODER;
-        mConsumer->setConsumerUsageBits(consumerUsage);
-
-        mInitCheck = mConsumer->setMaxAcquiredBufferCount(bufferCount);
-        if (mInitCheck != NO_ERROR) {
-            ALOGE("Unable to set BQ max acquired buffer count to %u: %d",
-                    bufferCount, mInitCheck);
-            return;
-        }
-    } else {
-        mIsPersistent = true;
-    }
-    mConsumer->setDefaultBufferSize(bufferWidth, bufferHeight);
     // Note that we can't create an sp<...>(this) in a ctor that will not keep a
     // reference once the ctor ends, as that would cause the refcount of 'this'
     // dropping to 0 at the end of the ctor.  Since all we need is a wp<...>
     // that's what we create.
-    wp<BufferQueue::ConsumerListener> listener = static_cast<BufferQueue::ConsumerListener*>(this);
-    sp<IConsumerListener> proxy;
-    if (!mIsPersistent) {
-        proxy = new BufferQueue::ProxyConsumerListener(listener);
-    } else {
-        proxy = new PersistentProxyListener(mConsumer, listener);
-    }
+    wp<BufferQueue::ConsumerListener> listener =
+            static_cast<BufferQueue::ConsumerListener*>(this);
+    sp<IConsumerListener> proxy =
+            new BufferQueue::ProxyConsumerListener(listener);
 
     mInitCheck = mConsumer->consumerConnect(proxy, false);
     if (mInitCheck != NO_ERROR) {
@@ -202,15 +96,14 @@
 }
 
 GraphicBufferSource::~GraphicBufferSource() {
+    ALOGV("~GraphicBufferSource");
     if (mLatestBufferId >= 0) {
-        releaseBuffer(
-                mLatestBufferId, mLatestBufferFrameNum,
-                mBufferSlot[mLatestBufferId], mLatestBufferFence);
+        releaseBuffer(mLatestBufferId, mLatestBufferFrameNum, mLatestBufferFence);
     }
     if (mNumBufferAcquired != 0) {
         ALOGW("potential buffer leak (acquired %d)", mNumBufferAcquired);
     }
-    if (mConsumer != NULL && !mIsPersistent) {
+    if (mConsumer != NULL) {
         status_t err = mConsumer->consumerDisconnect();
         if (err != NO_ERROR) {
             ALOGW("consumerDisconnect failed: %d", err);
@@ -218,7 +111,7 @@
     }
 }
 
-void GraphicBufferSource::omxExecuting() {
+Status GraphicBufferSource::onOmxExecuting() {
     Mutex::Autolock autoLock(mMutex);
     ALOGV("--> executing; avail=%zu, codec vec size=%zd",
             mNumFramesAvailable, mCodecBuffers.size());
@@ -267,9 +160,11 @@
             msg->post(mRepeatAfterUs);
         }
     }
+
+    return Status::ok();
 }
 
-void GraphicBufferSource::omxIdle() {
+Status GraphicBufferSource::onOmxIdle() {
     ALOGV("omxIdle");
 
     Mutex::Autolock autoLock(mMutex);
@@ -279,9 +174,10 @@
         // not loaded->idle.
         mExecuting = false;
     }
+    return Status::ok();
 }
 
-void GraphicBufferSource::omxLoaded(){
+Status GraphicBufferSource::onOmxLoaded(){
     Mutex::Autolock autoLock(mMutex);
     if (!mExecuting) {
         // This can happen if something failed very early.
@@ -296,53 +192,77 @@
         mLooper.clear();
     }
 
-    ALOGV("--> loaded; avail=%zu eos=%d eosSent=%d",
-            mNumFramesAvailable, mEndOfStream, mEndOfStreamSent);
+    ALOGV("--> loaded; avail=%zu eos=%d eosSent=%d acquired=%d",
+            mNumFramesAvailable, mEndOfStream, mEndOfStreamSent, mNumBufferAcquired);
 
-    // Codec is no longer executing.  Discard all codec-related state.
+    // Codec is no longer executing.  Releasing all buffers to bq.
+    for (int i = (int)mCodecBuffers.size() - 1; i >= 0; --i) {
+        if (mCodecBuffers[i].mGraphicBuffer != NULL) {
+            int id = mCodecBuffers[i].mSlot;
+            if (id != mLatestBufferId) {
+                ALOGV("releasing buffer for codec: slot=%d, useCount=%d, latest=%d",
+                        id, mBufferUseCount[id], mLatestBufferId);
+                sp<Fence> fence = new Fence(-1);
+                releaseBuffer(id, mCodecBuffers[i].mFrameNumber, fence);
+                mBufferUseCount[id] = 0;
+            }
+        }
+    }
+    // Also release the latest buffer
+    if (mLatestBufferId >= 0) {
+        releaseBuffer(mLatestBufferId, mLatestBufferFrameNum, mLatestBufferFence);
+        mBufferUseCount[mLatestBufferId] = 0;
+        mLatestBufferId = -1;
+    }
+
     mCodecBuffers.clear();
-    // TODO: scan mCodecBuffers to verify that all mGraphicBuffer entries
-    //       are null; complain if not
-
+    mOMXNode.clear();
     mExecuting = false;
+
+    return Status::ok();
 }
 
-void GraphicBufferSource::addCodecBuffer(OMX_BUFFERHEADERTYPE* header) {
+Status GraphicBufferSource::onInputBufferAdded(int32_t bufferID) {
     Mutex::Autolock autoLock(mMutex);
 
     if (mExecuting) {
         // This should never happen -- buffers can only be allocated when
         // transitioning from "loaded" to "idle".
         ALOGE("addCodecBuffer: buffer added while executing");
-        return;
+        return Status::fromServiceSpecificError(INVALID_OPERATION);
     }
 
-    ALOGV("addCodecBuffer h=%p size=%" PRIu32 " p=%p",
-            header, header->nAllocLen, header->pBuffer);
+    ALOGV("addCodecBuffer: bufferID=%u", bufferID);
+
     CodecBuffer codecBuffer;
-    codecBuffer.mHeader = header;
+    codecBuffer.mBufferID = bufferID;
     mCodecBuffers.add(codecBuffer);
+    return Status::ok();
 }
 
-void GraphicBufferSource::codecBufferEmptied(OMX_BUFFERHEADERTYPE* header, int fenceFd) {
+Status GraphicBufferSource::onInputBufferEmptied(
+        int32_t bufferID, const OMXFenceParcelable &fenceParcel) {
+    int fenceFd = fenceParcel.get();
+
     Mutex::Autolock autoLock(mMutex);
     if (!mExecuting) {
-        return;
-    }
-
-    int cbi = findMatchingCodecBuffer_l(header);
-    if (cbi < 0) {
-        // This should never happen.
-        ALOGE("codecBufferEmptied: buffer not recognized (h=%p)", header);
         if (fenceFd >= 0) {
             ::close(fenceFd);
         }
-        return;
+        return Status::fromServiceSpecificError(INVALID_OPERATION);
     }
 
-    ALOGV("codecBufferEmptied h=%p size=%" PRIu32 " filled=%" PRIu32 " p=%p",
-            header, header->nAllocLen, header->nFilledLen,
-            header->pBuffer);
+    int cbi = findMatchingCodecBuffer_l(bufferID);
+    if (cbi < 0) {
+        // This should never happen.
+        ALOGE("codecBufferEmptied: buffer not recognized (bufferID=%u)", bufferID);
+        if (fenceFd >= 0) {
+            ::close(fenceFd);
+        }
+        return Status::fromServiceSpecificError(BAD_VALUE);
+    }
+
+    ALOGV("codecBufferEmptied: bufferID=%u, cbi=%d", bufferID, cbi);
     CodecBuffer& codecBuffer(mCodecBuffers.editItemAt(cbi));
 
     // header->nFilledLen may not be the original value, so we can't compare
@@ -361,33 +281,7 @@
         if (fenceFd >= 0) {
             ::close(fenceFd);
         }
-        return;
-    }
-
-    if (EXTRA_CHECK && header->nAllocLen >= sizeof(MetadataBufferType)) {
-        // Pull the graphic buffer handle back out of the buffer, and confirm
-        // that it matches expectations.
-        OMX_U8* data = header->pBuffer;
-        MetadataBufferType type = *(MetadataBufferType *)data;
-        if (type == kMetadataBufferTypeGrallocSource
-                && header->nAllocLen >= sizeof(VideoGrallocMetadata)) {
-            VideoGrallocMetadata &grallocMeta = *(VideoGrallocMetadata *)data;
-            if (grallocMeta.pHandle != codecBuffer.mGraphicBuffer->handle) {
-                // should never happen
-                ALOGE("codecBufferEmptied: buffer's handle is %p, expected %p",
-                        grallocMeta.pHandle, codecBuffer.mGraphicBuffer->handle);
-                CHECK(!"codecBufferEmptied: mismatched buffer");
-            }
-        } else if (type == kMetadataBufferTypeANWBuffer
-                && header->nAllocLen >= sizeof(VideoNativeMetadata)) {
-            VideoNativeMetadata &nativeMeta = *(VideoNativeMetadata *)data;
-            if (nativeMeta.pBuffer != codecBuffer.mGraphicBuffer->getNativeBuffer()) {
-                // should never happen
-                ALOGE("codecBufferEmptied: buffer is %p, expected %p",
-                        nativeMeta.pBuffer, codecBuffer.mGraphicBuffer->getNativeBuffer());
-                CHECK(!"codecBufferEmptied: mismatched buffer");
-            }
-        }
+        return Status::fromServiceSpecificError(BAD_VALUE);
     }
 
     // Find matching entry in our cached copy of the BufferQueue slots.
@@ -399,19 +293,19 @@
         mBufferSlot[id]->handle == codecBuffer.mGraphicBuffer->handle) {
         mBufferUseCount[id]--;
 
-        ALOGV("codecBufferEmptied: slot=%d, cbi=%d, useCount=%d, handle=%p",
-                id, cbi, mBufferUseCount[id], mBufferSlot[id]->handle);
-
         if (mBufferUseCount[id] < 0) {
             ALOGW("mBufferUseCount for bq slot %d < 0 (=%d)", id, mBufferUseCount[id]);
             mBufferUseCount[id] = 0;
         }
         if (id != mLatestBufferId && mBufferUseCount[id] == 0) {
-            releaseBuffer(id, codecBuffer.mFrameNumber, mBufferSlot[id], fence);
+            releaseBuffer(id, codecBuffer.mFrameNumber, fence);
         }
+        ALOGV("codecBufferEmptied: slot=%d, cbi=%d, useCount=%d, acquired=%d, handle=%p",
+                id, cbi, mBufferUseCount[id], mNumBufferAcquired, mBufferSlot[id]->handle);
     } else {
-        ALOGV("codecBufferEmptied: no match for emptied buffer in cbi %d",
-                cbi);
+        ALOGV("codecBufferEmptied: no match for emptied buffer, "
+                "slot=%d, cbi=%d, useCount=%d, acquired=%d",
+                id, cbi, mBufferUseCount[id], mNumBufferAcquired);
         // we will not reuse codec buffer, so there is no need to wait for fence
     }
 
@@ -439,75 +333,7 @@
         mRepeatBufferDeferred = false;
     }
 
-    return;
-}
-
-void GraphicBufferSource::codecBufferFilled(OMX_BUFFERHEADERTYPE* header) {
-    Mutex::Autolock autoLock(mMutex);
-
-    if (mMaxTimestampGapUs > 0ll
-            && !(header->nFlags & OMX_BUFFERFLAG_CODECCONFIG)) {
-        ssize_t index = mOriginalTimeUs.indexOfKey(header->nTimeStamp);
-        if (index >= 0) {
-            ALOGV("OUT timestamp: %lld -> %lld",
-                    static_cast<long long>(header->nTimeStamp),
-                    static_cast<long long>(mOriginalTimeUs[index]));
-            header->nTimeStamp = mOriginalTimeUs[index];
-            mOriginalTimeUs.removeItemsAt(index);
-        } else {
-            // giving up the effort as encoder doesn't appear to preserve pts
-            ALOGW("giving up limiting timestamp gap (pts = %lld)",
-                    header->nTimeStamp);
-            mMaxTimestampGapUs = -1ll;
-        }
-        if (mOriginalTimeUs.size() > BufferQueue::NUM_BUFFER_SLOTS) {
-            // something terribly wrong must have happened, giving up...
-            ALOGE("mOriginalTimeUs has too many entries (%zu)",
-                    mOriginalTimeUs.size());
-            mMaxTimestampGapUs = -1ll;
-        }
-    }
-}
-
-void GraphicBufferSource::suspend(bool suspend) {
-    Mutex::Autolock autoLock(mMutex);
-
-    if (suspend) {
-        mSuspended = true;
-
-        while (mNumFramesAvailable > 0) {
-            BufferItem item;
-            status_t err = mConsumer->acquireBuffer(&item, 0);
-
-            if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
-                // shouldn't happen.
-                ALOGW("suspend: frame was not available");
-                break;
-            } else if (err != OK) {
-                ALOGW("suspend: acquireBuffer returned err=%d", err);
-                break;
-            }
-
-            ++mNumBufferAcquired;
-            --mNumFramesAvailable;
-
-            releaseBuffer(item.mSlot, item.mFrameNumber,
-                    item.mGraphicBuffer, item.mFence);
-        }
-        return;
-    }
-
-    mSuspended = false;
-
-    if (mExecuting && mNumFramesAvailable == 0 && mRepeatBufferDeferred) {
-        if (repeatLatestBuffer_l()) {
-            ALOGV("suspend/deferred repeatLatestBuffer_l SUCCESS");
-
-            mRepeatBufferDeferred = false;
-        } else {
-            ALOGV("suspend/deferred repeatLatestBuffer_l FAILURE");
-        }
-    }
+    return Status::ok();
 }
 
 void GraphicBufferSource::onDataSpaceChanged_l(
@@ -516,67 +342,15 @@
     mLastDataSpace = dataSpace;
 
     if (ColorUtils::convertDataSpaceToV0(dataSpace)) {
-        ColorAspects aspects = mColorAspects; // initially requested aspects
+        omx_message msg;
+        msg.type = omx_message::EVENT;
+        msg.fenceFd = -1;
+        msg.u.event_data.event = OMX_EventDataSpaceChanged;
+        msg.u.event_data.data1 = mLastDataSpace;
+        msg.u.event_data.data2 = ColorUtils::packToU32(mColorAspects);
+        msg.u.event_data.data3 = pixelFormat;
 
-        // request color aspects to encode
-        OMX_INDEXTYPE index;
-        status_t err = mNodeInstance->getExtensionIndex(
-                "OMX.google.android.index.describeColorAspects", &index);
-        if (err == OK) {
-            // V0 dataspace
-            DescribeColorAspectsParams params;
-            InitOMXParams(&params);
-            params.nPortIndex = kPortIndexInput;
-            params.nDataSpace = mLastDataSpace;
-            params.nPixelFormat = pixelFormat;
-            params.bDataSpaceChanged = OMX_TRUE;
-            params.sAspects = mColorAspects;
-
-            err = mNodeInstance->getConfig(index, &params, sizeof(params));
-            if (err == OK) {
-                aspects = params.sAspects;
-                ALOGD("Codec resolved it to (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
-                        params.sAspects.mRange, asString(params.sAspects.mRange),
-                        params.sAspects.mPrimaries, asString(params.sAspects.mPrimaries),
-                        params.sAspects.mMatrixCoeffs, asString(params.sAspects.mMatrixCoeffs),
-                        params.sAspects.mTransfer, asString(params.sAspects.mTransfer),
-                        err, asString(err));
-            } else {
-                params.sAspects = aspects;
-                err = OK;
-            }
-            params.bDataSpaceChanged = OMX_FALSE;
-            for (int triesLeft = 2; --triesLeft >= 0; ) {
-                status_t err = mNodeInstance->setConfig(index, &params, sizeof(params));
-                if (err == OK) {
-                    err = mNodeInstance->getConfig(index, &params, sizeof(params));
-                }
-                if (err != OK || !ColorUtils::checkIfAspectsChangedAndUnspecifyThem(
-                        params.sAspects, aspects)) {
-                    // if we can't set or get color aspects, still communicate dataspace to client
-                    break;
-                }
-
-                ALOGW_IF(triesLeft == 0, "Codec repeatedly changed requested ColorAspects.");
-            }
-        }
-
-        ALOGV("Set color aspects to (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
-                aspects.mRange, asString(aspects.mRange),
-                aspects.mPrimaries, asString(aspects.mPrimaries),
-                aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
-                aspects.mTransfer, asString(aspects.mTransfer),
-                err, asString(err));
-
-        // signal client that the dataspace has changed; this will update the output format
-        // TODO: we should tie this to an output buffer somehow, and signal the change
-        // just before the output buffer is returned to the client, but there are many
-        // ways this could fail (e.g. flushing), and we are not yet supporting this scenario.
-
-        mNodeInstance->signalEvent(
-                OMX_EventDataSpaceChanged, dataSpace,
-                (aspects.mRange << 24) | (aspects.mPrimaries << 16)
-                        | (aspects.mMatrixCoeffs << 8) | aspects.mTransfer);
+        mOMXNode->dispatchMessage(msg);
     }
 }
 
@@ -598,34 +372,19 @@
     ALOGV("fillCodecBuffer_l: acquiring buffer, avail=%zu",
             mNumFramesAvailable);
     BufferItem item;
-    status_t err = mConsumer->acquireBuffer(&item, 0);
-    if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
-        // shouldn't happen
-        ALOGW("fillCodecBuffer_l: frame was not available");
-        return false;
-    } else if (err != OK) {
-        // now what? fake end-of-stream?
-        ALOGW("fillCodecBuffer_l: acquireBuffer returned err=%d", err);
+    status_t err = acquireBuffer(&item);
+    if (err != OK) {
+        ALOGE("fillCodecBuffer_l: acquireBuffer returned err=%d", err);
         return false;
     }
 
-    mNumBufferAcquired++;
     mNumFramesAvailable--;
 
-    // If this is the first time we're seeing this buffer, add it to our
-    // slot table.
-    if (item.mGraphicBuffer != NULL) {
-        ALOGV("fillCodecBuffer_l: setting mBufferSlot %d", item.mSlot);
-        mBufferSlot[item.mSlot] = item.mGraphicBuffer;
-        mBufferUseCount[item.mSlot] = 0;
-    }
-
     if (item.mDataSpace != mLastDataSpace) {
         onDataSpaceChanged_l(
                 item.mDataSpace, (android_pixel_format)mBufferSlot[item.mSlot]->getPixelFormat());
     }
 
-
     err = UNKNOWN_ERROR;
 
     // only submit sample if start time is unspecified, or sample
@@ -650,10 +409,18 @@
 
     if (err != OK) {
         ALOGV("submitBuffer_l failed, releasing bq slot %d", item.mSlot);
-        releaseBuffer(item.mSlot, item.mFrameNumber, item.mGraphicBuffer, item.mFence);
+        releaseBuffer(item.mSlot, item.mFrameNumber, item.mFence);
     } else {
-        ALOGV("buffer submitted (bq %d, cbi %d)", item.mSlot, cbi);
-        setLatestBuffer_l(item, dropped);
+        // Don't set the last buffer id if we're not repeating;
+        // otherwise we'd be holding on to the last buffer for nothing.
+        if (mRepeatAfterUs > 0ll) {
+            setLatestBuffer_l(item);
+        }
+        if (!dropped) {
+            ++mBufferUseCount[item.mSlot];
+        }
+        ALOGV("buffer submitted: slot=%d, cbi=%d, useCount=%d, acquired=%d",
+                item.mSlot, cbi, mBufferUseCount[item.mSlot], mNumBufferAcquired);
     }
 
     return true;
@@ -722,24 +489,16 @@
     return true;
 }
 
-void GraphicBufferSource::setLatestBuffer_l(
-        const BufferItem &item, bool dropped) {
-    if (mLatestBufferId >= 0) {
-        if (mBufferUseCount[mLatestBufferId] == 0) {
-            releaseBuffer(mLatestBufferId, mLatestBufferFrameNum,
-                    mBufferSlot[mLatestBufferId], mLatestBufferFence);
-            // mLatestBufferFence will be set to new fence just below
-        }
+void GraphicBufferSource::setLatestBuffer_l(const BufferItem &item) {
+    if (mLatestBufferId >= 0 && mBufferUseCount[mLatestBufferId] == 0) {
+        releaseBuffer(mLatestBufferId, mLatestBufferFrameNum, mLatestBufferFence);
+        // mLatestBufferFence will be set to new fence just below
     }
 
     mLatestBufferId = item.mSlot;
     mLatestBufferFrameNum = item.mFrameNumber;
     mRepeatLastFrameTimestamp = item.mTimestamp + mRepeatAfterUs * 1000;
 
-    if (!dropped) {
-        ++mBufferUseCount[item.mSlot];
-    }
-
     ALOGV("setLatestBuffer_l: slot=%d, useCount=%d",
             item.mSlot, mBufferUseCount[item.mSlot]);
 
@@ -754,34 +513,8 @@
     }
 }
 
-status_t GraphicBufferSource::signalEndOfInputStream() {
-    Mutex::Autolock autoLock(mMutex);
-    ALOGV("signalEndOfInputStream: exec=%d avail=%zu eos=%d",
-            mExecuting, mNumFramesAvailable, mEndOfStream);
-
-    if (mEndOfStream) {
-        ALOGE("EOS was already signaled");
-        return INVALID_OPERATION;
-    }
-
-    // Set the end-of-stream flag.  If no frames are pending from the
-    // BufferQueue, and a codec buffer is available, and we're executing,
-    // we initiate the EOS from here.  Otherwise, we'll let
-    // codecBufferEmptied() (or omxExecuting) do it.
-    //
-    // Note: if there are no pending frames and all codec buffers are
-    // available, we *must* submit the EOS from here or we'll just
-    // stall since no future events are expected.
-    mEndOfStream = true;
-
-    if (mExecuting && mNumFramesAvailable == 0) {
-        submitEndOfInputStream_l();
-    }
-
-    return OK;
-}
-
-int64_t GraphicBufferSource::getTimestamp(const BufferItem &item) {
+bool GraphicBufferSource::getTimestamp(
+        const BufferItem &item, int64_t *codecTimeUs) {
     int64_t timeUs = item.mTimestamp / 1000;
     timeUs += mInputBufferTimeOffsetUs;
 
@@ -801,7 +534,7 @@
             if (nFrames <= 0) {
                 // skip this frame as it's too close to previous capture
                 ALOGV("skipping frame, timeUs %lld", static_cast<long long>(timeUs));
-                return -1;
+                return false;
             }
             mPrevCaptureUs = mPrevCaptureUs + nFrames * mTimePerCaptureUs;
             mPrevFrameUs += mTimePerFrameUs * nFrames;
@@ -812,49 +545,29 @@
                 static_cast<long long>(mPrevCaptureUs),
                 static_cast<long long>(mPrevFrameUs));
 
-        return mPrevFrameUs;
+        *codecTimeUs = mPrevFrameUs;
+        return true;
     } else {
         int64_t originalTimeUs = timeUs;
         if (originalTimeUs <= mPrevOriginalTimeUs) {
                 // Drop the frame if it's going backward in time. Bad timestamp
                 // could disrupt encoder's rate control completely.
             ALOGW("Dropping frame that's going backward in time");
-            return -1;
-        }
-
-        if (mMaxTimestampGapUs > 0ll) {
-            //TODO: Fix the case when mMaxTimestampGapUs and mTimePerCaptureUs are both set.
-
-            /* Cap timestamp gap between adjacent frames to specified max
-             *
-             * In the scenario of cast mirroring, encoding could be suspended for
-             * prolonged periods. Limiting the pts gap to workaround the problem
-             * where encoder's rate control logic produces huge frames after a
-             * long period of suspension.
-             */
-            if (mPrevOriginalTimeUs >= 0ll) {
-                int64_t timestampGapUs = originalTimeUs - mPrevOriginalTimeUs;
-                timeUs = (timestampGapUs < mMaxTimestampGapUs ?
-                    timestampGapUs : mMaxTimestampGapUs) + mPrevModifiedTimeUs;
-            }
-            mOriginalTimeUs.add(timeUs, originalTimeUs);
-            ALOGV("IN  timestamp: %lld -> %lld",
-                static_cast<long long>(originalTimeUs),
-                static_cast<long long>(timeUs));
+            return false;
         }
 
         mPrevOriginalTimeUs = originalTimeUs;
-        mPrevModifiedTimeUs = timeUs;
     }
 
-    return timeUs;
+    *codecTimeUs = timeUs;
+    return true;
 }
 
 status_t GraphicBufferSource::submitBuffer_l(const BufferItem &item, int cbi) {
     ALOGV("submitBuffer_l: slot=%d, cbi=%d", item.mSlot, cbi);
 
-    int64_t timeUs = getTimestamp(item);
-    if (timeUs < 0ll) {
+    int64_t codecTimeUs;
+    if (!getTimestamp(item, &codecTimeUs)) {
         return UNKNOWN_ERROR;
     }
 
@@ -863,19 +576,21 @@
     codecBuffer.mSlot = item.mSlot;
     codecBuffer.mFrameNumber = item.mFrameNumber;
 
-    OMX_BUFFERHEADERTYPE* header = codecBuffer.mHeader;
-    sp<GraphicBuffer> buffer = codecBuffer.mGraphicBuffer;
-    status_t err = mNodeInstance->emptyGraphicBuffer(
-            header, buffer, OMX_BUFFERFLAG_ENDOFFRAME, timeUs,
-            item.mFence->isValid() ? item.mFence->dup() : -1);
+    IOMX::buffer_id bufferID = codecBuffer.mBufferID;
+    const sp<GraphicBuffer> &buffer = codecBuffer.mGraphicBuffer;
+    int fenceID = item.mFence->isValid() ? item.mFence->dup() : -1;
+
+    status_t err = mOMXNode->emptyBuffer(
+            bufferID, buffer, OMX_BUFFERFLAG_ENDOFFRAME, codecTimeUs, fenceID);
+
     if (err != OK) {
-        ALOGW("WARNING: emptyNativeWindowBuffer failed: 0x%x", err);
+        ALOGW("WARNING: emptyGraphicBuffer failed: 0x%x", err);
         codecBuffer.mGraphicBuffer = NULL;
         return err;
     }
 
-    ALOGV("emptyNativeWindowBuffer succeeded, h=%p p=%p buf=%p bufhandle=%p",
-            header, header->pBuffer, buffer->getNativeBuffer(), buffer->handle);
+    ALOGV("emptyGraphicBuffer succeeded, bufferID=%u buf=%p bufhandle=%p",
+            bufferID, buffer->getNativeBuffer(), buffer->handle);
     return OK;
 }
 
@@ -896,16 +611,17 @@
     // to stick a placeholder into codecBuffer.mGraphicBuffer to mark it as
     // in-use.
     CodecBuffer& codecBuffer(mCodecBuffers.editItemAt(cbi));
+    IOMX::buffer_id bufferID = codecBuffer.mBufferID;
 
-    OMX_BUFFERHEADERTYPE* header = codecBuffer.mHeader;
-    status_t err = mNodeInstance->emptyGraphicBuffer(
-            header, NULL /* buffer */, OMX_BUFFERFLAG_ENDOFFRAME | OMX_BUFFERFLAG_EOS,
+    status_t err = mOMXNode->emptyBuffer(
+            bufferID, (sp<GraphicBuffer>)NULL,
+            OMX_BUFFERFLAG_ENDOFFRAME | OMX_BUFFERFLAG_EOS,
             0 /* timestamp */, -1 /* fenceFd */);
     if (err != OK) {
         ALOGW("emptyDirectBuffer EOS failed: 0x%x", err);
     } else {
-        ALOGV("submitEndOfInputStream_l: buffer submitted, header=%p cbi=%d",
-                header, cbi);
+        ALOGV("submitEndOfInputStream_l: buffer submitted, bufferID=%u cbi=%d",
+                bufferID, cbi);
         mEndOfStreamSent = true;
     }
 }
@@ -921,44 +637,48 @@
     return -1;
 }
 
-int GraphicBufferSource::findMatchingCodecBuffer_l(
-        const OMX_BUFFERHEADERTYPE* header) {
+int GraphicBufferSource::findMatchingCodecBuffer_l(IOMX::buffer_id bufferID) {
     for (int i = (int)mCodecBuffers.size() - 1; i>= 0; --i) {
-        if (mCodecBuffers[i].mHeader == header) {
+        if (mCodecBuffers[i].mBufferID == bufferID) {
             return i;
         }
     }
     return -1;
 }
 
+status_t GraphicBufferSource::acquireBuffer(BufferItem *bi) {
+    status_t err = mConsumer->acquireBuffer(bi, 0);
+    if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
+        // shouldn't happen
+        ALOGW("acquireBuffer: frame was not available");
+        return err;
+    } else if (err != OK) {
+        ALOGW("acquireBuffer: failed with err=%d", err);
+        return err;
+    }
+    // If this is the first time we're seeing this buffer, add it to our
+    // slot table.
+    if (bi->mGraphicBuffer != NULL) {
+        ALOGV("acquireBuffer: setting mBufferSlot %d", bi->mSlot);
+        mBufferSlot[bi->mSlot] = bi->mGraphicBuffer;
+        mBufferUseCount[bi->mSlot] = 0;
+    }
+    mNumBufferAcquired++;
+    return OK;
+}
+
 /*
- * Releases an acquired buffer back to the consumer for either persistent
- * or non-persistent surfaces.
+ * Releases an acquired buffer back to the consumer.
  *
- * id: buffer slot to release (in persistent case the id might be changed)
+ * id: buffer slot to release
  * frameNum: frame number of the frame being released
- * buffer: GraphicBuffer pointer to release (note this must not be & as we
- *         will clear the original mBufferSlot in persistent case)
- *         Use NOLINT to supress warning on the copy of 'buffer'.
  * fence: fence of the frame being released
  */
 void GraphicBufferSource::releaseBuffer(
-        int &id, uint64_t frameNum,
-        const sp<GraphicBuffer> buffer, const sp<Fence> &fence) {  // NOLINT
+        int id, uint64_t frameNum, const sp<Fence> &fence) {
     ALOGV("releaseBuffer: slot=%d", id);
-    if (mIsPersistent) {
-        mConsumer->detachBuffer(id);
-        mBufferSlot[id] = NULL;
-
-        if (mConsumer->attachBuffer(&id, buffer) == OK) {
-            mConsumer->releaseBuffer(
-                    id, 0, EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, fence);
-        }
-    } else {
-        mConsumer->releaseBuffer(
-                id, frameNum, EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, fence);
-    }
-    id = -1; // invalidate id
+    mConsumer->releaseBuffer(
+            id, frameNum, EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, fence);
     mNumBufferAcquired--;
 }
 
@@ -969,7 +689,7 @@
     ALOGV("onFrameAvailable exec=%d avail=%zu",
             mExecuting, mNumFramesAvailable);
 
-    if (mEndOfStream || mSuspended) {
+    if (mOMXNode == NULL || mEndOfStream || mSuspended) {
         if (mEndOfStream) {
             // This should only be possible if a new buffer was queued after
             // EOS was signaled, i.e. the app is misbehaving.
@@ -980,20 +700,11 @@
         }
 
         BufferItem item;
-        status_t err = mConsumer->acquireBuffer(&item, 0);
+        status_t err = acquireBuffer(&item);
         if (err == OK) {
-            mNumBufferAcquired++;
-
-            // If this is the first time we're seeing this buffer, add it to our
-            // slot table.
-            if (item.mGraphicBuffer != NULL) {
-                ALOGV("onFrameAvailable: setting mBufferSlot %d", item.mSlot);
-                mBufferSlot[item.mSlot] = item.mGraphicBuffer;
-                mBufferUseCount[item.mSlot] = 0;
-            }
-
-            releaseBuffer(item.mSlot, item.mFrameNumber,
-                    item.mGraphicBuffer, item.mFence);
+            releaseBuffer(item.mSlot, item.mFrameNumber, item.mFence);
+        } else {
+            ALOGE("onFrameAvailable: acquireBuffer returned err=%d", err);
         }
         return;
     }
@@ -1022,6 +733,18 @@
 
     for (int i = 0; i < BufferQueue::NUM_BUFFER_SLOTS; i++) {
         if ((slotMask & 0x01) != 0) {
+            // The latest buffer (if set) counts as acquired even if its use
+            // count is 0, because we may have skipped that frame but kept it
+            // for repeating; any other buffer is acquired only if use count > 0.
+            if (mBufferSlot[i] != NULL &&
+                    (mBufferUseCount[i] > 0 || mLatestBufferId == i)) {
+                ALOGV("releasing acquired buffer: slot=%d, useCount=%d, latest=%d",
+                        i, mBufferUseCount[i], mLatestBufferId);
+                mNumBufferAcquired--;
+            }
+            if (mLatestBufferId == i) {
+                mLatestBufferId = -1;
+            }
             mBufferSlot[i] = NULL;
             mBufferUseCount[i] = 0;
         }
@@ -1034,95 +757,245 @@
     ALOG_ASSERT(false, "GraphicBufferSource can't consume sideband streams");
 }
 
-void GraphicBufferSource::setDefaultDataSpace(android_dataspace dataSpace) {
-    // no need for mutex as we are not yet running
-    ALOGD("setting dataspace: %#x", dataSpace);
-    mConsumer->setDefaultBufferDataSpace(dataSpace);
-    mLastDataSpace = dataSpace;
+Status GraphicBufferSource::configure(
+        const sp<IOMXNode>& omxNode, int32_t dataSpace) {
+    if (omxNode == NULL) {
+        return Status::fromServiceSpecificError(BAD_VALUE);
+    }
+
+    // Do setInputSurface() first; the node will try to enable metadata
+    // mode on input and do the necessary error checking. If this fails,
+    // we can't use this input surface on the node.
+    status_t err = omxNode->setInputSurface(this);
+    if (err != NO_ERROR) {
+        ALOGE("Unable to set input surface: %d", err);
+        return Status::fromServiceSpecificError(err);
+    }
+
+    // use consumer usage bits queried from encoder, but always add
+    // HW_VIDEO_ENCODER for backward compatibility.
+    uint32_t consumerUsage;
+    if (omxNode->getParameter(
+            (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits,
+            &consumerUsage, sizeof(consumerUsage)) != OK) {
+        consumerUsage = 0;
+    }
+
+    OMX_PARAM_PORTDEFINITIONTYPE def;
+    InitOMXParams(&def);
+    def.nPortIndex = kPortIndexInput;
+
+    err = omxNode->getParameter(
+            OMX_IndexParamPortDefinition, &def, sizeof(def));
+    if (err != NO_ERROR) {
+        ALOGE("Failed to get port definition: %d", err);
+        return Status::fromServiceSpecificError(UNKNOWN_ERROR);
+    }
+
+    // Call setMaxAcquiredBufferCount without holding mMutex.
+    // setMaxAcquiredBufferCount could call back into onBuffersReleased
+    // if the buffer count change causes existing buffers to be released,
+    // which would deadlock if the lock were held.
+    err = mConsumer->setMaxAcquiredBufferCount(def.nBufferCountActual);
+    if (err != NO_ERROR) {
+        ALOGE("Unable to set BQ max acquired buffer count to %u: %d",
+                def.nBufferCountActual, err);
+        return Status::fromServiceSpecificError(err);
+    }
+
+    {
+        Mutex::Autolock autoLock(mMutex);
+        mOMXNode = omxNode;
+
+        err = mConsumer->setDefaultBufferSize(
+                def.format.video.nFrameWidth,
+                def.format.video.nFrameHeight);
+        if (err != NO_ERROR) {
+            ALOGE("Unable to set BQ default buffer size to %ux%u: %d",
+                    def.format.video.nFrameWidth,
+                    def.format.video.nFrameHeight,
+                    err);
+            return Status::fromServiceSpecificError(err);
+        }
+
+        consumerUsage |= GRALLOC_USAGE_HW_VIDEO_ENCODER;
+        mConsumer->setConsumerUsageBits(consumerUsage);
+
+        // Sets the default buffer data space
+        ALOGD("setting dataspace: %#x, acquired=%d", dataSpace, mNumBufferAcquired);
+        mConsumer->setDefaultBufferDataSpace((android_dataspace)dataSpace);
+        mLastDataSpace = (android_dataspace)dataSpace;
+
+        mExecuting = false;
+        mSuspended = false;
+        mEndOfStream = false;
+        mEndOfStreamSent = false;
+        mPrevOriginalTimeUs = -1ll;
+        mSkipFramesBeforeNs = -1ll;
+        mRepeatAfterUs = -1ll;
+        mRepeatLastFrameGeneration = 0;
+        mRepeatLastFrameTimestamp = -1ll;
+        mRepeatLastFrameCount = 0;
+        mLatestBufferId = -1;
+        mLatestBufferFrameNum = 0;
+        mLatestBufferFence = Fence::NO_FENCE;
+        mRepeatBufferDeferred = false;
+        mTimePerCaptureUs = -1ll;
+        mTimePerFrameUs = -1ll;
+        mPrevCaptureUs = -1ll;
+        mPrevFrameUs = -1ll;
+        mInputBufferTimeOffsetUs = 0;
+    }
+
+    return Status::ok();
 }
 
-status_t GraphicBufferSource::setRepeatPreviousFrameDelayUs(
-        int64_t repeatAfterUs) {
+Status GraphicBufferSource::setSuspend(bool suspend) {
+    ALOGV("setSuspend=%d", suspend);
+
+    Mutex::Autolock autoLock(mMutex);
+
+    if (suspend) {
+        mSuspended = true;
+
+        while (mNumFramesAvailable > 0) {
+            BufferItem item;
+            status_t err = acquireBuffer(&item);
+
+            if (err != OK) {
+                ALOGE("setSuspend: acquireBuffer returned err=%d", err);
+                break;
+            }
+
+            --mNumFramesAvailable;
+
+            releaseBuffer(item.mSlot, item.mFrameNumber, item.mFence);
+        }
+        return Status::ok();
+    }
+
+    mSuspended = false;
+
+    if (mExecuting && mNumFramesAvailable == 0 && mRepeatBufferDeferred) {
+        if (repeatLatestBuffer_l()) {
+            ALOGV("suspend/deferred repeatLatestBuffer_l SUCCESS");
+
+            mRepeatBufferDeferred = false;
+        } else {
+            ALOGV("suspend/deferred repeatLatestBuffer_l FAILURE");
+        }
+    }
+    return Status::ok();
+}
+
+Status GraphicBufferSource::setRepeatPreviousFrameDelayUs(int64_t repeatAfterUs) {
+    ALOGV("setRepeatPreviousFrameDelayUs: delayUs=%lld", (long long)repeatAfterUs);
+
     Mutex::Autolock autoLock(mMutex);
 
     if (mExecuting || repeatAfterUs <= 0ll) {
-        return INVALID_OPERATION;
+        return Status::fromServiceSpecificError(INVALID_OPERATION);
     }
 
     mRepeatAfterUs = repeatAfterUs;
-
-    return OK;
+    return Status::ok();
 }
 
-status_t GraphicBufferSource::setMaxTimestampGapUs(int64_t maxGapUs) {
-    Mutex::Autolock autoLock(mMutex);
-
-    if (mExecuting || maxGapUs <= 0ll) {
-        return INVALID_OPERATION;
-    }
-
-    mMaxTimestampGapUs = maxGapUs;
-
-    return OK;
-}
-
-status_t GraphicBufferSource::setInputBufferTimeOffset(int64_t timeOffsetUs) {
+Status GraphicBufferSource::setTimeOffsetUs(int64_t timeOffsetUs) {
     Mutex::Autolock autoLock(mMutex);
 
     // timeOffsetUs must be negative for adjustment.
     if (timeOffsetUs >= 0ll) {
-        return INVALID_OPERATION;
+        return Status::fromServiceSpecificError(INVALID_OPERATION);
     }
 
     mInputBufferTimeOffsetUs = timeOffsetUs;
-    return OK;
+    return Status::ok();
 }
 
-status_t GraphicBufferSource::setMaxFps(float maxFps) {
+Status GraphicBufferSource::setMaxFps(float maxFps) {
+    ALOGV("setMaxFps: maxFps=%lld", (long long)maxFps);
+
     Mutex::Autolock autoLock(mMutex);
 
     if (mExecuting) {
-        return INVALID_OPERATION;
+        return Status::fromServiceSpecificError(INVALID_OPERATION);
     }
 
     mFrameDropper = new FrameDropper();
     status_t err = mFrameDropper->setMaxFrameRate(maxFps);
     if (err != OK) {
         mFrameDropper.clear();
-        return err;
+        return Status::fromServiceSpecificError(err);
     }
 
-    return OK;
+    return Status::ok();
 }
 
-void GraphicBufferSource::setSkipFramesBeforeUs(int64_t skipFramesBeforeUs) {
+Status GraphicBufferSource::setStartTimeUs(int64_t skipFramesBeforeUs) {
+    ALOGV("setStartTimeUs: skipFramesBeforeUs=%lld", (long long)skipFramesBeforeUs);
+
     Mutex::Autolock autoLock(mMutex);
 
     mSkipFramesBeforeNs =
             (skipFramesBeforeUs > 0) ? (skipFramesBeforeUs * 1000) : -1ll;
+
+    return Status::ok();
 }
 
-status_t GraphicBufferSource::setTimeLapseConfig(const TimeLapseConfig &config) {
+Status GraphicBufferSource::setTimeLapseConfig(int64_t timePerFrameUs, int64_t timePerCaptureUs) {
+    ALOGV("setTimeLapseConfig: timePerFrameUs=%lld, timePerCaptureUs=%lld",
+            (long long)timePerFrameUs, (long long)timePerCaptureUs);
+
     Mutex::Autolock autoLock(mMutex);
 
-    if (mExecuting || config.mTimePerFrameUs <= 0ll || config.mTimePerCaptureUs <= 0ll) {
-        return INVALID_OPERATION;
+    if (mExecuting || timePerFrameUs <= 0ll || timePerCaptureUs <= 0ll) {
+        return Status::fromServiceSpecificError(INVALID_OPERATION);
     }
 
-    mTimePerFrameUs = config.mTimePerFrameUs;
-    mTimePerCaptureUs = config.mTimePerCaptureUs;
+    mTimePerFrameUs = timePerFrameUs;
+    mTimePerCaptureUs = timePerCaptureUs;
 
-    return OK;
+    return Status::ok();
 }
 
-void GraphicBufferSource::setColorAspects(const ColorAspects &aspects) {
+Status GraphicBufferSource::setColorAspects(int32_t aspectsPacked) {
     Mutex::Autolock autoLock(mMutex);
-    mColorAspects = aspects;
+    mColorAspects = ColorUtils::unpackToColorAspects(aspectsPacked);
     ALOGD("requesting color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s))",
-            aspects.mRange, asString(aspects.mRange),
-            aspects.mPrimaries, asString(aspects.mPrimaries),
-            aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
-            aspects.mTransfer, asString(aspects.mTransfer));
+            mColorAspects.mRange, asString(mColorAspects.mRange),
+            mColorAspects.mPrimaries, asString(mColorAspects.mPrimaries),
+            mColorAspects.mMatrixCoeffs, asString(mColorAspects.mMatrixCoeffs),
+            mColorAspects.mTransfer, asString(mColorAspects.mTransfer));
+
+    return Status::ok();
+}
+
+Status GraphicBufferSource::signalEndOfInputStream() {
+    Mutex::Autolock autoLock(mMutex);
+    ALOGV("signalEndOfInputStream: exec=%d avail=%zu eos=%d",
+            mExecuting, mNumFramesAvailable, mEndOfStream);
+
+    if (mEndOfStream) {
+        ALOGE("EOS was already signaled");
+        return Status::fromStatusT(INVALID_OPERATION);
+    }
+
+    // Set the end-of-stream flag.  If no frames are pending from the
+    // BufferQueue, and a codec buffer is available, and we're executing,
+    // we initiate the EOS from here.  Otherwise, we'll let
+    // codecBufferEmptied() (or omxExecuting) do it.
+    //
+    // Note: if there are no pending frames and all codec buffers are
+    // available, we *must* submit the EOS from here or we'll just
+    // stall since no future events are expected.
+    mEndOfStream = true;
+
+    if (mExecuting && mNumFramesAvailable == 0) {
+        submitEndOfInputStream_l();
+    }
+
+    return Status::ok();
 }
 
 void GraphicBufferSource::onMessageReceived(const sp<AMessage> &msg) {
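Note on the error-reporting convention used in the .cpp changes above: the former status_t returns are replaced by binder::Status, with failures raised through Status::fromServiceSpecificError() / Status::fromStatusT() and success through Status::ok(). As a minimal sketch of how a caller might map such a result back onto a status_t (assuming the standard android::binder::Status API; this helper is illustrative and not part of the patch):

    #include <binder/Status.h>
    #include <utils/Errors.h>

    // Illustrative helper, not from this change: collapse a binder::Status
    // into the status_t convention used elsewhere in stagefright.
    static inline android::status_t statusFromBinderStatus(
            const android::binder::Status &status) {
        // Status::ok() carries no error; service-specific errors hold the
        // original status_t raised via fromServiceSpecificError().
        return status.isOk()
                ? android::OK
                : static_cast<android::status_t>(status.serviceSpecificErrorCode());
    }
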
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index aa4ceb3..80fe078 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -22,15 +22,20 @@
 #include <gui/BufferQueue.h>
 #include <utils/RefBase.h>
 
-#include <OMX_Core.h>
 #include <VideoAPI.h>
-#include "../include/OMXNodeInstance.h"
+#include <media/IOMX.h>
+#include <media/OMXFenceParcelable.h>
 #include <media/stagefright/foundation/ABase.h>
 #include <media/stagefright/foundation/AHandlerReflector.h>
 #include <media/stagefright/foundation/ALooper.h>
 
+#include <android/BnGraphicBufferSource.h>
+#include <android/BnOMXBufferSource.h>
+
 namespace android {
 
+using ::android::binder::Status;
+
 struct FrameDropper;
 
 /*
@@ -49,16 +54,11 @@
  * before the codec is in the "executing" state, so we need to queue
  * things up until we're ready to go.
  */
-class GraphicBufferSource : public BufferQueue::ConsumerListener {
+class GraphicBufferSource : public BnGraphicBufferSource,
+                            public BnOMXBufferSource,
+                            public BufferQueue::ConsumerListener {
 public:
-    GraphicBufferSource(
-            OMXNodeInstance* nodeInstance,
-            uint32_t bufferWidth,
-            uint32_t bufferHeight,
-            uint32_t bufferCount,
-            uint32_t consumerUsage,
-            const sp<IGraphicBufferConsumer> &consumer = NULL
-    );
+    GraphicBufferSource();
 
     virtual ~GraphicBufferSource();
 
@@ -74,44 +74,42 @@
         return mProducer;
     }
 
-    // Sets the default buffer data space
-    void setDefaultDataSpace(android_dataspace dataSpace);
-
     // This is called when OMX transitions to OMX_StateExecuting, which means
     // we can start handing it buffers.  If we already have buffers of data
     // sitting in the BufferQueue, this will send them to the codec.
-    void omxExecuting();
+    Status onOmxExecuting() override;
 
     // This is called when OMX transitions to OMX_StateIdle, indicating that
     // the codec is meant to return all buffers back to the client for them
     // to be freed. Do NOT submit any more buffers to the component.
-    void omxIdle();
+    Status onOmxIdle() override;
 
     // This is called when OMX transitions to OMX_StateLoaded, indicating that
     // we are shutting down.
-    void omxLoaded();
+    Status onOmxLoaded() override;
 
     // A "codec buffer", i.e. a buffer that can be used to pass data into
     // the encoder, has been allocated.  (This call does not call back into
     // OMXNodeInstance.)
-    void addCodecBuffer(OMX_BUFFERHEADERTYPE* header);
+    Status onInputBufferAdded(int32_t bufferID) override;
 
     // Called from OnEmptyBufferDone.  If we have a BQ buffer available,
     // fill it with a new frame of data; otherwise, just mark it as available.
-    void codecBufferEmptied(OMX_BUFFERHEADERTYPE* header, int fenceFd);
+    Status onInputBufferEmptied(
+            int32_t bufferID, const OMXFenceParcelable& fenceParcel) override;
 
-    // Called when omx_message::FILL_BUFFER_DONE is received. (Currently the
-    // buffer source will fix timestamp in the header if needed.)
-    void codecBufferFilled(OMX_BUFFERHEADERTYPE* header);
+    // Configure the buffer source to be used with an OMX node, using the
+    // given default data space.
+    Status configure(const sp<IOMXNode>& omxNode, int32_t dataSpace) override;
 
     // This is called after the last input frame has been submitted.  We
     // need to submit an empty buffer with the EOS flag set.  If we don't
     // have a codec buffer ready, we just set the mEndOfStream flag.
-    status_t signalEndOfInputStream();
+    Status signalEndOfInputStream() override;
 
     // If suspend is true, all incoming buffers (including those currently
     // in the BufferQueue) will be discarded until the suspension is lifted.
-    void suspend(bool suspend);
+    Status setSuspend(bool suspend) override;
 
     // Specifies the interval after which we requeue the buffer previously
     // queued to the encoder. This is useful in the case of surface flinger
@@ -120,40 +118,26 @@
     // the decoder on the remote end would be unable to decode the latest frame.
     // This API must be called before transitioning the encoder to "executing"
     // state and once this behaviour is specified it cannot be reset.
-    status_t setRepeatPreviousFrameDelayUs(int64_t repeatAfterUs);
-
-    // When set, the timestamp fed to the encoder will be modified such that
-    // the gap between two adjacent frames is capped at maxGapUs. Timestamp
-    // will be restored to the original when the encoded frame is returned to
-    // the client.
-    // This is to solve a problem in certain real-time streaming case, where
-    // encoder's rate control logic produces huge frames after a long period
-    // of suspension on input.
-    status_t setMaxTimestampGapUs(int64_t maxGapUs);
+    Status setRepeatPreviousFrameDelayUs(int64_t repeatAfterUs) override;
 
     // Sets the input buffer timestamp offset.
     // When set, the sample's timestamp will be adjusted with the timeOffsetUs.
-    status_t setInputBufferTimeOffset(int64_t timeOffsetUs);
+    Status setTimeOffsetUs(int64_t timeOffsetUs) override;
 
     // When set, the max frame rate fed to the encoder will be capped at maxFps.
-    status_t setMaxFps(float maxFps);
-
-    struct TimeLapseConfig {
-        int64_t mTimePerFrameUs;   // the time (us) between two frames for playback
-        int64_t mTimePerCaptureUs; // the time (us) between two frames for capture
-    };
+    Status setMaxFps(float maxFps) override;
 
     // Sets the time lapse (or slow motion) parameters.
     // When set, the sample's timestamp will be modified to playback framerate,
     // and capture timestamp will be modified to capture rate.
-    status_t setTimeLapseConfig(const TimeLapseConfig &config);
+    Status setTimeLapseConfig(int64_t timePerFrameUs, int64_t timePerCaptureUs) override;
 
     // Sets the start time us (in system time), samples before which should
     // be dropped and not submitted to encoder
-    void setSkipFramesBeforeUs(int64_t startTimeUs);
+    Status setStartTimeUs(int64_t startTimeUs) override;
 
     // Sets the desired color aspects, e.g. to be used when producer does not specify a dataspace.
-    void setColorAspects(const ColorAspects &aspects);
+    Status setColorAspects(int32_t aspectsPacked) override;
 
 protected:
     // BufferQueue::ConsumerListener interface, called when a new frame of
@@ -162,48 +146,24 @@
     // into the codec buffer, and call Empty[This]Buffer.  If we're not yet
     // executing or there's no codec buffer available, we just increment
     // mNumFramesAvailable and return.
-    virtual void onFrameAvailable(const BufferItem& item);
+    void onFrameAvailable(const BufferItem& item) override;
 
     // BufferQueue::ConsumerListener interface, called when the client has
     // released one or more GraphicBuffers.  We clear out the appropriate
     // set of mBufferSlot entries.
-    virtual void onBuffersReleased();
+    void onBuffersReleased() override;
 
     // BufferQueue::ConsumerListener interface, called when the client has
     // changed the sideband stream. GraphicBufferSource doesn't handle sideband
     // streams so this is a no-op (and should never be called).
-    virtual void onSidebandStreamChanged();
+    void onSidebandStreamChanged() override;
 
 private:
-    // PersistentProxyListener is similar to BufferQueue::ProxyConsumerListener
-    // except that it returns (acquire/detach/re-attache/release) buffers
-    // in onFrameAvailable() if the actual consumer object is no longer valid.
-    //
-    // This class is used in persistent input surface case to prevent buffer
-    // loss when onFrameAvailable() is received while we don't have a valid
-    // consumer around.
-    class PersistentProxyListener : public BnConsumerListener {
-        public:
-            PersistentProxyListener(
-                    const wp<IGraphicBufferConsumer> &consumer,
-                    const wp<ConsumerListener>& consumerListener);
-            virtual ~PersistentProxyListener();
-            virtual void onFrameAvailable(const BufferItem& item) override;
-            virtual void onFrameReplaced(const BufferItem& item) override;
-            virtual void onBuffersReleased() override;
-            virtual void onSidebandStreamChanged() override;
-         private:
-            // mConsumerListener is a weak reference to the IConsumerListener.
-            wp<ConsumerListener> mConsumerListener;
-            // mConsumer is a weak reference to the IGraphicBufferConsumer, use
-            // a weak ref to avoid circular ref between mConsumer and this class
-            wp<IGraphicBufferConsumer> mConsumer;
-    };
 
     // Keep track of codec input buffers.  They may either be available
     // (mGraphicBuffer == NULL) or in use by the codec.
     struct CodecBuffer {
-        OMX_BUFFERHEADERTYPE* mHeader;
+        IOMX::buffer_id mBufferID;
 
         // buffer producer's frame-number for buffer
         uint64_t mFrameNumber;
@@ -224,7 +184,7 @@
     }
 
     // Finds the mCodecBuffers entry that matches.  Returns -1 if not found.
-    int findMatchingCodecBuffer_l(const OMX_BUFFERHEADERTYPE* header);
+    int findMatchingCodecBuffer_l(IOMX::buffer_id bufferID);
 
     // Fills a codec buffer with a frame from the BufferQueue.  This must
     // only be called when we know that a frame of data is ready (i.e. we're
@@ -243,14 +203,15 @@
     // doing anything if we don't have a codec buffer available.
     void submitEndOfInputStream_l();
 
-    // Release buffer to the consumer
-    void releaseBuffer(
-            int &id, uint64_t frameNum,
-            const sp<GraphicBuffer> buffer, const sp<Fence> &fence);
+    // Acquire buffer from the consumer
+    status_t acquireBuffer(BufferItem *bi);
 
-    void setLatestBuffer_l(const BufferItem &item, bool dropped);
+    // Release buffer to the consumer
+    void releaseBuffer(int id, uint64_t frameNum, const sp<Fence> &fence);
+
+    void setLatestBuffer_l(const BufferItem &item);
     bool repeatLatestBuffer_l();
-    int64_t getTimestamp(const BufferItem &item);
+    bool getTimestamp(const BufferItem &item, int64_t *codecTimeUs);
 
     // called when the data space of the input buffer changes
     void onDataSpaceChanged_l(android_dataspace dataSpace, android_pixel_format pixelFormat);
@@ -261,8 +222,8 @@
     // Used to report constructor failure.
     status_t mInitCheck;
 
-    // Pointer back to the object that contains us.  We send buffers here.
-    OMXNodeInstance* mNodeInstance;
+    // Pointer back to the IOMXNode that created us.  We send buffers here.
+    sp<IOMXNode> mOMXNode;
 
     // Set by omxExecuting() / omxIdling().
     bool mExecuting;
@@ -275,7 +236,6 @@
     // Our BufferQueue interfaces. mProducer is passed to the producer through
     // getIGraphicBufferProducer, and mConsumer is used internally to retrieve
     // the buffers queued by the producer.
-    bool mIsPersistent;
     sp<IGraphicBufferProducer> mProducer;
     sp<IGraphicBufferConsumer> mConsumer;
 
@@ -310,10 +270,7 @@
         kRepeatLastFrameCount = 10,
     };
 
-    KeyedVector<int64_t, int64_t> mOriginalTimeUs;
-    int64_t mMaxTimestampGapUs;
     int64_t mPrevOriginalTimeUs;
-    int64_t mPrevModifiedTimeUs;
     int64_t mSkipFramesBeforeNs;
 
     sp<FrameDropper> mFrameDropper;
@@ -342,7 +299,6 @@
 
     int64_t mInputBufferTimeOffsetUs;
 
-    MetadataBufferType mMetadataBufferType;
     ColorAspects mColorAspects;
 
     void onMessageReceived(const sp<AMessage> &msg);
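The header above turns GraphicBufferSource into a BnGraphicBufferSource/BnOMXBufferSource binder object that is configured against an IOMXNode instead of being constructed around an OMXNodeInstance. A minimal usage sketch, assuming an already-allocated omxNode and a dataSpace value (names and values here are illustrative, not taken from the patch):

    // Illustrative only (assumed to live inside namespace android): wire a
    // new GraphicBufferSource to an IOMXNode and hand out its producer side.
    static status_t setUpInputSurfaceSketch(
            const sp<IOMXNode> &omxNode, int32_t dataSpace,
            sp<IGraphicBufferProducer> *producer) {
        sp<GraphicBufferSource> source = new GraphicBufferSource();
        status_t err = source->initCheck();
        if (err != OK) {
            return err;  // constructor failure (mInitCheck)
        }
        // configure() calls IOMXNode::setInputSurface(this) and sizes the
        // BufferQueue from the node's input port definition.
        binder::Status status = source->configure(omxNode, dataSpace);
        if (!status.isOk()) {
            return status.serviceSpecificErrorCode();
        }
        // Optional behaviour (e.g. frame repeating) must be requested before
        // the codec transitions to the executing state.
        source->setRepeatPreviousFrameDelayUs(33000ll);  // example value
        *producer = source->getIGraphicBufferProducer();
        return OK;
    }
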
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index f7058d7..7907c62 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -26,149 +26,19 @@
 
 #include "../include/OMXNodeInstance.h"
 
-#include <binder/IMemory.h>
 #include <media/stagefright/foundation/ADebug.h>
-#include <utils/threads.h>
+#include "GraphicBufferSource.h"
 
 #include "OMXMaster.h"
 #include "OMXUtils.h"
 
-#include <OMX_AsString.h>
-#include <OMX_Component.h>
-#include <OMX_VideoExt.h>
-
 namespace android {
 
 // node ids are created by concatenating the pid with a 16-bit counter
 static size_t kMaxNodeInstances = (1 << 16);
 
-////////////////////////////////////////////////////////////////////////////////
-
-// This provides the underlying Thread used by CallbackDispatcher.
-// Note that deriving CallbackDispatcher from Thread does not work.
-
-struct OMX::CallbackDispatcherThread : public Thread {
-    explicit CallbackDispatcherThread(CallbackDispatcher *dispatcher)
-        : mDispatcher(dispatcher) {
-    }
-
-private:
-    CallbackDispatcher *mDispatcher;
-
-    bool threadLoop();
-
-    CallbackDispatcherThread(const CallbackDispatcherThread &);
-    CallbackDispatcherThread &operator=(const CallbackDispatcherThread &);
-};
-
-////////////////////////////////////////////////////////////////////////////////
-
-struct OMX::CallbackDispatcher : public RefBase {
-    explicit CallbackDispatcher(OMXNodeInstance *owner);
-
-    // Posts |msg| to the listener's queue. If |realTime| is true, the listener thread is notified
-    // that a new message is available on the queue. Otherwise, the message stays on the queue, but
-    // the listener is not notified of it. It will process this message when a subsequent message
-    // is posted with |realTime| set to true.
-    void post(const omx_message &msg, bool realTime = true);
-
-    bool loop();
-
-protected:
-    virtual ~CallbackDispatcher();
-
-private:
-    Mutex mLock;
-
-    OMXNodeInstance *mOwner;
-    bool mDone;
-    Condition mQueueChanged;
-    std::list<omx_message> mQueue;
-
-    sp<CallbackDispatcherThread> mThread;
-
-    void dispatch(std::list<omx_message> &messages);
-
-    CallbackDispatcher(const CallbackDispatcher &);
-    CallbackDispatcher &operator=(const CallbackDispatcher &);
-};
-
-OMX::CallbackDispatcher::CallbackDispatcher(OMXNodeInstance *owner)
-    : mOwner(owner),
-      mDone(false) {
-    mThread = new CallbackDispatcherThread(this);
-    mThread->run("OMXCallbackDisp", ANDROID_PRIORITY_FOREGROUND);
-}
-
-OMX::CallbackDispatcher::~CallbackDispatcher() {
-    {
-        Mutex::Autolock autoLock(mLock);
-
-        mDone = true;
-        mQueueChanged.signal();
-    }
-
-    // A join on self can happen if the last ref to CallbackDispatcher
-    // is released within the CallbackDispatcherThread loop
-    status_t status = mThread->join();
-    if (status != WOULD_BLOCK) {
-        // Other than join to self, the only other error return codes are
-        // whatever readyToRun() returns, and we don't override that
-        CHECK_EQ(status, (status_t)NO_ERROR);
-    }
-}
-
-void OMX::CallbackDispatcher::post(const omx_message &msg, bool realTime) {
-    Mutex::Autolock autoLock(mLock);
-
-    mQueue.push_back(msg);
-    if (realTime) {
-        mQueueChanged.signal();
-    }
-}
-
-void OMX::CallbackDispatcher::dispatch(std::list<omx_message> &messages) {
-    if (mOwner == NULL) {
-        ALOGV("Would have dispatched a message to a node that's already gone.");
-        return;
-    }
-    mOwner->onMessages(messages);
-}
-
-bool OMX::CallbackDispatcher::loop() {
-    for (;;) {
-        std::list<omx_message> messages;
-
-        {
-            Mutex::Autolock autoLock(mLock);
-            while (!mDone && mQueue.empty()) {
-                mQueueChanged.wait(mLock);
-            }
-
-            if (mDone) {
-                break;
-            }
-
-            messages.swap(mQueue);
-        }
-
-        dispatch(messages);
-    }
-
-    return false;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-bool OMX::CallbackDispatcherThread::threadLoop() {
-    return mDispatcher->loop();
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
 OMX::OMX()
-    : mMaster(new OMXMaster),
-      mNodeCounter(0) {
+    : mMaster(new OMXMaster) {
 }
 
 OMX::~OMX() {
@@ -177,7 +47,7 @@
 }
 
 void OMX::binderDied(const wp<IBinder> &the_late_who) {
-    OMXNodeInstance *instance;
+    sp<OMXNodeInstance> instance;
 
     {
         Mutex::Autolock autoLock(mLock);
@@ -192,24 +62,9 @@
 
         instance = mLiveNodes.editValueAt(index);
         mLiveNodes.removeItemsAt(index);
-
-        index = mDispatchers.indexOfKey(instance->nodeID());
-        CHECK(index >= 0);
-        mDispatchers.removeItemsAt(index);
-
-        invalidateNodeID_l(instance->nodeID());
     }
 
-    instance->onObserverDied(mMaster);
-}
-
-bool OMX::isSecure(node_id node) {
-    OMXNodeInstance *instance = findInstance(node);
-    return (instance == NULL ? false : instance->isSecure());
-}
-
-bool OMX::livesLocally(node_id /* node */, pid_t pid) {
-    return pid == getpid();
+    instance->onObserverDied();
 }
 
 status_t OMX::listNodes(List<ComponentInfo> *list) {
@@ -242,48 +97,38 @@
 
 status_t OMX::allocateNode(
         const char *name, const sp<IOMXObserver> &observer,
-        sp<IBinder> *nodeBinder, node_id *node) {
+        sp<IOMXNode> *omxNode) {
     Mutex::Autolock autoLock(mLock);
 
-    *node = 0;
-    if (nodeBinder != NULL) {
-        *nodeBinder = NULL;
-    }
+    omxNode->clear();
 
-    if (mNodeIDToInstance.size() == kMaxNodeInstances) {
-        // all possible node IDs are in use
+    if (mLiveNodes.size() == kMaxNodeInstances) {
         return NO_MEMORY;
     }
 
-    OMXNodeInstance *instance = new OMXNodeInstance(this, observer, name);
+    sp<OMXNodeInstance> instance = new OMXNodeInstance(this, observer, name);
 
     OMX_COMPONENTTYPE *handle;
     OMX_ERRORTYPE err = mMaster->makeComponentInstance(
             name, &OMXNodeInstance::kCallbacks,
-            instance, &handle);
+            instance.get(), &handle);
 
     if (err != OMX_ErrorNone) {
         ALOGE("FAILED to allocate omx component '%s' err=%s(%#x)", name, asString(err), err);
 
-        instance->onGetHandleFailed();
-
         return StatusFromOMXError(err);
     }
-
-    *node = makeNodeID_l(instance);
-    mDispatchers.add(*node, new CallbackDispatcher(instance));
-
-    instance->setHandle(*node, handle);
+    instance->setHandle(handle);
 
     mLiveNodes.add(IInterface::asBinder(observer), instance);
     IInterface::asBinder(observer)->linkToDeath(this);
 
+    *omxNode = instance;
+
     return OK;
 }
 
-status_t OMX::freeNode(node_id node) {
-    OMXNodeInstance *instance = findInstance(node);
-
+status_t OMX::freeNode(const sp<OMXNodeInstance> &instance) {
     if (instance == NULL) {
         return OK;
     }
@@ -301,466 +146,35 @@
 
     IInterface::asBinder(instance->observer())->unlinkToDeath(this);
 
-    status_t err = instance->freeNode(mMaster);
-
-    {
-        Mutex::Autolock autoLock(mLock);
-        ssize_t index = mDispatchers.indexOfKey(node);
-        CHECK(index >= 0);
-        mDispatchers.removeItemsAt(index);
+    OMX_ERRORTYPE err = OMX_ErrorNone;
+    if (instance->handle() != NULL) {
+        err = mMaster->destroyComponentInstance(
+                static_cast<OMX_COMPONENTTYPE *>(instance->handle()));
     }
 
-    return err;
-}
-
-status_t OMX::sendCommand(
-        node_id node, OMX_COMMANDTYPE cmd, OMX_S32 param) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->sendCommand(cmd, param);
-}
-
-status_t OMX::getParameter(
-        node_id node, OMX_INDEXTYPE index,
-        void *params, size_t size) {
-    ALOGV("getParameter(%u %#x %p %zd)", node, index, params, size);
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->getParameter(
-            index, params, size);
-}
-
-status_t OMX::setParameter(
-        node_id node, OMX_INDEXTYPE index,
-        const void *params, size_t size) {
-    ALOGV("setParameter(%u %#x %p %zd)", node, index, params, size);
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->setParameter(
-            index, params, size);
-}
-
-status_t OMX::getConfig(
-        node_id node, OMX_INDEXTYPE index,
-        void *params, size_t size) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->getConfig(
-            index, params, size);
-}
-
-status_t OMX::setConfig(
-        node_id node, OMX_INDEXTYPE index,
-        const void *params, size_t size) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->setConfig(
-            index, params, size);
-}
-
-status_t OMX::getState(
-        node_id node, OMX_STATETYPE* state) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->getState(
-            state);
-}
-
-status_t OMX::enableNativeBuffers(
-        node_id node, OMX_U32 port_index, OMX_BOOL graphic, OMX_BOOL enable) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->enableNativeBuffers(port_index, graphic, enable);
-}
-
-status_t OMX::getGraphicBufferUsage(
-        node_id node, OMX_U32 port_index, OMX_U32* usage) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->getGraphicBufferUsage(port_index, usage);
-}
-
-status_t OMX::storeMetaDataInBuffers(
-        node_id node, OMX_U32 port_index, OMX_BOOL enable, MetadataBufferType *type) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->storeMetaDataInBuffers(port_index, enable, type);
-}
-
-status_t OMX::prepareForAdaptivePlayback(
-        node_id node, OMX_U32 portIndex, OMX_BOOL enable,
-        OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->prepareForAdaptivePlayback(
-            portIndex, enable, maxFrameWidth, maxFrameHeight);
-}
-
-status_t OMX::configureVideoTunnelMode(
-        node_id node, OMX_U32 portIndex, OMX_BOOL tunneled,
-        OMX_U32 audioHwSync, native_handle_t **sidebandHandle) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->configureVideoTunnelMode(
-            portIndex, tunneled, audioHwSync, sidebandHandle);
-}
-
-status_t OMX::useBuffer(
-        node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-        buffer_id *buffer, OMX_U32 allottedSize) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->useBuffer(
-            port_index, params, buffer, allottedSize);
-}
-
-status_t OMX::useGraphicBuffer(
-        node_id node, OMX_U32 port_index,
-        const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->useGraphicBuffer(
-            port_index, graphicBuffer, buffer);
-}
-
-status_t OMX::updateGraphicBufferInMeta(
-        node_id node, OMX_U32 port_index,
-        const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->updateGraphicBufferInMeta(
-            port_index, graphicBuffer, buffer);
-}
-
-status_t OMX::updateNativeHandleInMeta(
-        node_id node, OMX_U32 port_index,
-        const sp<NativeHandle> &nativeHandle, buffer_id buffer) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->updateNativeHandleInMeta(
-            port_index, nativeHandle, buffer);
+    return StatusFromOMXError(err);
 }
 
 status_t OMX::createInputSurface(
-        node_id node, OMX_U32 port_index, android_dataspace dataSpace,
-        sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->createInputSurface(
-            port_index, dataSpace, bufferProducer, type);
-}
-
-status_t OMX::createPersistentInputSurface(
         sp<IGraphicBufferProducer> *bufferProducer,
-        sp<IGraphicBufferConsumer> *bufferConsumer) {
-    return OMXNodeInstance::createPersistentInputSurface(
-            bufferProducer, bufferConsumer);
-}
-
-status_t OMX::setInputSurface(
-        node_id node, OMX_U32 port_index,
-        const sp<IGraphicBufferConsumer> &bufferConsumer, MetadataBufferType *type) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
+        sp<IGraphicBufferSource> *bufferSource) {
+    if (bufferProducer == NULL || bufferSource == NULL) {
+        ALOGE("b/25884056");
+        return BAD_VALUE;
     }
 
-    return instance->setInputSurface(port_index, bufferConsumer, type);
-}
-
-
-status_t OMX::signalEndOfInputStream(node_id node) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
+    sp<GraphicBufferSource> graphicBufferSource = new GraphicBufferSource();
+    status_t err = graphicBufferSource->initCheck();
+    if (err != OK) {
+        ALOGE("Failed to create persistent input surface: %s (%d)",
+                strerror(-err), err);
+        return err;
     }
 
-    return instance->signalEndOfInputStream();
-}
+    *bufferProducer = graphicBufferSource->getIGraphicBufferProducer();
+    *bufferSource = graphicBufferSource;
 
-status_t OMX::allocateSecureBuffer(
-        node_id node, OMX_U32 port_index, size_t size,
-        buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->allocateSecureBuffer(
-            port_index, size, buffer, buffer_data, native_handle);
-}
-
-status_t OMX::allocateBufferWithBackup(
-        node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-        buffer_id *buffer, OMX_U32 allottedSize) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->allocateBufferWithBackup(
-            port_index, params, buffer, allottedSize);
-}
-
-status_t OMX::freeBuffer(node_id node, OMX_U32 port_index, buffer_id buffer) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->freeBuffer(
-            port_index, buffer);
-}
-
-status_t OMX::fillBuffer(node_id node, buffer_id buffer, int fenceFd) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->fillBuffer(buffer, fenceFd);
-}
-
-status_t OMX::emptyBuffer(
-        node_id node,
-        buffer_id buffer,
-        OMX_U32 range_offset, OMX_U32 range_length,
-        OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->emptyBuffer(
-            buffer, range_offset, range_length, flags, timestamp, fenceFd);
-}
-
-status_t OMX::getExtensionIndex(
-        node_id node,
-        const char *parameter_name,
-        OMX_INDEXTYPE *index) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->getExtensionIndex(
-            parameter_name, index);
-}
-
-status_t OMX::setInternalOption(
-        node_id node,
-        OMX_U32 port_index,
-        InternalOptionType type,
-        const void *data,
-        size_t size) {
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
-    return instance->setInternalOption(port_index, type, data, size);
-}
-
-OMX_ERRORTYPE OMX::OnEvent(
-        node_id node,
-        OMX_IN OMX_EVENTTYPE eEvent,
-        OMX_IN OMX_U32 nData1,
-        OMX_IN OMX_U32 nData2,
-        OMX_IN OMX_PTR pEventData) {
-    ALOGV("OnEvent(%d, %" PRIu32", %" PRIu32 ")", eEvent, nData1, nData2);
-    OMXNodeInstance *instance = findInstance(node);
-
-    if (instance == NULL) {
-        return OMX_ErrorComponentNotFound;
-    }
-
-    // Forward to OMXNodeInstance.
-    instance->onEvent(eEvent, nData1, nData2);
-
-    sp<OMX::CallbackDispatcher> dispatcher = findDispatcher(node);
-
-    // output rendered events are not processed as regular events until they hit the observer
-    if (eEvent == OMX_EventOutputRendered) {
-        if (pEventData == NULL) {
-            return OMX_ErrorBadParameter;
-        }
-
-        // process data from array
-        OMX_VIDEO_RENDEREVENTTYPE *renderData = (OMX_VIDEO_RENDEREVENTTYPE *)pEventData;
-        for (size_t i = 0; i < nData1; ++i) {
-            omx_message msg;
-            msg.type = omx_message::FRAME_RENDERED;
-            msg.node = node;
-            msg.fenceFd = -1;
-            msg.u.render_data.timestamp = renderData[i].nMediaTimeUs;
-            msg.u.render_data.nanoTime = renderData[i].nSystemTimeNs;
-
-            dispatcher->post(msg, false /* realTime */);
-        }
-        return OMX_ErrorNone;
-    }
-
-    omx_message msg;
-    msg.type = omx_message::EVENT;
-    msg.node = node;
-    msg.fenceFd = -1;
-    msg.u.event_data.event = eEvent;
-    msg.u.event_data.data1 = nData1;
-    msg.u.event_data.data2 = nData2;
-
-    dispatcher->post(msg, true /* realTime */);
-
-    return OMX_ErrorNone;
-}
-
-OMX_ERRORTYPE OMX::OnEmptyBufferDone(
-        node_id node, buffer_id buffer, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer, int fenceFd) {
-    ALOGV("OnEmptyBufferDone buffer=%p", pBuffer);
-
-    omx_message msg;
-    msg.type = omx_message::EMPTY_BUFFER_DONE;
-    msg.node = node;
-    msg.fenceFd = fenceFd;
-    msg.u.buffer_data.buffer = buffer;
-
-    findDispatcher(node)->post(msg);
-
-    return OMX_ErrorNone;
-}
-
-OMX_ERRORTYPE OMX::OnFillBufferDone(
-        node_id node, buffer_id buffer, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer, int fenceFd) {
-    ALOGV("OnFillBufferDone buffer=%p", pBuffer);
-
-    omx_message msg;
-    msg.type = omx_message::FILL_BUFFER_DONE;
-    msg.node = node;
-    msg.fenceFd = fenceFd;
-    msg.u.extended_buffer_data.buffer = buffer;
-    msg.u.extended_buffer_data.range_offset = pBuffer->nOffset;
-    msg.u.extended_buffer_data.range_length = pBuffer->nFilledLen;
-    msg.u.extended_buffer_data.flags = pBuffer->nFlags;
-    msg.u.extended_buffer_data.timestamp = pBuffer->nTimeStamp;
-
-    findDispatcher(node)->post(msg);
-
-    return OMX_ErrorNone;
-}
-
-OMX::node_id OMX::makeNodeID_l(OMXNodeInstance *instance) {
-    // mLock is already held.
-
-    node_id prefix = node_id(getpid() << 16);
-    node_id node = 0;
-    do  {
-        if (++mNodeCounter >= kMaxNodeInstances) {
-            mNodeCounter = 0; // OK to use because we're combining with the pid
-        }
-        node = node_id(prefix | mNodeCounter);
-    } while (mNodeIDToInstance.indexOfKey(node) >= 0);
-    mNodeIDToInstance.add(node, instance);
-
-    return node;
-}
-
-OMXNodeInstance *OMX::findInstance(node_id node) {
-    Mutex::Autolock autoLock(mLock);
-
-    ssize_t index = mNodeIDToInstance.indexOfKey(node);
-
-    return index < 0 ? NULL : mNodeIDToInstance.valueAt(index);
-}
-
-sp<OMX::CallbackDispatcher> OMX::findDispatcher(node_id node) {
-    Mutex::Autolock autoLock(mLock);
-
-    ssize_t index = mDispatchers.indexOfKey(node);
-
-    return index < 0 ? NULL : mDispatchers.valueAt(index);
-}
-
-void OMX::invalidateNodeID(node_id node) {
-    Mutex::Autolock autoLock(mLock);
-    invalidateNodeID_l(node);
-}
-
-void OMX::invalidateNodeID_l(node_id node) {
-    // mLock is held.
-    mNodeIDToInstance.removeItem(node);
+    return OK;
 }
 
 }  // namespace android
diff --git a/media/libstagefright/omx/OMXMaster.cpp b/media/libstagefright/omx/OMXMaster.cpp
index 6132a2c..ac9b0c3 100644
--- a/media/libstagefright/omx/OMXMaster.cpp
+++ b/media/libstagefright/omx/OMXMaster.cpp
@@ -32,26 +32,23 @@
 OMXMaster::OMXMaster()
     : mVendorLibHandle(NULL) {
 
-    mProcessName[0] = 0;
-    if (mProcessName[0] == 0) {
-        pid_t pid = getpid();
-        char filename[20];
-        snprintf(filename, sizeof(filename), "/proc/%d/comm", pid);
-        int fd = open(filename, O_RDONLY);
-        if (fd < 0) {
-            ALOGW("couldn't determine process name");
-            sprintf(mProcessName, "<unknown>");
-        } else {
-            ssize_t len = read(fd, mProcessName, sizeof(mProcessName));
-            if (len < 2) {
-                ALOGW("couldn't determine process name");
-                sprintf(mProcessName, "<unknown>");
-            } else {
-                // the name is newline terminated, so erase the newline
-                mProcessName[len - 1] = 0;
-            }
-            close(fd);
-        }
+    pid_t pid = getpid();
+    char filename[20];
+    snprintf(filename, sizeof(filename), "/proc/%d/comm", pid);
+    int fd = open(filename, O_RDONLY);
+    if (fd < 0) {
+      ALOGW("couldn't determine process name");
+      strlcpy(mProcessName, "<unknown>", sizeof(mProcessName));
+    } else {
+      ssize_t len = read(fd, mProcessName, sizeof(mProcessName));
+      if (len < 2) {
+        ALOGW("couldn't determine process name");
+        strlcpy(mProcessName, "<unknown>", sizeof(mProcessName));
+      } else {
+        // the name is newline terminated, so erase the newline
+        mProcessName[len - 1] = 0;
+      }
+      close(fd);
     }
 
     addVendorPlugin();
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 355a2dd..ea86a37 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -23,10 +23,11 @@
 #include "../include/OMXNodeInstance.h"
 #include "OMXMaster.h"
 #include "OMXUtils.h"
-#include "GraphicBufferSource.h"
+#include <android/IOMXBufferSource.h>
 
 #include <OMX_Component.h>
 #include <OMX_IndexExt.h>
+#include <OMX_VideoExt.h>
 #include <OMX_AsString.h>
 
 #include <binder/IMemory.h>
@@ -35,26 +36,37 @@
 #include <HardwareAPI.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ColorUtils.h>
 #include <media/stagefright/MediaErrors.h>
 #include <utils/misc.h>
 #include <utils/NativeHandle.h>
+#include <media/OMXBuffer.h>
 
 static const OMX_U32 kPortIndexInput = 0;
 static const OMX_U32 kPortIndexOutput = 1;
 
-#define CLOGW(fmt, ...) ALOGW("[%x:%s] " fmt, mNodeID, mName, ##__VA_ARGS__)
+// Quirk still supported, even though deprecated
+enum Quirks {
+    kRequiresAllocateBufferOnInputPorts   = 1,
+    kRequiresAllocateBufferOnOutputPorts  = 2,
+
+    kQuirksMask = kRequiresAllocateBufferOnInputPorts
+                | kRequiresAllocateBufferOnOutputPorts,
+};
+
+#define CLOGW(fmt, ...) ALOGW("[%p:%s] " fmt, mHandle, mName, ##__VA_ARGS__)
 
 #define CLOG_ERROR_IF(cond, fn, err, fmt, ...) \
-    ALOGE_IF(cond, #fn "(%x:%s, " fmt ") ERROR: %s(%#x)", \
-    mNodeID, mName, ##__VA_ARGS__, asString(err), err)
+    ALOGE_IF(cond, #fn "(%p:%s, " fmt ") ERROR: %s(%#x)", \
+    mHandle, mName, ##__VA_ARGS__, asString(err), err)
 #define CLOG_ERROR(fn, err, fmt, ...) CLOG_ERROR_IF(true, fn, err, fmt, ##__VA_ARGS__)
 #define CLOG_IF_ERROR(fn, err, fmt, ...) \
     CLOG_ERROR_IF((err) != OMX_ErrorNone, fn, err, fmt, ##__VA_ARGS__)
 
 #define CLOGI_(level, fn, fmt, ...) \
-    ALOGI_IF(DEBUG >= (level), #fn "(%x:%s, " fmt ")", mNodeID, mName, ##__VA_ARGS__)
+    ALOGI_IF(DEBUG >= (level), #fn "(%p:%s, " fmt ")", mHandle, mName, ##__VA_ARGS__)
 #define CLOGD_(level, fn, fmt, ...) \
-    ALOGD_IF(DEBUG >= (level), #fn "(%x:%s, " fmt ")", mNodeID, mName, ##__VA_ARGS__)
+    ALOGD_IF(DEBUG >= (level), #fn "(%p:%s, " fmt ")", mHandle, mName, ##__VA_ARGS__)
 
 #define CLOG_LIFE(fn, fmt, ...)     CLOGI_(ADebug::kDebugLifeCycle,     fn, fmt, ##__VA_ARGS__)
 #define CLOG_STATE(fn, fmt, ...)    CLOGI_(ADebug::kDebugState,         fn, fmt, ##__VA_ARGS__)
@@ -62,7 +74,7 @@
 #define CLOG_INTERNAL(fn, fmt, ...) CLOGD_(ADebug::kDebugInternalState, fn, fmt, ##__VA_ARGS__)
 
 #define CLOG_DEBUG_IF(cond, fn, fmt, ...) \
-    ALOGD_IF(cond, #fn "(%x, " fmt ")", mNodeID, ##__VA_ARGS__)
+    ALOGD_IF(cond, #fn "(%p, " fmt ")", mHandle, ##__VA_ARGS__)
 
 #define CLOG_BUFFER(fn, fmt, ...) \
     CLOG_DEBUG_IF(DEBUG >= ADebug::kDebugAll, fn, fmt, ##__VA_ARGS__)
@@ -94,18 +106,16 @@
 
 struct BufferMeta {
     explicit BufferMeta(
-            const sp<IMemory> &mem, OMX_U32 portIndex, bool copyToOmx,
-            bool copyFromOmx, OMX_U8 *backup)
+            const sp<IMemory> &mem, OMX_U32 portIndex, bool copy, OMX_U8 *backup)
         : mMem(mem),
-          mCopyFromOmx(copyFromOmx),
-          mCopyToOmx(copyToOmx),
+          mCopyFromOmx(portIndex == kPortIndexOutput && copy),
+          mCopyToOmx(portIndex == kPortIndexInput && copy),
           mPortIndex(portIndex),
           mBackup(backup) {
     }
 
-    explicit BufferMeta(size_t size, OMX_U32 portIndex)
-        : mSize(size),
-          mCopyFromOmx(false),
+    explicit BufferMeta(OMX_U32 portIndex)
+        : mCopyFromOmx(false),
           mCopyToOmx(false),
           mPortIndex(portIndex),
           mBackup(NULL) {
@@ -125,7 +135,7 @@
         }
 
         // check component returns proper range
-        sp<ABuffer> codec = getBuffer(header, false /* backup */, true /* limit */);
+        sp<ABuffer> codec = getBuffer(header, true /* limit */);
 
         memcpy((OMX_U8 *)mMem->pointer() + header->nOffset, codec->data(), codec->size());
     }
@@ -140,14 +150,9 @@
                 header->nFilledLen);
     }
 
-    // return either the codec or the backup buffer
-    sp<ABuffer> getBuffer(const OMX_BUFFERHEADERTYPE *header, bool backup, bool limit) {
-        sp<ABuffer> buf;
-        if (backup && mMem != NULL) {
-            buf = new ABuffer(mMem->pointer(), mMem->size());
-        } else {
-            buf = new ABuffer(header->pBuffer, header->nAllocLen);
-        }
+    // return the codec buffer
+    sp<ABuffer> getBuffer(const OMX_BUFFERHEADERTYPE *header, bool limit) {
+        sp<ABuffer> buf = new ABuffer(header->pBuffer, header->nAllocLen);
         if (limit) {
             if (header->nOffset + header->nFilledLen > header->nOffset
                     && header->nOffset + header->nFilledLen <= header->nAllocLen) {
@@ -179,7 +184,6 @@
     sp<GraphicBuffer> mGraphicBuffer;
     sp<NativeHandle> mNativeHandle;
     sp<IMemory> mMem;
-    size_t mSize;
     bool mCopyFromOmx;
     bool mCopyToOmx;
     OMX_U32 mPortIndex;
@@ -203,16 +207,144 @@
     }
 }
 
-OMXNodeInstance::OMXNodeInstance(
-        OMX *owner, const sp<IOMXObserver> &observer, const char *name)
+////////////////////////////////////////////////////////////////////////////////
+
+// This provides the underlying Thread used by CallbackDispatcher.
+// Note that deriving CallbackDispatcher from Thread does not work.
+
+struct OMXNodeInstance::CallbackDispatcherThread : public Thread {
+    explicit CallbackDispatcherThread(CallbackDispatcher *dispatcher)
+        : mDispatcher(dispatcher) {
+    }
+
+private:
+    CallbackDispatcher *mDispatcher;
+
+    bool threadLoop();
+
+    CallbackDispatcherThread(const CallbackDispatcherThread &);
+    CallbackDispatcherThread &operator=(const CallbackDispatcherThread &);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+struct OMXNodeInstance::CallbackDispatcher : public RefBase {
+    explicit CallbackDispatcher(const sp<OMXNodeInstance> &owner);
+
+    // Posts |msg| to the listener's queue. If |realTime| is true, the listener thread is notified
+    // that a new message is available on the queue. Otherwise, the message stays on the queue, but
+    // the listener is not notified of it. It will process this message when a subsequent message
+    // is posted with |realTime| set to true.
+    void post(const omx_message &msg, bool realTime = true);
+
+    bool loop();
+
+protected:
+    virtual ~CallbackDispatcher();
+
+private:
+    Mutex mLock;
+
+    sp<OMXNodeInstance> const mOwner;
+    bool mDone;
+    Condition mQueueChanged;
+    std::list<omx_message> mQueue;
+
+    sp<CallbackDispatcherThread> mThread;
+
+    void dispatch(std::list<omx_message> &messages);
+
+    CallbackDispatcher(const CallbackDispatcher &);
+    CallbackDispatcher &operator=(const CallbackDispatcher &);
+};
+
+OMXNodeInstance::CallbackDispatcher::CallbackDispatcher(const sp<OMXNodeInstance> &owner)
     : mOwner(owner),
-      mNodeID(0),
+      mDone(false) {
+    mThread = new CallbackDispatcherThread(this);
+    mThread->run("OMXCallbackDisp", ANDROID_PRIORITY_FOREGROUND);
+}
+
+OMXNodeInstance::CallbackDispatcher::~CallbackDispatcher() {
+    {
+        Mutex::Autolock autoLock(mLock);
+
+        mDone = true;
+        mQueueChanged.signal();
+    }
+
+    // A join on self can happen if the last ref to CallbackDispatcher
+    // is released within the CallbackDispatcherThread loop
+    status_t status = mThread->join();
+    if (status != WOULD_BLOCK) {
+        // Other than join to self, the only other error return codes are
+        // whatever readyToRun() returns, and we don't override that
+        CHECK_EQ(status, (status_t)NO_ERROR);
+    }
+}
+
+void OMXNodeInstance::CallbackDispatcher::post(const omx_message &msg, bool realTime) {
+    Mutex::Autolock autoLock(mLock);
+
+    mQueue.push_back(msg);
+    if (realTime) {
+        mQueueChanged.signal();
+    }
+}
+
+void OMXNodeInstance::CallbackDispatcher::dispatch(std::list<omx_message> &messages) {
+    if (mOwner == NULL) {
+        ALOGV("Would have dispatched a message to a node that's already gone.");
+        return;
+    }
+    mOwner->onMessages(messages);
+}
+
+bool OMXNodeInstance::CallbackDispatcher::loop() {
+    for (;;) {
+        std::list<omx_message> messages;
+
+        {
+            Mutex::Autolock autoLock(mLock);
+            while (!mDone && mQueue.empty()) {
+                mQueueChanged.wait(mLock);
+            }
+
+            if (mDone) {
+                break;
+            }
+
+            messages.swap(mQueue);
+        }
+
+        dispatch(messages);
+    }
+
+    return false;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool OMXNodeInstance::CallbackDispatcherThread::threadLoop() {
+    return mDispatcher->loop();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+OMXNodeInstance::OMXNodeInstance(
+        OmxNodeOwner *owner, const sp<IOMXObserver> &observer, const char *name)
+    : mOwner(owner),
       mHandle(NULL),
       mObserver(observer),
       mDying(false),
       mSailed(false),
       mQueriedProhibitedExtensions(false),
-      mBufferIDCount(0)
+      mQuirks(0),
+      mBufferIDCount(0),
+      mRestorePtsFailed(false),
+      mMaxTimestampGapUs(-1ll),
+      mPrevOriginalTimeUs(-1ll),
+      mPrevModifiedTimeUs(-1ll)
 {
     mName = ADebug::GetDebugName(name);
     DEBUG = ADebug::GetDebugLevelFromProperty(name, "debug.stagefright.omx-debug");
@@ -224,9 +356,12 @@
     mDebugLevelBumpPendingBuffers[1] = 0;
     mMetadataType[0] = kMetadataBufferTypeInvalid;
     mMetadataType[1] = kMetadataBufferTypeInvalid;
+    mPortMode[0] = IOMX::kPortModePresetByteBuffer;
+    mPortMode[1] = IOMX::kPortModePresetByteBuffer;
     mSecureBufferType[0] = kSecureBufferTypeUnknown;
     mSecureBufferType[1] = kSecureBufferTypeUnknown;
     mIsSecure = AString(name).endsWith(".secure");
+    mLegacyAdaptiveExperiment = ADebug::isExperimentEnabled("legacy-adaptive");
 }
 
 OMXNodeInstance::~OMXNodeInstance() {
@@ -234,44 +369,42 @@
     CHECK(mHandle == NULL);
 }
 
-void OMXNodeInstance::setHandle(OMX::node_id node_id, OMX_HANDLETYPE handle) {
-    mNodeID = node_id;
+void OMXNodeInstance::setHandle(OMX_HANDLETYPE handle) {
     CLOG_LIFE(allocateNode, "handle=%p", handle);
     CHECK(mHandle == NULL);
     mHandle = handle;
+    if (handle != NULL) {
+        mDispatcher = new CallbackDispatcher(this);
+    }
 }
 
-sp<GraphicBufferSource> OMXNodeInstance::getGraphicBufferSource() {
-    Mutex::Autolock autoLock(mGraphicBufferSourceLock);
-    return mGraphicBufferSource;
+sp<IOMXBufferSource> OMXNodeInstance::getBufferSource() {
+    Mutex::Autolock autoLock(mOMXBufferSourceLock);
+    return mOMXBufferSource;
 }
 
-void OMXNodeInstance::setGraphicBufferSource(
-        const sp<GraphicBufferSource>& bufferSource) {
-    Mutex::Autolock autoLock(mGraphicBufferSourceLock);
-    CLOG_INTERNAL(setGraphicBufferSource, "%p", bufferSource.get());
-    mGraphicBufferSource = bufferSource;
+void OMXNodeInstance::setBufferSource(const sp<IOMXBufferSource>& bufferSource) {
+    Mutex::Autolock autoLock(mOMXBufferSourceLock);
+    CLOG_INTERNAL(setBufferSource, "%p", bufferSource.get());
+    mOMXBufferSource = bufferSource;
 }
 
-OMX *OMXNodeInstance::owner() {
-    return mOwner;
+OMX_HANDLETYPE OMXNodeInstance::handle() {
+    return mHandle;
 }
 
 sp<IOMXObserver> OMXNodeInstance::observer() {
     return mObserver;
 }
 
-OMX::node_id OMXNodeInstance::nodeID() {
-    return mNodeID;
-}
+status_t OMXNodeInstance::freeNode() {
 
-status_t OMXNodeInstance::freeNode(OMXMaster *master) {
     CLOG_LIFE(freeNode, "handle=%p", mHandle);
     static int32_t kMaxNumIterations = 10;
 
     // exit if we have already freed the node
     if (mHandle == NULL) {
-        return OK;
+        return mOwner->freeNode(this);
     }
 
     // Transition the node from its current state all the way down
@@ -350,43 +483,35 @@
             LOG_ALWAYS_FATAL("unknown state %s(%#x).", asString(state), state);
             break;
     }
+    status_t err = mOwner->freeNode(this);
 
-    ALOGV("[%x:%s] calling destroyComponentInstance", mNodeID, mName);
-    OMX_ERRORTYPE err = master->destroyComponentInstance(
-            static_cast<OMX_COMPONENTTYPE *>(mHandle));
+    mDispatcher.clear();
+    mOMXBufferSource.clear();
 
     mHandle = NULL;
     CLOG_IF_ERROR(freeNode, err, "");
     free(mName);
     mName = NULL;
 
-    mOwner->invalidateNodeID(mNodeID);
-    mNodeID = 0;
-
     ALOGV("OMXNodeInstance going away.");
-    delete this;
 
-    return StatusFromOMXError(err);
+    return err;
 }
 
 status_t OMXNodeInstance::sendCommand(
         OMX_COMMANDTYPE cmd, OMX_S32 param) {
-    if (cmd == OMX_CommandStateSet) {
-        // There are no configurations past first StateSet command.
-        mSailed = true;
-    }
-    const sp<GraphicBufferSource> bufferSource(getGraphicBufferSource());
+    const sp<IOMXBufferSource> bufferSource(getBufferSource());
     if (bufferSource != NULL && cmd == OMX_CommandStateSet) {
         if (param == OMX_StateIdle) {
             // Initiating transition from Executing -> Idle
             // ACodec is waiting for all buffers to be returned, do NOT
             // submit any more buffers to the codec.
-            bufferSource->omxIdle();
+            bufferSource->onOmxIdle();
         } else if (param == OMX_StateLoaded) {
             // Initiating transition from Idle/Executing -> Loaded
             // Buffers are about to be freed.
-            bufferSource->omxLoaded();
-            setGraphicBufferSource(NULL);
+            bufferSource->onOmxLoaded();
+            setBufferSource(NULL);
         }
 
         // fall through
@@ -394,6 +519,11 @@
 
     Mutex::Autolock autoLock(mLock);
 
+    if (cmd == OMX_CommandStateSet) {
+        // There are no configurations past first StateSet command.
+        mSailed = true;
+    }
+
     // bump internal-state debug level for 2 input and output frames past a command
     {
         Mutex::Autolock _l(mDebugLock);
@@ -422,18 +552,17 @@
         "OMX.google.android.index.getAndroidNativeBufferUsage",
     };
 
-    if ((index > OMX_IndexComponentStartUnused && index <= OMX_IndexParamStandardComponentRole)
-            || (index > OMX_IndexPortStartUnused && index <= OMX_IndexParamCompBufferSupplier)
-            || (index > OMX_IndexAudioStartUnused && index <= OMX_IndexConfigAudioChannelVolume)
-            || (index > OMX_IndexVideoStartUnused && index <= OMX_IndexConfigVideoNalSize)
-            || (index > OMX_IndexCommonStartUnused
-                    && index <= OMX_IndexConfigCommonTransitionEffect)
+    if ((index > OMX_IndexComponentStartUnused && index < OMX_IndexComponentEndUnused)
+            || (index > OMX_IndexPortStartUnused && index < OMX_IndexPortEndUnused)
+            || (index > OMX_IndexAudioStartUnused && index < OMX_IndexAudioEndUnused)
+            || (index > OMX_IndexVideoStartUnused && index < OMX_IndexVideoEndUnused)
+            || (index > OMX_IndexCommonStartUnused && index < OMX_IndexCommonEndUnused)
             || (index > (OMX_INDEXTYPE)OMX_IndexExtAudioStartUnused
-                    && index <= (OMX_INDEXTYPE)OMX_IndexParamAudioProfileQuerySupported)
+                    && index < (OMX_INDEXTYPE)OMX_IndexExtAudioEndUnused)
             || (index > (OMX_INDEXTYPE)OMX_IndexExtVideoStartUnused
-                    && index <= (OMX_INDEXTYPE)OMX_IndexConfigAndroidVideoTemporalLayering)
+                    && index < (OMX_INDEXTYPE)OMX_IndexExtVideoEndUnused)
             || (index > (OMX_INDEXTYPE)OMX_IndexExtOtherStartUnused
-                    && index <= (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits)) {
+                    && index < (OMX_INDEXTYPE)OMX_IndexExtOtherEndUnused)) {
         return false;
     }
 
@@ -474,6 +603,10 @@
     OMX_INDEXEXTTYPE extIndex = (OMX_INDEXEXTTYPE)index;
     CLOG_CONFIG(setParameter, "%s(%#x), %zu@%p)", asString(extIndex), index, size, params);
 
+    if (extIndex == OMX_IndexParamMaxFrameDurationForBitrateControl) {
+        return setMaxPtsGapUs(params, size);
+    }
+
     if (isProhibitedIndex_l(index)) {
         android_errorWriteLog(0x534e4554, "29422020");
         return BAD_INDEX;
@@ -520,15 +653,114 @@
     return StatusFromOMXError(err);
 }
 
-status_t OMXNodeInstance::getState(OMX_STATETYPE* state) {
+status_t OMXNodeInstance::setPortMode(OMX_U32 portIndex, IOMX::PortMode mode) {
     Mutex::Autolock autoLock(mLock);
 
-    OMX_ERRORTYPE err = OMX_GetState(mHandle, state);
-    CLOG_IF_ERROR(getState, err, "");
-    return StatusFromOMXError(err);
+    if (portIndex >= NELEM(mPortMode)) {
+        ALOGE("b/31385713, portIndex(%u)", portIndex);
+        android_errorWriteLog(0x534e4554, "31385713");
+        return BAD_VALUE;
+    }
+
+    CLOG_CONFIG(setPortMode, "%s(%d), port %d", asString(mode), mode, portIndex);
+
+    switch (mode) {
+    case IOMX::kPortModeDynamicANWBuffer:
+    {
+        if (portIndex == kPortIndexOutput) {
+            if (mLegacyAdaptiveExperiment) {
+                CLOG_INTERNAL(setPortMode, "Legacy adaptive experiment: "
+                        "not setting port mode to %s(%d) on output",
+                        asString(mode), mode);
+                return StatusFromOMXError(OMX_ErrorUnsupportedIndex);
+            }
+
+            status_t err = enableNativeBuffers_l(
+                    portIndex, OMX_TRUE /*graphic*/, OMX_TRUE);
+            if (err != OK) {
+                return err;
+            }
+        }
+        (void)enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_FALSE);
+        return storeMetaDataInBuffers_l(portIndex, OMX_TRUE, NULL);
+    }
+
+    case IOMX::kPortModeDynamicNativeHandle:
+    {
+        if (portIndex != kPortIndexInput) {
+            CLOG_ERROR(setPortMode, BAD_VALUE,
+                    "%s(%d) mode is only supported on input port", asString(mode), mode);
+            return BAD_VALUE;
+        }
+        (void)enableNativeBuffers_l(portIndex, OMX_TRUE /*graphic*/, OMX_FALSE);
+        (void)enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_FALSE);
+
+        MetadataBufferType metaType = kMetadataBufferTypeNativeHandleSource;
+        return storeMetaDataInBuffers_l(portIndex, OMX_TRUE, &metaType);
+    }
+
+    case IOMX::kPortModePresetSecureBuffer:
+    {
+        // Allow on both input and output.
+        (void)storeMetaDataInBuffers_l(portIndex, OMX_FALSE, NULL);
+        (void)enableNativeBuffers_l(portIndex, OMX_TRUE /*graphic*/, OMX_FALSE);
+        return enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_TRUE);
+    }
+
+    case IOMX::kPortModePresetANWBuffer:
+    {
+        if (portIndex != kPortIndexOutput) {
+            CLOG_ERROR(setPortMode, BAD_VALUE,
+                    "%s(%d) mode is only supported on output port", asString(mode), mode);
+            return BAD_VALUE;
+        }
+
+        // Check if we're simulating legacy mode with metadata mode;
+        // if so, enable metadata mode.
+        if (mLegacyAdaptiveExperiment) {
+            if (storeMetaDataInBuffers_l(portIndex, OMX_TRUE, NULL) == OK) {
+                CLOG_INTERNAL(setPortMode, "Legacy adaptive experiment: "
+                        "metadata mode enabled successfully");
+                return OK;
+            }
+
+            CLOG_INTERNAL(setPortMode, "Legacy adaptive experiment: "
+                    "unable to enable metadata mode on output");
+
+            mLegacyAdaptiveExperiment = false;
+        }
+
+        // Disable secure buffer and enable graphic buffer
+        (void)enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_FALSE);
+        status_t err = enableNativeBuffers_l(portIndex, OMX_TRUE /*graphic*/, OMX_TRUE);
+        if (err != OK) {
+            return err;
+        }
+
+        // Not running experiment, or metadata is not supported.
+        // Disable metadata mode and use legacy mode.
+        (void)storeMetaDataInBuffers_l(portIndex, OMX_FALSE, NULL);
+        return OK;
+    }
+
+    case IOMX::kPortModePresetByteBuffer:
+    {
+        // Disable secure buffer, native buffer and metadata.
+        (void)enableNativeBuffers_l(portIndex, OMX_TRUE /*graphic*/, OMX_FALSE);
+        (void)enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_FALSE);
+        (void)storeMetaDataInBuffers_l(portIndex, OMX_FALSE, NULL);
+        return OK;
+    }
+
+    default:
+        break;
+    }
+
+    CLOG_ERROR(setPortMode, BAD_VALUE, "invalid port mode %d", mode);
+    return BAD_VALUE;
 }
 
-status_t OMXNodeInstance::enableNativeBuffers(
+status_t OMXNodeInstance::enableNativeBuffers_l(
         OMX_U32 portIndex, OMX_BOOL graphic, OMX_BOOL enable) {
     if (portIndex >= NELEM(mSecureBufferType)) {
         ALOGE("b/31385713, portIndex(%u)", portIndex);
@@ -536,7 +768,6 @@
         return BAD_VALUE;
     }
 
-    Mutex::Autolock autoLock(mLock);
     CLOG_CONFIG(enableNativeBuffers, "%s:%u%s, %d", portString(portIndex), portIndex,
                 graphic ? ", graphic" : "", enable);
     OMX_STRING name = const_cast<OMX_STRING>(
@@ -568,9 +799,7 @@
         if (!graphic) {
             // Extension not supported, check for manual override with system property
             // This is a temporary workaround until partners support the OMX extension
-            char value[PROPERTY_VALUE_MAX];
-            if (property_get("media.mediadrmservice.enable", value, NULL)
-                && (!strcmp("1", value) || !strcasecmp("true", value))) {
+            if (property_get_bool("media.mediadrmservice.enable", false)) {
                 CLOG_CONFIG(enableNativeBuffers, "system property override: using native-handles");
                 mSecureBufferType[portIndex] = kSecureBufferTypeNativeHandle;
             } else if (mSecureBufferType[portIndex] == kSecureBufferTypeUnknown) {
@@ -613,13 +842,6 @@
     return OK;
 }
 
-status_t OMXNodeInstance::storeMetaDataInBuffers(
-        OMX_U32 portIndex, OMX_BOOL enable, MetadataBufferType *type) {
-    Mutex::Autolock autolock(mLock);
-    CLOG_CONFIG(storeMetaDataInBuffers, "%s:%u en:%d", portString(portIndex), portIndex, enable);
-    return storeMetaDataInBuffers_l(portIndex, enable, type);
-}
-
 status_t OMXNodeInstance::storeMetaDataInBuffers_l(
         OMX_U32 portIndex, OMX_BOOL enable, MetadataBufferType *type) {
     if (mSailed) {
@@ -710,6 +932,12 @@
     CLOG_CONFIG(prepareForAdaptivePlayback, "%s:%u en=%d max=%ux%u",
             portString(portIndex), portIndex, enable, maxFrameWidth, maxFrameHeight);
 
+    if (mLegacyAdaptiveExperiment) {
+        CLOG_INTERNAL(prepareForAdaptivePlayback,
+                "Legacy adaptive experiment: reporting success");
+        return OK;
+    }
+
     OMX_INDEXTYPE index;
     OMX_STRING name = const_cast<OMX_STRING>(
             "OMX.google.android.index.prepareForAdaptivePlayback");
@@ -780,48 +1008,117 @@
 }
 
 status_t OMXNodeInstance::useBuffer(
-        OMX_U32 portIndex, const sp<IMemory> &params,
-        OMX::buffer_id *buffer, OMX_U32 allottedSize) {
-    if (params == NULL || buffer == NULL) {
+        OMX_U32 portIndex, const OMXBuffer &omxBuffer, IOMX::buffer_id *buffer) {
+    if (buffer == NULL) {
         ALOGE("b/25884056");
         return BAD_VALUE;
     }
 
-    Mutex::Autolock autoLock(mLock);
-    if (allottedSize > params->size() || portIndex >= NELEM(mNumPortBuffers)) {
+    if (portIndex >= NELEM(mNumPortBuffers)) {
         return BAD_VALUE;
     }
 
-    // metadata buffers are not connected cross process
-    // use a backup buffer instead of the actual buffer
-    BufferMeta *buffer_meta;
-    bool useBackup = mMetadataType[portIndex] != kMetadataBufferTypeInvalid;
-    OMX_U8 *data = static_cast<OMX_U8 *>(params->pointer());
-    // allocate backup buffer
-    if (useBackup) {
-        data = new (std::nothrow) OMX_U8[allottedSize];
-        if (data == NULL) {
-            return NO_MEMORY;
-        }
-        memset(data, 0, allottedSize);
+    Mutex::Autolock autoLock(mLock);
 
-        buffer_meta = new BufferMeta(
-                params, portIndex, false /* copyToOmx */, false /* copyFromOmx */, data);
-    } else {
-        buffer_meta = new BufferMeta(
-                params, portIndex, false /* copyToOmx */, false /* copyFromOmx */, NULL);
+    switch (omxBuffer.mBufferType) {
+    case OMXBuffer::kBufferTypePreset:
+        return useBuffer_l(portIndex, NULL, buffer);
+
+    case OMXBuffer::kBufferTypeSharedMem:
+        return useBuffer_l(portIndex, omxBuffer.mMem, buffer);
+
+    case OMXBuffer::kBufferTypeANWBuffer:
+        return useGraphicBuffer_l(portIndex, omxBuffer.mGraphicBuffer, buffer);
+
+    default:
+        break;
     }
 
-    OMX_BUFFERHEADERTYPE *header;
+    return BAD_VALUE;
+}
 
-    OMX_ERRORTYPE err = OMX_UseBuffer(
-            mHandle, &header, portIndex, buffer_meta,
-            allottedSize, data);
+status_t OMXNodeInstance::useBuffer_l(
+        OMX_U32 portIndex, const sp<IMemory> &params, IOMX::buffer_id *buffer) {
+    BufferMeta *buffer_meta;
+    OMX_BUFFERHEADERTYPE *header;
+    OMX_ERRORTYPE err = OMX_ErrorNone;
+    bool isMetadata = mMetadataType[portIndex] != kMetadataBufferTypeInvalid;
+
+    OMX_U32 allottedSize;
+    if (isMetadata) {
+        if (mMetadataType[portIndex] == kMetadataBufferTypeGrallocSource) {
+            allottedSize = sizeof(VideoGrallocMetadata);
+        } else if (mMetadataType[portIndex] == kMetadataBufferTypeANWBuffer) {
+            allottedSize = sizeof(VideoNativeMetadata);
+        } else if (mMetadataType[portIndex] == kMetadataBufferTypeNativeHandleSource) {
+            allottedSize = sizeof(VideoNativeHandleMetadata);
+        } else {
+            return BAD_VALUE;
+        }
+    } else {
+        // NULL params is allowed only in metadata mode.
+        if (params == NULL) {
+            ALOGE("b/25884056");
+            return BAD_VALUE;
+        }
+        allottedSize = params->size();
+    }
+
+    bool isOutputGraphicMetadata = (portIndex == kPortIndexOutput) &&
+            (mMetadataType[portIndex] == kMetadataBufferTypeGrallocSource ||
+                    mMetadataType[portIndex] == kMetadataBufferTypeANWBuffer);
+
+    uint32_t requiresAllocateBufferBit =
+        (portIndex == kPortIndexInput)
+            ? kRequiresAllocateBufferOnInputPorts
+            : kRequiresAllocateBufferOnOutputPorts;
+
+    // we use useBuffer for output metadata regardless of quirks
+    if (!isOutputGraphicMetadata && (mQuirks & requiresAllocateBufferBit)) {
+        // metadata buffers are not connected cross process; only copy if not meta.
+        buffer_meta = new BufferMeta(
+                    params, portIndex, !isMetadata /* copy */, NULL /* data */);
+
+        err = OMX_AllocateBuffer(
+                mHandle, &header, portIndex, buffer_meta, allottedSize);
+
+        if (err != OMX_ErrorNone) {
+            CLOG_ERROR(allocateBuffer, err,
+                    SIMPLE_BUFFER(portIndex, (size_t)allottedSize,
+                            params != NULL ? params->pointer() : NULL));
+        }
+    } else {
+        OMX_U8 *data = NULL;
+
+        // metadata buffers are not connected cross process
+        // use a backup buffer instead of the actual buffer
+        if (isMetadata) {
+            data = new (std::nothrow) OMX_U8[allottedSize];
+            if (data == NULL) {
+                return NO_MEMORY;
+            }
+            memset(data, 0, allottedSize);
+
+            buffer_meta = new BufferMeta(
+                    params, portIndex, false /* copy */, data);
+        } else {
+            data = static_cast<OMX_U8 *>(params->pointer());
+
+            buffer_meta = new BufferMeta(
+                    params, portIndex, false /* copy */, NULL);
+        }
+
+        err = OMX_UseBuffer(
+                mHandle, &header, portIndex, buffer_meta,
+                allottedSize, data);
+
+        if (err != OMX_ErrorNone) {
+            CLOG_ERROR(useBuffer, err, SIMPLE_BUFFER(
+                    portIndex, (size_t)allottedSize, data));
+        }
+    }
 
     if (err != OMX_ErrorNone) {
-        CLOG_ERROR(useBuffer, err, SIMPLE_BUFFER(
-                portIndex, (size_t)allottedSize, data));
-
         delete buffer_meta;
         buffer_meta = NULL;
 
@@ -836,9 +1133,9 @@
 
     addActiveBuffer(portIndex, *buffer);
 
-    sp<GraphicBufferSource> bufferSource(getGraphicBufferSource());
+    sp<IOMXBufferSource> bufferSource(getBufferSource());
     if (bufferSource != NULL && portIndex == kPortIndexInput) {
-        bufferSource->addCodecBuffer(header);
+        bufferSource->onInputBufferAdded(*buffer);
     }
 
     CLOG_BUFFER(useBuffer, NEW_BUFFER_FMT(
@@ -848,7 +1145,7 @@
 
 status_t OMXNodeInstance::useGraphicBuffer2_l(
         OMX_U32 portIndex, const sp<GraphicBuffer>& graphicBuffer,
-        OMX::buffer_id *buffer) {
+        IOMX::buffer_id *buffer) {
     if (graphicBuffer == NULL || buffer == NULL) {
         ALOGE("b/25884056");
         return BAD_VALUE;
@@ -902,14 +1199,20 @@
 // XXX: This function is here for backwards compatibility.  Once the OMX
 // implementations have been updated this can be removed and useGraphicBuffer2
 // can be renamed to useGraphicBuffer.
-status_t OMXNodeInstance::useGraphicBuffer(
+status_t OMXNodeInstance::useGraphicBuffer_l(
         OMX_U32 portIndex, const sp<GraphicBuffer>& graphicBuffer,
-        OMX::buffer_id *buffer) {
+        IOMX::buffer_id *buffer) {
     if (graphicBuffer == NULL || buffer == NULL) {
         ALOGE("b/25884056");
         return BAD_VALUE;
     }
-    Mutex::Autolock autoLock(mLock);
+
+    // First, see if we're in metadata mode. We could be running an experiment to simulate
+    // legacy behavior (preallocated buffers) on devices that support meta.
+    if (mMetadataType[portIndex] != kMetadataBufferTypeInvalid) {
+        return useGraphicBufferWithMetadata_l(
+                portIndex, graphicBuffer, buffer);
+    }
 
     // See if the newer version of the extension is present.
     OMX_INDEXTYPE index;
@@ -966,9 +1269,32 @@
     return OK;
 }
 
+status_t OMXNodeInstance::useGraphicBufferWithMetadata_l(
+        OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
+        IOMX::buffer_id *buffer) {
+    if (portIndex != kPortIndexOutput) {
+        return BAD_VALUE;
+    }
+
+    if (mMetadataType[portIndex] != kMetadataBufferTypeGrallocSource &&
+            mMetadataType[portIndex] != kMetadataBufferTypeANWBuffer) {
+        return BAD_VALUE;
+    }
+
+    status_t err = useBuffer_l(portIndex, NULL, buffer);
+    if (err != OK) {
+        return err;
+    }
+
+    OMX_BUFFERHEADERTYPE *header = findBufferHeader(*buffer, portIndex);
+
+    return updateGraphicBufferInMeta_l(portIndex, graphicBuffer, *buffer, header);
+
+}
+
 status_t OMXNodeInstance::updateGraphicBufferInMeta_l(
         OMX_U32 portIndex, const sp<GraphicBuffer>& graphicBuffer,
-        OMX::buffer_id buffer, OMX_BUFFERHEADERTYPE *header, bool updateCodecBuffer) {
+        IOMX::buffer_id buffer, OMX_BUFFERHEADERTYPE *header) {
     // No need to check |graphicBuffer| since NULL is valid for it as below.
     if (header == NULL) {
         ALOGE("b/25884056");
@@ -980,14 +1306,9 @@
     }
 
     BufferMeta *bufferMeta = (BufferMeta *)(header->pAppPrivate);
-    sp<ABuffer> data = bufferMeta->getBuffer(
-            header, !updateCodecBuffer /* backup */, false /* limit */);
+    sp<ABuffer> data = bufferMeta->getBuffer(header, false /* limit */);
     bufferMeta->setGraphicBuffer(graphicBuffer);
     MetadataBufferType metaType = mMetadataType[portIndex];
-    // we use gralloc source only in the codec buffers
-    if (metaType == kMetadataBufferTypeGrallocSource && !updateCodecBuffer) {
-        metaType = kMetadataBufferTypeANWBuffer;
-    }
     if (metaType == kMetadataBufferTypeGrallocSource
             && data->capacity() >= sizeof(VideoGrallocMetadata)) {
         VideoGrallocMetadata &metadata = *(VideoGrallocMetadata *)(data->data());
@@ -1011,21 +1332,9 @@
     return OK;
 }
 
-status_t OMXNodeInstance::updateGraphicBufferInMeta(
-        OMX_U32 portIndex, const sp<GraphicBuffer>& graphicBuffer,
-        OMX::buffer_id buffer) {
-    Mutex::Autolock autoLock(mLock);
-    OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, portIndex);
-    // update backup buffer for input, codec buffer for output
-    return updateGraphicBufferInMeta_l(
-            portIndex, graphicBuffer, buffer, header,
-            true /* updateCodecBuffer */);
-}
-
-status_t OMXNodeInstance::updateNativeHandleInMeta(
-        OMX_U32 portIndex, const sp<NativeHandle>& nativeHandle, OMX::buffer_id buffer) {
-    Mutex::Autolock autoLock(mLock);
-    OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, portIndex);
+status_t OMXNodeInstance::updateNativeHandleInMeta_l(
+        OMX_U32 portIndex, const sp<NativeHandle>& nativeHandle,
+        IOMX::buffer_id buffer, OMX_BUFFERHEADERTYPE *header) {
     // No need to check |nativeHandle| since NULL is valid for it as below.
     if (header == NULL) {
         ALOGE("b/25884056");
@@ -1037,9 +1346,7 @@
     }
 
     BufferMeta *bufferMeta = (BufferMeta *)(header->pAppPrivate);
-    // update backup buffer
-    sp<ABuffer> data = bufferMeta->getBuffer(
-            header, false /* backup */, false /* limit */);
+    sp<ABuffer> data = bufferMeta->getBuffer(header, false /* limit */);
     bufferMeta->setNativeHandle(nativeHandle);
     if (mMetadataType[portIndex] == kMetadataBufferTypeNativeHandleSource
             && data->capacity() >= sizeof(VideoNativeHandleMetadata)) {
@@ -1059,32 +1366,23 @@
     return OK;
 }
 
-status_t OMXNodeInstance::createGraphicBufferSource(
-        OMX_U32 portIndex, const sp<IGraphicBufferConsumer> &bufferConsumer, MetadataBufferType *type) {
+status_t OMXNodeInstance::setInputSurface(
+        const sp<IOMXBufferSource> &bufferSource) {
+    Mutex::Autolock autolock(mLock);
+
     status_t err;
 
     // only allow graphic source on input port, when there are no allocated buffers yet
-    if (portIndex != kPortIndexInput) {
-        android_errorWriteLog(0x534e4554, "29422020");
-        return BAD_VALUE;
-    } else if (mNumPortBuffers[portIndex] > 0) {
+    if (mNumPortBuffers[kPortIndexInput] > 0) {
         android_errorWriteLog(0x534e4554, "29422020");
         return INVALID_OPERATION;
     }
 
-    const sp<GraphicBufferSource> surfaceCheck = getGraphicBufferSource();
-    if (surfaceCheck != NULL) {
-        if (portIndex < NELEM(mMetadataType) && type != NULL) {
-            *type = mMetadataType[portIndex];
-        }
+    if (getBufferSource() != NULL) {
         return ALREADY_EXISTS;
     }
 
-    // Input buffers will hold meta-data (ANativeWindowBuffer references).
-    if (type != NULL) {
-        *type = kMetadataBufferTypeANWBuffer;
-    }
-    err = storeMetaDataInBuffers_l(portIndex, OMX_TRUE, type);
+    err = storeMetaDataInBuffers_l(kPortIndexInput, OMX_TRUE, NULL);
     if (err != OK) {
         return err;
     }
@@ -1093,13 +1391,13 @@
     // codec was configured.
     OMX_PARAM_PORTDEFINITIONTYPE def;
     InitOMXParams(&def);
-    def.nPortIndex = portIndex;
+    def.nPortIndex = kPortIndexInput;
     OMX_ERRORTYPE oerr = OMX_GetParameter(
             mHandle, OMX_IndexParamPortDefinition, &def);
     if (oerr != OMX_ErrorNone) {
         OMX_INDEXTYPE index = OMX_IndexParamPortDefinition;
-        CLOG_ERROR(getParameter, oerr, "%s(%#x): %s:%u",
-                asString(index), index, portString(portIndex), portIndex);
+        CLOG_ERROR(getParameter, oerr, "%s(%#x): %s:%u", asString(index),
+                index, portString(kPortIndexInput), kPortIndexInput);
         return UNKNOWN_ERROR;
     }
 
@@ -1110,105 +1408,20 @@
         return INVALID_OPERATION;
     }
 
-    uint32_t usageBits;
-    oerr = OMX_GetParameter(
-            mHandle, (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits, &usageBits);
-    if (oerr != OMX_ErrorNone) {
-        usageBits = 0;
-    }
-
-    sp<GraphicBufferSource> bufferSource = new GraphicBufferSource(this,
-            def.format.video.nFrameWidth,
-            def.format.video.nFrameHeight,
-            def.nBufferCountActual,
-            usageBits,
-            bufferConsumer);
-
-    if ((err = bufferSource->initCheck()) != OK) {
-        return err;
-    }
-    setGraphicBufferSource(bufferSource);
-
-    return OK;
-}
-
-status_t OMXNodeInstance::createInputSurface(
-        OMX_U32 portIndex, android_dataspace dataSpace,
-        sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
-    if (bufferProducer == NULL) {
-        ALOGE("b/25884056");
+    if (def.format.video.nFrameWidth == 0
+            || def.format.video.nFrameHeight == 0) {
+        ALOGE("Invalid video dimension %ux%u",
+                def.format.video.nFrameWidth,
+                def.format.video.nFrameHeight);
         return BAD_VALUE;
     }
 
-    Mutex::Autolock autolock(mLock);
-    status_t err = createGraphicBufferSource(portIndex, NULL /* bufferConsumer */, type);
-
-    if (err != OK) {
-        return err;
-    }
-
-    mGraphicBufferSource->setDefaultDataSpace(dataSpace);
-
-    *bufferProducer = mGraphicBufferSource->getIGraphicBufferProducer();
+    setBufferSource(bufferSource);
     return OK;
 }
 
-//static
-status_t OMXNodeInstance::createPersistentInputSurface(
-        sp<IGraphicBufferProducer> *bufferProducer,
-        sp<IGraphicBufferConsumer> *bufferConsumer) {
-    if (bufferProducer == NULL || bufferConsumer == NULL) {
-        ALOGE("b/25884056");
-        return BAD_VALUE;
-    }
-    String8 name("GraphicBufferSource");
-
-    sp<IGraphicBufferProducer> producer;
-    sp<IGraphicBufferConsumer> consumer;
-    BufferQueue::createBufferQueue(&producer, &consumer);
-    consumer->setConsumerName(name);
-    consumer->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER);
-
-    sp<BufferQueue::ProxyConsumerListener> proxy =
-        new BufferQueue::ProxyConsumerListener(NULL);
-    status_t err = consumer->consumerConnect(proxy, false);
-    if (err != NO_ERROR) {
-        ALOGE("Error connecting to BufferQueue: %s (%d)",
-                strerror(-err), err);
-        return err;
-    }
-
-    *bufferProducer = producer;
-    *bufferConsumer = consumer;
-
-    return OK;
-}
-
-status_t OMXNodeInstance::setInputSurface(
-        OMX_U32 portIndex, const sp<IGraphicBufferConsumer> &bufferConsumer,
-        MetadataBufferType *type) {
-    Mutex::Autolock autolock(mLock);
-    return createGraphicBufferSource(portIndex, bufferConsumer, type);
-}
-
-void OMXNodeInstance::signalEvent(OMX_EVENTTYPE event, OMX_U32 arg1, OMX_U32 arg2) {
-    mOwner->OnEvent(mNodeID, event, arg1, arg2, NULL);
-}
-
-status_t OMXNodeInstance::signalEndOfInputStream() {
-    // For non-Surface input, the MediaCodec should convert the call to a
-    // pair of requests (dequeue input buffer, queue input buffer with EOS
-    // flag set).  Seems easier than doing the equivalent from here.
-    sp<GraphicBufferSource> bufferSource(getGraphicBufferSource());
-    if (bufferSource == NULL) {
-        CLOGW("signalEndOfInputStream can only be used with Surface input");
-        return INVALID_OPERATION;
-    }
-    return bufferSource->signalEndOfInputStream();
-}
-
 status_t OMXNodeInstance::allocateSecureBuffer(
-        OMX_U32 portIndex, size_t size, OMX::buffer_id *buffer,
+        OMX_U32 portIndex, size_t size, IOMX::buffer_id *buffer,
         void **buffer_data, sp<NativeHandle> *native_handle) {
     if (buffer == NULL || buffer_data == NULL || native_handle == NULL) {
         ALOGE("b/25884056");
@@ -1223,7 +1436,7 @@
 
     Mutex::Autolock autoLock(mLock);
 
-    BufferMeta *buffer_meta = new BufferMeta(size, portIndex);
+    BufferMeta *buffer_meta = new BufferMeta(portIndex);
 
     OMX_BUFFERHEADERTYPE *header;
 
@@ -1254,9 +1467,9 @@
 
     addActiveBuffer(portIndex, *buffer);
 
-    sp<GraphicBufferSource> bufferSource(getGraphicBufferSource());
+    sp<IOMXBufferSource> bufferSource(getBufferSource());
     if (bufferSource != NULL && portIndex == kPortIndexInput) {
-        bufferSource->addCodecBuffer(header);
+        bufferSource->onInputBufferAdded(*buffer);
     }
     CLOG_BUFFER(allocateSecureBuffer, NEW_BUFFER_FMT(
             *buffer, portIndex, "%zu@%p:%p", size, *buffer_data,
@@ -1265,62 +1478,8 @@
     return OK;
 }
 
-status_t OMXNodeInstance::allocateBufferWithBackup(
-        OMX_U32 portIndex, const sp<IMemory> &params,
-        OMX::buffer_id *buffer, OMX_U32 allottedSize) {
-    if (params == NULL || buffer == NULL) {
-        ALOGE("b/25884056");
-        return BAD_VALUE;
-    }
-
-    Mutex::Autolock autoLock(mLock);
-    if (allottedSize > params->size() || portIndex >= NELEM(mNumPortBuffers)) {
-        return BAD_VALUE;
-    }
-
-    // metadata buffers are not connected cross process; only copy if not meta
-    bool copy = mMetadataType[portIndex] == kMetadataBufferTypeInvalid;
-
-    BufferMeta *buffer_meta = new BufferMeta(
-            params, portIndex,
-            (portIndex == kPortIndexInput) && copy /* copyToOmx */,
-            (portIndex == kPortIndexOutput) && copy /* copyFromOmx */,
-            NULL /* data */);
-
-    OMX_BUFFERHEADERTYPE *header;
-
-    OMX_ERRORTYPE err = OMX_AllocateBuffer(
-            mHandle, &header, portIndex, buffer_meta, allottedSize);
-    if (err != OMX_ErrorNone) {
-        CLOG_ERROR(allocateBufferWithBackup, err,
-                SIMPLE_BUFFER(portIndex, (size_t)allottedSize, params->pointer()));
-        delete buffer_meta;
-        buffer_meta = NULL;
-
-        *buffer = 0;
-
-        return StatusFromOMXError(err);
-    }
-
-    CHECK_EQ(header->pAppPrivate, buffer_meta);
-
-    *buffer = makeBufferID(header);
-
-    addActiveBuffer(portIndex, *buffer);
-
-    sp<GraphicBufferSource> bufferSource(getGraphicBufferSource());
-    if (bufferSource != NULL && portIndex == kPortIndexInput) {
-        bufferSource->addCodecBuffer(header);
-    }
-
-    CLOG_BUFFER(allocateBufferWithBackup, NEW_BUFFER_FMT(*buffer, portIndex, "%zu@%p :> %u@%p",
-            params->size(), params->pointer(), allottedSize, header->pBuffer));
-
-    return OK;
-}
-
 status_t OMXNodeInstance::freeBuffer(
-        OMX_U32 portIndex, OMX::buffer_id buffer) {
+        OMX_U32 portIndex, IOMX::buffer_id buffer) {
     Mutex::Autolock autoLock(mLock);
     CLOG_BUFFER(freeBuffer, "%s:%u %#x", portString(portIndex), portIndex, buffer);
 
@@ -1343,7 +1502,8 @@
     return StatusFromOMXError(err);
 }
 
-status_t OMXNodeInstance::fillBuffer(OMX::buffer_id buffer, int fenceFd) {
+status_t OMXNodeInstance::fillBuffer(
+        IOMX::buffer_id buffer, const OMXBuffer &omxBuffer, int fenceFd) {
     Mutex::Autolock autoLock(mLock);
 
     OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, kPortIndexOutput);
@@ -1351,6 +1511,20 @@
         ALOGE("b/25884056");
         return BAD_VALUE;
     }
+
+    if (omxBuffer.mBufferType == OMXBuffer::kBufferTypeANWBuffer) {
+        status_t err = updateGraphicBufferInMeta_l(
+                kPortIndexOutput, omxBuffer.mGraphicBuffer, buffer, header);
+
+        if (err != OK) {
+            CLOG_ERROR(fillBuffer, err, FULL_BUFFER(
+                    (intptr_t)header->pBuffer, header, fenceFd));
+            return err;
+        }
+    } else if (omxBuffer.mBufferType != OMXBuffer::kBufferTypePreset) {
+        return BAD_VALUE;
+    }
+
     header->nFilledLen = 0;
     header->nOffset = 0;
     header->nFlags = 0;
@@ -1378,13 +1552,38 @@
 }
 
 status_t OMXNodeInstance::emptyBuffer(
-        OMX::buffer_id buffer,
-        OMX_U32 rangeOffset, OMX_U32 rangeLength,
+        buffer_id buffer, const OMXBuffer &omxBuffer,
         OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
     Mutex::Autolock autoLock(mLock);
 
+    switch (omxBuffer.mBufferType) {
+    case OMXBuffer::kBufferTypePreset:
+        return emptyBuffer_l(
+                buffer, omxBuffer.mRangeOffset, omxBuffer.mRangeLength,
+                flags, timestamp, fenceFd);
+
+    case OMXBuffer::kBufferTypeANWBuffer:
+        return emptyGraphicBuffer_l(
+                buffer, omxBuffer.mGraphicBuffer, flags, timestamp, fenceFd);
+
+    case OMXBuffer::kBufferTypeNativeHandle:
+        return emptyNativeHandleBuffer_l(
+                buffer, omxBuffer.mNativeHandle, flags, timestamp, fenceFd);
+
+    default:
+        break;
+    }
+
+    return BAD_VALUE;
+}
+
+status_t OMXNodeInstance::emptyBuffer_l(
+        IOMX::buffer_id buffer,
+        OMX_U32 rangeOffset, OMX_U32 rangeLength,
+        OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
+
     // no emptybuffer if using input surface
-    if (getGraphicBufferSource() != NULL) {
+    if (getBufferSource() != NULL) {
         android_errorWriteLog(0x534e4554, "29422020");
         return INVALID_OPERATION;
     }
@@ -1530,25 +1729,25 @@
 }
 
 // like emptyBuffer, but the data is already in header->pBuffer
-status_t OMXNodeInstance::emptyGraphicBuffer(
-        OMX_BUFFERHEADERTYPE *header, const sp<GraphicBuffer> &graphicBuffer,
+status_t OMXNodeInstance::emptyGraphicBuffer_l(
+        IOMX::buffer_id buffer, const sp<GraphicBuffer> &graphicBuffer,
         OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
+    OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, kPortIndexInput);
     if (header == NULL) {
         ALOGE("b/25884056");
         return BAD_VALUE;
     }
 
-    Mutex::Autolock autoLock(mLock);
-    OMX::buffer_id buffer = findBufferID(header);
     status_t err = updateGraphicBufferInMeta_l(
-            kPortIndexInput, graphicBuffer, buffer, header,
-            true /* updateCodecBuffer */);
+            kPortIndexInput, graphicBuffer, buffer, header);
     if (err != OK) {
         CLOG_ERROR(emptyGraphicBuffer, err, FULL_BUFFER(
                 (intptr_t)header->pBuffer, header, fenceFd));
         return err;
     }
 
+    int64_t codecTimeUs = getCodecTimestamp(timestamp);
+
     header->nOffset = 0;
     if (graphicBuffer == NULL) {
         header->nFilledLen = 0;
@@ -1557,9 +1756,100 @@
     } else {
         header->nFilledLen = sizeof(VideoNativeMetadata);
     }
+    return emptyBuffer_l(header, flags, codecTimeUs, (intptr_t)header->pBuffer, fenceFd);
+}
+
+status_t OMXNodeInstance::setMaxPtsGapUs(const void *params, size_t size) {
+    if (params == NULL || size != sizeof(OMX_PARAM_U32TYPE)) {
+        CLOG_ERROR(setMaxPtsGapUs, BAD_VALUE, "invalid params (%p,%zu)", params, size);
+        return BAD_VALUE;
+    }
+
+    mMaxTimestampGapUs = (int64_t)((OMX_PARAM_U32TYPE*)params)->nU32;
+
+    return OK;
+}
+
+int64_t OMXNodeInstance::getCodecTimestamp(OMX_TICKS timestamp) {
+    int64_t originalTimeUs = timestamp;
+
+    if (mMaxTimestampGapUs > 0ll) {
+        /* Cap timestamp gap between adjacent frames to specified max
+         *
+         * In the scenario of cast mirroring, encoding could be suspended for
+         * prolonged periods. Limiting the pts gap works around the problem
+         * where the encoder's rate control logic produces huge frames after a
+         * long period of suspension.
+         */
+        if (mPrevOriginalTimeUs >= 0ll) {
+            int64_t timestampGapUs = originalTimeUs - mPrevOriginalTimeUs;
+            timestamp = (timestampGapUs < mMaxTimestampGapUs ?
+                timestampGapUs : mMaxTimestampGapUs) + mPrevModifiedTimeUs;
+        }
+        ALOGV("IN  timestamp: %lld -> %lld",
+            static_cast<long long>(originalTimeUs),
+            static_cast<long long>(timestamp));
+    }
+
+    mPrevOriginalTimeUs = originalTimeUs;
+    mPrevModifiedTimeUs = timestamp;
+
+    if (mMaxTimestampGapUs > 0ll && !mRestorePtsFailed) {
+        mOriginalTimeUs.add(timestamp, originalTimeUs);
+    }
+
+    return timestamp;
+}
+
+status_t OMXNodeInstance::emptyNativeHandleBuffer_l(
+        IOMX::buffer_id buffer, const sp<NativeHandle> &nativeHandle,
+        OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
+    OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, kPortIndexInput);
+    if (header == NULL) {
+        ALOGE("b/25884056");
+        return BAD_VALUE;
+    }
+
+    status_t err = updateNativeHandleInMeta_l(
+            kPortIndexInput, nativeHandle, buffer, header);
+    if (err != OK) {
+        CLOG_ERROR(emptyNativeHandleBuffer_l, err, FULL_BUFFER(
+                (intptr_t)header->pBuffer, header, fenceFd));
+        return err;
+    }
+
+    header->nOffset = 0;
+    header->nFilledLen = (nativeHandle == NULL) ? 0 : sizeof(VideoNativeMetadata);
+
     return emptyBuffer_l(header, flags, timestamp, (intptr_t)header->pBuffer, fenceFd);
 }
 
+void OMXNodeInstance::codecBufferFilled(omx_message &msg) {
+    Mutex::Autolock autoLock(mBufferIDLock);
+
+    if (mMaxTimestampGapUs <= 0ll || mRestorePtsFailed) {
+        return;
+    }
+
+    OMX_U32 &flags = msg.u.extended_buffer_data.flags;
+    OMX_TICKS &timestamp = msg.u.extended_buffer_data.timestamp;
+
+    if (!(flags & OMX_BUFFERFLAG_CODECCONFIG)) {
+        ssize_t index = mOriginalTimeUs.indexOfKey(timestamp);
+        if (index >= 0) {
+            ALOGV("OUT timestamp: %lld -> %lld",
+                    static_cast<long long>(timestamp),
+                    static_cast<long long>(mOriginalTimeUs[index]));
+            timestamp = mOriginalTimeUs[index];
+            mOriginalTimeUs.removeItemsAt(index);
+        } else {
+            // give up the effort, as the encoder doesn't appear to preserve pts
+            ALOGW("giving up limiting timestamp gap (pts = %lld)",
+                    static_cast<long long>(timestamp));
+            mRestorePtsFailed = true;
+        }
+    }
+}
+
 status_t OMXNodeInstance::getExtensionIndex(
         const char *parameterName, OMX_INDEXTYPE *index) {
     Mutex::Autolock autoLock(mLock);
@@ -1570,133 +1860,22 @@
     return StatusFromOMXError(err);
 }
 
-inline static const char *asString(IOMX::InternalOptionType i, const char *def = "??") {
-    switch (i) {
-        case IOMX::INTERNAL_OPTION_SUSPEND:           return "SUSPEND";
-        case IOMX::INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY:
-            return "REPEAT_PREVIOUS_FRAME_DELAY";
-        case IOMX::INTERNAL_OPTION_MAX_TIMESTAMP_GAP: return "MAX_TIMESTAMP_GAP";
-        case IOMX::INTERNAL_OPTION_MAX_FPS:           return "MAX_FPS";
-        case IOMX::INTERNAL_OPTION_START_TIME:        return "START_TIME";
-        case IOMX::INTERNAL_OPTION_TIME_LAPSE:        return "TIME_LAPSE";
-        case IOMX::INTERNAL_OPTION_TIME_OFFSET:       return "TIME_OFFSET";
-        default:                                      return def;
-    }
+status_t OMXNodeInstance::dispatchMessage(const omx_message &msg) {
+    mDispatcher->post(msg, true /*realTime*/);
+    return OK;
 }
 
-template<typename T>
-static bool getInternalOption(
-        const void *data, size_t size, T *out) {
-    if (size != sizeof(T)) {
-        return false;
+status_t OMXNodeInstance::setQuirks(OMX_U32 quirks) {
+    if (quirks & ~kQuirksMask) {
+        return BAD_VALUE;
     }
-    *out = *(T*)data;
-    return true;
-}
 
-status_t OMXNodeInstance::setInternalOption(
-        OMX_U32 portIndex,
-        IOMX::InternalOptionType type,
-        const void *data,
-        size_t size) {
-    CLOG_CONFIG(setInternalOption, "%s(%d): %s:%u %zu@%p",
-            asString(type), type, portString(portIndex), portIndex, size, data);
-    switch (type) {
-        case IOMX::INTERNAL_OPTION_SUSPEND:
-        case IOMX::INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY:
-        case IOMX::INTERNAL_OPTION_MAX_TIMESTAMP_GAP:
-        case IOMX::INTERNAL_OPTION_MAX_FPS:
-        case IOMX::INTERNAL_OPTION_START_TIME:
-        case IOMX::INTERNAL_OPTION_TIME_LAPSE:
-        case IOMX::INTERNAL_OPTION_TIME_OFFSET:
-        case IOMX::INTERNAL_OPTION_COLOR_ASPECTS:
-        {
-            const sp<GraphicBufferSource> &bufferSource =
-                getGraphicBufferSource();
+    mQuirks = quirks;
 
-            if (bufferSource == NULL || portIndex != kPortIndexInput) {
-                CLOGW("setInternalOption is only for Surface input");
-                return ERROR_UNSUPPORTED;
-            }
-
-            if (type == IOMX::INTERNAL_OPTION_SUSPEND) {
-                bool suspend;
-                if (!getInternalOption(data, size, &suspend)) {
-                    return INVALID_OPERATION;
-                }
-
-                CLOG_CONFIG(setInternalOption, "suspend=%d", suspend);
-                bufferSource->suspend(suspend);
-            } else if (type == IOMX::INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY) {
-                int64_t delayUs;
-                if (!getInternalOption(data, size, &delayUs)) {
-                    return INVALID_OPERATION;
-                }
-
-                CLOG_CONFIG(setInternalOption, "delayUs=%lld", (long long)delayUs);
-                return bufferSource->setRepeatPreviousFrameDelayUs(delayUs);
-            } else if (type == IOMX::INTERNAL_OPTION_TIME_OFFSET) {
-                int64_t timeOffsetUs;
-                if (!getInternalOption(data, size, &timeOffsetUs)) {
-                    return INVALID_OPERATION;
-                }
-                CLOG_CONFIG(setInternalOption, "bufferOffsetUs=%lld", (long long)timeOffsetUs);
-                return bufferSource->setInputBufferTimeOffset(timeOffsetUs);
-            } else if (type == IOMX::INTERNAL_OPTION_MAX_TIMESTAMP_GAP) {
-                int64_t maxGapUs;
-                if (!getInternalOption(data, size, &maxGapUs)) {
-                    return INVALID_OPERATION;
-                }
-
-                CLOG_CONFIG(setInternalOption, "gapUs=%lld", (long long)maxGapUs);
-                return bufferSource->setMaxTimestampGapUs(maxGapUs);
-            } else if (type == IOMX::INTERNAL_OPTION_MAX_FPS) {
-                float maxFps;
-                if (!getInternalOption(data, size, &maxFps)) {
-                    return INVALID_OPERATION;
-                }
-
-                CLOG_CONFIG(setInternalOption, "maxFps=%f", maxFps);
-                return bufferSource->setMaxFps(maxFps);
-            } else if (type == IOMX::INTERNAL_OPTION_START_TIME) {
-                int64_t skipFramesBeforeUs;
-                if (!getInternalOption(data, size, &skipFramesBeforeUs)) {
-                    return INVALID_OPERATION;
-                }
-
-                CLOG_CONFIG(setInternalOption, "beforeUs=%lld", (long long)skipFramesBeforeUs);
-                bufferSource->setSkipFramesBeforeUs(skipFramesBeforeUs);
-            } else if (type == IOMX::INTERNAL_OPTION_TIME_LAPSE) {
-                GraphicBufferSource::TimeLapseConfig config;
-                if (!getInternalOption(data, size, &config)) {
-                    return INVALID_OPERATION;
-                }
-
-                CLOG_CONFIG(setInternalOption, "perFrameUs=%lld perCaptureUs=%lld",
-                        (long long)config.mTimePerFrameUs, (long long)config.mTimePerCaptureUs);
-
-                return bufferSource->setTimeLapseConfig(config);
-            } else if (type == IOMX::INTERNAL_OPTION_COLOR_ASPECTS) {
-                ColorAspects aspects;
-                if (!getInternalOption(data, size, &aspects)) {
-                    return INVALID_OPERATION;
-                }
-
-                CLOG_CONFIG(setInternalOption, "setting color aspects");
-                bufferSource->setColorAspects(aspects);
-            }
-
-            return OK;
-        }
-
-        default:
-            return ERROR_UNSUPPORTED;
-    }
+    return OK;
 }
 
 bool OMXNodeInstance::handleMessage(omx_message &msg) {
-    const sp<GraphicBufferSource>& bufferSource(getGraphicBufferSource());
-
     if (msg.type == omx_message::FILL_BUFFER_DONE) {
         OMX_BUFFERHEADERTYPE *buffer =
             findBufferHeader(msg.u.extended_buffer_data.buffer, kPortIndexOutput);
@@ -1726,12 +1905,8 @@
         }
         buffer_meta->CopyFromOMX(buffer);
 
-        if (bufferSource != NULL) {
-            // fix up the buffer info (especially timestamp) if needed
-            bufferSource->codecBufferFilled(buffer);
-
-            msg.u.extended_buffer_data.timestamp = buffer->nTimeStamp;
-        }
+        // fix up the buffer info (especially timestamp) if needed
+        codecBufferFilled(msg);
     } else if (msg.type == omx_message::EMPTY_BUFFER_DONE) {
         OMX_BUFFERHEADERTYPE *buffer =
             findBufferHeader(msg.u.buffer_data.buffer, kPortIndexInput);
@@ -1747,20 +1922,100 @@
                     EBD, WITH_STATS(EMPTY_BUFFER(msg.u.buffer_data.buffer, buffer, msg.fenceFd)));
         }
 
+        const sp<IOMXBufferSource> bufferSource(getBufferSource());
+
         if (bufferSource != NULL) {
-            // This is one of the buffers used exclusively by
-            // GraphicBufferSource.
+            // This is one of the buffers used exclusively by IOMXBufferSource.
             // Don't dispatch a message back to ACodec, since it doesn't
             // know that anyone asked to have the buffer emptied and will
             // be very confused.
-            bufferSource->codecBufferEmptied(buffer, msg.fenceFd);
+            bufferSource->onInputBufferEmptied(
+                    msg.u.buffer_data.buffer, OMXFenceParcelable(msg.fenceFd));
             return true;
         }
+    } else if (msg.type == omx_message::EVENT &&
+            msg.u.event_data.event == OMX_EventDataSpaceChanged) {
+        handleDataSpaceChanged(msg);
     }
 
     return false;
 }
 
+bool OMXNodeInstance::handleDataSpaceChanged(omx_message &msg) {
+    android_dataspace dataSpace = (android_dataspace) msg.u.event_data.data1;
+    android_dataspace origDataSpace = dataSpace;
+
+    if (!ColorUtils::convertDataSpaceToV0(dataSpace)) {
+        // Do not process the data space change, don't notify client either
+        return true;
+    }
+
+    android_pixel_format pixelFormat = (android_pixel_format)msg.u.event_data.data3;
+
+    ColorAspects requestedAspects = ColorUtils::unpackToColorAspects(msg.u.event_data.data2);
+    ColorAspects aspects = requestedAspects; // initially requested aspects
+
+    // request color aspects to encode
+    OMX_INDEXTYPE index;
+    status_t err = getExtensionIndex(
+            "OMX.google.android.index.describeColorAspects", &index);
+    if (err == OK) {
+        // V0 dataspace
+        DescribeColorAspectsParams params;
+        InitOMXParams(&params);
+        params.nPortIndex = kPortIndexInput;
+        params.nDataSpace = origDataSpace;
+        params.nPixelFormat = pixelFormat;
+        params.bDataSpaceChanged = OMX_TRUE;
+        params.sAspects = requestedAspects;
+
+        err = getConfig(index, &params, sizeof(params));
+        if (err == OK) {
+            aspects = params.sAspects;
+            ALOGD("Codec resolved it to (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
+                    params.sAspects.mRange, asString(params.sAspects.mRange),
+                    params.sAspects.mPrimaries, asString(params.sAspects.mPrimaries),
+                    params.sAspects.mMatrixCoeffs, asString(params.sAspects.mMatrixCoeffs),
+                    params.sAspects.mTransfer, asString(params.sAspects.mTransfer),
+                    err, asString(err));
+        } else {
+            params.sAspects = aspects;
+            err = OK;
+        }
+        params.bDataSpaceChanged = OMX_FALSE;
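+        // Try (at most twice) to get the codec to accept the aspects: set
+        // them, read them back, and stop as soon as the codec no longer
+        // changes them or an error occurs (the dataspace is still reported
+        // to the client either way).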
+        for (int triesLeft = 2; --triesLeft >= 0; ) {
+            status_t err = setConfig(index, &params, sizeof(params));
+            if (err == OK) {
+                err = getConfig(index, &params, sizeof(params));
+            }
+            if (err != OK || !ColorUtils::checkIfAspectsChangedAndUnspecifyThem(
+                    params.sAspects, aspects)) {
+                // if we can't set or get color aspects, still communicate dataspace to client
+                break;
+            }
+
+            ALOGW_IF(triesLeft == 0, "Codec repeatedly changed requested ColorAspects.");
+        }
+    }
+
+    ALOGV("Set color aspects to (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
+            aspects.mRange, asString(aspects.mRange),
+            aspects.mPrimaries, asString(aspects.mPrimaries),
+            aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
+            aspects.mTransfer, asString(aspects.mTransfer),
+            err, asString(err));
+
+    // signal client that the dataspace has changed; this will update the output format
+    // TODO: we should tie this to an output buffer somehow, and signal the change
+    // just before the output buffer is returned to the client, but there are many
+    // ways this could fail (e.g. flushing), and we are not yet supporting this scenario.
+
+    msg.u.event_data.data1 = (OMX_U32) dataSpace;
+    msg.u.event_data.data2 = (OMX_U32) ColorUtils::packToU32(aspects);
+
+    return false;
+}
+
 void OMXNodeInstance::onMessages(std::list<omx_message> &messages) {
     for (std::list<omx_message>::iterator it = messages.begin(); it != messages.end(); ) {
         if (handleMessage(*it)) {
@@ -1775,15 +2030,11 @@
     }
 }
 
-void OMXNodeInstance::onObserverDied(OMXMaster *master) {
+void OMXNodeInstance::onObserverDied() {
     ALOGE("!!! Observer died. Quickly, do something, ... anything...");
 
     // Try to force shutdown of the node and hope for the best.
-    freeNode(master);
-}
-
-void OMXNodeInstance::onGetHandleFailed() {
-    delete this;
+    freeNode();
 }
 
 // OMXNodeInstance::OnEvent calls OMX::OnEvent, which then calls here.
@@ -1827,13 +2078,13 @@
 
     CLOGI_(level, onEvent, "%s(%x), %s(%x), %s(%x)",
             asString(event), event, arg1String, arg1, arg2String, arg2);
-    const sp<GraphicBufferSource>& bufferSource(getGraphicBufferSource());
+    const sp<IOMXBufferSource> bufferSource(getBufferSource());
 
     if (bufferSource != NULL
             && event == OMX_EventCmdComplete
             && arg1 == OMX_CommandStateSet
             && arg2 == OMX_StateExecuting) {
-        bufferSource->omxExecuting();
+        bufferSource->onOmxExecuting();
     }
 
     // allow configuration if we return to the loaded state
@@ -1860,8 +2111,39 @@
     if (instance->mDying) {
         return OMX_ErrorNone;
     }
-    return instance->owner()->OnEvent(
-            instance->nodeID(), eEvent, nData1, nData2, pEventData);
+
+    instance->onEvent(eEvent, nData1, nData2);
+
+    // output rendered events are not processed as regular events until they hit the observer
+    if (eEvent == OMX_EventOutputRendered) {
+        if (pEventData == NULL) {
+            return OMX_ErrorBadParameter;
+        }
+
+        // process data from array
+        OMX_VIDEO_RENDEREVENTTYPE *renderData = (OMX_VIDEO_RENDEREVENTTYPE *)pEventData;
+        for (size_t i = 0; i < nData1; ++i) {
+            omx_message msg;
+            msg.type = omx_message::FRAME_RENDERED;
+            msg.fenceFd = -1;
+            msg.u.render_data.timestamp = renderData[i].nMediaTimeUs;
+            msg.u.render_data.nanoTime = renderData[i].nSystemTimeNs;
+
+            instance->mDispatcher->post(msg, false /* realTime */);
+        }
+        return OMX_ErrorNone;
+    }
+
+    omx_message msg;
+    msg.type = omx_message::EVENT;
+    msg.fenceFd = -1;
+    msg.u.event_data.event = eEvent;
+    msg.u.event_data.data1 = nData1;
+    msg.u.event_data.data2 = nData2;
+
+    instance->mDispatcher->post(msg, true /* realTime */);
+
+    return OMX_ErrorNone;
 }
 
 // static
@@ -1878,8 +2160,14 @@
         return OMX_ErrorNone;
     }
     int fenceFd = instance->retrieveFenceFromMeta_l(pBuffer, kPortIndexOutput);
-    return instance->owner()->OnEmptyBufferDone(instance->nodeID(),
-            instance->findBufferID(pBuffer), pBuffer, fenceFd);
+
+    omx_message msg;
+    msg.type = omx_message::EMPTY_BUFFER_DONE;
+    msg.fenceFd = fenceFd;
+    msg.u.buffer_data.buffer = instance->findBufferID(pBuffer);
+    instance->mDispatcher->post(msg);
+
+    return OMX_ErrorNone;
 }
 
 // static
@@ -1896,11 +2184,21 @@
         return OMX_ErrorNone;
     }
     int fenceFd = instance->retrieveFenceFromMeta_l(pBuffer, kPortIndexOutput);
-    return instance->owner()->OnFillBufferDone(instance->nodeID(),
-            instance->findBufferID(pBuffer), pBuffer, fenceFd);
+
+    omx_message msg;
+    msg.type = omx_message::FILL_BUFFER_DONE;
+    msg.fenceFd = fenceFd;
+    msg.u.extended_buffer_data.buffer = instance->findBufferID(pBuffer);
+    msg.u.extended_buffer_data.range_offset = pBuffer->nOffset;
+    msg.u.extended_buffer_data.range_length = pBuffer->nFilledLen;
+    msg.u.extended_buffer_data.flags = pBuffer->nFlags;
+    msg.u.extended_buffer_data.timestamp = pBuffer->nTimeStamp;
+    instance->mDispatcher->post(msg);
+
+    return OMX_ErrorNone;
 }
 
-void OMXNodeInstance::addActiveBuffer(OMX_U32 portIndex, OMX::buffer_id id) {
+void OMXNodeInstance::addActiveBuffer(OMX_U32 portIndex, IOMX::buffer_id id) {
     ActiveBuffer active;
     active.mPortIndex = portIndex;
     active.mID = id;
@@ -1912,7 +2210,7 @@
 }
 
 void OMXNodeInstance::removeActiveBuffer(
-        OMX_U32 portIndex, OMX::buffer_id id) {
+        OMX_U32 portIndex, IOMX::buffer_id id) {
     for (size_t i = 0; i < mActiveBuffers.size(); ++i) {
         if (mActiveBuffers[i].mPortIndex == portIndex
                 && mActiveBuffers[i].mID == id) {
@@ -1937,17 +2235,17 @@
     }
 }
 
-OMX::buffer_id OMXNodeInstance::makeBufferID(OMX_BUFFERHEADERTYPE *bufferHeader) {
+IOMX::buffer_id OMXNodeInstance::makeBufferID(OMX_BUFFERHEADERTYPE *bufferHeader) {
     if (bufferHeader == NULL) {
         return 0;
     }
     Mutex::Autolock autoLock(mBufferIDLock);
-    OMX::buffer_id buffer;
+    IOMX::buffer_id buffer;
     do { // handle the very unlikely case of ID overflow
         if (++mBufferIDCount == 0) {
             ++mBufferIDCount;
         }
-        buffer = (OMX::buffer_id)mBufferIDCount;
+        buffer = (IOMX::buffer_id)mBufferIDCount;
     } while (mBufferIDToBufferHeader.indexOfKey(buffer) >= 0);
     mBufferIDToBufferHeader.add(buffer, bufferHeader);
     mBufferHeaderToBufferID.add(bufferHeader, buffer);
@@ -1955,7 +2253,7 @@
 }
 
 OMX_BUFFERHEADERTYPE *OMXNodeInstance::findBufferHeader(
-        OMX::buffer_id buffer, OMX_U32 portIndex) {
+        IOMX::buffer_id buffer, OMX_U32 portIndex) {
     if (buffer == 0) {
         return NULL;
     }
@@ -1976,7 +2274,7 @@
     return header;
 }
 
-OMX::buffer_id OMXNodeInstance::findBufferID(OMX_BUFFERHEADERTYPE *bufferHeader) {
+IOMX::buffer_id OMXNodeInstance::findBufferID(OMX_BUFFERHEADERTYPE *bufferHeader) {
     if (bufferHeader == NULL) {
         return 0;
     }
@@ -1989,7 +2287,7 @@
     return mBufferHeaderToBufferID.valueAt(index);
 }
 
-void OMXNodeInstance::invalidateBufferID(OMX::buffer_id buffer) {
+void OMXNodeInstance::invalidateBufferID(IOMX::buffer_id buffer) {
     if (buffer == 0) {
         return;
     }
diff --git a/media/libstagefright/omx/OMXUtils.cpp b/media/libstagefright/omx/OMXUtils.cpp
index 799696c..38aad39 100644
--- a/media/libstagefright/omx/OMXUtils.cpp
+++ b/media/libstagefright/omx/OMXUtils.cpp
@@ -20,7 +20,10 @@
 #include <string.h>
 
 #include <media/hardware/HardwareAPI.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AUtils.h>
 #include <media/stagefright/MediaErrors.h>
+#include <media/MediaDefs.h>
 #include "OMXUtils.h"
 
 namespace android {
@@ -101,5 +104,273 @@
 
 /**************************************************************************************************/
 
+const char *GetComponentRole(bool isEncoder, const char *mime) {
+    struct MimeToRole {
+        const char *mime;
+        const char *decoderRole;
+        const char *encoderRole;
+    };
+
+    static const MimeToRole kMimeToRole[] = {
+        { MEDIA_MIMETYPE_AUDIO_MPEG,
+            "audio_decoder.mp3", "audio_encoder.mp3" },
+        { MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I,
+            "audio_decoder.mp1", "audio_encoder.mp1" },
+        { MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II,
+            "audio_decoder.mp2", "audio_encoder.mp2" },
+        { MEDIA_MIMETYPE_AUDIO_AMR_NB,
+            "audio_decoder.amrnb", "audio_encoder.amrnb" },
+        { MEDIA_MIMETYPE_AUDIO_AMR_WB,
+            "audio_decoder.amrwb", "audio_encoder.amrwb" },
+        { MEDIA_MIMETYPE_AUDIO_AAC,
+            "audio_decoder.aac", "audio_encoder.aac" },
+        { MEDIA_MIMETYPE_AUDIO_VORBIS,
+            "audio_decoder.vorbis", "audio_encoder.vorbis" },
+        { MEDIA_MIMETYPE_AUDIO_OPUS,
+            "audio_decoder.opus", "audio_encoder.opus" },
+        { MEDIA_MIMETYPE_AUDIO_G711_MLAW,
+            "audio_decoder.g711mlaw", "audio_encoder.g711mlaw" },
+        { MEDIA_MIMETYPE_AUDIO_G711_ALAW,
+            "audio_decoder.g711alaw", "audio_encoder.g711alaw" },
+        { MEDIA_MIMETYPE_VIDEO_AVC,
+            "video_decoder.avc", "video_encoder.avc" },
+        { MEDIA_MIMETYPE_VIDEO_HEVC,
+            "video_decoder.hevc", "video_encoder.hevc" },
+        { MEDIA_MIMETYPE_VIDEO_MPEG4,
+            "video_decoder.mpeg4", "video_encoder.mpeg4" },
+        { MEDIA_MIMETYPE_VIDEO_H263,
+            "video_decoder.h263", "video_encoder.h263" },
+        { MEDIA_MIMETYPE_VIDEO_VP8,
+            "video_decoder.vp8", "video_encoder.vp8" },
+        { MEDIA_MIMETYPE_VIDEO_VP9,
+            "video_decoder.vp9", "video_encoder.vp9" },
+        { MEDIA_MIMETYPE_AUDIO_RAW,
+            "audio_decoder.raw", "audio_encoder.raw" },
+        { MEDIA_MIMETYPE_VIDEO_DOLBY_VISION,
+            "video_decoder.dolby-vision", "video_encoder.dolby-vision" },
+        { MEDIA_MIMETYPE_AUDIO_FLAC,
+            "audio_decoder.flac", "audio_encoder.flac" },
+        { MEDIA_MIMETYPE_AUDIO_MSGSM,
+            "audio_decoder.gsm", "audio_encoder.gsm" },
+        { MEDIA_MIMETYPE_VIDEO_MPEG2,
+            "video_decoder.mpeg2", "video_encoder.mpeg2" },
+        { MEDIA_MIMETYPE_AUDIO_AC3,
+            "audio_decoder.ac3", "audio_encoder.ac3" },
+        { MEDIA_MIMETYPE_AUDIO_EAC3,
+            "audio_decoder.eac3", "audio_encoder.eac3" },
+    };
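+    // For example, MEDIA_MIMETYPE_VIDEO_AVC maps to "video_decoder.avc" for
+    // decoders and "video_encoder.avc" for encoders; a MIME type not listed
+    // in this table makes this function return NULL.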
+
+    static const size_t kNumMimeToRole =
+        sizeof(kMimeToRole) / sizeof(kMimeToRole[0]);
+
+    size_t i;
+    for (i = 0; i < kNumMimeToRole; ++i) {
+        if (!strcasecmp(mime, kMimeToRole[i].mime)) {
+            break;
+        }
+    }
+
+    if (i == kNumMimeToRole) {
+        return NULL;
+    }
+
+    return isEncoder ? kMimeToRole[i].encoderRole
+                  : kMimeToRole[i].decoderRole;
+}
+
+status_t SetComponentRole(const sp<IOMXNode> &omxNode, const char *role) {
+    OMX_PARAM_COMPONENTROLETYPE roleParams;
+    InitOMXParams(&roleParams);
+
+    strncpy((char *)roleParams.cRole,
+            role, OMX_MAX_STRINGNAME_SIZE - 1);
+
+    roleParams.cRole[OMX_MAX_STRINGNAME_SIZE - 1] = '\0';
+
+    return omxNode->setParameter(
+            OMX_IndexParamStandardComponentRole,
+            &roleParams, sizeof(roleParams));
+}
+
+bool DescribeDefaultColorFormat(DescribeColorFormat2Params &params) {
+    MediaImage2 &image = params.sMediaImage;
+    memset(&image, 0, sizeof(image));
+
+    image.mType = MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
+    image.mNumPlanes = 0;
+
+    const OMX_COLOR_FORMATTYPE fmt = params.eColorFormat;
+    image.mWidth = params.nFrameWidth;
+    image.mHeight = params.nFrameHeight;
+
+    // only supporting YUV420
+    if (fmt != OMX_COLOR_FormatYUV420Planar &&
+        fmt != OMX_COLOR_FormatYUV420PackedPlanar &&
+        fmt != OMX_COLOR_FormatYUV420SemiPlanar &&
+        fmt != OMX_COLOR_FormatYUV420PackedSemiPlanar &&
+        fmt != (OMX_COLOR_FORMATTYPE)HAL_PIXEL_FORMAT_YV12) {
+        ALOGW("do not know color format 0x%x = %d", fmt, fmt);
+        return false;
+    }
+
+    // TEMPORARY FIX for some vendors that advertise sliceHeight as 0
+    if (params.nStride != 0 && params.nSliceHeight == 0) {
+        ALOGW("using sliceHeight=%u instead of what codec advertised (=0)",
+                params.nFrameHeight);
+        params.nSliceHeight = params.nFrameHeight;
+    }
+
+    // we need stride and slice-height to be non-zero and sensible. These values were chosen to
+    // prevent integer overflows further down the line, and do not indicate support for
+    // 32kx32k video.
+    if (params.nStride == 0 || params.nSliceHeight == 0
+            || params.nStride > 32768 || params.nSliceHeight > 32768) {
+        ALOGW("cannot describe color format 0x%x = %d with stride=%u and sliceHeight=%u",
+                fmt, fmt, params.nStride, params.nSliceHeight);
+        return false;
+    }
+
+    // set-up YUV format
+    image.mType = MediaImage2::MEDIA_IMAGE_TYPE_YUV;
+    image.mNumPlanes = 3;
+    image.mBitDepth = 8;
+    image.mBitDepthAllocated = 8;
+    image.mPlane[image.Y].mOffset = 0;
+    image.mPlane[image.Y].mColInc = 1;
+    image.mPlane[image.Y].mRowInc = params.nStride;
+    image.mPlane[image.Y].mHorizSubsampling = 1;
+    image.mPlane[image.Y].mVertSubsampling = 1;
+
+    switch ((int)fmt) {
+        case HAL_PIXEL_FORMAT_YV12:
+            if (params.bUsingNativeBuffers) {
+                size_t ystride = align(params.nStride, 16);
+                size_t cstride = align(params.nStride / 2, 16);
+                image.mPlane[image.Y].mRowInc = ystride;
+
+                image.mPlane[image.V].mOffset = ystride * params.nSliceHeight;
+                image.mPlane[image.V].mColInc = 1;
+                image.mPlane[image.V].mRowInc = cstride;
+                image.mPlane[image.V].mHorizSubsampling = 2;
+                image.mPlane[image.V].mVertSubsampling = 2;
+
+                image.mPlane[image.U].mOffset = image.mPlane[image.V].mOffset
+                        + (cstride * params.nSliceHeight / 2);
+                image.mPlane[image.U].mColInc = 1;
+                image.mPlane[image.U].mRowInc = cstride;
+                image.mPlane[image.U].mHorizSubsampling = 2;
+                image.mPlane[image.U].mVertSubsampling = 2;
+                break;
+            } else {
+                // fall through as YV12 is used for YUV420Planar by some codecs
+            }
+
+        case OMX_COLOR_FormatYUV420Planar:
+        case OMX_COLOR_FormatYUV420PackedPlanar:
+            image.mPlane[image.U].mOffset = params.nStride * params.nSliceHeight;
+            image.mPlane[image.U].mColInc = 1;
+            image.mPlane[image.U].mRowInc = params.nStride / 2;
+            image.mPlane[image.U].mHorizSubsampling = 2;
+            image.mPlane[image.U].mVertSubsampling = 2;
+
+            image.mPlane[image.V].mOffset = image.mPlane[image.U].mOffset
+                    + (params.nStride * params.nSliceHeight / 4);
+            image.mPlane[image.V].mColInc = 1;
+            image.mPlane[image.V].mRowInc = params.nStride / 2;
+            image.mPlane[image.V].mHorizSubsampling = 2;
+            image.mPlane[image.V].mVertSubsampling = 2;
+            break;
+
+        case OMX_COLOR_FormatYUV420SemiPlanar:
+            // FIXME: NV21 for sw-encoder, NV12 for decoder and hw-encoder
+        case OMX_COLOR_FormatYUV420PackedSemiPlanar:
+            // NV12
+            image.mPlane[image.U].mOffset = params.nStride * params.nSliceHeight;
+            image.mPlane[image.U].mColInc = 2;
+            image.mPlane[image.U].mRowInc = params.nStride;
+            image.mPlane[image.U].mHorizSubsampling = 2;
+            image.mPlane[image.U].mVertSubsampling = 2;
+
+            image.mPlane[image.V].mOffset = image.mPlane[image.U].mOffset + 1;
+            image.mPlane[image.V].mColInc = 2;
+            image.mPlane[image.V].mRowInc = params.nStride;
+            image.mPlane[image.V].mHorizSubsampling = 2;
+            image.mPlane[image.V].mVertSubsampling = 2;
+            break;
+
+        default:
+            TRESPASS();
+    }
+    return true;
+}
+
+bool DescribeColorFormat(
+        const sp<IOMXNode> &omxNode,
+        DescribeColorFormat2Params &describeParams)
+{
+    OMX_INDEXTYPE describeColorFormatIndex;
+    if (omxNode->getExtensionIndex(
+            "OMX.google.android.index.describeColorFormat",
+            &describeColorFormatIndex) == OK) {
+        DescribeColorFormatParams describeParamsV1(describeParams);
+        if (omxNode->getParameter(
+                describeColorFormatIndex,
+                &describeParamsV1, sizeof(describeParamsV1)) == OK) {
+            describeParams.initFromV1(describeParamsV1);
+            return describeParams.sMediaImage.mType != MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
+        }
+    } else if (omxNode->getExtensionIndex(
+            "OMX.google.android.index.describeColorFormat2", &describeColorFormatIndex) == OK
+               && omxNode->getParameter(
+                       describeColorFormatIndex, &describeParams, sizeof(describeParams)) == OK) {
+        return describeParams.sMediaImage.mType != MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
+    }
+
+    return DescribeDefaultColorFormat(describeParams);
+}
+
+// Returns true if |colorFormat| describes a flexible YUV 4:2:0 layout; on
+// success, *flexibleEquivalent is set to OMX_COLOR_FormatYUV420Flexible.
+bool IsFlexibleColorFormat(
+         const sp<IOMXNode> &omxNode,
+         uint32_t colorFormat, bool usingNativeBuffers, OMX_U32 *flexibleEquivalent) {
+    DescribeColorFormat2Params describeParams;
+    InitOMXParams(&describeParams);
+    describeParams.eColorFormat = (OMX_COLOR_FORMATTYPE)colorFormat;
+    // reasonable dummy values
+    describeParams.nFrameWidth = 128;
+    describeParams.nFrameHeight = 128;
+    describeParams.nStride = 128;
+    describeParams.nSliceHeight = 128;
+    describeParams.bUsingNativeBuffers = (OMX_BOOL)usingNativeBuffers;
+
+    CHECK(flexibleEquivalent != NULL);
+
+    if (!DescribeColorFormat(omxNode, describeParams)) {
+        return false;
+    }
+
+    const MediaImage2 &img = describeParams.sMediaImage;
+    if (img.mType == MediaImage2::MEDIA_IMAGE_TYPE_YUV) {
+        if (img.mNumPlanes != 3
+                || img.mPlane[img.Y].mHorizSubsampling != 1
+                || img.mPlane[img.Y].mVertSubsampling != 1) {
+            return false;
+        }
+
+        // YUV 420
+        if (img.mPlane[img.U].mHorizSubsampling == 2
+                && img.mPlane[img.U].mVertSubsampling == 2
+                && img.mPlane[img.V].mHorizSubsampling == 2
+                && img.mPlane[img.V].mVertSubsampling == 2) {
+            // possible flexible YUV420 format
+            if (img.mBitDepth <= 8) {
+               *flexibleEquivalent = OMX_COLOR_FormatYUV420Flexible;
+               return true;
+            }
+        }
+    }
+    return false;
+}
+
 }  // namespace android
 
diff --git a/media/libstagefright/omx/OMXUtils.h b/media/libstagefright/omx/OMXUtils.h
index 0c5e537..401d64b 100644
--- a/media/libstagefright/omx/OMXUtils.h
+++ b/media/libstagefright/omx/OMXUtils.h
@@ -17,6 +17,8 @@
 #ifndef OMX_UTILS_H_
 #define OMX_UTILS_H_
 
+#include <media/IOMX.h>
+
 /***** DO NOT USE THIS INCLUDE!!! INTERNAL ONLY!!! UNLESS YOU RESIDE IN media/libstagefright *****/
 
 // OMXUtils contains omx-specific utility functions for stagefright/omx library
@@ -36,6 +38,43 @@
 
 status_t StatusFromOMXError(OMX_ERRORTYPE err);
 
+const char *GetComponentRole(bool isEncoder, const char *mime);
+status_t SetComponentRole(const sp<IOMXNode> &omxNode, const char *role);
+
+struct DescribeColorFormat2Params;
+
+bool IsFlexibleColorFormat(
+        const sp<IOMXNode> &omxNode, uint32_t colorFormat,
+        bool usingNativeBuffers, OMX_U32 *flexibleEquivalent);
+bool DescribeDefaultColorFormat(DescribeColorFormat2Params &describeParams);
+bool DescribeColorFormat(
+        const sp<IOMXNode> &omxNode,
+        DescribeColorFormat2Params &describeParams);
+
+inline static const char *asString(MetadataBufferType i, const char *def = "??") {
+    using namespace android;
+    switch (i) {
+        case kMetadataBufferTypeCameraSource:   return "CameraSource";
+        case kMetadataBufferTypeGrallocSource:  return "GrallocSource";
+        case kMetadataBufferTypeANWBuffer:      return "ANWBuffer";
+        case kMetadataBufferTypeNativeHandleSource: return "NativeHandleSource";
+        case kMetadataBufferTypeInvalid:        return "Invalid";
+        default:                                return def;
+    }
+}
+
+inline static const char *asString(IOMX::PortMode mode, const char *def = "??") {
+    using namespace android;
+    switch (mode) {
+        case IOMX::kPortModePresetByteBuffer:   return "PresetByteBuffer";
+        case IOMX::kPortModePresetANWBuffer:    return "PresetANWBuffer";
+        case IOMX::kPortModePresetSecureBuffer: return "PresetSecureBuffer";
+        case IOMX::kPortModeDynamicANWBuffer:   return "DynamicANWBuffer";
+        case IOMX::kPortModeDynamicNativeHandle:return "DynamicNativeHandle";
+        default:                                return def;
+    }
+}
+
 }  // namespace android
 
 #endif
diff --git a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
index 7c975f7..761b425 100644
--- a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
+++ b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
@@ -77,19 +77,34 @@
     switch (index) {
         case OMX_IndexParamPortDefinition:
         {
-            portIndex = ((OMX_PARAM_PORTDEFINITIONTYPE *)params)->nPortIndex;
+            const OMX_PARAM_PORTDEFINITIONTYPE *portDefs =
+                    (const OMX_PARAM_PORTDEFINITIONTYPE *) params;
+            if (!isValidOMXParam(portDefs)) {
+                return false;
+            }
+            portIndex = portDefs->nPortIndex;
             break;
         }
 
         case OMX_IndexParamAudioPcm:
         {
-            portIndex = ((OMX_AUDIO_PARAM_PCMMODETYPE *)params)->nPortIndex;
+            const OMX_AUDIO_PARAM_PCMMODETYPE *pcmMode =
+                    (const OMX_AUDIO_PARAM_PCMMODETYPE *) params;
+            if (!isValidOMXParam(pcmMode)) {
+                return false;
+            }
+            portIndex = pcmMode->nPortIndex;
             break;
         }
 
         case OMX_IndexParamAudioAac:
         {
-            portIndex = ((OMX_AUDIO_PARAM_AACPROFILETYPE *)params)->nPortIndex;
+            const OMX_AUDIO_PARAM_AACPROFILETYPE *aacMode =
+                    (const OMX_AUDIO_PARAM_AACPROFILETYPE *) params;
+            if (!isValidOMXParam(aacMode)) {
+                return false;
+            }
+            portIndex = aacMode->nPortIndex;
             break;
         }
 
diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp
old mode 100755
new mode 100644
index 0f9c00c..a773ca2
--- a/media/libstagefright/omx/SoftOMXPlugin.cpp
+++ b/media/libstagefright/omx/SoftOMXPlugin.cpp
@@ -56,6 +56,7 @@
     { "OMX.google.vp8.decoder", "vpxdec", "video_decoder.vp8" },
     { "OMX.google.vp9.decoder", "vpxdec", "video_decoder.vp9" },
     { "OMX.google.vp8.encoder", "vpxenc", "video_encoder.vp8" },
+    { "OMX.google.vp9.encoder", "vpxenc", "video_encoder.vp9" },
     { "OMX.google.raw.decoder", "rawdec", "audio_decoder.raw" },
     { "OMX.google.flac.encoder", "flacenc", "audio_encoder.flac" },
     { "OMX.google.gsm.decoder", "gsmdec", "audio_decoder.gsm" },
diff --git a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
index 0f9c118..adf846a 100644
--- a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
@@ -480,18 +480,25 @@
             unsigned green = src[greenOffset];
             unsigned blue  = src[blueOffset];
 
-            // using ITU-R BT.601 conversion matrix
+            // Using ITU-R BT.601-7 (03/2011)
+            //   2.5.1: Ey'  = ( 0.299*R + 0.587*G + 0.114*B)
+            //   2.5.2: ECr' = ( 0.701*R - 0.587*G - 0.114*B) / 1.402
+            //          ECb' = (-0.299*R - 0.587*G + 0.886*B) / 1.772
+            //   2.5.3: Y  = 219 * Ey'  +  16
+            //          Cr = 224 * ECr' + 128
+            //          Cb = 224 * ECb' + 128
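+            //
+            //   The integer coefficients below are (approximately) these
+            //   weights in Q8 fixed point, pre-scaled by 219/255 for luma and
+            //   by 224/255 (with the 1.402 / 1.772 divisors folded in) for
+            //   chroma; the "+ 128" term rounds to nearest before ">> 8".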
+
             unsigned luma =
-                ((red * 66 + green * 129 + blue * 25) >> 8) + 16;
+                ((red * 65 + green * 129 + blue * 25 + 128) >> 8) + 16;
 
             dstY[x] = luma;
 
             if ((x & 1) == 0 && (y & 1) == 0) {
                 unsigned U =
-                    ((-red * 38 - green * 74 + blue * 112) >> 8) + 128;
+                    ((-red * 38 - green * 74 + blue * 112 + 128) >> 8) + 128;
 
                 unsigned V =
-                    ((red * 112 - green * 94 - blue * 18) >> 8) + 128;
+                    ((red * 112 - green * 94 - blue * 18 + 128) >> 8) + 128;
 
                 dstU[x >> 1] = U;
                 dstV[x >> 1] = V;
diff --git a/media/libstagefright/omx/hal/1.0/Android.mk b/media/libstagefright/omx/hal/1.0/Android.mk
new file mode 100644
index 0000000..1633486
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/Android.mk
@@ -0,0 +1,38 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := android.hardware.media.omx@1.0-impl
+LOCAL_MODULE_RELATIVE_PATH := hw
+LOCAL_SRC_FILES := \
+    WGraphicBufferSource.cpp \
+    WOmx.cpp \
+    WOmxBufferSource.cpp \
+    WOmxNode.cpp \
+    WOmxObserver.cpp \
+    Omx.cpp \
+    OmxNode.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+    libmedia \
+    libstagefright_foundation \
+    libstagefright_omx \
+    libui \
+    libhidlbase \
+    libhidltransport \
+    libhwbinder \
+    libutils \
+    libcutils \
+    libbinder \
+    android.hardware.media.omx@1.0 \
+    android.hardware.graphics.common@1.0 \
+    android.hardware.media@1.0 \
+    android.hidl.base@1.0 \
+
+LOCAL_C_INCLUDES += \
+        $(TOP) \
+        $(TOP)/frameworks/av/include/media \
+        $(TOP)/frameworks/av/media/libstagefright/include \
+        $(TOP)/frameworks/native/include/media/hardware \
+        $(TOP)/frameworks/native/include/media/openmax
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/omx/hal/1.0/Conversion.h b/media/libstagefright/omx/hal/1.0/Conversion.h
new file mode 100644
index 0000000..d42e5bf
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/Conversion.h
@@ -0,0 +1,799 @@
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0__CONVERSION_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0__CONVERSION_H
+
+#include <hidl/MQDescriptor.h>
+
+#include <unistd.h>
+#include <vector>
+#include <list>
+
+#include <frameworks/native/include/binder/Binder.h>
+#include <frameworks/native/include/binder/Status.h>
+
+#include <OMXFenceParcelable.h>
+#include <cutils/native_handle.h>
+
+#include <IOMX.h>
+#include <VideoAPI.h>
+#include <OMXBuffer.h>
+#include <android/IOMXBufferSource.h>
+#include <android/IGraphicBufferSource.h>
+
+#include <android/hardware/media/omx/1.0/types.h>
+#include <android/hardware/media/omx/1.0/IOmx.h>
+#include <android/hardware/media/omx/1.0/IOmxNode.h>
+#include <android/hardware/media/omx/1.0/IOmxBufferSource.h>
+#include <android/hardware/media/omx/1.0/IOmxObserver.h>
+#include <android/hardware/media/omx/1.0/IGraphicBufferSource.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+using ::android::hardware::hidl_handle;
+using ::android::String8;
+using ::android::OMXFenceParcelable;
+
+using ::android::hardware::media::omx::V1_0::Message;
+using ::android::omx_message;
+
+using ::android::hardware::media::omx::V1_0::ColorAspects;
+
+using ::android::hardware::graphics::common::V1_0::Dataspace;
+
+using ::android::hardware::graphics::common::V1_0::PixelFormat;
+
+using ::android::OMXBuffer;
+
+using ::android::hardware::media::omx::V1_0::IOmx;
+using ::android::IOMX;
+
+using ::android::hardware::media::omx::V1_0::IOmxNode;
+using ::android::IOMXNode;
+
+using ::android::hardware::media::omx::V1_0::IOmxObserver;
+using ::android::IOMXObserver;
+
+using ::android::hardware::media::omx::V1_0::IOmxBufferSource;
+using ::android::IOMXBufferSource;
+
+// native_handle_t helper functions.
+
+/**
+ * \brief Take an fd and create a native handle containing only the given fd.
+ * The created handle will need to be deleted manually with
+ * `native_handle_delete()`.
+ *
+ * \param[in] fd The source file descriptor (of type `int`).
+ * \return The created `native_handle_t*` that contains the given \p fd. If the
+ * supplied \p fd is negative, the created native handle will contain no file
+ * descriptors.
+ *
+ * If the native handle cannot be created, the return value will be
+ * `nullptr`.
+ *
+ * This function does not duplicate the file descriptor.
+ */
+inline native_handle_t* native_handle_create_from_fd(int fd) {
+    if (fd < 0) {
+        return native_handle_create(0, 0);
+    }
+    native_handle_t* nh = native_handle_create(1, 0);
+    if (nh == nullptr) {
+        return nullptr;
+    }
+    nh->data[0] = fd;
+    return nh;
+}
+
+/**
+ * \brief Extract a file descriptor from a native handle.
+ *
+ * \param[in] nh The source `native_handle_t*`.
+ * \param[in] index The index of the file descriptor in \p nh to read from. This
+ * input has the default value of `0`.
+ * \return The `index`-th file descriptor in \p nh. If \p nh does not have
+ * enough file descriptors, the returned value will be `-1`.
+ *
+ * This function does not duplicate the file descriptor.
+ */
+inline int native_handle_read_fd(native_handle_t const* nh, int index = 0) {
+    return ((nh == nullptr) || (nh->numFds == 0) ||
+            (nh->numFds <= index) || (index < 0)) ?
+            -1 : nh->data[index];
+}
+
+/**
+ * Conversion functions
+ * ====================
+ *
+ * There are two main directions of conversion:
+ * - `inTargetType(...)`: Create a wrapper whose lifetime depends on the
+ *   input. The wrapper has type `TargetType`.
+ * - `toTargetType(...)`: Create a standalone object of type `TargetType` that
+ *   corresponds to the input. The lifetime of the output does not depend on the
+ *   lifetime of the input.
+ * - `wrapAs(TargetType*, ...)`: Same as `inTargetType()`, but for `TargetType`
+ *   that cannot be copied and/or moved efficiently, or when there are multiple
+ *   output arguments.
+ * - `convertTo(TargetType*, ...)`: Same as `toTargetType()`, but for
+ *   `TargetType` that cannot be copied and/or moved efficiently, or when there
+ *   are multiple output arguments.
+ *
+ * `wrapAs()` and `convertTo()` functions will take output arguments before
+ * input arguments. Some of these functions might return a value to indicate
+ * success or error.
+ *
+ * In converting or wrapping something as a Treble type that contains a
+ * `hidl_handle`, `native_handle_t*` will need to be created and returned as
+ * an additional output argument, hence only `wrapAs()` or `convertTo()` would
+ * be available. The caller must call `native_handle_delete()` to deallocate the
+ * returned native handle when it is no longer needed.
+ *
+ * For types that contain file descriptors, `inTargetType()` and `wrapAs()` do
+ * not perform duplication of file descriptors, while `toTargetType()` and
+ * `convertTo()` do.
+ */
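+
+/*
+ * For example, a legacy `omx_message` can be wrapped for transport roughly as
+ * follows (sketch only; error handling omitted):
+ *
+ *     omx_message lMsg;                  // filled in by the caller; owns fenceFd
+ *     Message tMsg;
+ *     native_handle_t* fenceNh;
+ *     if (wrapAs(&tMsg, &fenceNh, lMsg)) {
+ *         // ... pass tMsg across the HIDL boundary ...
+ *         native_handle_delete(fenceNh); // wrapper only; the fd is not duplicated
+ *     }
+ */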
+
+/**
+ * \brief Convert `binder::Status` to `Return<void>`.
+ *
+ * \param[in] l The source `binder::Status`.
+ * \return The corresponding `Return<void>`.
+ */
+// convert: ::android::binder::Status -> Return<void>
+inline Return<void> toHardwareStatus(
+        ::android::binder::Status const& l) {
+    if (l.exceptionCode() == ::android::binder::Status::EX_SERVICE_SPECIFIC) {
+        return ::android::hardware::Status::fromServiceSpecificError(
+                l.serviceSpecificErrorCode(),
+                l.exceptionMessage());
+    }
+    return ::android::hardware::Status::fromExceptionCode(
+            l.exceptionCode(),
+            l.exceptionMessage());
+}
+
+/**
+ * \brief Convert `Return<void>` to `binder::Status`.
+ *
+ * \param[in] t The source `Return<void>`.
+ * \return The corresponding `binder::Status`.
+ */
+// convert: Return<void> -> ::android::binder::Status
+inline ::android::binder::Status toBinderStatus(
+        Return<void> const& t) {
+    return ::android::binder::Status::fromExceptionCode(
+            t.isOk() ? OK : UNKNOWN_ERROR,
+            t.description().c_str());
+}
+
+/**
+ * \brief Convert `Return<Status>` to `status_t`. This is for legacy binder
+ * calls.
+ *
+ * \param[in] t The source `Return<Status>`.
+ * \return The corresponding `status_t`.
+ *
+ * This function first checks whether \p t has a transport error. If it does,
+ * the return value is `UNKNOWN_ERROR`. Otherwise, the return value is
+ * converted from the `Status` contained inside \p t.
+ *
+ * Note:
+ * - This `Status` is omx-specific. It is defined in `types.hal`.
+ * - The name of this function is not `convert`.
+ */
+// convert: Status -> status_t
+inline status_t toStatusT(Return<Status> const& t) {
+    return t.isOk() ? static_cast<status_t>(static_cast<Status>(t)) : UNKNOWN_ERROR;
+}
+
+/**
+ * \brief Convert `Return<void>` to `status_t`. This is for legacy binder calls.
+ *
+ * \param[in] t The source `Return<void>`.
+ * \return The corresponding `status_t`.
+ */
+// convert: Return<void> -> status_t
+inline status_t toStatusT(Return<void> const& t) {
+    return t.isOk() ? OK : UNKNOWN_ERROR;
+}
+
+/**
+ * \brief Convert `Status` to `status_t`. This is for legacy binder calls.
+ *
+ * \param[in] t The source `Status`.
+ * \return the corresponding `status_t`.
+ */
+// convert: Status -> status_t
+inline status_t toStatusT(Status const& t) {
+    return static_cast<status_t>(t);
+}
+
+/**
+ * \brief Convert `status_t` to `Status`.
+ *
+ * \param[in] l The source `status_t`.
+ * \return The corresponding `Status`.
+ */
+// convert: status_t -> Status
+inline Status toStatus(status_t l) {
+    return static_cast<Status>(l);
+}
+
+/**
+ * \brief Wrap `native_handle_t*` in `hidl_handle`.
+ *
+ * \param[in] nh The source `native_handle_t*`.
+ * \return The `hidl_handle` that points to \p nh.
+ */
+// wrap: native_handle_t* -> hidl_handle
+inline hidl_handle inHidlHandle(native_handle_t const* nh) {
+    return hidl_handle(nh);
+}
+
+/**
+ * \brief Wrap an `omx_message` and construct the corresponding `Message`.
+ *
+ * \param[out] t The wrapper of type `Message`.
+ * \param[out] nh The native_handle_t referred to by `t->fence`.
+ * \param[in] l The source `omx_message`.
+ * \return `true` if the wrapping is successful; `false` otherwise.
+ *
+ * Upon success, \p nh will be created to hold the file descriptor stored in
+ * `l.fenceFd`, and `t->fence` will point to \p nh. \p nh will need to be
+ * destroyed manually by `native_handle_delete()` when \p t is no longer needed.
+ *
+ * Upon failure, \p nh will not be created and will not need to be deleted. \p t
+ * will be invalid.
+ */
+// wrap, omx_message -> Message, native_handle_t*
+inline bool wrapAs(Message* t, native_handle_t** nh, omx_message const& l) {
+    *nh = native_handle_create_from_fd(l.fenceFd);
+    if (!*nh) {
+        return false;
+    }
+    t->fence = inHidlHandle(*nh);
+    switch (l.type) {
+        case omx_message::EVENT:
+            t->type = Message::Type::EVENT;
+            t->data.eventData.data1 = l.u.event_data.data1;
+            t->data.eventData.data2 = l.u.event_data.data2;
+            t->data.eventData.data3 = l.u.event_data.data3;
+            t->data.eventData.data4 = l.u.event_data.data4;
+            break;
+        case omx_message::EMPTY_BUFFER_DONE:
+            t->type = Message::Type::EMPTY_BUFFER_DONE;
+            t->data.bufferData.buffer = l.u.buffer_data.buffer;
+            break;
+        case omx_message::FILL_BUFFER_DONE:
+            t->type = Message::Type::FILL_BUFFER_DONE;
+            t->data.extendedBufferData.buffer = l.u.extended_buffer_data.buffer;
+            t->data.extendedBufferData.rangeOffset = l.u.extended_buffer_data.range_offset;
+            t->data.extendedBufferData.rangeLength = l.u.extended_buffer_data.range_length;
+            t->data.extendedBufferData.flags = l.u.extended_buffer_data.flags;
+            t->data.extendedBufferData.timestampUs = l.u.extended_buffer_data.timestamp;
+            break;
+        case omx_message::FRAME_RENDERED:
+            t->type = Message::Type::FRAME_RENDERED;
+            t->data.renderData.timestampUs = l.u.render_data.timestamp;
+            t->data.renderData.systemTimeNs = l.u.render_data.nanoTime;
+            break;
+        default:
+            native_handle_delete(*nh);
+            return false;
+    }
+    return true;
+}
+
+/**
+ * \brief Wrap a `Message` inside an `omx_message`.
+ *
+ * \param[out] l The wrapper of type `omx_message`.
+ * \param[in] t The source `Message`.
+ * \return `true` if the wrapping is successful; `false` otherwise.
+ */
+// wrap: Message -> omx_message
+inline bool wrapAs(omx_message* l, Message const& t) {
+    l->fenceFd = native_handle_read_fd(t.fence);
+    switch (t.type) {
+        case Message::Type::EVENT:
+            l->type = omx_message::EVENT;
+            l->u.event_data.data1 = t.data.eventData.data1;
+            l->u.event_data.data2 = t.data.eventData.data2;
+            l->u.event_data.data3 = t.data.eventData.data3;
+            l->u.event_data.data4 = t.data.eventData.data4;
+            break;
+        case Message::Type::EMPTY_BUFFER_DONE:
+            l->type = omx_message::EMPTY_BUFFER_DONE;
+            l->u.buffer_data.buffer = t.data.bufferData.buffer;
+            break;
+        case Message::Type::FILL_BUFFER_DONE:
+            l->type = omx_message::FILL_BUFFER_DONE;
+            l->u.extended_buffer_data.buffer = t.data.extendedBufferData.buffer;
+            l->u.extended_buffer_data.range_offset = t.data.extendedBufferData.rangeOffset;
+            l->u.extended_buffer_data.range_length = t.data.extendedBufferData.rangeLength;
+            l->u.extended_buffer_data.flags = t.data.extendedBufferData.flags;
+            l->u.extended_buffer_data.timestamp = t.data.extendedBufferData.timestampUs;
+            break;
+        case Message::Type::FRAME_RENDERED:
+            l->type = omx_message::FRAME_RENDERED;
+            l->u.render_data.timestamp = t.data.renderData.timestampUs;
+            l->u.render_data.nanoTime = t.data.renderData.systemTimeNs;
+            break;
+        default:
+            return false;
+    }
+    return true;
+}
+
+/**
+ * \brief Similar to `wrapAs(omx_message*, Message const&)`, but the output will
+ * have an extended lifetime.
+ *
+ * \param[out] l The output `omx_message`.
+ * \param[in] t The source `Message`.
+ * \return `true` if the conversion is successful; `false` otherwise.
+ *
+ * This function calls `wrapAs()`, then attempts to clone the file descriptor
+ * for the fence if it is not `-1`. If the clone cannot be made, `false` will be
+ * returned.
+ */
+// convert: Message -> omx_message
+inline bool convertTo(omx_message* l, Message const& t) {
+    if (!wrapAs(l, t)) {
+        return false;
+    }
+    if (l->fenceFd == -1) {
+        return true;
+    }
+    l->fenceFd = dup(l->fenceFd);
+    return l->fenceFd != -1;
+}
+
+/**
+ * \brief Wrap an `OMXFenceParcelable` inside a `hidl_handle`.
+ *
+ * \param[out] t The wrapper of type `hidl_handle`.
+ * \param[out] nh The native handle created to hold the file descriptor inside
+ * \p l.
+ * \param[in] l The source `OMXFenceParcelable`, which essentially contains one
+ * file descriptor.
+ * \return `true` if \p t and \p nh are successfully created to wrap around \p
+ * l; `false` otherwise.
+ *
+ * On success, \p nh needs to be deleted by the caller with
+ * `native_handle_delete()` after \p t and \p nh are no longer needed.
+ *
+ * On failure, \p nh will not need to be deleted, and \p t will hold an invalid
+ * value.
+ */
+// wrap: OMXFenceParcelable -> hidl_handle, native_handle_t*
+inline bool wrapAs(hidl_handle* t, native_handle_t** nh,
+        OMXFenceParcelable const& l) {
+    *nh = native_handle_create_from_fd(l.get());
+    if (!*nh) {
+        return false;
+    }
+    *t = *nh;
+    return true;
+}
+
+/**
+ * \brief Wrap a `hidl_handle` inside an `OMXFenceParcelable`.
+ *
+ * \param[out] l The wrapper of type `OMXFenceParcelable`.
+ * \param[in] t The source `hidl_handle`.
+ */
+// wrap: hidl_handle -> OMXFenceParcelable
+inline void wrapAs(OMXFenceParcelable* l, hidl_handle const& t) {
+    l->mFenceFd = native_handle_read_fd(t);
+}
+
+/**
+ * \brief Convert a `hidl_handle` to `OMXFenceParcelable`. If `hidl_handle`
+ * contains file descriptors, the first file descriptor will be duplicated and
+ * stored in the output `OMXFenceParcelable`.
+ *
+ * \param[out] l The output `OMXFenceParcelable`.
+ * \param[in] t The input `hidl_handle`.
+ * \return `false` if \p t contains a valid file descriptor but duplication
+ * fails; `true` otherwise.
+ */
+// convert: hidl_handle -> OMXFenceParcelable
+inline bool convertTo(OMXFenceParcelable* l, hidl_handle const& t) {
+    int fd = native_handle_read_fd(t);
+    if (fd != -1) {
+        fd = dup(fd);
+        if (fd == -1) {
+            return false;
+        }
+    }
+    l->mFenceFd = fd;
+    return true;
+}
+
+/**
+ * \brief Convert `::android::ColorAspects` to `ColorAspects`.
+ *
+ * \param[in] l The source `::android::ColorAspects`.
+ * \return The corresponding `ColorAspects`.
+ */
+// convert: ::android::ColorAspects -> ColorAspects
+inline ColorAspects toHardwareColorAspects(::android::ColorAspects const& l) {
+    return ColorAspects{
+            static_cast<ColorAspects::Range>(l.mRange),
+            static_cast<ColorAspects::Primaries>(l.mPrimaries),
+            static_cast<ColorAspects::Transfer>(l.mTransfer),
+            static_cast<ColorAspects::MatrixCoeffs>(l.mMatrixCoeffs)};
+}
+
+/**
+ * \brief Convert `int32_t` to `ColorAspects`.
+ *
+ * \param[in] l The source `int32_t`.
+ * \return The corresponding `ColorAspects`.
+ */
+// convert: int32_t -> ColorAspects
+inline ColorAspects toHardwareColorAspects(int32_t l) {
+    return ColorAspects{
+            static_cast<ColorAspects::Range>((l >> 24) & 0xFF),
+            static_cast<ColorAspects::Primaries>((l >> 16) & 0xFF),
+            static_cast<ColorAspects::Transfer>(l & 0xFF),
+            static_cast<ColorAspects::MatrixCoeffs>((l >> 8) & 0xFF)};
+}
+
+/**
+ * \brief Convert `ColorAspects` to a packed (compact) `int32_t`.
+ *
+ * \param[in] t The source `ColorAspects`.
+ * \return The corresponding `int32_t`, with the range in bits 31-24, the
+ * primaries in bits 23-16, the matrix coefficients in bits 15-8 and the
+ * transfer in bits 7-0 (the inverse of `toHardwareColorAspects(int32_t)`).
+ */
+// convert: ColorAspects -> int32_t
+inline int32_t toCompactColorAspects(ColorAspects const& t) {
+    return static_cast<int32_t>(
+            (static_cast<uint32_t>(t.range) << 24) |
+            (static_cast<uint32_t>(t.primaries) << 16) |
+            (static_cast<uint32_t>(t.transfer)) |
+            (static_cast<uint32_t>(t.matrixCoeffs) << 8));
+}
+
+/**
+ * \brief Convert `int32_t` to `Dataspace`.
+ *
+ * \param[in] l The source `int32_t`.
+ * \result The corresponding `Dataspace`.
+ */
+// convert: int32_t -> Dataspace
+inline Dataspace toHardwareDataspace(int32_t l) {
+    return static_cast<Dataspace>(l);
+}
+
+/**
+ * \brief Convert `Dataspace` to `int32_t`.
+ *
+ * \param[in] t The source `Dataspace`.
+ * \result The corresponding `int32_t`.
+ */
+// convert: Dataspace -> int32_t
+inline int32_t toRawDataspace(Dataspace const& t) {
+    return static_cast<int32_t>(t);
+}
+
+/**
+ * \brief Wrap an opaque buffer inside a `hidl_vec<uint8_t>`.
+ *
+ * \param[in] l The pointer to the beginning of the opaque buffer.
+ * \param[in] size The size of the buffer.
+ * \return A `hidl_vec<uint8_t>` that points to the buffer.
+ */
+// wrap: void*, size_t -> hidl_vec<uint8_t>
+inline hidl_vec<uint8_t> inHidlBytes(void const* l, size_t size) {
+    hidl_vec<uint8_t> t;
+    t.setToExternal(static_cast<uint8_t*>(const_cast<void*>(l)), size, false);
+    return t;
+}
+
+/**
+ * \brief Create a `hidl_vec<uint8_t>` that is a copy of an opaque buffer.
+ *
+ * \param[in] l The pointer to the beginning of the opaque buffer.
+ * \param[in] size The size of the buffer.
+ * \return A `hidl_vec<uint8_t>` that is a copy of the input buffer.
+ */
+// convert: void*, size_t -> hidl_vec<uint8_t>
+inline hidl_vec<uint8_t> toHidlBytes(void const* l, size_t size) {
+    hidl_vec<uint8_t> t;
+    t.resize(size);
+    uint8_t const* src = static_cast<uint8_t const*>(l);
+    std::copy(src, src + size, t.data());
+    return t;
+}
+
+/**
+ * \brief Wrap `OMXBuffer` in `CodecBuffer`.
+ *
+ * \param[out] t The wrapper of type `CodecBuffer`.
+ * \param[in] l The source `OMXBuffer`.
+ * \return `true` if the wrapping is successful; `false` otherwise.
+ *
+ * TODO: Use HIDL's shared memory.
+ */
+// wrap: OMXBuffer -> CodecBuffer
+inline bool wrapAs(CodecBuffer* t, OMXBuffer const& l) {
+    switch (l.mBufferType) {
+        case OMXBuffer::kBufferTypeInvalid: {
+            t->type = CodecBuffer::Type::INVALID;
+            return true;
+        }
+        case OMXBuffer::kBufferTypePreset: {
+            t->type = CodecBuffer::Type::PRESET;
+            t->attr.preset.rangeLength = static_cast<uint32_t>(l.mRangeLength);
+            return true;
+        }
+        case OMXBuffer::kBufferTypeSharedMem: {
+            t->type = CodecBuffer::Type::SHARED_MEM;
+/* TODO: Use HIDL's shared memory.
+            ssize_t offset;
+            size_t size;
+            native_handle_t* handle;
+            sp<IMemoryHeap> memoryHeap = l.mMem->getMemory(&offset, &size);
+            t->attr.sharedMem.size = static_cast<uint32_t>(size);
+            t->attr.sharedMem.flags = static_cast<uint32_t>(memoryHeap->getFlags());
+            t->attr.sharedMem.offset = static_cast<uint32_t>(offset);
+            if (!convertFd2Handle(memoryHeap->getHeapID(), nh)) {
+                return false;
+            }
+            t->nativeHandle = hidl_handle(*nh);
+            return true;*/
+            return false;
+        }
+        case OMXBuffer::kBufferTypeANWBuffer: {
+            t->type = CodecBuffer::Type::ANW_BUFFER;
+            t->attr.anwBuffer.width = l.mGraphicBuffer->getWidth();
+            t->attr.anwBuffer.height = l.mGraphicBuffer->getHeight();
+            t->attr.anwBuffer.stride = l.mGraphicBuffer->getStride();
+            t->attr.anwBuffer.format = static_cast<PixelFormat>(l.mGraphicBuffer->getPixelFormat());
+            t->attr.anwBuffer.layerCount = l.mGraphicBuffer->getLayerCount();
+            t->attr.anwBuffer.usage = l.mGraphicBuffer->getUsage();
+            t->nativeHandle = hidl_handle(l.mGraphicBuffer->handle);
+            return true;
+        }
+        case OMXBuffer::kBufferTypeNativeHandle: {
+            t->type = CodecBuffer::Type::NATIVE_HANDLE;
+            t->nativeHandle = hidl_handle(l.mNativeHandle->handle());
+            return true;
+        }
+    }
+    return false;
+}
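+// Note: for ANW buffers, wrapAs() carries only the buffer metadata and the
+// underlying native handle; convertTo() below reconstructs a GraphicBuffer by
+// cloning that handle rather than copying pixel data.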
+
+/**
+ * \brief Convert `CodecBuffer` to `OMXBuffer`.
+ *
+ * \param[out] l The destination `OMXBuffer`.
+ * \param[in] t The source `CodecBuffer`.
+ * \return `true` if successful; `false` otherwise.
+ *
+ * TODO: Use HIDL's shared memory.
+ */
+// convert: CodecBuffer -> OMXBuffer
+inline bool convertTo(OMXBuffer* l, CodecBuffer const& t) {
+    switch (t.type) {
+        case CodecBuffer::Type::INVALID: {
+            *l = OMXBuffer();
+            return true;
+        }
+        case CodecBuffer::Type::PRESET: {
+            *l = OMXBuffer(t.attr.preset.rangeLength);
+            return true;
+        }
+        case CodecBuffer::Type::SHARED_MEM: {
+/* TODO: Use HIDL's memory.
+            *l = OMXBuffer();
+            return true;*/
+            return false;
+        }
+        case CodecBuffer::Type::ANW_BUFFER: {
+            *l = OMXBuffer(sp<GraphicBuffer>(new GraphicBuffer(
+                    t.attr.anwBuffer.width,
+                    t.attr.anwBuffer.height,
+                    static_cast<::android::PixelFormat>(
+                            t.attr.anwBuffer.format),
+                    t.attr.anwBuffer.layerCount,
+                    t.attr.anwBuffer.usage,
+                    t.attr.anwBuffer.stride,
+                    native_handle_clone(t.nativeHandle),
+                    true)));
+            return true;
+        }
+        case CodecBuffer::Type::NATIVE_HANDLE: {
+            *l = OMXBuffer(NativeHandle::create(
+                    native_handle_clone(t.nativeHandle), true));
+            return true;
+        }
+    }
+    return false;
+}
+
+/**
+ * \brief Convert `IOMX::ComponentInfo` to `IOmx::ComponentInfo`.
+ *
+ * \param[out] t The destination `IOmx::ComponentInfo`.
+ * \param[in] l The source `IOMX::ComponentInfo`.
+ */
+// convert: IOMX::ComponentInfo -> IOmx::ComponentInfo
+inline bool convertTo(IOmx::ComponentInfo* t, IOMX::ComponentInfo const& l) {
+    t->mName = l.mName.string();
+    t->mRoles.resize(l.mRoles.size());
+    size_t i = 0;
+    for (auto& role : l.mRoles) {
+        t->mRoles[i++] = role.string();
+    }
+    return true;
+}
+
+/**
+ * \brief Convert `IOmx::ComponentInfo` to `IOMX::ComponentInfo`.
+ *
+ * \param[out] l The destination `IOMX::ComponentInfo`.
+ * \param[in] t The source `IOmx::ComponentInfo`.
+ */
+// convert: IOmx::ComponentInfo -> IOMX::ComponentInfo
+inline bool convertTo(IOMX::ComponentInfo* l, IOmx::ComponentInfo const& t) {
+    l->mName = t.mName.c_str();
+    l->mRoles.clear();
+    for (size_t i = 0; i < t.mRoles.size(); ++i) {
+        l->mRoles.push_back(String8(t.mRoles[i].c_str()));
+    }
+    return true;
+}
+
+/**
+ * \brief Convert `OMX_BOOL` to `bool`.
+ *
+ * \param[in] l The source `OMX_BOOL`.
+ * \return The destination `bool`.
+ */
+// convert: OMX_BOOL -> bool
+inline bool toRawBool(OMX_BOOL l) {
+    return l != OMX_FALSE;
+}
+
+/**
+ * \brief Convert `bool` to `OMX_BOOL`.
+ *
+ * \param[in] t The source `bool`.
+ * \return The destination `OMX_BOOL`.
+ */
+// convert: bool -> OMX_BOOL
+inline OMX_BOOL toEnumBool(bool t) {
+    return t ? OMX_TRUE : OMX_FALSE;
+}
+
+/**
+ * \brief Convert `OMX_COMMANDTYPE` to `uint32_t`.
+ *
+ * \param[in] l The source `OMX_COMMANDTYPE`.
+ * \return The underlying value of type `uint32_t`.
+ *
+ * `OMX_COMMANDTYPE` is an enum type whose underlying type is `uint32_t`.
+ */
+// convert: OMX_COMMANDTYPE -> uint32_t
+inline uint32_t toRawCommandType(OMX_COMMANDTYPE l) {
+    return static_cast<uint32_t>(l);
+}
+
+/**
+ * \brief Convert `uint32_t` to `OMX_COMMANDTYPE`.
+ *
+ * \param[in] t The source `uint32_t`.
+ * \return The corresponding enum value of type `OMX_COMMANDTYPE`.
+ *
+ * `OMX_COMMANDTYPE` is an enum type whose underlying type is `uint32_t`.
+ */
+// convert: uint32_t -> OMX_COMMANDTYPE
+inline OMX_COMMANDTYPE toEnumCommandType(uint32_t t) {
+    return static_cast<OMX_COMMANDTYPE>(t);
+}
+
+/**
+ * \brief Convert `OMX_INDEXTYPE` to `uint32_t`.
+ *
+ * \param[in] l The source `OMX_INDEXTYPE`.
+ * \return The underlying value of type `uint32_t`.
+ *
+ * `OMX_INDEXTYPE` is an enum type whose underlying type is `uint32_t`.
+ */
+// convert: OMX_INDEXTYPE -> uint32_t
+inline uint32_t toRawIndexType(OMX_INDEXTYPE l) {
+    return static_cast<uint32_t>(l);
+}
+
+/**
+ * \brief Convert `uint32_t` to `OMX_INDEXTYPE`.
+ *
+ * \param[in] t The source `uint32_t`.
+ * \return The corresponding enum value of type `OMX_INDEXTYPE`.
+ *
+ * `OMX_INDEXTYPE` is an enum type whose underlying type is `uint32_t`.
+ */
+// convert: uint32_t -> OMX_INDEXTYPE
+inline OMX_INDEXTYPE toEnumIndexType(uint32_t t) {
+    return static_cast<OMX_INDEXTYPE>(t);
+}
+
+/**
+ * \brief Convert `IOMX::PortMode` to `PortMode`.
+ *
+ * \param[in] l The source `IOMX::PortMode`.
+ * \return The destination `PortMode`.
+ */
+// convert: IOMX::PortMode -> PortMode
+inline PortMode toHardwarePortMode(IOMX::PortMode l) {
+    return static_cast<PortMode>(l);
+}
+
+/**
+ * \brief Convert `PortMode` to `IOMX::PortMode`.
+ *
+ * \param[in] t The source `PortMode`.
+ * \return The destination `IOMX::PortMode`.
+ */
+// convert: PortMode -> IOMX::PortMode
+inline IOMX::PortMode toIOMXPortMode(PortMode t) {
+    return static_cast<IOMX::PortMode>(t);
+}
+
+/**
+ * \brief Convert `OMX_TICKS` to `uint64_t`.
+ *
+ * \param[in] l The source `OMX_TICKS`.
+ * \return The destination `uint64_t`.
+ */
+// convert: OMX_TICKS -> uint64_t
+inline uint64_t toRawTicks(OMX_TICKS l) {
+#ifndef OMX_SKIP64BIT
+    return static_cast<uint64_t>(l);
+#else
+    return static_cast<uint64_t>(l.nLowPart) |
+            (static_cast<uint64_t>(l.nHighPart) << 32);
+#endif
+}
+
+/**
+ * \brief Convert `uint64_t` to `OMX_TICKS`.
+ *
+ * \param[in] t The source `uint64_t`.
+ * \return The destination `OMX_TICKS`.
+ */
+// convert: uint64_t -> OMX_TICKS
+inline OMX_TICKS toOMXTicks(uint64_t t) {
+#ifndef OMX_SKIP64BIT
+    return static_cast<OMX_TICKS>(t);
+#else
+    return OMX_TICKS{
+            static_cast<uint32_t>(t & 0xFFFFFFFF),
+            static_cast<uint32_t>(t >> 32)};
+#endif
+}
+
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_MEDIA_OMX_V1_0__CONVERSION_H
diff --git a/media/libstagefright/omx/hal/1.0/Omx.cpp b/media/libstagefright/omx/hal/1.0/Omx.cpp
new file mode 100644
index 0000000..cb23191
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/Omx.cpp
@@ -0,0 +1,33 @@
+#include "Omx.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+// Methods from ::android::hardware::media::omx::V1_0::IOmx follow.
+Return<void> Omx::listNodes(listNodes_cb _hidl_cb) {
+    // TODO implement
+    return Void();
+}
+
+Return<void> Omx::allocateNode(const hidl_string& name, const sp<IOmxObserver>& observer, allocateNode_cb _hidl_cb) {
+    // TODO implement
+    return Void();
+}
+
+
+// Methods from ::android::hidl::base::V1_0::IBase follow.
+
+IOmx* HIDL_FETCH_IOmx(const char* /* name */) {
+    return new Omx();
+}
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
diff --git a/media/libstagefright/omx/hal/1.0/Omx.h b/media/libstagefright/omx/hal/1.0/Omx.h
new file mode 100644
index 0000000..5d06444
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/Omx.h
@@ -0,0 +1,48 @@
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_OMX_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_OMX_H
+
+#include <android/hardware/media/omx/1.0/IOmx.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <IOMX.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::media::omx::V1_0::IOmx;
+using ::android::hardware::media::omx::V1_0::IOmxNode;
+using ::android::hardware::media::omx::V1_0::IOmxObserver;
+using ::android::hardware::media::omx::V1_0::Status;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+struct Omx : public IOmx {
+    // Methods from ::android::hardware::media::omx::V1_0::IOmx follow.
+    Return<void> listNodes(listNodes_cb _hidl_cb) override;
+    Return<void> allocateNode(const hidl_string& name, const sp<IOmxObserver>& observer, allocateNode_cb _hidl_cb) override;
+
+    // Methods from ::android::hidl::base::V1_0::IBase follow.
+
+};
+
+extern "C" IOmx* HIDL_FETCH_IOmx(const char* name);
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_MEDIA_OMX_V1_0_OMX_H
diff --git a/media/libstagefright/omx/hal/1.0/OmxNode.cpp b/media/libstagefright/omx/hal/1.0/OmxNode.cpp
new file mode 100644
index 0000000..08d1cd7
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/OmxNode.cpp
@@ -0,0 +1,115 @@
+#include <IOMX.h>
+#include <OMXNodeInstance.h>
+#include "OmxNode.h"
+#include "WOmxNode.h"
+#include "WOmxObserver.h"
+#include "Conversion.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+// Methods from ::android::hardware::media::omx::V1_0::IOmxNode follow.
+Return<Status> OmxNode::freeNode() {
+    // TODO implement
+    return ::android::hardware::media::omx::V1_0::Status {};
+}
+
+Return<Status> OmxNode::sendCommand(uint32_t cmd, int32_t param) {
+    // TODO implement
+    return ::android::hardware::media::omx::V1_0::Status {};
+}
+
+Return<void> OmxNode::getParameter(uint32_t index, const hidl_vec<uint8_t>& inParams, getParameter_cb _hidl_cb) {
+    // TODO implement
+    return Void();
+}
+
+Return<Status> OmxNode::setParameter(uint32_t index, const hidl_vec<uint8_t>& params) {
+    // TODO implement
+    return ::android::hardware::media::omx::V1_0::Status {};
+}
+
+Return<void> OmxNode::getConfig(uint32_t index, const hidl_vec<uint8_t>& inConfig, getConfig_cb _hidl_cb) {
+    // TODO implement
+    return Void();
+}
+
+Return<Status> OmxNode::setConfig(uint32_t index, const hidl_vec<uint8_t>& config) {
+    // TODO implement
+    return ::android::hardware::media::omx::V1_0::Status {};
+}
+
+Return<Status> OmxNode::setPortMode(uint32_t portIndex, PortMode mode) {
+    // TODO implement
+    return ::android::hardware::media::omx::V1_0::Status {};
+}
+
+Return<Status> OmxNode::prepareForAdaptivePlayback(uint32_t portIndex, bool enable, uint32_t maxFrameWidth, uint32_t maxFrameHeight) {
+    // TODO implement
+    return ::android::hardware::media::omx::V1_0::Status {};
+}
+
+Return<void> OmxNode::configureVideoTunnelMode(uint32_t portIndex, bool tunneled, uint32_t audioHwSync, configureVideoTunnelMode_cb _hidl_cb) {
+    // TODO implement
+    return Void();
+}
+
+Return<void> OmxNode::getGraphicBufferUsage(uint32_t portIndex, getGraphicBufferUsage_cb _hidl_cb) {
+    // TODO implement
+    return Void();
+}
+
+Return<Status> OmxNode::setInputSurface(const sp<IOmxBufferSource>& bufferSource) {
+    // TODO implement
+    return ::android::hardware::media::omx::V1_0::Status {};
+}
+
+Return<void> OmxNode::allocateSecureBuffer(uint32_t portIndex, uint64_t size, allocateSecureBuffer_cb _hidl_cb) {
+    // TODO implement
+    return Void();
+}
+
+Return<void> OmxNode::useBuffer(uint32_t portIndex, const CodecBuffer& omxBuffer, useBuffer_cb _hidl_cb) {
+    // TODO implement
+    return Void();
+}
+
+Return<Status> OmxNode::freeBuffer(uint32_t portIndex, uint32_t buffer) {
+    // TODO implement
+    return ::android::hardware::media::omx::V1_0::Status {};
+}
+
+Return<Status> OmxNode::fillBuffer(uint32_t buffer, const CodecBuffer& omxBuffer, const hidl_handle& fence) {
+    // TODO implement
+    return ::android::hardware::media::omx::V1_0::Status {};
+}
+
+Return<Status> OmxNode::emptyBuffer(uint32_t buffer, const CodecBuffer& omxBuffer, uint32_t flags, uint64_t timestampUs, const hidl_handle& fence) {
+    // TODO implement
+    return ::android::hardware::media::omx::V1_0::Status {};
+}
+
+Return<void> OmxNode::getExtensionIndex(const hidl_string& parameterName, getExtensionIndex_cb _hidl_cb) {
+    // TODO implement
+    return Void();
+}
+
+Return<Status> OmxNode::dispatchMessage(const Message& msg) {
+    // TODO implement
+    return ::android::hardware::media::omx::V1_0::Status {};
+}
+
+OmxNode::OmxNode(OmxNodeOwner* owner, sp<IOmxObserver> const& observer, char const* name) {
+    mLNode = new OMXNodeInstance(owner, new LWOmxObserver(observer), name);
+}
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
diff --git a/media/libstagefright/omx/hal/1.0/OmxNode.h b/media/libstagefright/omx/hal/1.0/OmxNode.h
new file mode 100644
index 0000000..e05e107
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/OmxNode.h
@@ -0,0 +1,78 @@
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_OMXNODE_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_OMXNODE_H
+
+#include <android/hardware/media/omx/1.0/IOmxNode.h>
+#include <android/hardware/media/omx/1.0/IOmxObserver.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <OMXNodeInstance.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::media::omx::V1_0::CodecBuffer;
+using ::android::hardware::media::omx::V1_0::IOmxBufferSource;
+using ::android::hardware::media::omx::V1_0::IOmxNode;
+using ::android::hardware::media::omx::V1_0::IOmxObserver;
+using ::android::hardware::media::omx::V1_0::Message;
+using ::android::hardware::media::omx::V1_0::PortMode;
+using ::android::hardware::media::omx::V1_0::Status;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+using ::android::OMXNodeInstance;
+using ::android::OmxNodeOwner;
+
+/**
+ * Wrapper classes for conversion
+ * ==============================
+ *
+ * Naming convention:
+ * - LW = Legacy Wrapper --- It wraps a Treble object inside a legacy object.
+ * - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
+ */
+
+struct OmxNode : public IOmxNode {
+    Return<Status> freeNode() override;
+    Return<Status> sendCommand(uint32_t cmd, int32_t param) override;
+    Return<void> getParameter(uint32_t index, const hidl_vec<uint8_t>& inParams, getParameter_cb _hidl_cb) override;
+    Return<Status> setParameter(uint32_t index, const hidl_vec<uint8_t>& params) override;
+    Return<void> getConfig(uint32_t index, const hidl_vec<uint8_t>& inConfig, getConfig_cb _hidl_cb) override;
+    Return<Status> setConfig(uint32_t index, const hidl_vec<uint8_t>& config) override;
+    Return<Status> setPortMode(uint32_t portIndex, PortMode mode) override;
+    Return<Status> prepareForAdaptivePlayback(uint32_t portIndex, bool enable, uint32_t maxFrameWidth, uint32_t maxFrameHeight) override;
+    Return<void> configureVideoTunnelMode(uint32_t portIndex, bool tunneled, uint32_t audioHwSync, configureVideoTunnelMode_cb _hidl_cb) override;
+    Return<void> getGraphicBufferUsage(uint32_t portIndex, getGraphicBufferUsage_cb _hidl_cb) override;
+    Return<Status> setInputSurface(const sp<IOmxBufferSource>& bufferSource) override;
+    Return<void> allocateSecureBuffer(uint32_t portIndex, uint64_t size, allocateSecureBuffer_cb _hidl_cb) override;
+    Return<void> useBuffer(uint32_t portIndex, const CodecBuffer& omxBuffer, useBuffer_cb _hidl_cb) override;
+    Return<Status> freeBuffer(uint32_t portIndex, uint32_t buffer) override;
+    Return<Status> fillBuffer(uint32_t buffer, const CodecBuffer& omxBuffer, const hidl_handle& fence) override;
+    Return<Status> emptyBuffer(uint32_t buffer, const CodecBuffer& omxBuffer, uint32_t flags, uint64_t timestampUs, const hidl_handle& fence) override;
+    Return<void> getExtensionIndex(const hidl_string& parameterName, getExtensionIndex_cb _hidl_cb) override;
+    Return<Status> dispatchMessage(const Message& msg) override;
+
+    OmxNode(OmxNodeOwner* owner, sp<IOmxObserver> const& observer, char const* name);
+protected:
+    sp<OMXNodeInstance> mLNode;
+};
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_MEDIA_OMX_V1_0_OMXNODE_H
diff --git a/media/libstagefright/omx/hal/1.0/WGraphicBufferSource.cpp b/media/libstagefright/omx/hal/1.0/WGraphicBufferSource.cpp
new file mode 100644
index 0000000..0ec31f2
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/WGraphicBufferSource.cpp
@@ -0,0 +1,124 @@
+#include "WGraphicBufferSource.h"
+#include "Conversion.h"
+#include "WOmxNode.h"
+#include <stagefright/foundation/ColorUtils.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using android::ColorUtils;
+
+// LWGraphicBufferSource
+LWGraphicBufferSource::LWGraphicBufferSource(
+        sp<TGraphicBufferSource> const& base) : mBase(base) {
+}
+
+::android::binder::Status LWGraphicBufferSource::configure(
+        const sp<IOMXNode>& omxNode, int32_t dataSpace) {
+    return toBinderStatus(mBase->configure(
+            new TWOmxNode(omxNode), toHardwareDataspace(dataSpace)));
+}
+
+::android::binder::Status LWGraphicBufferSource::setSuspend(bool suspend) {
+    return toBinderStatus(mBase->setSuspend(suspend));
+}
+
+::android::binder::Status LWGraphicBufferSource::setRepeatPreviousFrameDelayUs(
+        int64_t repeatAfterUs) {
+    return toBinderStatus(mBase->setRepeatPreviousFrameDelayUs(repeatAfterUs));
+}
+
+::android::binder::Status LWGraphicBufferSource::setMaxFps(float maxFps) {
+    return toBinderStatus(mBase->setMaxFps(maxFps));
+}
+
+::android::binder::Status LWGraphicBufferSource::setTimeLapseConfig(
+        int64_t timePerFrameUs, int64_t timePerCaptureUs) {
+    return toBinderStatus(mBase->setTimeLapseConfig(
+            timePerFrameUs, timePerCaptureUs));
+}
+
+::android::binder::Status LWGraphicBufferSource::setStartTimeUs(
+        int64_t startTimeUs) {
+    return toBinderStatus(mBase->setStartTimeUs(startTimeUs));
+}
+
+::android::binder::Status LWGraphicBufferSource::setColorAspects(
+        int32_t aspects) {
+    return toBinderStatus(mBase->setColorAspects(
+            toHardwareColorAspects(aspects)));
+}
+
+::android::binder::Status LWGraphicBufferSource::setTimeOffsetUs(
+        int64_t timeOffsetsUs) {
+    return toBinderStatus(mBase->setTimeOffsetUs(timeOffsetsUs));
+}
+
+::android::binder::Status LWGraphicBufferSource::signalEndOfInputStream() {
+    return toBinderStatus(mBase->signalEndOfInputStream());
+}
+
+::android::IBinder* LWGraphicBufferSource::onAsBinder() {
+    return nullptr;
+}
+
+// TWGraphicBufferSource
+TWGraphicBufferSource::TWGraphicBufferSource(
+        sp<LGraphicBufferSource> const& base) : mBase(base) {
+}
+
+Return<void> TWGraphicBufferSource::configure(
+        const sp<IOmxNode>& omxNode, Dataspace dataspace) {
+    return toHardwareStatus(mBase->configure(
+            new LWOmxNode(omxNode),
+            toRawDataspace(dataspace)));
+}
+
+Return<void> TWGraphicBufferSource::setSuspend(bool suspend) {
+    return toHardwareStatus(mBase->setSuspend(suspend));
+}
+
+Return<void> TWGraphicBufferSource::setRepeatPreviousFrameDelayUs(
+        int64_t repeatAfterUs) {
+    return toHardwareStatus(mBase->setRepeatPreviousFrameDelayUs(
+            repeatAfterUs));
+}
+
+Return<void> TWGraphicBufferSource::setMaxFps(float maxFps) {
+    return toHardwareStatus(mBase->setMaxFps(maxFps));
+}
+
+Return<void> TWGraphicBufferSource::setTimeLapseConfig(
+        int64_t timePerFrameUs, int64_t timePerCaptureUs) {
+    return toHardwareStatus(mBase->setTimeLapseConfig(
+            timePerFrameUs, timePerCaptureUs));
+}
+
+Return<void> TWGraphicBufferSource::setStartTimeUs(int64_t startTimeUs) {
+    return toHardwareStatus(mBase->setStartTimeUs(startTimeUs));
+}
+
+Return<void> TWGraphicBufferSource::setColorAspects(
+        const ColorAspects& aspects) {
+    return toHardwareStatus(mBase->setColorAspects(toCompactColorAspects(
+            aspects)));
+}
+
+Return<void> TWGraphicBufferSource::setTimeOffsetUs(int64_t timeOffsetUs) {
+    return toHardwareStatus(mBase->setTimeOffsetUs(timeOffsetUs));
+}
+
+Return<void> TWGraphicBufferSource::signalEndOfInputStream() {
+    return toHardwareStatus(mBase->signalEndOfInputStream());
+}
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
diff --git a/media/libstagefright/omx/hal/1.0/WGraphicBufferSource.h b/media/libstagefright/omx/hal/1.0/WGraphicBufferSource.h
new file mode 100644
index 0000000..66977ad
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/WGraphicBufferSource.h
@@ -0,0 +1,92 @@
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WGRAPHICBUFFERSOURCE_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WGRAPHICBUFFERSOURCE_H
+
+#include <android/hardware/media/omx/1.0/IGraphicBufferSource.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <frameworks/native/include/binder/Binder.h>
+#include <IOMX.h>
+#include <android/IGraphicBufferSource.h>
+#include <android/hardware/media/omx/1.0/IOmxNode.h>
+
+#include <android/hardware/graphics/common/1.0/types.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::graphics::common::V1_0::Dataspace;
+using ::android::hardware::media::omx::V1_0::ColorAspects;
+using ::android::hardware::media::omx::V1_0::IGraphicBufferSource;
+using ::android::hardware::media::omx::V1_0::IOmxNode;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+using ::android::IOMXNode;
+
+/**
+ * Wrapper classes for conversion
+ * ==============================
+ *
+ * Naming convention:
+ * - LW = Legacy Wrapper --- It wraps a Treble object inside a legacy object.
+ * - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
+ */
+
+typedef ::android::IGraphicBufferSource LGraphicBufferSource;
+typedef ::android::hardware::media::omx::V1_0::IGraphicBufferSource
+        TGraphicBufferSource;
+
+struct LWGraphicBufferSource : public LGraphicBufferSource {
+    sp<TGraphicBufferSource> mBase;
+    LWGraphicBufferSource(sp<TGraphicBufferSource> const& base);
+    ::android::binder::Status configure(
+            const sp<IOMXNode>& omxNode, int32_t dataSpace) override;
+    ::android::binder::Status setSuspend(bool suspend) override;
+    ::android::binder::Status setRepeatPreviousFrameDelayUs(
+            int64_t repeatAfterUs) override;
+    ::android::binder::Status setMaxFps(float maxFps) override;
+    ::android::binder::Status setTimeLapseConfig(
+            int64_t timePerFrameUs, int64_t timePerCaptureUs) override;
+    ::android::binder::Status setStartTimeUs(int64_t startTimeUs) override;
+    ::android::binder::Status setColorAspects(int32_t aspects) override;
+    ::android::binder::Status setTimeOffsetUs(int64_t timeOffsetsUs) override;
+    ::android::binder::Status signalEndOfInputStream() override;
+protected:
+    ::android::IBinder* onAsBinder() override;
+};
+
+struct TWGraphicBufferSource : public TGraphicBufferSource {
+    sp<LGraphicBufferSource> mBase;
+    TWGraphicBufferSource(sp<LGraphicBufferSource> const& base);
+    Return<void> configure(
+            const sp<IOmxNode>& omxNode, Dataspace dataspace) override;
+    Return<void> setSuspend(bool suspend) override;
+    Return<void> setRepeatPreviousFrameDelayUs(int64_t repeatAfterUs) override;
+    Return<void> setMaxFps(float maxFps) override;
+    Return<void> setTimeLapseConfig(
+            int64_t timePerFrameUs, int64_t timePerCaptureUs) override;
+    Return<void> setStartTimeUs(int64_t startTimeUs) override;
+    Return<void> setColorAspects(const ColorAspects& aspects) override;
+    Return<void> setTimeOffsetUs(int64_t timeOffsetUs) override;
+    Return<void> signalEndOfInputStream() override;
+};
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WGRAPHICBUFFERSOURCE_H
diff --git a/media/libstagefright/omx/hal/1.0/WOmx.cpp b/media/libstagefright/omx/hal/1.0/WOmx.cpp
new file mode 100644
index 0000000..25bcfe9
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/WOmx.cpp
@@ -0,0 +1,95 @@
+#include "WOmx.h"
+#include "WOmxNode.h"
+#include "WOmxObserver.h"
+#include "Conversion.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+// LWOmx
+LWOmx::LWOmx(sp<IOmx> const& base) : mBase(base) {
+}
+
+status_t LWOmx::listNodes(List<IOMX::ComponentInfo>* list) {
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->listNodes(
+            [&fnStatus, list](
+                    Status status,
+                    hidl_vec<IOmx::ComponentInfo> const& nodeList) {
+                fnStatus = toStatusT(status);
+                list->clear();
+                for (size_t i = 0; i < nodeList.size(); ++i) {
+                    auto newInfo = list->insert(
+                            list->end(), IOMX::ComponentInfo());
+                    convertTo(&*newInfo, nodeList[i]);
+                }
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmx::allocateNode(
+        char const* name,
+        sp<IOMXObserver> const& observer,
+        sp<IOMXNode>* omxNode) {
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->allocateNode(
+            name, new TWOmxObserver(observer),
+            [&fnStatus, omxNode](Status status, sp<IOmxNode> const& node) {
+                fnStatus = toStatusT(status);
+                *omxNode = new LWOmxNode(node);
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmx::createInputSurface(
+        sp<::android::IGraphicBufferProducer>* /* bufferProducer */,
+        sp<::android::IGraphicBufferSource>* /* bufferSource */) {
+    // TODO: Implement.
+    return INVALID_OPERATION;
+}
+
+::android::IBinder* LWOmx::onAsBinder() {
+    return nullptr;
+}
+
+// TWOmx
+TWOmx::TWOmx(sp<IOMX> const& base) : mBase(base) {
+}
+
+Return<void> TWOmx::listNodes(listNodes_cb _hidl_cb) {
+    List<IOMX::ComponentInfo> lList;
+    Status status = toStatus(mBase->listNodes(&lList));
+
+    hidl_vec<IOmx::ComponentInfo> tList;
+    tList.resize(lList.size());
+    size_t i = 0;
+    for (auto const& lInfo : lList) {
+        convertTo(&(tList[i++]), lInfo);
+    }
+    _hidl_cb(status, tList);
+    return Void();
+}
+
+Return<void> TWOmx::allocateNode(
+        const hidl_string& name,
+        const sp<IOmxObserver>& observer,
+        allocateNode_cb _hidl_cb) {
+    sp<IOMXNode> omxNode;
+    Status status = toStatus(mBase->allocateNode(
+            name, new LWOmxObserver(observer), &omxNode));
+    _hidl_cb(status, new TWOmxNode(omxNode));
+    return Void();
+}
+
+// TODO: Add createInputSurface().
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
diff --git a/media/libstagefright/omx/hal/1.0/WOmx.h b/media/libstagefright/omx/hal/1.0/WOmx.h
new file mode 100644
index 0000000..b07c4f2
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/WOmx.h
@@ -0,0 +1,75 @@
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMX_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMX_H
+
+#include <android/hardware/media/omx/1.0/IOmx.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <IOMX.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::media::omx::V1_0::IOmx;
+using ::android::hardware::media::omx::V1_0::IOmxNode;
+using ::android::hardware::media::omx::V1_0::IOmxObserver;
+using ::android::hardware::media::omx::V1_0::Status;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+using ::android::List;
+using ::android::IOMX;
+
+/**
+ * Wrapper classes for conversion
+ * ==============================
+ *
+ * Naming convention:
+ * - LW = Legacy Wrapper --- It wraps a Treble object inside a legacy object.
+ * - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
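+ *
+ * For example (a sketch): a client holding a HIDL handle `sp<IOmx> tOmx` can
+ * use it through the legacy interface via `sp<IOMX> omx = new LWOmx(tOmx);`,
+ * while a legacy `sp<IOMX> lOmx` can be exposed over HIDL via
+ * `sp<IOmx> hidlOmx = new TWOmx(lOmx);`.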
+ */
+
+struct LWOmx : public IOMX {
+    sp<IOmx> mBase;
+    LWOmx(sp<IOmx> const& base);
+    status_t listNodes(List<IOMX::ComponentInfo>* list) override;
+    status_t allocateNode(
+            char const* name,
+            sp<IOMXObserver> const& observer,
+            sp<IOMXNode>* omxNode) override;
+    status_t createInputSurface(
+            sp<::android::IGraphicBufferProducer>* bufferProducer,
+            sp<::android::IGraphicBufferSource>* bufferSource) override;
+protected:
+    ::android::IBinder* onAsBinder() override;
+};
+
+struct TWOmx : public IOmx {
+    sp<IOMX> mBase;
+    TWOmx(sp<IOMX> const& base);
+    Return<void> listNodes(listNodes_cb _hidl_cb) override;
+    Return<void> allocateNode(
+            const hidl_string& name,
+            const sp<IOmxObserver>& observer,
+            allocateNode_cb _hidl_cb) override;
+
+};
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMX_H
diff --git a/media/libstagefright/omx/hal/1.0/WOmxBufferSource.cpp b/media/libstagefright/omx/hal/1.0/WOmxBufferSource.cpp
new file mode 100644
index 0000000..79eb1be
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/WOmxBufferSource.cpp
@@ -0,0 +1,95 @@
+#include "WOmxBufferSource.h"
+#include "Conversion.h"
+#include <utils/String8.h>
+#include <cutils/native_handle.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+// LWOmxBufferSource
+LWOmxBufferSource::LWOmxBufferSource(sp<IOmxBufferSource> const& base) :
+    mBase(base) {
+}
+
+::android::binder::Status LWOmxBufferSource::onOmxExecuting() {
+    return toBinderStatus(mBase->onOmxExecuting());
+}
+
+::android::binder::Status LWOmxBufferSource::onOmxIdle() {
+    return toBinderStatus(mBase->onOmxIdle());
+}
+
+::android::binder::Status LWOmxBufferSource::onOmxLoaded() {
+    return toBinderStatus(mBase->onOmxLoaded());
+}
+
+::android::binder::Status LWOmxBufferSource::onInputBufferAdded(
+        int32_t bufferId) {
+    return toBinderStatus(mBase->onInputBufferAdded(
+            static_cast<uint32_t>(bufferId)));
+}
+
+::android::binder::Status LWOmxBufferSource::onInputBufferEmptied(
+        int32_t bufferId, OMXFenceParcelable const& fenceParcel) {
+    hidl_handle fence;
+    native_handle_t* fenceNh;
+    if (!wrapAs(&fence, &fenceNh, fenceParcel)) {
+        return ::android::binder::Status::fromExceptionCode(
+                ::android::binder::Status::EX_BAD_PARCELABLE,
+                "Invalid fence");
+    }
+    ::android::binder::Status status = toBinderStatus(
+            mBase->onInputBufferEmptied(
+            static_cast<uint32_t>(bufferId), fence));
+    if (native_handle_delete(fenceNh) != 0) {
+        return ::android::binder::Status::fromExceptionCode(
+                ::android::binder::Status::EX_NULL_POINTER,
+                "Cannot delete native handle");
+    }
+    return status;
+}
+
+::android::IBinder* LWOmxBufferSource::onAsBinder() {
+    return nullptr;
+}
+
+// TWOmxBufferSource
+TWOmxBufferSource::TWOmxBufferSource(sp<IOMXBufferSource> const& base) :
+    mBase(base) {
+}
+
+Return<void> TWOmxBufferSource::onOmxExecuting() {
+    return toHardwareStatus(mBase->onOmxExecuting());
+}
+
+Return<void> TWOmxBufferSource::onOmxIdle() {
+    return toHardwareStatus(mBase->onOmxIdle());
+}
+
+Return<void> TWOmxBufferSource::onOmxLoaded() {
+    return toHardwareStatus(mBase->onOmxLoaded());
+}
+
+Return<void> TWOmxBufferSource::onInputBufferAdded(uint32_t buffer) {
+    return toHardwareStatus(mBase->onInputBufferAdded(
+            static_cast<int32_t>(buffer)));
+}
+
+Return<void> TWOmxBufferSource::onInputBufferEmptied(
+        uint32_t buffer, hidl_handle const& fence) {
+    OMXFenceParcelable fenceParcelable;
+    wrapAs(&fenceParcelable, fence);
+    return toHardwareStatus(mBase->onInputBufferEmptied(
+            static_cast<int32_t>(buffer), fenceParcelable));
+}
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
diff --git a/media/libstagefright/omx/hal/1.0/WOmxBufferSource.h b/media/libstagefright/omx/hal/1.0/WOmxBufferSource.h
new file mode 100644
index 0000000..3ba9453
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/WOmxBufferSource.h
@@ -0,0 +1,74 @@
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERSOURCE_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERSOURCE_H
+
+#include <android/hardware/media/omx/1.0/IOmxBufferSource.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <frameworks/native/include/binder/Binder.h>
+#include <android/IOMXBufferSource.h>
+#include <OMXFenceParcelable.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::media::omx::V1_0::IOmxBufferSource;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::hidl_handle;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+using ::android::OMXFenceParcelable;
+using ::android::IOMXBufferSource;
+
+/**
+ * Wrapper classes for conversion
+ * ==============================
+ *
+ * Naming convention:
+ * - LW = Legacy Wrapper --- It wraps a Treble object inside a legacy object.
+ * - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
+ */
+
+struct LWOmxBufferSource : public IOMXBufferSource {
+    sp<IOmxBufferSource> mBase;
+    LWOmxBufferSource(sp<IOmxBufferSource> const& base);
+    ::android::binder::Status onOmxExecuting() override;
+    ::android::binder::Status onOmxIdle() override;
+    ::android::binder::Status onOmxLoaded() override;
+    ::android::binder::Status onInputBufferAdded(int32_t bufferID) override;
+    ::android::binder::Status onInputBufferEmptied(
+            int32_t bufferID, OMXFenceParcelable const& fenceParcel) override;
+protected:
+    ::android::IBinder* onAsBinder() override;
+};
+
+struct TWOmxBufferSource : public IOmxBufferSource {
+    sp<IOMXBufferSource> mBase;
+    TWOmxBufferSource(sp<IOMXBufferSource> const& base);
+    Return<void> onOmxExecuting() override;
+    Return<void> onOmxIdle() override;
+    Return<void> onOmxLoaded() override;
+    Return<void> onInputBufferAdded(uint32_t buffer) override;
+    Return<void> onInputBufferEmptied(
+            uint32_t buffer, hidl_handle const& fence) override;
+};
+
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERSOURCE_H
diff --git a/media/libstagefright/omx/hal/1.0/WOmxNode.cpp b/media/libstagefright/omx/hal/1.0/WOmxNode.cpp
new file mode 100644
index 0000000..0e781d8
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/WOmxNode.cpp
@@ -0,0 +1,421 @@
+#include <IOMX.h>
+#include <OMXNodeInstance.h>
+#include "WOmxNode.h"
+#include "WOmxBufferSource.h"
+#include "Conversion.h"
+
+#include <algorithm>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::Void;
+
+// LWOmxNode
+LWOmxNode::LWOmxNode(sp<IOmxNode> const& base) : mBase(base) {
+}
+
+status_t LWOmxNode::freeNode() {
+    return toStatusT(mBase->freeNode());
+}
+
+status_t LWOmxNode::sendCommand(
+        OMX_COMMANDTYPE cmd, OMX_S32 param) {
+    return toStatusT(mBase->sendCommand(
+            toRawCommandType(cmd), param));
+}
+
+status_t LWOmxNode::getParameter(
+        OMX_INDEXTYPE index, void *params, size_t size) {
+    hidl_vec<uint8_t> tParams = inHidlBytes(params, size);
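+    // fnStatus is the status reported by the remote implementation through
+    // the callback; transStatus reflects the HIDL transport itself. A
+    // transport error, if any, takes precedence below.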
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->getParameter(
+            toRawIndexType(index),
+            tParams,
+            [&fnStatus, params, size](
+                    Status status, hidl_vec<uint8_t> const& outParams) {
+                fnStatus = toStatusT(status);
+                std::copy(
+                        outParams.data(),
+                        outParams.data() + outParams.size(),
+                        static_cast<uint8_t*>(params));
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxNode::setParameter(
+        OMX_INDEXTYPE index, const void *params, size_t size) {
+    hidl_vec<uint8_t> tParams = inHidlBytes(params, size);
+    return toStatusT(mBase->setParameter(
+            toRawIndexType(index), tParams));
+}
+
+status_t LWOmxNode::getConfig(
+        OMX_INDEXTYPE index, void *params, size_t size) {
+    hidl_vec<uint8_t> tParams = inHidlBytes(params, size);
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->getConfig(
+            toRawIndexType(index),
+            tParams,
+            [&fnStatus, params, size](
+                    Status status, hidl_vec<uint8_t> const& outParams) {
+                fnStatus = toStatusT(status);
+                std::copy(
+                        outParams.data(),
+                        outParams.data() + size,
+                        static_cast<uint8_t*>(params));
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxNode::setConfig(
+        OMX_INDEXTYPE index, const void *params, size_t size) {
+    hidl_vec<uint8_t> tParams = inHidlBytes(params, size);
+    return toStatusT(mBase->setConfig(toRawIndexType(index), tParams));
+}
+
+status_t LWOmxNode::setPortMode(
+        OMX_U32 port_index, IOMX::PortMode mode) {
+    return toStatusT(mBase->setPortMode(port_index, toHardwarePortMode(mode)));
+}
+
+status_t LWOmxNode::prepareForAdaptivePlayback(
+        OMX_U32 portIndex, OMX_BOOL enable,
+        OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight) {
+    return toStatusT(mBase->prepareForAdaptivePlayback(
+            portIndex, toRawBool(enable), maxFrameWidth, maxFrameHeight));
+}
+
+status_t LWOmxNode::configureVideoTunnelMode(
+        OMX_U32 portIndex, OMX_BOOL tunneled,
+        OMX_U32 audioHwSync, native_handle_t **sidebandHandle) {
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->configureVideoTunnelMode(
+            portIndex,
+            toRawBool(tunneled),
+            audioHwSync,
+            [&fnStatus, sidebandHandle](
+                    Status status, hidl_handle const& outSidebandHandle) {
+                fnStatus = toStatusT(status);
+                *sidebandHandle = native_handle_clone(outSidebandHandle);
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxNode::getGraphicBufferUsage(
+        OMX_U32 portIndex, OMX_U32* usage) {
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->getGraphicBufferUsage(
+            portIndex,
+            [&fnStatus, usage](
+                    Status status, uint32_t outUsage) {
+                fnStatus = toStatusT(status);
+                *usage = outUsage;
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxNode::setInputSurface(
+        const sp<IOMXBufferSource> &bufferSource) {
+    return toStatusT(mBase->setInputSurface(
+            new TWOmxBufferSource(bufferSource)));
+}
+
+status_t LWOmxNode::allocateSecureBuffer(
+        OMX_U32 portIndex, size_t size, buffer_id *buffer,
+        void **buffer_data, sp<NativeHandle> *native_handle) {
+    *buffer_data = nullptr;
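+    // Over HIDL, a secure buffer comes back as a buffer id plus a native
+    // handle only, so there is no CPU-accessible pointer to return here.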
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->allocateSecureBuffer(
+            portIndex,
+            static_cast<uint64_t>(size),
+            [&fnStatus, buffer, buffer_data, native_handle](
+                    Status status,
+                    uint32_t outBuffer,
+                    hidl_handle const& outNativeHandle) {
+                fnStatus = toStatusT(status);
+                *buffer = outBuffer;
+                *native_handle = NativeHandle::create(
+                        native_handle_clone(outNativeHandle), true);
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxNode::useBuffer(
+        OMX_U32 portIndex, const OMXBuffer &omxBuffer, buffer_id *buffer) {
+    CodecBuffer codecBuffer;
+    if (!wrapAs(&codecBuffer, omxBuffer)) {
+        return BAD_VALUE;
+    }
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->useBuffer(
+            portIndex,
+            codecBuffer,
+            [&fnStatus, buffer](Status status, uint32_t outBuffer) {
+                fnStatus = toStatusT(status);
+                *buffer = outBuffer;
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxNode::freeBuffer(
+        OMX_U32 portIndex, buffer_id buffer) {
+    return toStatusT(mBase->freeBuffer(portIndex, buffer));
+}
+
+status_t LWOmxNode::fillBuffer(
+        buffer_id buffer, const OMXBuffer &omxBuffer, int fenceFd) {
+    CodecBuffer codecBuffer;
+    if (!wrapAs(&codecBuffer, omxBuffer)) {
+        return BAD_VALUE;
+    }
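+    // Wrap the raw fence fd in a native handle for the HIDL call. Only the
+    // handle wrapper is deleted afterwards; the helper is assumed not to take
+    // ownership of the fd itself.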
+    native_handle_t* fenceNh = native_handle_create_from_fd(fenceFd);
+    if (!fenceNh) {
+        return NO_MEMORY;
+    }
+    status_t status = toStatusT(mBase->fillBuffer(
+            buffer, codecBuffer, fenceNh));
+    native_handle_delete(fenceNh);
+    return status;
+}
+
+status_t LWOmxNode::emptyBuffer(
+        buffer_id buffer, const OMXBuffer &omxBuffer,
+        OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
+    CodecBuffer codecBuffer;
+    if (!wrapAs(&codecBuffer, omxBuffer)) {
+        return BAD_VALUE;
+    }
+    native_handle_t* fenceNh = native_handle_create_from_fd(fenceFd);
+    if (!fenceNh) {
+        return NO_MEMORY;
+    }
+    status_t status = toStatusT(mBase->emptyBuffer(
+            buffer,
+            codecBuffer,
+            flags,
+            toRawTicks(timestamp),
+            fenceNh));
+    native_handle_delete(fenceNh);
+    return status;
+}
+status_t LWOmxNode::getExtensionIndex(
+        const char *parameter_name,
+        OMX_INDEXTYPE *index) {
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->getExtensionIndex(
+            hidl_string(parameter_name),
+            [&fnStatus, index](Status status, uint32_t outIndex) {
+                fnStatus = toStatusT(status);
+                *index = toEnumIndexType(outIndex);
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxNode::dispatchMessage(const omx_message &lMsg) {
+    Message tMsg;
+    native_handle_t* nh;
+    if (!wrapAs(&tMsg, &nh, lMsg)) {
+        return NO_MEMORY;
+    }
+    status_t status = toStatusT(mBase->dispatchMessage(tMsg));
+    native_handle_delete(nh);
+    return status;
+}
+
+// TODO: this is temporary, will be removed when quirks move to OMX side.
+status_t LWOmxNode::setQuirks(OMX_U32 /* quirks */) {
+    return NO_ERROR;
+}
+
+::android::IBinder* LWOmxNode::onAsBinder() {
+    return nullptr;
+}
+
+// TWOmxNode
+TWOmxNode::TWOmxNode(sp<IOMXNode> const& base) : mBase(base) {
+}
+
+Return<Status> TWOmxNode::freeNode() {
+    return toStatus(mBase->freeNode());
+}
+
+Return<Status> TWOmxNode::sendCommand(uint32_t cmd, int32_t param) {
+    return toStatus(mBase->sendCommand(toEnumCommandType(cmd), param));
+}
+
+Return<void> TWOmxNode::getParameter(
+        uint32_t index, hidl_vec<uint8_t> const& inParams,
+        getParameter_cb _hidl_cb) {
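+    // Copy the caller's parameter blob into a mutable buffer, let the legacy
+    // node fill it in place, then hand it back through the callback.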
+    hidl_vec<uint8_t> params(inParams);
+    Status status = toStatus(mBase->getParameter(
+            toEnumIndexType(index),
+            static_cast<void*>(params.data()),
+            params.size()));
+    _hidl_cb(status, params);
+    return Void();
+}
+
+Return<Status> TWOmxNode::setParameter(
+        uint32_t index, hidl_vec<uint8_t> const& params) {
+    return toStatus(mBase->setParameter(
+            toEnumIndexType(index),
+            static_cast<void const*>(params.data()),
+            params.size()));
+}
+
+Return<void> TWOmxNode::getConfig(
+        uint32_t index, const hidl_vec<uint8_t>& inConfig,
+        getConfig_cb _hidl_cb) {
+    hidl_vec<uint8_t> config(inConfig);
+    Status status = toStatus(mBase->getConfig(
+            toEnumIndexType(index),
+            static_cast<void*>(config.data()),
+            config.size()));
+    _hidl_cb(status, config);
+    return Void();
+}
+
+Return<Status> TWOmxNode::setConfig(
+        uint32_t index, const hidl_vec<uint8_t>& config) {
+    return toStatus(mBase->setConfig(
+            toEnumIndexType(index),
+            static_cast<void const*>(config.data()),
+            config.size()));
+}
+
+Return<Status> TWOmxNode::setPortMode(uint32_t portIndex, PortMode mode) {
+    return toStatus(mBase->setPortMode(portIndex, toIOMXPortMode(mode)));
+}
+
+Return<Status> TWOmxNode::prepareForAdaptivePlayback(
+        uint32_t portIndex, bool enable,
+        uint32_t maxFrameWidth, uint32_t maxFrameHeight) {
+    return toStatus(mBase->prepareForAdaptivePlayback(
+            portIndex,
+            toEnumBool(enable),
+            maxFrameWidth,
+            maxFrameHeight));
+}
+
+Return<void> TWOmxNode::configureVideoTunnelMode(
+        uint32_t portIndex, bool tunneled, uint32_t audioHwSync,
+        configureVideoTunnelMode_cb _hidl_cb) {
+    native_handle_t* sidebandHandle;
+    Status status = toStatus(mBase->configureVideoTunnelMode(
+            portIndex,
+            toEnumBool(tunneled),
+            audioHwSync,
+            &sidebandHandle));
+    _hidl_cb(status, hidl_handle(sidebandHandle));
+    return Void();
+}
+
+Return<void> TWOmxNode::getGraphicBufferUsage(
+        uint32_t portIndex, getGraphicBufferUsage_cb _hidl_cb) {
+    OMX_U32 usage;
+    Status status = toStatus(mBase->getGraphicBufferUsage(
+            portIndex, &usage));
+    _hidl_cb(status, usage);
+    return Void();
+}
+
+Return<Status> TWOmxNode::setInputSurface(
+        const sp<IOmxBufferSource>& bufferSource) {
+    return toStatus(mBase->setInputSurface(new LWOmxBufferSource(
+            bufferSource)));
+}
+
+Return<void> TWOmxNode::allocateSecureBuffer(
+        uint32_t portIndex, uint64_t size,
+        allocateSecureBuffer_cb _hidl_cb) {
+    IOMX::buffer_id buffer;
+    void* bufferData;
+    sp<NativeHandle> nativeHandle;
+    Status status = toStatus(mBase->allocateSecureBuffer(
+            portIndex,
+            static_cast<size_t>(size),
+            &buffer,
+            &bufferData,
+            &nativeHandle));
+    _hidl_cb(status, buffer, nativeHandle->handle());
+    return Void();
+}
+
+Return<void> TWOmxNode::useBuffer(
+        uint32_t portIndex, const CodecBuffer& codecBuffer,
+        useBuffer_cb _hidl_cb) {
+    IOMX::buffer_id buffer;
+    OMXBuffer omxBuffer;
+    if (!convertTo(&omxBuffer, codecBuffer)) {
+        _hidl_cb(Status::BAD_VALUE, 0);
+        return Void();
+    }
+    Status status = toStatus(mBase->useBuffer(
+            portIndex, omxBuffer, &buffer));
+    _hidl_cb(status, buffer);
+    return Void();
+}
+
+Return<Status> TWOmxNode::freeBuffer(uint32_t portIndex, uint32_t buffer) {
+    return toStatus(mBase->freeBuffer(portIndex, buffer));
+}
+
+Return<Status> TWOmxNode::fillBuffer(
+        uint32_t buffer, const CodecBuffer& codecBuffer,
+        const hidl_handle& fence) {
+    OMXBuffer omxBuffer;
+    if (!convertTo(&omxBuffer, codecBuffer)) {
+        return Status::BAD_VALUE;
+    }
+    return toStatus(mBase->fillBuffer(
+            buffer,
+            omxBuffer,
+            native_handle_read_fd(fence)));
+}
+
+Return<Status> TWOmxNode::emptyBuffer(
+        uint32_t buffer, const CodecBuffer& codecBuffer, uint32_t flags,
+        uint64_t timestampUs, const hidl_handle& fence) {
+    OMXBuffer omxBuffer;
+    if (!convertTo(&omxBuffer, codecBuffer)) {
+        return Status::BAD_VALUE;
+    }
+    return toStatus(mBase->emptyBuffer(
+            buffer,
+            omxBuffer,
+            flags,
+            toOMXTicks(timestampUs),
+            native_handle_read_fd(fence)));
+}
+
+Return<void> TWOmxNode::getExtensionIndex(
+        const hidl_string& parameterName,
+        getExtensionIndex_cb _hidl_cb) {
+    OMX_INDEXTYPE index;
+    Status status = toStatus(mBase->getExtensionIndex(
+            parameterName, &index));
+    _hidl_cb(status, toRawIndexType(index));
+    return Void();
+}
+
+Return<Status> TWOmxNode::dispatchMessage(const Message& tMsg) {
+    omx_message lMsg;
+    if (!wrapAs(&lMsg, tMsg)) {
+        return Status::BAD_VALUE;
+    }
+    return toStatus(mBase->dispatchMessage(lMsg));
+}
+
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
diff --git a/media/libstagefright/omx/hal/1.0/WOmxNode.h b/media/libstagefright/omx/hal/1.0/WOmxNode.h
new file mode 100644
index 0000000..3a459d6
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/WOmxNode.h
@@ -0,0 +1,149 @@
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXNODE_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXNODE_H
+
+#include <android/hardware/media/omx/1.0/IOmxNode.h>
+#include <android/hardware/media/omx/1.0/IOmxObserver.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <utils/Errors.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::media::omx::V1_0::CodecBuffer;
+using ::android::hardware::media::omx::V1_0::IOmxBufferSource;
+using ::android::hardware::media::omx::V1_0::IOmxNode;
+using ::android::hardware::media::omx::V1_0::IOmxObserver;
+using ::android::hardware::media::omx::V1_0::Message;
+using ::android::hardware::media::omx::V1_0::PortMode;
+using ::android::hardware::media::omx::V1_0::Status;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+/**
+ * Wrapper classes for conversion
+ * ==============================
+ *
+ * Naming convention:
+ * - LW = Legacy Wrapper --- It wraps a Treble object inside a legacy object.
+ * - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
+ */
+
+struct LWOmxNode : public IOMXNode {
+    sp<IOmxNode> mBase;
+    LWOmxNode(sp<IOmxNode> const& base);
+    status_t freeNode() override;
+    status_t sendCommand(
+            OMX_COMMANDTYPE cmd, OMX_S32 param) override;
+    status_t getParameter(
+            OMX_INDEXTYPE index, void *params, size_t size) override;
+    status_t setParameter(
+            OMX_INDEXTYPE index, const void *params, size_t size) override;
+    status_t getConfig(
+            OMX_INDEXTYPE index, void *params, size_t size) override;
+    status_t setConfig(
+            OMX_INDEXTYPE index, const void *params, size_t size) override;
+    status_t setPortMode(
+            OMX_U32 port_index, IOMX::PortMode mode) override;
+    status_t prepareForAdaptivePlayback(
+            OMX_U32 portIndex, OMX_BOOL enable,
+            OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight) override;
+    status_t configureVideoTunnelMode(
+            OMX_U32 portIndex, OMX_BOOL tunneled,
+            OMX_U32 audioHwSync, native_handle_t **sidebandHandle) override;
+    status_t getGraphicBufferUsage(
+            OMX_U32 port_index, OMX_U32* usage) override;
+    status_t setInputSurface(
+            const sp<IOMXBufferSource> &bufferSource) override;
+    status_t allocateSecureBuffer(
+            OMX_U32 port_index, size_t size, buffer_id *buffer,
+            void **buffer_data, sp<NativeHandle> *native_handle) override;
+    status_t useBuffer(
+            OMX_U32 port_index, const OMXBuffer &omxBuf,
+            buffer_id *buffer) override;
+    status_t freeBuffer(
+            OMX_U32 port_index, buffer_id buffer) override;
+    status_t fillBuffer(
+            buffer_id buffer, const OMXBuffer &omxBuf,
+            int fenceFd = -1) override;
+    status_t emptyBuffer(
+            buffer_id buffer, const OMXBuffer &omxBuf,
+            OMX_U32 flags, OMX_TICKS timestamp, int fenceFd = -1) override;
+    status_t getExtensionIndex(
+            const char *parameter_name,
+            OMX_INDEXTYPE *index) override;
+    status_t dispatchMessage(const omx_message &msg) override;
+
+    // TODO: this is temporary, will be removed when quirks move to OMX side.
+    status_t setQuirks(OMX_U32 quirks) override;
+protected:
+    ::android::IBinder* onAsBinder() override;
+};
+
+struct TWOmxNode : public IOmxNode {
+    sp<IOMXNode> mBase;
+    TWOmxNode(sp<IOMXNode> const& base);
+
+    Return<Status> freeNode() override;
+    Return<Status> sendCommand(uint32_t cmd, int32_t param) override;
+    Return<void> getParameter(
+            uint32_t index, hidl_vec<uint8_t> const& inParams,
+            getParameter_cb _hidl_cb) override;
+    Return<Status> setParameter(
+            uint32_t index, hidl_vec<uint8_t> const& params) override;
+    Return<void> getConfig(
+            uint32_t index, hidl_vec<uint8_t> const& inConfig,
+            getConfig_cb _hidl_cb) override;
+    Return<Status> setConfig(
+            uint32_t index, hidl_vec<uint8_t> const& config) override;
+    Return<Status> setPortMode(uint32_t portIndex, PortMode mode) override;
+    Return<Status> prepareForAdaptivePlayback(
+            uint32_t portIndex, bool enable,
+            uint32_t maxFrameWidth, uint32_t maxFrameHeight) override;
+    Return<void> configureVideoTunnelMode(
+            uint32_t portIndex, bool tunneled, uint32_t audioHwSync,
+            configureVideoTunnelMode_cb _hidl_cb) override;
+    Return<void> getGraphicBufferUsage(
+            uint32_t portIndex,
+            getGraphicBufferUsage_cb _hidl_cb) override;
+    Return<Status> setInputSurface(
+            sp<IOmxBufferSource> const& bufferSource) override;
+    Return<void> allocateSecureBuffer(
+            uint32_t portIndex, uint64_t size,
+            allocateSecureBuffer_cb _hidl_cb) override;
+    Return<void> useBuffer(
+            uint32_t portIndex, CodecBuffer const& codecBuffer,
+            useBuffer_cb _hidl_cb) override;
+    Return<Status> freeBuffer(uint32_t portIndex, uint32_t buffer) override;
+    Return<Status> fillBuffer(
+            uint32_t buffer, CodecBuffer const& codecBuffer,
+            const hidl_handle& fence) override;
+    Return<Status> emptyBuffer(
+            uint32_t buffer, CodecBuffer const& codecBuffer,
+            uint32_t flags, uint64_t timestampUs,
+            hidl_handle const& fence) override;
+    Return<void> getExtensionIndex(
+            hidl_string const& parameterName,
+            getExtensionIndex_cb _hidl_cb) override;
+    Return<Status> dispatchMessage(Message const& msg) override;
+};
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXNODE_H
diff --git a/media/libstagefright/omx/hal/1.0/WOmxObserver.cpp b/media/libstagefright/omx/hal/1.0/WOmxObserver.cpp
new file mode 100644
index 0000000..f3ad8d3
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/WOmxObserver.cpp
@@ -0,0 +1,59 @@
+#include "WOmxObserver.h"
+
+#include <vector>
+
+#include <cutils/native_handle.h>
+#include <frameworks/native/include/binder/Binder.h>
+
+#include "Conversion.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+// LWOmxObserver
+LWOmxObserver::LWOmxObserver(sp<IOmxObserver> const& base) : mBase(base) {
+}
+
+void LWOmxObserver::onMessages(std::list<omx_message> const& lMessages) {
+    hidl_vec<Message> tMessages;
+    std::vector<native_handle_t*> handles(lMessages.size());
+    tMessages.resize(lMessages.size());
+    size_t i = 0;
+    for (auto const& message : lMessages) {
+        wrapAs(&tMessages[i], &handles[i], message);
+        ++i;
+    }
+    mBase->onMessages(tMessages);
+    for (auto& handle : handles) {
+        native_handle_delete(handle);
+    }
+}
+
+::android::IBinder* LWOmxObserver::onAsBinder() {
+    return nullptr;
+}
+
+// TWOmxObserver
+TWOmxObserver::TWOmxObserver(sp<IOMXObserver> const& base) : mBase(base) {
+}
+
+Return<void> TWOmxObserver::onMessages(const hidl_vec<Message>& tMessages) {
+    std::list<omx_message> lMessages;
+    for (size_t i = 0; i < tMessages.size(); ++i) {
+        lMessages.push_back(omx_message{});
+        wrapAs(&lMessages.back(), tMessages[i]);
+    }
+    mBase->onMessages(lMessages);
+    return Return<void>();
+}
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
diff --git a/media/libstagefright/omx/hal/1.0/WOmxObserver.h b/media/libstagefright/omx/hal/1.0/WOmxObserver.h
new file mode 100644
index 0000000..c8ab296
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/WOmxObserver.h
@@ -0,0 +1,62 @@
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXOBSERVER_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXOBSERVER_H
+
+#include <android/hardware/media/omx/1.0/IOmxObserver.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <IOMX.h>
+#include <list>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::media::omx::V1_0::IOmxObserver;
+using ::android::hardware::media::omx::V1_0::Message;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+using ::android::IOMXObserver;
+using ::android::omx_message;
+
+/**
+ * Wrapper classes for conversion
+ * ==============================
+ *
+ * Naming convention:
+ * - LW = Legacy Wrapper --- It wraps a Treble object inside a legacy object.
+ * - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
+ */
+
+struct LWOmxObserver : public IOMXObserver {
+    sp<IOmxObserver> mBase;
+    LWOmxObserver(sp<IOmxObserver> const& base);
+    void onMessages(std::list<omx_message> const& lMessages) override;
+protected:
+    ::android::IBinder* onAsBinder() override;
+};
+
+struct TWOmxObserver : public IOmxObserver {
+    sp<IOMXObserver> mBase;
+    TWOmxObserver(sp<IOMXObserver> const& base);
+    Return<void> onMessages(const hidl_vec<Message>& tMessages) override;
+};
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXOBSERVER_H
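
The LW/TW naming convention documented in WOmxObserver.h describes two-way adapters: an LW class exposes a Treble (HIDL) object through the legacy interface, and a TW class exposes a legacy object through the Treble interface. The following standalone sketch illustrates that shape with hypothetical LegacyObserver/TrebleObserver types; it is not the real IOMXObserver/IOmxObserver code and not part of this patch.

```cpp
// Illustrative sketch of the LW/TW adapter pattern (hypothetical types; the
// real wrappers adapt IOMXObserver <-> IOmxObserver and convert messages).
#include <memory>
#include <string>

struct LegacyObserver {                  // stands in for IOMXObserver
    virtual ~LegacyObserver() = default;
    virtual void onMessage(const std::string &msg) = 0;
};

struct TrebleObserver {                  // stands in for IOmxObserver
    virtual ~TrebleObserver() = default;
    virtual void onMessage(const std::string &msg) = 0;
};

// LW = Legacy Wrapper: presents a Treble object through the legacy interface.
struct LWObserver : LegacyObserver {
    explicit LWObserver(std::shared_ptr<TrebleObserver> base) : mBase(std::move(base)) {}
    void onMessage(const std::string &msg) override { mBase->onMessage(msg); }
    std::shared_ptr<TrebleObserver> mBase;
};

// TW = Treble Wrapper: presents a legacy object through the Treble interface.
struct TWObserver : TrebleObserver {
    explicit TWObserver(std::shared_ptr<LegacyObserver> base) : mBase(std::move(base)) {}
    void onMessage(const std::string &msg) override { mBase->onMessage(msg); }
    std::shared_ptr<LegacyObserver> mBase;
};
```

In the real classes the forwarding call additionally converts between omx_message and the HIDL Message type via wrapAs(), as shown in WOmxObserver.cpp above.
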
diff --git a/media/libstagefright/omx/tests/Android.mk b/media/libstagefright/omx/tests/Android.mk
index 02e97f1..ec14eb7 100644
--- a/media/libstagefright/omx/tests/Android.mk
+++ b/media/libstagefright/omx/tests/Android.mk
@@ -12,7 +12,6 @@
 	$(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Werror -Wall
-LOCAL_CLANG := true
 
 LOCAL_MODULE := omx_tests
 
@@ -39,6 +38,5 @@
 	frameworks/av/media/libstagefright/omx \
 
 LOCAL_CFLAGS += -Werror -Wall
-LOCAL_CLANG := true
 
 include $(BUILD_NATIVE_TEST)
diff --git a/media/libstagefright/omx/tests/OMXHarness.cpp b/media/libstagefright/omx/tests/OMXHarness.cpp
index 50bb0de..1ce5d1a 100644
--- a/media/libstagefright/omx/tests/OMXHarness.cpp
+++ b/media/libstagefright/omx/tests/OMXHarness.cpp
@@ -38,11 +38,31 @@
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/SimpleDecodingSource.h>
+#include <media/OMXBuffer.h>
 
 #define DEFAULT_TIMEOUT         500000
 
 namespace android {
 
+/////////////////////////////////////////////////////////////////////
+
+struct Harness::CodecObserver : public BnOMXObserver {
+    CodecObserver(const sp<Harness> &harness, int32_t gen)
+            : mHarness(harness), mGeneration(gen) {}
+
+    void onMessages(const std::list<omx_message> &messages) override;
+
+private:
+    sp<Harness> mHarness;
+    int32_t mGeneration;
+};
+
+void Harness::CodecObserver::onMessages(const std::list<omx_message> &messages) {
+    mHarness->handleMessages(mGeneration, messages);
+}
+
+/////////////////////////////////////////////////////////////////////
+
 Harness::Harness()
     : mInitCheck(NO_INIT) {
     mInitCheck = initOMX();
@@ -64,18 +84,17 @@
     return mOMX != 0 ? OK : NO_INIT;
 }
 
-void Harness::onMessages(const std::list<omx_message> &messages) {
+void Harness::handleMessages(int32_t gen, const std::list<omx_message> &messages) {
     Mutex::Autolock autoLock(mLock);
     for (std::list<omx_message>::const_iterator it = messages.cbegin(); it != messages.cend(); ) {
         mMessageQueue.push_back(*it++);
+        mLastMsgGeneration = gen;
     }
     mMessageAddedCondition.signal();
 }
 
-status_t Harness::dequeueMessageForNode(
-        IOMX::node_id node, omx_message *msg, int64_t timeoutUs) {
-    return dequeueMessageForNodeIgnoringBuffers(
-            node, NULL, NULL, msg, timeoutUs);
+status_t Harness::dequeueMessageForNode(omx_message *msg, int64_t timeoutUs) {
+    return dequeueMessageForNodeIgnoringBuffers(NULL, NULL, msg, timeoutUs);
 }
 
 // static
@@ -120,7 +139,6 @@
 }
 
 status_t Harness::dequeueMessageForNodeIgnoringBuffers(
-        IOMX::node_id node,
         Vector<Buffer> *inputBuffers,
         Vector<Buffer> *outputBuffers,
         omx_message *msg, int64_t timeoutUs) {
@@ -128,21 +146,22 @@
 
     for (;;) {
         Mutex::Autolock autoLock(mLock);
+        // Messages are queued in batches; if the last batch queued is
+        // from a node that has already expired, discard those messages.
+        if (mLastMsgGeneration < mCurGeneration) {
+            mMessageQueue.clear();
+        }
         List<omx_message>::iterator it = mMessageQueue.begin();
         while (it != mMessageQueue.end()) {
-            if ((*it).node == node) {
-                if (handleBufferMessage(*it, inputBuffers, outputBuffers)) {
-                    it = mMessageQueue.erase(it);
-                    continue;
-                }
-
-                *msg = *it;
-                mMessageQueue.erase(it);
-
-                return OK;
+            if (handleBufferMessage(*it, inputBuffers, outputBuffers)) {
+                it = mMessageQueue.erase(it);
+                continue;
             }
 
-            ++it;
+            *msg = *it;
+            mMessageQueue.erase(it);
+
+            return OK;
         }
 
         status_t err = (timeoutUs < 0)
@@ -158,16 +177,15 @@
 }
 
 status_t Harness::getPortDefinition(
-        IOMX::node_id node, OMX_U32 portIndex,
-        OMX_PARAM_PORTDEFINITIONTYPE *def) {
+        OMX_U32 portIndex, OMX_PARAM_PORTDEFINITIONTYPE *def) {
     def->nSize = sizeof(*def);
     def->nVersion.s.nVersionMajor = 1;
     def->nVersion.s.nVersionMinor = 0;
     def->nVersion.s.nRevision = 0;
     def->nVersion.s.nStep = 0;
     def->nPortIndex = portIndex;
-    return mOMX->getParameter(
-            node, OMX_IndexParamPortDefinition, def, sizeof(*def));
+    return mOMXNode->getParameter(
+            OMX_IndexParamPortDefinition, def, sizeof(*def));
 }
 
 #define EXPECT(condition, info) \
@@ -180,12 +198,11 @@
 
 status_t Harness::allocatePortBuffers(
         const sp<MemoryDealer> &dealer,
-        IOMX::node_id node, OMX_U32 portIndex,
-        Vector<Buffer> *buffers) {
+        OMX_U32 portIndex, Vector<Buffer> *buffers) {
     buffers->clear();
 
     OMX_PARAM_PORTDEFINITIONTYPE def;
-    status_t err = getPortDefinition(node, portIndex, &def);
+    status_t err = getPortDefinition(portIndex, &def);
     EXPECT_SUCCESS(err, "getPortDefinition");
 
     for (OMX_U32 i = 0; i < def.nBufferCountActual; ++i) {
@@ -194,9 +211,8 @@
         buffer.mFlags = 0;
         CHECK(buffer.mMemory != NULL);
 
-        err = mOMX->allocateBufferWithBackup(
-                node, portIndex, buffer.mMemory, &buffer.mID, buffer.mMemory->size());
-        EXPECT_SUCCESS(err, "allocateBuffer");
+        err = mOMXNode->useBuffer(portIndex, buffer.mMemory, &buffer.mID);
+        EXPECT_SUCCESS(err, "useBuffer");
 
         buffers->push(buffer);
     }
@@ -204,7 +220,7 @@
     return OK;
 }
 
-status_t Harness::setRole(IOMX::node_id node, const char *role) {
+status_t Harness::setRole(const char *role) {
     OMX_PARAM_COMPONENTROLETYPE params;
     params.nSize = sizeof(params);
     params.nVersion.s.nVersionMajor = 1;
@@ -214,31 +230,31 @@
     strncpy((char *)params.cRole, role, OMX_MAX_STRINGNAME_SIZE - 1);
     params.cRole[OMX_MAX_STRINGNAME_SIZE - 1] = '\0';
 
-    return mOMX->setParameter(
-            node, OMX_IndexParamStandardComponentRole,
+    return mOMXNode->setParameter(
+            OMX_IndexParamStandardComponentRole,
             &params, sizeof(params));
 }
 
 struct NodeReaper {
-    NodeReaper(const sp<Harness> &harness, IOMX::node_id node)
+    NodeReaper(const sp<Harness> &harness, const sp<IOMXNode> &omxNode)
         : mHarness(harness),
-          mNode(node) {
+          mOMXNode(omxNode) {
     }
 
     ~NodeReaper() {
-        if (mNode != 0) {
-            mHarness->mOMX->freeNode(mNode);
-            mNode = 0;
+        if (mOMXNode != 0) {
+            mOMXNode->freeNode();
+            mOMXNode = NULL;
         }
     }
 
     void disarm() {
-        mNode = 0;
+        mOMXNode = NULL;
     }
 
 private:
     sp<Harness> mHarness;
-    IOMX::node_id mNode;
+    sp<IOMXNode> mOMXNode;
 
     NodeReaper(const NodeReaper &);
     NodeReaper &operator=(const NodeReaper &);
@@ -264,23 +280,23 @@
     }
 
     sp<MemoryDealer> dealer = new MemoryDealer(16 * 1024 * 1024, "OMXHarness");
-    IOMX::node_id node;
 
-    status_t err =
-        mOMX->allocateNode(componentName, this, NULL, &node);
+    sp<CodecObserver> observer = new CodecObserver(this, ++mCurGeneration);
+
+    status_t err = mOMX->allocateNode(componentName, observer, &mOMXNode);
     EXPECT_SUCCESS(err, "allocateNode");
 
-    NodeReaper reaper(this, node);
+    NodeReaper reaper(this, mOMXNode);
 
-    err = setRole(node, componentRole);
+    err = setRole(componentRole);
     EXPECT_SUCCESS(err, "setRole");
 
     // Initiate transition Loaded->Idle
-    err = mOMX->sendCommand(node, OMX_CommandStateSet, OMX_StateIdle);
+    err = mOMXNode->sendCommand(OMX_CommandStateSet, OMX_StateIdle);
     EXPECT_SUCCESS(err, "sendCommand(go-to-Idle)");
 
     omx_message msg;
-    err = dequeueMessageForNode(node, &msg, DEFAULT_TIMEOUT);
+    err = dequeueMessageForNode(&msg, DEFAULT_TIMEOUT);
     // Make sure node doesn't just transition to idle before we are done
     // allocating all input and output buffers.
     EXPECT(err == TIMED_OUT,
@@ -289,17 +305,17 @@
 
     // Now allocate buffers.
     Vector<Buffer> inputBuffers;
-    err = allocatePortBuffers(dealer, node, 0, &inputBuffers);
+    err = allocatePortBuffers(dealer, 0, &inputBuffers);
     EXPECT_SUCCESS(err, "allocatePortBuffers(input)");
 
-    err = dequeueMessageForNode(node, &msg, DEFAULT_TIMEOUT);
+    err = dequeueMessageForNode(&msg, DEFAULT_TIMEOUT);
     CHECK_EQ(err, (status_t)TIMED_OUT);
 
     Vector<Buffer> outputBuffers;
-    err = allocatePortBuffers(dealer, node, 1, &outputBuffers);
+    err = allocatePortBuffers(dealer, 1, &outputBuffers);
     EXPECT_SUCCESS(err, "allocatePortBuffers(output)");
 
-    err = dequeueMessageForNode(node, &msg, DEFAULT_TIMEOUT);
+    err = dequeueMessageForNode(&msg, DEFAULT_TIMEOUT);
     EXPECT(err == OK
             && msg.type == omx_message::EVENT
             && msg.u.event_data.event == OMX_EventCmdComplete
@@ -309,10 +325,10 @@
            "after all input and output buffers were allocated.");
 
     // Initiate transition Idle->Executing
-    err = mOMX->sendCommand(node, OMX_CommandStateSet, OMX_StateExecuting);
+    err = mOMXNode->sendCommand(OMX_CommandStateSet, OMX_StateExecuting);
     EXPECT_SUCCESS(err, "sendCommand(go-to-Executing)");
 
-    err = dequeueMessageForNode(node, &msg, DEFAULT_TIMEOUT);
+    err = dequeueMessageForNode(&msg, DEFAULT_TIMEOUT);
     EXPECT(err == OK
             && msg.type == omx_message::EVENT
             && msg.u.event_data.event == OMX_EventCmdComplete
@@ -322,17 +338,17 @@
            "executing state.");
 
     for (size_t i = 0; i < outputBuffers.size(); ++i) {
-        err = mOMX->fillBuffer(node, outputBuffers[i].mID);
+        err = mOMXNode->fillBuffer(outputBuffers[i].mID, OMXBuffer::sPreset);
         EXPECT_SUCCESS(err, "fillBuffer");
 
         outputBuffers.editItemAt(i).mFlags |= kBufferBusy;
     }
 
-    err = mOMX->sendCommand(node, OMX_CommandFlush, 1);
+    err = mOMXNode->sendCommand(OMX_CommandFlush, 1);
     EXPECT_SUCCESS(err, "sendCommand(flush-output-port)");
 
     err = dequeueMessageForNodeIgnoringBuffers(
-            node, &inputBuffers, &outputBuffers, &msg, DEFAULT_TIMEOUT);
+            &inputBuffers, &outputBuffers, &msg, DEFAULT_TIMEOUT);
     EXPECT(err == OK
             && msg.type == omx_message::EVENT
             && msg.u.event_data.event == OMX_EventCmdComplete
@@ -347,18 +363,18 @@
     }
 
     for (size_t i = 0; i < outputBuffers.size(); ++i) {
-        err = mOMX->fillBuffer(node, outputBuffers[i].mID);
+        err = mOMXNode->fillBuffer(outputBuffers[i].mID, OMXBuffer::sPreset);
         EXPECT_SUCCESS(err, "fillBuffer");
 
         outputBuffers.editItemAt(i).mFlags |= kBufferBusy;
     }
 
     // Initiate transition Executing->Idle
-    err = mOMX->sendCommand(node, OMX_CommandStateSet, OMX_StateIdle);
+    err = mOMXNode->sendCommand(OMX_CommandStateSet, OMX_StateIdle);
     EXPECT_SUCCESS(err, "sendCommand(go-to-Idle)");
 
     err = dequeueMessageForNodeIgnoringBuffers(
-            node, &inputBuffers, &outputBuffers, &msg, DEFAULT_TIMEOUT);
+            &inputBuffers, &outputBuffers, &msg, DEFAULT_TIMEOUT);
     EXPECT(err == OK
             && msg.type == omx_message::EVENT
             && msg.u.event_data.event == OMX_EventCmdComplete
@@ -382,28 +398,28 @@
     }
 
     // Initiate transition Idle->Loaded
-    err = mOMX->sendCommand(node, OMX_CommandStateSet, OMX_StateLoaded);
+    err = mOMXNode->sendCommand(OMX_CommandStateSet, OMX_StateLoaded);
     EXPECT_SUCCESS(err, "sendCommand(go-to-Loaded)");
 
     // Make sure node doesn't just transition to loaded before we are done
     // freeing all input and output buffers.
-    err = dequeueMessageForNode(node, &msg, DEFAULT_TIMEOUT);
+    err = dequeueMessageForNode(&msg, DEFAULT_TIMEOUT);
     CHECK_EQ(err, (status_t)TIMED_OUT);
 
     for (size_t i = 0; i < inputBuffers.size(); ++i) {
-        err = mOMX->freeBuffer(node, 0, inputBuffers[i].mID);
+        err = mOMXNode->freeBuffer(0, inputBuffers[i].mID);
         EXPECT_SUCCESS(err, "freeBuffer");
     }
 
-    err = dequeueMessageForNode(node, &msg, DEFAULT_TIMEOUT);
+    err = dequeueMessageForNode(&msg, DEFAULT_TIMEOUT);
     CHECK_EQ(err, (status_t)TIMED_OUT);
 
     for (size_t i = 0; i < outputBuffers.size(); ++i) {
-        err = mOMX->freeBuffer(node, 1, outputBuffers[i].mID);
+        err = mOMXNode->freeBuffer(1, outputBuffers[i].mID);
         EXPECT_SUCCESS(err, "freeBuffer");
     }
 
-    err = dequeueMessageForNode(node, &msg, DEFAULT_TIMEOUT);
+    err = dequeueMessageForNode(&msg, DEFAULT_TIMEOUT);
     EXPECT(err == OK
             && msg.type == omx_message::EVENT
             && msg.u.event_data.event == OMX_EventCmdComplete
@@ -412,12 +428,12 @@
            "Component did not properly transition to from idle to "
            "loaded state after freeing all input and output buffers.");
 
-    err = mOMX->freeNode(node);
+    err = mOMXNode->freeNode();
     EXPECT_SUCCESS(err, "freeNode");
 
     reaper.disarm();
 
-    node = 0;
+    mOMXNode = NULL;
 
     return OK;
 }
@@ -784,7 +800,6 @@
     using namespace android;
 
     android::ProcessState::self()->startThreadPool();
-    DataSource::RegisterDefaultSniffers();
 
     const char *me = argv[0];
 
diff --git a/media/libstagefright/omx/tests/OMXHarness.h b/media/libstagefright/omx/tests/OMXHarness.h
index 1ebf3aa..0fe00a6 100644
--- a/media/libstagefright/omx/tests/OMXHarness.h
+++ b/media/libstagefright/omx/tests/OMXHarness.h
@@ -29,7 +29,7 @@
 
 class MemoryDealer;
 
-struct Harness : public BnOMXObserver {
+struct Harness : public RefBase {
     enum BufferFlags {
         kBufferBusy = 1
     };
@@ -43,25 +43,21 @@
 
     status_t initCheck() const;
 
-    status_t dequeueMessageForNode(
-            IOMX::node_id node, omx_message *msg, int64_t timeoutUs = -1);
+    status_t dequeueMessageForNode(omx_message *msg, int64_t timeoutUs = -1);
 
     status_t dequeueMessageForNodeIgnoringBuffers(
-            IOMX::node_id node,
             Vector<Buffer> *inputBuffers,
             Vector<Buffer> *outputBuffers,
             omx_message *msg, int64_t timeoutUs = -1);
 
     status_t getPortDefinition(
-            IOMX::node_id node, OMX_U32 portIndex,
-            OMX_PARAM_PORTDEFINITIONTYPE *def);
+            OMX_U32 portIndex, OMX_PARAM_PORTDEFINITIONTYPE *def);
 
     status_t allocatePortBuffers(
             const sp<MemoryDealer> &dealer,
-            IOMX::node_id node, OMX_U32 portIndex,
-            Vector<Buffer> *buffers);
+            OMX_U32 portIndex, Vector<Buffer> *buffers);
 
-    status_t setRole(IOMX::node_id node, const char *role);
+    status_t setRole(const char *role);
 
     status_t testStateTransitions(
             const char *componentName, const char *componentRole);
@@ -74,20 +70,22 @@
 
     status_t testAll();
 
-    virtual void onMessages(const std::list<omx_message> &messages);
-
 protected:
     virtual ~Harness();
 
 private:
     friend struct NodeReaper;
+    struct CodecObserver;
 
     Mutex mLock;
 
     status_t mInitCheck;
     sp<IOMX> mOMX;
+    sp<IOMXNode> mOMXNode;
     List<omx_message> mMessageQueue;
     Condition mMessageAddedCondition;
+    int32_t mLastMsgGeneration;
+    int32_t mCurGeneration;
 
     status_t initOMX();
 
@@ -96,6 +94,8 @@
             Vector<Buffer> *inputBuffers,
             Vector<Buffer> *outputBuffers);
 
+    void handleMessages(int32_t gen, const std::list<omx_message> &messages);
+
     Harness(const Harness &);
     Harness &operator=(const Harness &);
 };
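
The harness changes replace per-node message filtering with a generation counter: each CodecObserver is constructed with ++mCurGeneration, handleMessages() records the generation of the last queued batch, and the dequeue loop clears the queue when that generation is older than the current one. A minimal standalone sketch of this pattern, using hypothetical types rather than the Harness class itself:

```cpp
// Illustrative sketch of generation-based message discarding (hypothetical
// MessageQueue; the real logic lives in Harness::handleMessages and
// Harness::dequeueMessageForNodeIgnoringBuffers).
#include <cstdint>
#include <list>
#include <mutex>

class MessageQueue {
public:
    // Called by the observer created for generation 'gen'.
    void push(int32_t gen, int payload) {
        std::lock_guard<std::mutex> lock(mLock);
        mLastMsgGeneration = gen;
        mQueue.push_back(payload);
    }

    // Called by the test body; 'curGeneration' is bumped for every new node.
    bool pop(int32_t curGeneration, int *payload) {
        std::lock_guard<std::mutex> lock(mLock);
        // Batches queued by an expired node are discarded wholesale.
        if (mLastMsgGeneration < curGeneration) {
            mQueue.clear();
        }
        if (mQueue.empty()) {
            return false;
        }
        *payload = mQueue.front();
        mQueue.pop_front();
        return true;
    }

private:
    std::mutex mLock;
    std::list<int> mQueue;
    int32_t mLastMsgGeneration = 0;
};
```
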
diff --git a/media/libstagefright/rtsp/APacketSource.cpp b/media/libstagefright/rtsp/APacketSource.cpp
index cfafaa7..8ba9e02 100644
--- a/media/libstagefright/rtsp/APacketSource.cpp
+++ b/media/libstagefright/rtsp/APacketSource.cpp
@@ -418,7 +418,7 @@
     if (sessionDesc->getDurationUs(&durationUs)) {
         mFormat->setInt64(kKeyDuration, durationUs);
     } else {
-        mFormat->setInt64(kKeyDuration, 60 * 60 * 1000000ll);
+        mFormat->setInt64(kKeyDuration, -1ll);
     }
 
     mInitCheck = OK;
diff --git a/media/libstagefright/rtsp/ASessionDescription.cpp b/media/libstagefright/rtsp/ASessionDescription.cpp
index 8b0331a..325084c 100644
--- a/media/libstagefright/rtsp/ASessionDescription.cpp
+++ b/media/libstagefright/rtsp/ASessionDescription.cpp
@@ -17,7 +17,6 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "ASessionDescription"
 #include <utils/Log.h>
-#include <cutils/log.h>
 
 #include "ASessionDescription.h"
 
@@ -212,7 +211,7 @@
 
     *PT = x;
 
-    char key[32];
+    char key[20];
     snprintf(key, sizeof(key), "a=rtpmap:%lu", x);
     if (findAttribute(index, key, desc)) {
         snprintf(key, sizeof(key), "a=fmtp:%lu", x);
@@ -231,11 +230,8 @@
     *width = 0;
     *height = 0;
 
-    char key[33];
+    char key[20];
     snprintf(key, sizeof(key), "a=framesize:%lu", PT);
-    if (PT > 9999999) {
-        android_errorWriteLog(0x534e4554, "25747670");
-    }
     AString value;
     if (!findAttribute(index, key, &value)) {
         return false;
diff --git a/media/libstagefright/rtsp/Android.mk b/media/libstagefright/rtsp/Android.mk
index 35301ce..3472e49 100644
--- a/media/libstagefright/rtsp/Android.mk
+++ b/media/libstagefright/rtsp/Android.mk
@@ -32,7 +32,6 @@
 endif
 
 LOCAL_CFLAGS += -Werror -Wall
-LOCAL_CLANG := true
 LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
 
 LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
@@ -58,7 +57,6 @@
 	$(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_MODULE_TAGS := optional
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index 76e2e6e..5505aa4 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -25,6 +25,7 @@
 #endif
 
 #include <utils/Log.h>
+#include <cutils/properties.h> // for property_get
 
 #include "APacketSource.h"
 #include "ARTPConnection.h"
@@ -807,11 +808,7 @@
                         result = UNKNOWN_ERROR;
                     } else {
                         parsePlayResponse(response);
-
-                        sp<AMessage> timeout = new AMessage('tiou', this);
-                        mCheckTimeoutGeneration++;
-                        timeout->setInt32("tioucheck", mCheckTimeoutGeneration);
-                        timeout->post(kStartupTimeoutUs);
+                        postTimeout();
                     }
                 }
 
@@ -1153,10 +1150,7 @@
 
                         // Post a new timeout to make sure fake timestamps
                         // are used if no new Sender Reports arrive
-                        sp<AMessage> timeout = new AMessage('tiou', this);
-                        mCheckTimeoutGeneration++;
-                        timeout->setInt32("tioucheck", mCheckTimeoutGeneration);
-                        timeout->post(kStartupTimeoutUs);
+                        postTimeout();
                     }
                 }
 
@@ -1248,10 +1242,7 @@
 
                 // Start a new timeout generation to avoid getting a timeout
                 // before the PLAY response arrives
-                sp<AMessage> timeout = new AMessage('tiou', this);
-                mCheckTimeoutGeneration++;
-                timeout->setInt32("tioucheck", mCheckTimeoutGeneration);
-                timeout->post(kStartupTimeoutUs);
+                postTimeout();
 
                 int64_t timeUs;
                 CHECK(msg->findInt64("time", &timeUs));
@@ -1305,10 +1296,7 @@
 
                         // Post a new timeout to make sure fake timestamps
                         // are used if no new Sender Reports arrive
-                        sp<AMessage> timeout = new AMessage('tiou', this);
-                        mCheckTimeoutGeneration++;
-                        timeout->setInt32("tioucheck", mCheckTimeoutGeneration);
-                        timeout->post(kStartupTimeoutUs);
+                        postTimeout();
 
                         ssize_t i = response->mHeaders.indexOfKey("rtp-info");
                         CHECK_GE(i, 0);
@@ -1964,6 +1952,16 @@
         msg->post();
     }
 
+    void postTimeout() {
+        sp<AMessage> timeout = new AMessage('tiou', this);
+        mCheckTimeoutGeneration++;
+        timeout->setInt32("tioucheck", mCheckTimeoutGeneration);
+
+        int64_t startupTimeoutUs;
+        startupTimeoutUs = property_get_int64("media.rtsp.timeout-us", kStartupTimeoutUs);
+        timeout->post(startupTimeoutUs);
+    }
+
     DISALLOW_EVIL_CONSTRUCTORS(MyHandler);
 };
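
The new postTimeout() helper centralizes posting of the 'tiou' timeout message and reads the startup timeout from the media.rtsp.timeout-us system property, falling back to kStartupTimeoutUs. The generation bump is what lets a later postTimeout() supersede earlier ones. A simplified standalone sketch of that idea, with a hypothetical TimeoutScheduler in place of the real AMessage/ALooper machinery:

```cpp
// Illustrative sketch of the generation-tagged timeout behind postTimeout()
// and the 'tiou' handler, which compares "tioucheck" to
// mCheckTimeoutGeneration and ignores stale messages.
#include <cstdint>
#include <functional>
#include <utility>

class TimeoutScheduler {
public:
    using Handler = std::function<void()>;

    // Each call supersedes any previously posted timeout.
    void postTimeout(Handler onTimeout) {
        ++mCheckTimeoutGeneration;
        mPending = {mCheckTimeoutGeneration, std::move(onTimeout)};
    }

    // Invoked when the timer armed for 'generation' fires.
    void onTimerFired(int32_t generation) {
        if (generation != mCheckTimeoutGeneration) {
            return;  // stale timeout from a superseded generation
        }
        if (mPending.handler) {
            mPending.handler();
        }
    }

private:
    struct Pending {
        int32_t generation = 0;
        Handler handler;
    };
    int32_t mCheckTimeoutGeneration = 0;
    Pending mPending;
};
```

In MyHandler the timeout value itself comes from property_get_int64("media.rtsp.timeout-us", kStartupTimeoutUs), so the startup timeout can be tuned per device without rebuilding.
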
 
diff --git a/media/libstagefright/rtsp/rtp_test.cpp b/media/libstagefright/rtsp/rtp_test.cpp
index 24f529b..e612a8d 100644
--- a/media/libstagefright/rtsp/rtp_test.cpp
+++ b/media/libstagefright/rtsp/rtp_test.cpp
@@ -37,8 +37,6 @@
 int main(int argc, char **argv) {
     android::ProcessState::self()->startThreadPool();
 
-    DataSource::RegisterDefaultSniffers();
-
     const char *rtpFilename = NULL;
     const char *rtcpFilename = NULL;
 
diff --git a/media/libstagefright/tests/Android.mk b/media/libstagefright/tests/Android.mk
index d1c9d36..c6963b1 100644
--- a/media/libstagefright/tests/Android.mk
+++ b/media/libstagefright/tests/Android.mk
@@ -33,42 +33,11 @@
 	$(TOP)/frameworks/native/include/media/hardware \
 
 LOCAL_CFLAGS += -Werror -Wall
-LOCAL_CLANG := true
 
 LOCAL_32_BIT_ONLY := true
 
 include $(BUILD_NATIVE_TEST)
 
-
-include $(CLEAR_VARS)
-LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
-
-LOCAL_MODULE := Utils_test
-
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_SRC_FILES := \
-	Utils_test.cpp \
-
-LOCAL_SHARED_LIBRARIES := \
-	libcutils \
-	liblog \
-	libmedia \
-	libstagefright \
-	libstagefright_foundation \
-	libstagefright_omx \
-
-LOCAL_C_INCLUDES := \
-	frameworks/av/include \
-	frameworks/av/media/libstagefright \
-	frameworks/av/media/libstagefright/include \
-	$(TOP)/frameworks/native/include/media/openmax \
-
-LOCAL_CFLAGS += -Werror -Wall
-LOCAL_CLANG := true
-
-include $(BUILD_NATIVE_TEST)
-
 include $(CLEAR_VARS)
 LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
 
@@ -95,7 +64,6 @@
 LOCAL_32_BIT_ONLY := true
 
 LOCAL_CFLAGS += -Werror -Wall
-LOCAL_CLANG := true
 
 include $(BUILD_NATIVE_TEST)
 
diff --git a/media/libstagefright/timedtext/Android.mk b/media/libstagefright/timedtext/Android.mk
index 8d128b8..0b0facf 100644
--- a/media/libstagefright/timedtext/Android.mk
+++ b/media/libstagefright/timedtext/Android.mk
@@ -5,7 +5,6 @@
         TextDescriptions.cpp      \
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_C_INCLUDES:= \
diff --git a/media/libstagefright/webm/Android.mk b/media/libstagefright/webm/Android.mk
index ce580ae..096fd07 100644
--- a/media/libstagefright/webm/Android.mk
+++ b/media/libstagefright/webm/Android.mk
@@ -4,7 +4,6 @@
 LOCAL_CPPFLAGS += -D__STDINT_LIMITS
 
 LOCAL_CFLAGS += -Werror -Wall
-LOCAL_CLANG := true
 LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
 
 LOCAL_SRC_FILES:= EbmlUtil.cpp        \
diff --git a/media/libstagefright/wifi-display/Android.mk b/media/libstagefright/wifi-display/Android.mk
index c87875d..c5322a4 100644
--- a/media/libstagefright/wifi-display/Android.mk
+++ b/media/libstagefright/wifi-display/Android.mk
@@ -31,7 +31,6 @@
         libutils                        \
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_MODULE:= libstagefright_wfd
diff --git a/media/libstagefright/wifi-display/MediaSender.cpp b/media/libstagefright/wifi-display/MediaSender.cpp
index 6f0087f..ae507fc 100644
--- a/media/libstagefright/wifi-display/MediaSender.cpp
+++ b/media/libstagefright/wifi-display/MediaSender.cpp
@@ -423,9 +423,11 @@
             CHECK_GE(accessUnit->size(), rangeLength);
 
             sp<GraphicBuffer> grbuf(new GraphicBuffer(
-                    rangeOffset + rangeLength, 1, HAL_PIXEL_FORMAT_Y8,
-                    GRALLOC_USAGE_HW_VIDEO_ENCODER, rangeOffset + rangeLength,
-                    handle, false));
+                    rangeOffset + rangeLength /* width */, 1 /* height */,
+                    HAL_PIXEL_FORMAT_Y8, 1 /* layerCount */,
+                    GRALLOC_USAGE_HW_VIDEO_ENCODER,
+                    rangeOffset + rangeLength /* stride */, handle,
+                    false /* keepOwnership */));
 
             err = mHDCP->encryptNative(
                     grbuf, rangeOffset, rangeLength,
diff --git a/media/libstagefright/wifi-display/source/Converter.cpp b/media/libstagefright/wifi-display/source/Converter.cpp
index 471152e..273af18 100644
--- a/media/libstagefright/wifi-display/source/Converter.cpp
+++ b/media/libstagefright/wifi-display/source/Converter.cpp
@@ -26,6 +26,7 @@
 #include <cutils/properties.h>
 #include <gui/Surface.h>
 #include <media/ICrypto.h>
+#include <media/MediaCodecBuffer.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -720,7 +721,7 @@
 #endif
 
             sp<ABuffer> buffer;
-            sp<ABuffer> outbuf = mEncoderOutputBuffers.itemAt(bufferIndex);
+            sp<MediaCodecBuffer> outbuf = mEncoderOutputBuffers.itemAt(bufferIndex);
 
             if (outbuf->meta()->findPointer("handle", (void**)&handle) &&
                     handle != NULL) {
diff --git a/media/libstagefright/wifi-display/source/Converter.h b/media/libstagefright/wifi-display/source/Converter.h
index b182990..ad95ab5 100644
--- a/media/libstagefright/wifi-display/source/Converter.h
+++ b/media/libstagefright/wifi-display/source/Converter.h
@@ -25,6 +25,7 @@
 struct ABuffer;
 class IGraphicBufferProducer;
 struct MediaCodec;
+class MediaCodecBuffer;
 
 #define ENABLE_SILENCE_DETECTION        0
 
@@ -106,8 +107,8 @@
 
     sp<IGraphicBufferProducer> mGraphicBufferProducer;
 
-    Vector<sp<ABuffer> > mEncoderInputBuffers;
-    Vector<sp<ABuffer> > mEncoderOutputBuffers;
+    Vector<sp<MediaCodecBuffer> > mEncoderInputBuffers;
+    Vector<sp<MediaCodecBuffer> > mEncoderOutputBuffers;
 
     List<size_t> mAvailEncoderInputIndices;
 
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.cpp b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
index 3587cb9..f1ecca0 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.cpp
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
@@ -36,7 +36,6 @@
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/AudioSource.h>
-#include <media/stagefright/DataSource.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaSource.h>
@@ -748,8 +747,6 @@
 
 status_t WifiDisplaySource::PlaybackSession::setupMediaPacketizer(
         bool enableAudio, bool enableVideo) {
-    DataSource::RegisterDefaultSniffers();
-
     mExtractor = new NuMediaExtractor;
 
     status_t err = mExtractor->setDataSource(
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
index f0a4ded..9cda8dc 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
@@ -911,10 +911,8 @@
 
         bool supportsPCM = (modes & 2) != 0;  // LPCM 2ch 48kHz
 
-        char val[PROPERTY_VALUE_MAX];
         if (supportsPCM
-                && property_get("media.wfd.use-pcm-audio", val, NULL)
-                && (!strcasecmp("true", val) || !strcmp("1", val))) {
+                && property_get_bool("media.wfd.use-pcm-audio", false)) {
             ALOGI("Using PCM audio.");
             mUsingPCMAudio = true;
         } else if (supportsAAC) {
diff --git a/media/mediaserver/Android.mk b/media/mediaserver/Android.mk
index ffbfcbb..a4cb66d 100644
--- a/media/mediaserver/Android.mk
+++ b/media/mediaserver/Android.mk
@@ -17,7 +17,6 @@
 LOCAL_SHARED_LIBRARIES := \
 	libresourcemanagerservice \
 	liblog \
-	libcutils \
 	libmediaplayerservice \
 	libutils \
 	libbinder \
diff --git a/media/mtp/MtpDataPacket.cpp b/media/mtp/MtpDataPacket.cpp
index 40f4cea..d1c71d7 100644
--- a/media/mtp/MtpDataPacket.cpp
+++ b/media/mtp/MtpDataPacket.cpp
@@ -520,7 +520,7 @@
 
 // Wait for result of readDataAsync
 int MtpDataPacket::readDataWait(struct usb_device *device) {
-    struct usb_request *req = usb_request_wait(device);
+    struct usb_request *req = usb_request_wait(device, -1);
     return (req ? req->actual_length : -1);
 }
 
diff --git a/media/mtp/MtpDevice.cpp b/media/mtp/MtpDevice.cpp
index e3d2ec6..82a2627 100644
--- a/media/mtp/MtpDevice.cpp
+++ b/media/mtp/MtpDevice.cpp
@@ -39,6 +39,12 @@
 
 namespace android {
 
+namespace {
+
+static constexpr int USB_CONTROL_TRANSFER_TIMEOUT_MS = 200;
+
+}  // namespace
+
 #if 0
 static bool isMtpDevice(uint16_t vendor, uint16_t product) {
     // Sandisk Sansa Fuze
@@ -84,15 +90,18 @@
                 interface->bInterfaceSubClass == 1 && // Still Image Capture
                 interface->bInterfaceProtocol == 1)     // Picture Transfer Protocol (PIMA 15470)
             {
-                char* manufacturerName = usb_device_get_manufacturer_name(device);
-                char* productName = usb_device_get_product_name(device);
+                char* manufacturerName = usb_device_get_manufacturer_name(device,
+                        USB_CONTROL_TRANSFER_TIMEOUT_MS);
+                char* productName = usb_device_get_product_name(device,
+                        USB_CONTROL_TRANSFER_TIMEOUT_MS);
                 ALOGD("Found camera: \"%s\" \"%s\"\n", manufacturerName, productName);
                 free(manufacturerName);
                 free(productName);
             } else if (interface->bInterfaceClass == 0xFF &&
                     interface->bInterfaceSubClass == 0xFF &&
                     interface->bInterfaceProtocol == 0) {
-                char* interfaceName = usb_device_get_string(device, interface->iInterface);
+                char* interfaceName = usb_device_get_string(device, interface->iInterface,
+                        USB_CONTROL_TRANSFER_TIMEOUT_MS);
                 if (!interfaceName) {
                     continue;
                 } else if (strcmp(interfaceName, "MTP")) {
@@ -102,8 +111,10 @@
                 free(interfaceName);
 
                 // Looks like an android style MTP device
-                char* manufacturerName = usb_device_get_manufacturer_name(device);
-                char* productName = usb_device_get_product_name(device);
+                char* manufacturerName = usb_device_get_manufacturer_name(device,
+                        USB_CONTROL_TRANSFER_TIMEOUT_MS);
+                char* productName = usb_device_get_product_name(device,
+                        USB_CONTROL_TRANSFER_TIMEOUT_MS);
                 ALOGD("Found MTP device: \"%s\" \"%s\"\n", manufacturerName, productName);
                 free(manufacturerName);
                 free(productName);
diff --git a/media/mtp/MtpDevice.h b/media/mtp/MtpDevice.h
index c84c842..a9a3e0e 100644
--- a/media/mtp/MtpDevice.h
+++ b/media/mtp/MtpDevice.h
@@ -107,7 +107,7 @@
     bool                    sendObject(MtpObjectHandle handle, int size, int srcFD);
     bool                    deleteObject(MtpObjectHandle handle);
     MtpObjectHandle         getParent(MtpObjectHandle handle);
-    MtpObjectHandle         getStorageID(MtpObjectHandle handle);
+    MtpStorageID            getStorageID(MtpObjectHandle handle);
 
     MtpObjectPropertyList*  getObjectPropsSupported(MtpObjectFormat format);
 
diff --git a/media/mtp/MtpDeviceInfo.cpp b/media/mtp/MtpDeviceInfo.cpp
index 3e1dff7..3d5cb06 100644
--- a/media/mtp/MtpDeviceInfo.cpp
+++ b/media/mtp/MtpDeviceInfo.cpp
@@ -69,6 +69,7 @@
 
     if (!packet.getString(string)) return false;
     mVendorExtensionDesc = strdup((const char *)string);
+    if (!mVendorExtensionDesc) return false;
 
     if (!packet.getUInt16(mFunctionalMode)) return false;
     mOperations = packet.getAUInt16();
@@ -84,12 +85,16 @@
 
     if (!packet.getString(string)) return false;
     mManufacturer = strdup((const char *)string);
+    if (!mManufacturer) return false;
     if (!packet.getString(string)) return false;
     mModel = strdup((const char *)string);
+    if (!mModel) return false;
     if (!packet.getString(string)) return false;
     mVersion = strdup((const char *)string);
+    if (!mVersion) return false;
     if (!packet.getString(string)) return false;
     mSerial = strdup((const char *)string);
+    if (!mSerial) return false;
 
     return true;
 }
diff --git a/media/mtp/MtpEventPacket.cpp b/media/mtp/MtpEventPacket.cpp
index fbee72f..921ecbd 100644
--- a/media/mtp/MtpEventPacket.cpp
+++ b/media/mtp/MtpEventPacket.cpp
@@ -62,7 +62,7 @@
 }
 
 int MtpEventPacket::readResponse(struct usb_device *device) {
-    struct usb_request* const req = usb_request_wait(device);
+    struct usb_request* const req = usb_request_wait(device, -1);
     if (req) {
         mPacketSize = req->actual_length;
         return req->actual_length;
diff --git a/media/mtp/MtpObjectInfo.cpp b/media/mtp/MtpObjectInfo.cpp
index 0573104..43b745f 100644
--- a/media/mtp/MtpObjectInfo.cpp
+++ b/media/mtp/MtpObjectInfo.cpp
@@ -77,6 +77,7 @@
 
     if (!packet.getString(string)) return false;
     mName = strdup((const char *)string);
+    if (!mName) return false;
 
     if (!packet.getString(string)) return false;
     if (parseDateTime((const char*)string, time))
@@ -88,6 +89,7 @@
 
     if (!packet.getString(string)) return false;
     mKeywords = strdup((const char *)string);
+    if (!mKeywords) return false;
 
     return true;
 }
diff --git a/media/mtp/MtpPacket.cpp b/media/mtp/MtpPacket.cpp
index 35ecb4f..3dd4248 100644
--- a/media/mtp/MtpPacket.cpp
+++ b/media/mtp/MtpPacket.cpp
@@ -70,8 +70,8 @@
     char* bufptr = buffer;
 
     for (size_t i = 0; i < mPacketSize; i++) {
-        sprintf(bufptr, "%02X ", mBuffer[i]);
-        bufptr += strlen(bufptr);
+        bufptr += snprintf(bufptr, sizeof(buffer) - (bufptr - buffer), "%02X ",
+                           mBuffer[i]);
         if (i % DUMP_BYTES_PER_ROW == (DUMP_BYTES_PER_ROW - 1)) {
             ALOGV("%s", buffer);
             bufptr = buffer;
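
The MtpPacket dump now advances the write pointer by the snprintf return value and bounds every write by the space remaining in the buffer, instead of calling sprintf and re-measuring with strlen. A standalone sketch of the same bounded hex-dump pattern, with a hypothetical dumpHex() printing to stdout rather than ALOGV:

```cpp
// Illustrative sketch of a bounded hex dump. The buffer is sized for one full
// row, so snprintf never truncates and its return value is the number of
// characters actually written.
#include <cstddef>
#include <cstdint>
#include <cstdio>

static void dumpHex(const uint8_t *data, size_t size) {
    constexpr size_t kBytesPerRow = 16;
    char buffer[3 * kBytesPerRow + 1];
    char *bufptr = buffer;
    for (size_t i = 0; i < size; i++) {
        bufptr += snprintf(bufptr, sizeof(buffer) - (bufptr - buffer), "%02X ", data[i]);
        if (i % kBytesPerRow == kBytesPerRow - 1) {
            printf("%s\n", buffer);
            bufptr = buffer;   // start the next row
        }
    }
    if (bufptr != buffer) {
        printf("%s\n", buffer);  // flush any partial final row
    }
}
```
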
diff --git a/media/mtp/MtpStorageInfo.cpp b/media/mtp/MtpStorageInfo.cpp
index 5d4ebbf..8801a38 100644
--- a/media/mtp/MtpStorageInfo.cpp
+++ b/media/mtp/MtpStorageInfo.cpp
@@ -58,8 +58,10 @@
 
     if (!packet.getString(string)) return false;
     mStorageDescription = strdup((const char *)string);
+    if (!mStorageDescription) return false;
     if (!packet.getString(string)) return false;
     mVolumeIdentifier = strdup((const char *)string);
+    if (!mVolumeIdentifier) return false;
 
     return true;
 }
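
The MtpDeviceInfo, MtpObjectInfo, and MtpStorageInfo parsers now check every strdup() result, since strdup() returns NULL on allocation failure and the parse should fail rather than continue with a NULL field. A small hypothetical helper capturing that repeated pattern (illustrative only, not part of the patch):

```cpp
// Hypothetical helper: duplicate a string field and fail the parse when the
// source is missing or the duplication fails, mirroring the "strdup then
// null-check" pattern added throughout the MTP info parsers.
#include <cstring>

static bool dupField(const char *src, char **dst) {
    if (src == nullptr) {
        return false;
    }
    *dst = strdup(src);
    return *dst != nullptr;
}
```
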
diff --git a/media/ndk/Android.mk b/media/ndk/Android.mk
index a4f999f..74729e4 100644
--- a/media/ndk/Android.mk
+++ b/media/ndk/Android.mk
@@ -45,6 +45,7 @@
 LOCAL_SHARED_LIBRARIES := \
     libbinder \
     libmedia \
+    libmediadrm \
     libstagefright \
     libstagefright_foundation \
     liblog \
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index 3d1eca1..22c90e2 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -30,10 +30,10 @@
 
 #include <media/stagefright/foundation/ALooper.h>
 #include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/foundation/ABuffer.h>
 
 #include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaErrors.h>
+#include <media/MediaCodecBuffer.h>
 
 using namespace android;
 
@@ -268,13 +268,17 @@
 
 EXPORT
 uint8_t* AMediaCodec_getInputBuffer(AMediaCodec *mData, size_t idx, size_t *out_size) {
-    android::Vector<android::sp<android::ABuffer> > abufs;
+    android::Vector<android::sp<android::MediaCodecBuffer> > abufs;
     if (mData->mCodec->getInputBuffers(&abufs) == 0) {
         size_t n = abufs.size();
         if (idx >= n) {
             ALOGE("buffer index %zu out of range", idx);
             return NULL;
         }
+        if (abufs[idx] == NULL) {
+            ALOGE("buffer index %zu is NULL", idx);
+            return NULL;
+        }
         if (out_size != NULL) {
             *out_size = abufs[idx]->capacity();
         }
@@ -286,7 +290,7 @@
 
 EXPORT
 uint8_t* AMediaCodec_getOutputBuffer(AMediaCodec *mData, size_t idx, size_t *out_size) {
-    android::Vector<android::sp<android::ABuffer> > abufs;
+    android::Vector<android::sp<android::MediaCodecBuffer> > abufs;
     if (mData->mCodec->getOutputBuffers(&abufs) == 0) {
         size_t n = abufs.size();
         if (idx >= n) {
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index 166e6f1..cdce932 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -99,11 +99,12 @@
             break;
         default:
             ALOGE("Invalid event DrmPlugin::EventType %d, ignored", (int)eventType);
-            return;
+            goto cleanup;
     }
 
     (*mListener)(mObj, &sessionId, ndkEventType, extra, data, dataSize);
 
+ cleanup:
     delete [] sessionId.ptr;
     delete [] data;
 }
diff --git a/media/utils/Android.mk b/media/utils/Android.mk
index f482d1a..21d1b5b 100644
--- a/media/utils/Android.mk
+++ b/media/utils/Android.mk
@@ -20,6 +20,7 @@
   BatteryNotifier.cpp \
   ISchedulingPolicyService.cpp \
   MemoryLeakTrackUtil.cpp \
+  ProcessInfo.cpp \
   SchedulingPolicyService.cpp
 
 LOCAL_SHARED_LIBRARIES := \
diff --git a/media/utils/BatteryNotifier.cpp b/media/utils/BatteryNotifier.cpp
index 341d391..09bc042 100644
--- a/media/utils/BatteryNotifier.cpp
+++ b/media/utils/BatteryNotifier.cpp
@@ -29,7 +29,7 @@
     BatteryNotifier::getInstance().onBatteryStatServiceDied();
 }
 
-BatteryNotifier::BatteryNotifier() : mVideoRefCount(0), mAudioRefCount(0) {}
+BatteryNotifier::BatteryNotifier() {}
 
 BatteryNotifier::~BatteryNotifier() {
     Mutex::Autolock _l(mLock);
@@ -38,77 +38,83 @@
     }
 }
 
-void BatteryNotifier::noteStartVideo() {
+void BatteryNotifier::noteStartVideo(uid_t uid) {
     Mutex::Autolock _l(mLock);
     sp<IBatteryStats> batteryService = getBatteryService_l();
-    if (mVideoRefCount == 0 && batteryService != nullptr) {
-        batteryService->noteStartVideo(AID_MEDIA);
+    if (mVideoRefCounts[uid] == 0 && batteryService != nullptr) {
+        batteryService->noteStartVideo(uid);
     }
-    mVideoRefCount++;
+    mVideoRefCounts[uid]++;
 }
 
-void BatteryNotifier::noteStopVideo() {
+void BatteryNotifier::noteStopVideo(uid_t uid) {
     Mutex::Autolock _l(mLock);
-    if (mVideoRefCount == 0) {
-        ALOGW("%s: video refcount is broken.", __FUNCTION__);
+    if (mVideoRefCounts.find(uid) == mVideoRefCounts.end()) {
+        ALOGW("%s: video refcount is broken for uid(%d).", __FUNCTION__, (int)uid);
         return;
     }
 
     sp<IBatteryStats> batteryService = getBatteryService_l();
 
-    mVideoRefCount--;
-    if (mVideoRefCount == 0 && batteryService != nullptr) {
-        batteryService->noteStopVideo(AID_MEDIA);
+    mVideoRefCounts[uid]--;
+    if (mVideoRefCounts[uid] == 0) {
+        if (batteryService != nullptr) {
+            batteryService->noteStopVideo(uid);
+        }
+        mVideoRefCounts.erase(uid);
     }
 }
 
 void BatteryNotifier::noteResetVideo() {
     Mutex::Autolock _l(mLock);
     sp<IBatteryStats> batteryService = getBatteryService_l();
-    mVideoRefCount = 0;
+    mVideoRefCounts.clear();
     if (batteryService != nullptr) {
         batteryService->noteResetVideo();
     }
 }
 
-void BatteryNotifier::noteStartAudio() {
+void BatteryNotifier::noteStartAudio(uid_t uid) {
     Mutex::Autolock _l(mLock);
     sp<IBatteryStats> batteryService = getBatteryService_l();
-    if (mAudioRefCount == 0 && batteryService != nullptr) {
-        batteryService->noteStartAudio(AID_AUDIOSERVER);
+    if (mAudioRefCounts[uid] == 0 && batteryService != nullptr) {
+        batteryService->noteStartAudio(uid);
     }
-    mAudioRefCount++;
+    mAudioRefCounts[uid]++;
 }
 
-void BatteryNotifier::noteStopAudio() {
+void BatteryNotifier::noteStopAudio(uid_t uid) {
     Mutex::Autolock _l(mLock);
-    if (mAudioRefCount == 0) {
-        ALOGW("%s: audio refcount is broken.", __FUNCTION__);
+    if (mAudioRefCounts.find(uid) == mAudioRefCounts.end()) {
+        ALOGW("%s: audio refcount is broken for uid(%d).", __FUNCTION__, (int)uid);
         return;
     }
 
     sp<IBatteryStats> batteryService = getBatteryService_l();
 
-    mAudioRefCount--;
-    if (mAudioRefCount == 0 && batteryService != nullptr) {
-        batteryService->noteStopAudio(AID_AUDIOSERVER);
+    mAudioRefCounts[uid]--;
+    if (mAudioRefCounts[uid] == 0) {
+        if (batteryService != nullptr) {
+            batteryService->noteStopAudio(uid);
+        }
+        mAudioRefCounts.erase(uid);
     }
 }
 
 void BatteryNotifier::noteResetAudio() {
     Mutex::Autolock _l(mLock);
     sp<IBatteryStats> batteryService = getBatteryService_l();
-    mAudioRefCount = 0;
+    mAudioRefCounts.clear();
     if (batteryService != nullptr) {
         batteryService->noteResetAudio();
     }
 }
 
-void BatteryNotifier::noteFlashlightOn(const String8& id, int uid) {
+void BatteryNotifier::noteFlashlightOn(const String8& id, uid_t uid) {
     Mutex::Autolock _l(mLock);
     sp<IBatteryStats> batteryService = getBatteryService_l();
 
-    std::pair<String8, int> k = std::make_pair(id, uid);
+    std::pair<String8, uid_t> k = std::make_pair(id, uid);
     if (!mFlashlightState[k]) {
         mFlashlightState[k] = true;
         if (batteryService != nullptr) {
@@ -117,11 +123,11 @@
     }
 }
 
-void BatteryNotifier::noteFlashlightOff(const String8& id, int uid) {
+void BatteryNotifier::noteFlashlightOff(const String8& id, uid_t uid) {
     Mutex::Autolock _l(mLock);
     sp<IBatteryStats> batteryService = getBatteryService_l();
 
-    std::pair<String8, int> k = std::make_pair(id, uid);
+    std::pair<String8, uid_t> k = std::make_pair(id, uid);
     if (mFlashlightState[k]) {
         mFlashlightState[k] = false;
         if (batteryService != nullptr) {
@@ -139,10 +145,10 @@
     }
 }
 
-void BatteryNotifier::noteStartCamera(const String8& id, int uid) {
+void BatteryNotifier::noteStartCamera(const String8& id, uid_t uid) {
     Mutex::Autolock _l(mLock);
     sp<IBatteryStats> batteryService = getBatteryService_l();
-    std::pair<String8, int> k = std::make_pair(id, uid);
+    std::pair<String8, uid_t> k = std::make_pair(id, uid);
     if (!mCameraState[k]) {
         mCameraState[k] = true;
         if (batteryService != nullptr) {
@@ -151,10 +157,10 @@
     }
 }
 
-void BatteryNotifier::noteStopCamera(const String8& id, int uid) {
+void BatteryNotifier::noteStopCamera(const String8& id, uid_t uid) {
     Mutex::Autolock _l(mLock);
     sp<IBatteryStats> batteryService = getBatteryService_l();
-    std::pair<String8, int> k = std::make_pair(id, uid);
+    std::pair<String8, uid_t> k = std::make_pair(id, uid);
     if (mCameraState[k]) {
         mCameraState[k] = false;
         if (batteryService != nullptr) {
@@ -176,7 +182,7 @@
     Mutex::Autolock _l(mLock);
     mBatteryStatService.clear();
     mDeathNotifier.clear();
-    // Do not reset mVideoRefCount and mAudioRefCount here. The ref
+    // Do not reset mVideoRefCounts and mAudioRefCounts here. The ref
     // counting is independent of the battery service availability.
     // We need this if battery service becomes available after media
     // started.
@@ -205,11 +211,13 @@
         // Notify start now if mediaserver or audioserver is already started:
         // 1) mediaserver and audioserver are started before the batterystats service
         // 2) the batterystats service may have crashed.
-        if (mVideoRefCount > 0) {
-            mBatteryStatService->noteStartVideo(AID_MEDIA);
+        std::map<uid_t, int>::iterator it = mVideoRefCounts.begin();
+        for (; it != mVideoRefCounts.end(); ++it) {
+            mBatteryStatService->noteStartVideo(it->first);
         }
-        if (mAudioRefCount > 0) {
-            mBatteryStatService->noteStartAudio(AID_AUDIOSERVER);
+        it = mAudioRefCounts.begin();
+        for (; it != mAudioRefCounts.end(); ++it) {
+            mBatteryStatService->noteStartAudio(it->first);
         }
         // TODO: Notify for camera and flashlight state as well?
     }
diff --git a/media/libstagefright/ProcessInfo.cpp b/media/utils/ProcessInfo.cpp
similarity index 100%
rename from media/libstagefright/ProcessInfo.cpp
rename to media/utils/ProcessInfo.cpp
diff --git a/media/utils/include/mediautils/BatteryNotifier.h b/media/utils/include/mediautils/BatteryNotifier.h
index 49048042..a4e42ad 100644
--- a/media/utils/include/mediautils/BatteryNotifier.h
+++ b/media/utils/include/mediautils/BatteryNotifier.h
@@ -37,17 +37,17 @@
 public:
     ~BatteryNotifier();
 
-    void noteStartVideo();
-    void noteStopVideo();
+    void noteStartVideo(uid_t uid);
+    void noteStopVideo(uid_t uid);
     void noteResetVideo();
-    void noteStartAudio();
-    void noteStopAudio();
+    void noteStartAudio(uid_t uid);
+    void noteStopAudio(uid_t uid);
     void noteResetAudio();
-    void noteFlashlightOn(const String8& id, int uid);
-    void noteFlashlightOff(const String8& id, int uid);
+    void noteFlashlightOn(const String8& id, uid_t uid);
+    void noteFlashlightOff(const String8& id, uid_t uid);
     void noteResetFlashlight();
-    void noteStartCamera(const String8& id, int uid);
-    void noteStopCamera(const String8& id, int uid);
+    void noteStartCamera(const String8& id, uid_t uid);
+    void noteStopCamera(const String8& id, uid_t uid);
     void noteResetCamera();
 
 private:
@@ -58,10 +58,10 @@
     };
 
     Mutex mLock;
-    int mVideoRefCount;
-    int mAudioRefCount;
-    std::map<std::pair<String8, int>, bool> mFlashlightState;
-    std::map<std::pair<String8, int>, bool> mCameraState;
+    std::map<uid_t, int> mVideoRefCounts;
+    std::map<uid_t, int> mAudioRefCounts;
+    std::map<std::pair<String8, uid_t>, bool> mFlashlightState;
+    std::map<std::pair<String8, uid_t>, bool> mCameraState;
     sp<IBatteryStats> mBatteryStatService;
     sp<DeathNotifier> mDeathNotifier;
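
BatteryNotifier now keeps per-uid refcount maps instead of the single mVideoRefCount/mAudioRefCount counters, so start/stop events are attributed to the calling uid: the first noteStart for a uid notifies the battery service, and the last noteStop notifies it and erases the entry. A simplified standalone sketch of that bookkeeping, with hypothetical callbacks in place of the real IBatteryStats binder:

```cpp
// Illustrative sketch of per-uid refcounting (hypothetical notify callbacks
// instead of IBatteryStats::noteStartVideo/noteStopVideo etc.).
#include <sys/types.h>
#include <functional>
#include <map>
#include <mutex>

class UidRefCounter {
public:
    using Notify = std::function<void(uid_t)>;

    UidRefCounter(Notify onFirstStart, Notify onLastStop)
        : mOnFirstStart(std::move(onFirstStart)), mOnLastStop(std::move(onLastStop)) {}

    void noteStart(uid_t uid) {
        std::lock_guard<std::mutex> lock(mLock);
        if (mRefCounts[uid] == 0) {     // first active user for this uid
            mOnFirstStart(uid);
        }
        mRefCounts[uid]++;
    }

    void noteStop(uid_t uid) {
        std::lock_guard<std::mutex> lock(mLock);
        auto it = mRefCounts.find(uid);
        if (it == mRefCounts.end()) {
            return;                     // refcount is broken for this uid
        }
        if (--it->second == 0) {        // last active user for this uid
            mOnLastStop(uid);
            mRefCounts.erase(it);
        }
    }

private:
    std::mutex mLock;
    std::map<uid_t, int> mRefCounts;
    Notify mOnFirstStart;
    Notify mOnLastStop;
};
```

Keeping the uids in a map is also why getBatteryService_l() can replay noteStartVideo/noteStartAudio for every uid still present when the battery service (re)connects, as shown in the hunk above.
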
 
diff --git a/radio/IRadio.cpp b/radio/IRadio.cpp
index 0881a91..5bbe7cb 100644
--- a/radio/IRadio.cpp
+++ b/radio/IRadio.cpp
@@ -112,7 +112,7 @@
         if (status == NO_ERROR) {
             status = (status_t)reply.readInt32();
             if (status == NO_ERROR) {
-                int muteread = reply.readInt32();
+                int32_t muteread = reply.readInt32();
                 *mute = muteread != 0;
             }
         }
@@ -145,12 +145,12 @@
         return status;
     }
 
-    virtual status_t tune(unsigned int channel, unsigned int subChannel)
+    virtual status_t tune(uint32_t channel, uint32_t subChannel)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IRadio::getInterfaceDescriptor());
-        data.writeInt32(channel);
-        data.writeInt32(subChannel);
+        data.writeUint32(channel);
+        data.writeUint32(subChannel);
         status_t status = remote()->transact(TUNE, data, &reply);
         if (status == NO_ERROR) {
             status = (status_t)reply.readInt32();
@@ -172,7 +172,7 @@
     virtual status_t getProgramInformation(struct radio_program_info *info)
     {
         Parcel data, reply;
-        if (info == NULL) {
+        if (info == nullptr || info->metadata == nullptr) {
             return BAD_VALUE;
         }
         radio_metadata_t *metadata = info->metadata;
@@ -182,22 +182,19 @@
             status = (status_t)reply.readInt32();
             if (status == NO_ERROR) {
                 reply.read(info, sizeof(struct radio_program_info));
+                // restore local metadata pointer
                 info->metadata = metadata;
-                if (metadata == NULL) {
-                    return status;
+
+                uint32_t metadataSize = reply.readUint32();
+                if (metadataSize != 0) {
+                    radio_metadata_t *newMetadata = (radio_metadata_t *)malloc(metadataSize);
+                    if (newMetadata == NULL) {
+                        return NO_MEMORY;
+                    }
+                    reply.read(newMetadata, metadataSize);
+                    status = radio_metadata_add_metadata(&info->metadata, newMetadata);
+                    free(newMetadata);
                 }
-                size_t size = (size_t)reply.readInt32();
-                if (size == 0) {
-                    return status;
-                }
-                metadata =
-                    (radio_metadata_t *)calloc(size / sizeof(unsigned int), sizeof(unsigned int));
-                if (metadata == NULL) {
-                    return NO_MEMORY;
-                }
-                reply.read(metadata, size);
-                status = radio_metadata_add_metadata(&info->metadata, metadata);
-                free(metadata);
             }
         }
         return status;
@@ -288,8 +285,8 @@
         }
         case TUNE: {
             CHECK_INTERFACE(IRadio, data, reply);
-            unsigned int channel = (unsigned int)data.readInt32();
-            unsigned int subChannel = (unsigned int)data.readInt32();
+            uint32_t channel = data.readUint32();
+            uint32_t subChannel = data.readUint32();
             status_t status = tune(channel, subChannel);
             reply->writeInt32(status);
             return NO_ERROR;
@@ -303,22 +300,23 @@
         case GET_PROGRAM_INFORMATION: {
             CHECK_INTERFACE(IRadio, data, reply);
             struct radio_program_info info;
+            status_t status;
 
-            status_t status = radio_metadata_allocate(&info.metadata, 0, 0);
+            status = radio_metadata_allocate(&info.metadata, 0, 0);
             if (status != NO_ERROR) {
                 return status;
             }
             status = getProgramInformation(&info);
+
             reply->writeInt32(status);
             if (status == NO_ERROR) {
                 reply->write(&info, sizeof(struct radio_program_info));
-                int count = radio_metadata_get_count(info.metadata);
-                if (count > 0) {
+                if (radio_metadata_get_count(info.metadata) > 0) {
                     size_t size = radio_metadata_get_size(info.metadata);
-                    reply->writeInt32(size);
+                    reply->writeUint32((uint32_t)size);
                     reply->write(info.metadata, size);
                 } else {
-                    reply->writeInt32(0);
+                    reply->writeUint32(0);
                 }
             }
             radio_metadata_deallocate(info.metadata);
diff --git a/radio/IRadioService.cpp b/radio/IRadioService.cpp
index be7d21e..72e3a61 100644
--- a/radio/IRadioService.cpp
+++ b/radio/IRadioService.cpp
@@ -16,8 +16,7 @@
 */
 
 #define LOG_TAG "BpRadioService"
-//
-#define LOG_NDEBUG 0
+//#define LOG_NDEBUG 0
 
 #include <utils/Log.h>
 #include <utils/Errors.h>
@@ -58,12 +57,12 @@
         }
         Parcel data, reply;
         data.writeInterfaceToken(IRadioService::getInterfaceDescriptor());
-        unsigned int numModulesReq = (properties == NULL) ? 0 : *numModules;
+        uint32_t numModulesReq = (properties == NULL) ? 0 : *numModules;
         data.writeInt32(numModulesReq);
         status_t status = remote()->transact(LIST_MODULES, data, &reply);
         if (status == NO_ERROR) {
             status = (status_t)reply.readInt32();
-            *numModules = (unsigned int)reply.readInt32();
+            *numModules = (uint32_t)reply.readInt32();
         }
         ALOGV("listModules() status %d got *numModules %d", status, *numModules);
         if (status == NO_ERROR) {
@@ -120,11 +119,11 @@
     switch(code) {
         case LIST_MODULES: {
             CHECK_INTERFACE(IRadioService, data, reply);
-            unsigned int numModulesReq = data.readInt32();
+            uint32_t numModulesReq = data.readInt32();
             if (numModulesReq > MAX_ITEMS_PER_LIST) {
                 numModulesReq = MAX_ITEMS_PER_LIST;
             }
-            unsigned int numModules = numModulesReq;
+            uint32_t numModules = numModulesReq;
             struct radio_properties *properties =
                     (struct radio_properties *)calloc(numModulesReq,
                                                       sizeof(struct radio_properties));
diff --git a/radio/Radio.cpp b/radio/Radio.cpp
index 3c04fb0..fa39589 100644
--- a/radio/Radio.cpp
+++ b/radio/Radio.cpp
@@ -240,20 +240,31 @@
         return;
     }
 
+    // The event layout in shared memory is:
+    // sizeof(struct radio_event) bytes : the event itself
+    // 4 bytes                          : metadata size or 0
+    // N bytes                          : metadata if present
     struct radio_event *event = (struct radio_event *)eventMemory->pointer();
+    uint32_t metadataOffset = sizeof(struct radio_event) + sizeof(uint32_t);
+    uint32_t metadataSize = *(uint32_t *)((uint8_t *)event + metadataOffset - sizeof(uint32_t));
+
     // restore local metadata pointer from offset
     switch (event->type) {
     case RADIO_EVENT_TUNED:
     case RADIO_EVENT_AF_SWITCH:
-        if (event->info.metadata != NULL) {
+        if (metadataSize != 0) {
             event->info.metadata =
-                    (radio_metadata_t *)((char *)event + (size_t)event->info.metadata);
+                    (radio_metadata_t *)((uint8_t *)event + metadataOffset);
+        } else {
+            event->info.metadata = 0;
         }
         break;
     case RADIO_EVENT_METADATA:
-        if (event->metadata != NULL) {
+        if (metadataSize != 0) {
             event->metadata =
-                    (radio_metadata_t *)((char *)event + (size_t)event->metadata);
+                    (radio_metadata_t *)((uint8_t *)event + metadataOffset);
+        } else {
+            event->metadata = 0;
         }
         break;
     default:
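The comment added above documents how the service lays out each event in the shared memory region. Below is a minimal sketch (not part of the patch) of the client-side pointer fix-up that follows from that layout; fixupMetadataPointer is a hypothetical helper name.

// Layout, as documented in Radio.cpp:
//   [0, sizeof(struct radio_event))          the event itself
//   [sizeof(struct radio_event), +4)         uint32_t metadata size, 0 if absent
//   [sizeof(struct radio_event) + 4, +size)  metadata blob
static radio_metadata_t *fixupMetadataPointer(struct radio_event *event) {
    const uint32_t metadataOffset = sizeof(struct radio_event) + sizeof(uint32_t);
    const uint32_t metadataSize =
            *(uint32_t *)((uint8_t *)event + sizeof(struct radio_event));
    // Pointer values from the service process are not valid here; rebuild a
    // pointer in this process from the fixed offset, or NULL when no metadata
    // was shipped with the event.
    return metadataSize != 0
            ? (radio_metadata_t *)((uint8_t *)event + metadataOffset)
            : NULL;
}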
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 5c28e46..aa2cd95 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -28,20 +28,18 @@
     AudioStreamOut.cpp          \
     SpdifStreamOut.cpp          \
     Effects.cpp                 \
-    AudioMixer.cpp.arm          \
-    BufferProviders.cpp         \
     PatchPanel.cpp              \
-    StateQueue.cpp
+    StateQueue.cpp              \
+    BufLog.cpp
 
 LOCAL_C_INCLUDES := \
     $(TOPDIR)frameworks/av/services/audiopolicy \
     $(TOPDIR)frameworks/av/services/medialog \
-    $(TOPDIR)external/sonic \
-    $(call include-path-for, audio-effects) \
     $(call include-path-for, audio-utils)
 
 LOCAL_SHARED_LIBRARIES := \
-    libaudioresampler \
+    libaudiohal \
+    libaudioprocessing \
     libaudiospdif \
     libaudioutils \
     libcutils \
@@ -52,18 +50,14 @@
     libmedialogservice \
     libmediautils \
     libnbaio \
-    libhardware \
-    libhardware_legacy \
-    libeffects \
     libpowermanager \
     libserviceutility \
-    libsonic \
     libmediautils \
-    libmemunreachable
+    libmemunreachable \
+    libmedia_helper
 
 LOCAL_STATIC_LIBRARIES := \
     libcpustats \
-    libmedia_helper
 
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
@@ -89,59 +83,4 @@
 
 include $(BUILD_SHARED_LIBRARY)
 
-#
-# build audio resampler test tool
-#
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:=               \
-    test-resample.cpp           \
-
-LOCAL_C_INCLUDES := \
-    $(call include-path-for, audio-utils)
-
-LOCAL_STATIC_LIBRARIES := \
-    libsndfile
-
-LOCAL_SHARED_LIBRARIES := \
-    libaudioresampler \
-    libaudioutils \
-    libdl \
-    libcutils \
-    libutils \
-    liblog
-
-LOCAL_MODULE:= test-resample
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_CFLAGS := -Werror -Wall
-
-include $(BUILD_EXECUTABLE)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
-    AudioResampler.cpp.arm \
-    AudioResamplerCubic.cpp.arm \
-    AudioResamplerSinc.cpp.arm \
-    AudioResamplerDyn.cpp.arm
-
-LOCAL_C_INCLUDES := \
-    $(call include-path-for, audio-utils)
-
-LOCAL_SHARED_LIBRARIES := \
-    libcutils \
-    libdl \
-    liblog
-
-LOCAL_MODULE := libaudioresampler
-
-LOCAL_CFLAGS := -Werror -Wall
-
-# uncomment to disable NEON on architectures that actually do support NEON, for benchmarking
-#LOCAL_CFLAGS += -DUSE_NEON=false
-
-include $(BUILD_SHARED_LIBRARY)
-
 include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 1a41106..a248912 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -31,6 +31,11 @@
 #include <utils/Log.h>
 #include <utils/Trace.h>
 #include <binder/Parcel.h>
+#include <media/audiohal/DeviceHalInterface.h>
+#include <media/audiohal/DevicesFactoryHalInterface.h>
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+#include <media/AudioParameter.h>
+#include <media/TypeConverter.h>
 #include <memunreachable/memunreachable.h>
 #include <utils/String16.h>
 #include <utils/threads.h>
@@ -40,18 +45,15 @@
 #include <cutils/properties.h>
 
 #include <system/audio.h>
-#include <hardware/audio.h>
 
-#include "AudioMixer.h"
 #include "AudioFlinger.h"
 #include "ServiceUtilities.h"
 
 #include <media/AudioResamplerPublic.h>
 
-#include <media/EffectsFactoryApi.h>
-#include <audio_effects/effect_visualizer.h>
-#include <audio_effects/effect_ns.h>
-#include <audio_effects/effect_aec.h>
+#include <system/audio_effects/effect_visualizer.h>
+#include <system/audio_effects/effect_ns.h>
+#include <system/audio_effects/effect_aec.h>
 
 #include <audio_utils/primitives.h>
 
@@ -65,6 +67,9 @@
 #include <mediautils/BatteryNotifier.h>
 #include <private/android_filesystem_config.h>
 
+//#define BUFLOG_NDEBUG 0
+#include <BufLog.h>
+
 // ----------------------------------------------------------------------------
 
 // Note: the following macro is used for extremely verbose logging message.  In
@@ -85,6 +90,7 @@
 static const char kDeadlockedString[] = "AudioFlinger may be deadlocked\n";
 static const char kHardwareLockedString[] = "Hardware lock is taken\n";
 static const char kClientLockedString[] = "Client lock is taken\n";
+static const char kNoEffectsFactory[] = "Effects Factory is absent\n";
 
 
 nsecs_t AudioFlinger::mStandbyTimeInNsecs = kDefaultStandbyTimeInNsecs;
@@ -107,67 +113,10 @@
 
 // ----------------------------------------------------------------------------
 
-const char *formatToString(audio_format_t format) {
-    switch (audio_get_main_format(format)) {
-    case AUDIO_FORMAT_PCM:
-        switch (format) {
-        case AUDIO_FORMAT_PCM_16_BIT: return "pcm16";
-        case AUDIO_FORMAT_PCM_8_BIT: return "pcm8";
-        case AUDIO_FORMAT_PCM_32_BIT: return "pcm32";
-        case AUDIO_FORMAT_PCM_8_24_BIT: return "pcm8.24";
-        case AUDIO_FORMAT_PCM_FLOAT: return "pcmfloat";
-        case AUDIO_FORMAT_PCM_24_BIT_PACKED: return "pcm24";
-        default:
-            break;
-        }
-        break;
-    case AUDIO_FORMAT_MP3: return "mp3";
-    case AUDIO_FORMAT_AMR_NB: return "amr-nb";
-    case AUDIO_FORMAT_AMR_WB: return "amr-wb";
-    case AUDIO_FORMAT_AAC: return "aac";
-    case AUDIO_FORMAT_HE_AAC_V1: return "he-aac-v1";
-    case AUDIO_FORMAT_HE_AAC_V2: return "he-aac-v2";
-    case AUDIO_FORMAT_VORBIS: return "vorbis";
-    case AUDIO_FORMAT_OPUS: return "opus";
-    case AUDIO_FORMAT_AC3: return "ac-3";
-    case AUDIO_FORMAT_E_AC3: return "e-ac-3";
-    case AUDIO_FORMAT_IEC61937: return "iec61937";
-    case AUDIO_FORMAT_DTS: return "dts";
-    case AUDIO_FORMAT_DTS_HD: return "dts-hd";
-    case AUDIO_FORMAT_DOLBY_TRUEHD: return "dolby-truehd";
-    default:
-        break;
-    }
-    return "unknown";
-}
-
-static int load_audio_interface(const char *if_name, audio_hw_device_t **dev)
-{
-    const hw_module_t *mod;
-    int rc;
-
-    rc = hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, if_name, &mod);
-    ALOGE_IF(rc, "%s couldn't load audio hw module %s.%s (%s)", __func__,
-                 AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
-    if (rc) {
-        goto out;
-    }
-    rc = audio_hw_device_open(mod, dev);
-    ALOGE_IF(rc, "%s couldn't open audio hw device in %s.%s (%s)", __func__,
-                 AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
-    if (rc) {
-        goto out;
-    }
-    if ((*dev)->common.version < AUDIO_DEVICE_API_VERSION_MIN) {
-        ALOGE("%s wrong audio hw device version %04x", __func__, (*dev)->common.version);
-        rc = BAD_VALUE;
-        goto out;
-    }
-    return 0;
-
-out:
-    *dev = NULL;
-    return rc;
+std::string formatToString(audio_format_t format) {
+    std::string result;
+    FormatConverter::toString(format, result);
+    return result;
 }
 
 // ----------------------------------------------------------------------------
@@ -205,6 +154,9 @@
     // in bad state, reset the state upon service start.
     BatteryNotifier::getInstance().noteResetAudio();
 
+    mDevicesFactoryHal = DevicesFactoryHalInterface::create();
+    mEffectsFactoryHal = EffectsFactoryHalInterface::create();
+
 #ifdef TEE_SINK
     char value[PROPERTY_VALUE_MAX];
     (void) property_get("ro.debuggable", value, "0");
@@ -263,7 +215,6 @@
 
     for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
         // no mHardwareLock needed, as there are no other references to this
-        audio_hw_device_close(mAudioHwDevs.valueAt(i)->hwDevice());
         delete mAudioHwDevs.valueAt(i);
     }
 
@@ -302,10 +253,12 @@
         // then try to find a module supporting the requested device.
         for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
             AudioHwDevice *audioHwDevice = mAudioHwDevs.valueAt(i);
-            audio_hw_device_t *dev = audioHwDevice->hwDevice();
-            if ((dev->get_supported_devices != NULL) &&
-                    (dev->get_supported_devices(dev) & devices) == devices)
+            sp<DeviceHalInterface> dev = audioHwDevice->hwDevice();
+            uint32_t supportedDevices;
+            if (dev->getSupportedDevices(&supportedDevices) == OK &&
+                    (supportedDevices & devices) == devices) {
                 return audioHwDevice;
+            }
         }
     } else {
         // check a match for the requested module handle
@@ -419,7 +372,12 @@
             write(fd, result.string(), result.size());
         }
 
-        EffectDumpEffects(fd);
+        if (mEffectsFactoryHal != 0) {
+            mEffectsFactoryHal->dumpEffects(fd);
+        } else {
+            String8 result(kNoEffectsFactory);
+            write(fd, result.string(), result.size());
+        }
 
         dumpClients(fd, args);
         if (clientLocked) {
@@ -447,8 +405,8 @@
         }
         // dump all hardware devs
         for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
-            audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
-            dev->dump(dev, fd);
+            sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
+            dev->dump(fd);
         }
 
 #ifdef TEE_SINK
@@ -458,6 +416,8 @@
         }
 #endif
 
+        BUFLOG_RESET;
+
         if (locked) {
             mLock.unlock();
         }
@@ -551,8 +511,11 @@
         return new NBLog::Writer();
     }
 success:
+    NBLog::Shared *sharedRawPtr = (NBLog::Shared *) shared->pointer();
+    new((void *) sharedRawPtr) NBLog::Shared(); // placement new here, but the corresponding
+                                                // explicit destructor is not needed since it is POD
     mediaLogService->registerWriter(shared, size, name);
-    return new NBLog::Writer(size, shared);
+    return new NBLog::Writer(shared, size);
 }
 
 void AudioFlinger::unregisterWriter(const sp<NBLog::Writer>& writer)
@@ -586,7 +549,8 @@
         pid_t tid,
         audio_session_t *sessionId,
         int clientUid,
-        status_t *status)
+        status_t *status,
+        audio_port_handle_t portId)
 {
     sp<PlaybackThread::Track> track;
     sp<TrackHandle> trackHandle;
@@ -679,7 +643,8 @@
         ALOGV("createTrack() lSessionId: %d", lSessionId);
 
         track = thread->createTrack_l(client, streamType, sampleRate, format,
-                channelMask, frameCount, sharedBuffer, lSessionId, flags, tid, clientUid, &lStatus);
+                channelMask, frameCount, sharedBuffer, lSessionId, flags, tid,
+                clientUid, &lStatus, portId);
         LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (track == 0));
         // we don't abort yet if lStatus != NO_ERROR; there is still work to be done regardless
 
@@ -810,7 +775,7 @@
 
         mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
         if (dev->canSetMasterVolume()) {
-            dev->hwDevice()->set_master_volume(dev->hwDevice(), value);
+            dev->hwDevice()->setMasterVolume(value);
         }
         mHardwareStatus = AUDIO_HW_IDLE;
     }
@@ -847,9 +812,9 @@
 
     { // scope for the lock
         AutoMutex lock(mHardwareLock);
-        audio_hw_device_t *dev = mPrimaryHardwareDev->hwDevice();
+        sp<DeviceHalInterface> dev = mPrimaryHardwareDev->hwDevice();
         mHardwareStatus = AUDIO_HW_SET_MODE;
-        ret = dev->set_mode(dev, mode);
+        ret = dev->setMode(mode);
         mHardwareStatus = AUDIO_HW_IDLE;
     }
 
@@ -878,8 +843,8 @@
     AutoMutex lock(mHardwareLock);
     mHardwareStatus = AUDIO_HW_SET_MIC_MUTE;
     for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
-        audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
-        status_t result = dev->set_mic_mute(dev, state);
+        sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
+        status_t result = dev->setMicMute(state);
         if (result != NO_ERROR) {
             ret = result;
         }
@@ -899,8 +864,8 @@
     AutoMutex lock(mHardwareLock);
     mHardwareStatus = AUDIO_HW_GET_MIC_MUTE;
     for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
-        audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
-        status_t result = dev->get_mic_mute(dev, &state);
+        sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
+        status_t result = dev->getMicMute(&state);
         if (result == NO_ERROR) {
             mute = mute && state;
         }
@@ -932,7 +897,7 @@
 
         mHardwareStatus = AUDIO_HW_SET_MASTER_MUTE;
         if (dev->canSetMasterMute()) {
-            dev->hwDevice()->set_master_mute(dev->hwDevice(), muted);
+            dev->hwDevice()->setMasterMute(muted);
         }
         mHardwareStatus = AUDIO_HW_IDLE;
     }
@@ -1110,8 +1075,8 @@
             AutoMutex lock(mHardwareLock);
             mHardwareStatus = AUDIO_HW_SET_PARAMETER;
             for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
-                audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
-                status_t result = dev->set_parameters(dev, keyValuePairs.string());
+                sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
+                status_t result = dev->setParameters(keyValuePairs);
                 // return success if at least one audio device accepts the parameters as not all
                 // HALs are requested to support all parameters. If no audio device supports the
                 // requested parameters, the last error is reported.
@@ -1124,8 +1089,8 @@
         // disable AEC and NS if the device is a BT SCO headset supporting those pre processings
         AudioParameter param = AudioParameter(keyValuePairs);
         String8 value;
-        if (param.get(String8(AUDIO_PARAMETER_KEY_BT_NREC), value) == NO_ERROR) {
-            bool btNrecIsOff = (value == AUDIO_PARAMETER_VALUE_OFF);
+        if (param.get(String8(AudioParameter::keyBtNrec), value) == NO_ERROR) {
+            bool btNrecIsOff = (value == AudioParameter::valueOff);
             if (mBtNrecIsOff != btNrecIsOff) {
                 for (size_t i = 0; i < mRecordThreads.size(); i++) {
                     sp<RecordThread> thread = mRecordThreads.valueAt(i);
@@ -1149,7 +1114,7 @@
         }
         String8 screenState;
         if (param.get(String8(AudioParameter::keyScreenState), screenState) == NO_ERROR) {
-            bool isOff = screenState == "off";
+            bool isOff = (screenState == AudioParameter::valueOff);
             if (isOff != (AudioFlinger::mScreenState & 1)) {
                 AudioFlinger::mScreenState = ((AudioFlinger::mScreenState & ~1) + 2) | isOff;
             }
@@ -1192,16 +1157,16 @@
         String8 out_s8;
 
         for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
-            char *s;
+            String8 s;
+            status_t result;
             {
             AutoMutex lock(mHardwareLock);
             mHardwareStatus = AUDIO_HW_GET_PARAMETER;
-            audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
-            s = dev->get_parameters(dev, keys.string());
+            sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
+            result = dev->getParameters(keys, &s);
             mHardwareStatus = AUDIO_HW_IDLE;
             }
-            out_s8 += String8(s ? s : "");
-            free(s);
+            if (result == OK) out_s8 += s;
         }
         return out_s8;
     }
@@ -1238,14 +1203,14 @@
     proposed.channel_mask = channelMask;
     proposed.format = format;
 
-    audio_hw_device_t *dev = mPrimaryHardwareDev->hwDevice();
+    sp<DeviceHalInterface> dev = mPrimaryHardwareDev->hwDevice();
     size_t frames;
     for (;;) {
         // Note: config is currently a const parameter for get_input_buffer_size()
         // but we use a copy from proposed in case config changes from the call.
         config = proposed;
-        frames = dev->get_input_buffer_size(dev, &config);
-        if (frames != 0) {
+        status_t result = dev->getInputBufferSize(&config, &frames);
+        if (result == OK && frames != 0) {
             break; // hal success, config is the result
         }
         // change one parameter of the configuration each iteration to a more "common" value
@@ -1292,9 +1257,9 @@
     }
 
     AutoMutex lock(mHardwareLock);
-    audio_hw_device_t *dev = mPrimaryHardwareDev->hwDevice();
+    sp<DeviceHalInterface> dev = mPrimaryHardwareDev->hwDevice();
     mHardwareStatus = AUDIO_HW_SET_VOICE_VOLUME;
-    ret = dev->set_voice_volume(dev, value);
+    ret = dev->setVoiceVolume(value);
     mHardwareStatus = AUDIO_HW_IDLE;
 
     return ret;
@@ -1484,7 +1449,8 @@
         size_t *notificationFrames,
         sp<IMemory>& cblk,
         sp<IMemory>& buffers,
-        status_t *status)
+        status_t *status,
+        audio_port_handle_t portId)
 {
     sp<RecordThread::RecordTrack> recordTrack;
     sp<RecordHandle> recordHandle;
@@ -1568,7 +1534,7 @@
 
         recordTrack = thread->createRecordTrack_l(client, sampleRate, format, channelMask,
                                                   frameCount, lSessionId, notificationFrames,
-                                                  clientUid, flags, tid, &lStatus);
+                                                  clientUid, flags, tid, &lStatus, portId);
         LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (recordTrack == 0));
 
         if (lStatus == NO_ERROR) {
@@ -1632,16 +1598,16 @@
         }
     }
 
-    audio_hw_device_t *dev;
+    sp<DeviceHalInterface> dev;
 
-    int rc = load_audio_interface(name, &dev);
+    int rc = mDevicesFactoryHal->openDevice(name, &dev);
     if (rc) {
         ALOGE("loadHwModule() error %d loading module %s", rc, name);
         return AUDIO_MODULE_HANDLE_NONE;
     }
 
     mHardwareStatus = AUDIO_HW_INIT;
-    rc = dev->init_check(dev);
+    rc = dev->initCheck();
     mHardwareStatus = AUDIO_HW_IDLE;
     if (rc) {
         ALOGE("loadHwModule() init check error %d for module %s", rc, name);
@@ -1659,32 +1625,26 @@
 
         if (0 == mAudioHwDevs.size()) {
             mHardwareStatus = AUDIO_HW_GET_MASTER_VOLUME;
-            if (NULL != dev->get_master_volume) {
-                float mv;
-                if (OK == dev->get_master_volume(dev, &mv)) {
-                    mMasterVolume = mv;
-                }
+            float mv;
+            if (OK == dev->getMasterVolume(&mv)) {
+                mMasterVolume = mv;
             }
 
             mHardwareStatus = AUDIO_HW_GET_MASTER_MUTE;
-            if (NULL != dev->get_master_mute) {
-                bool mm;
-                if (OK == dev->get_master_mute(dev, &mm)) {
-                    mMasterMute = mm;
-                }
+            bool mm;
+            if (OK == dev->getMasterMute(&mm)) {
+                mMasterMute = mm;
             }
         }
 
         mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
-        if ((NULL != dev->set_master_volume) &&
-            (OK == dev->set_master_volume(dev, mMasterVolume))) {
+        if (OK == dev->setMasterVolume(mMasterVolume)) {
             flags = static_cast<AudioHwDevice::Flags>(flags |
                     AudioHwDevice::AHWD_CAN_SET_MASTER_VOLUME);
         }
 
         mHardwareStatus = AUDIO_HW_SET_MASTER_MUTE;
-        if ((NULL != dev->set_master_mute) &&
-            (OK == dev->set_master_mute(dev, mMasterMute))) {
+        if (OK == dev->setMasterMute(mMasterMute)) {
             flags = static_cast<AudioHwDevice::Flags>(flags |
                     AudioHwDevice::AHWD_CAN_SET_MASTER_MUTE);
         }
@@ -1695,8 +1655,7 @@
     audio_module_handle_t handle = (audio_module_handle_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_MODULE);
     mAudioHwDevs.add(handle, new AudioHwDevice(handle, name, dev, flags));
 
-    ALOGI("loadHwModule() Loaded %s audio interface from %s (%s) handle %d",
-          name, dev->common.module->name, dev->common.module->id, handle);
+    ALOGI("loadHwModule() Loaded %s audio interface, handle %d", name, handle);
 
     return handle;
 
@@ -1746,16 +1705,18 @@
         return mHwAvSyncIds.valueAt(index);
     }
 
-    audio_hw_device_t *dev = mPrimaryHardwareDev->hwDevice();
+    sp<DeviceHalInterface> dev = mPrimaryHardwareDev->hwDevice();
     if (dev == NULL) {
         return AUDIO_HW_SYNC_INVALID;
     }
-    char *reply = dev->get_parameters(dev, AUDIO_PARAMETER_HW_AV_SYNC);
-    AudioParameter param = AudioParameter(String8(reply));
-    free(reply);
+    String8 reply;
+    AudioParameter param;
+    if (dev->getParameters(String8(AudioParameter::keyHwAvSync), &reply) == OK) {
+        param = AudioParameter(reply);
+    }
 
     int value;
-    if (param.getInt(String8(AUDIO_PARAMETER_HW_AV_SYNC), value) != NO_ERROR) {
+    if (param.getInt(String8(AudioParameter::keyHwAvSync), value) != NO_ERROR) {
         ALOGW("getAudioHwSyncForSession error getting sync for session %d", sessionId);
         return AUDIO_HW_SYNC_INVALID;
     }
@@ -1777,7 +1738,7 @@
         uint32_t sessions = thread->hasAudioSession(sessionId);
         if (sessions & ThreadBase::TRACK_SESSION) {
             AudioParameter param = AudioParameter();
-            param.addInt(String8(AUDIO_PARAMETER_STREAM_HW_AV_SYNC), value);
+            param.addInt(String8(AudioParameter::keyStreamHwAvSync), value);
             thread->setParameters(param.toString());
             break;
         }
@@ -1815,7 +1776,7 @@
         audio_hw_sync_t syncId = mHwAvSyncIds.valueAt(index);
         ALOGV("setAudioHwSyncForSession_l found ID %d for session %d", syncId, sessionId);
         AudioParameter param = AudioParameter();
-        param.addInt(String8(AUDIO_PARAMETER_STREAM_HW_AV_SYNC), syncId);
+        param.addInt(String8(AudioParameter::keyStreamHwAvSync), syncId);
         thread->setParameters(param.toString());
     }
 }
@@ -1937,7 +1898,7 @@
 
             AutoMutex lock(mHardwareLock);
             mHardwareStatus = AUDIO_HW_SET_MODE;
-            mPrimaryHardwareDev->hwDevice()->set_mode(mPrimaryHardwareDev->hwDevice(), mMode);
+            mPrimaryHardwareDev->hwDevice()->setMode(mMode);
             mHardwareStatus = AUDIO_HW_IDLE;
         }
         return NO_ERROR;
@@ -2033,7 +1994,6 @@
     AudioStreamOut *out = thread->clearOutput();
     ALOG_ASSERT(out != NULL, "out shouldn't be NULL");
     // from now on thread->mOutput is NULL
-    out->hwDev()->close_output_stream(out->hwDev(), out->stream);
     delete out;
 }
 
@@ -2128,13 +2088,14 @@
     }
 
     audio_config_t halconfig = *config;
-    audio_hw_device_t *inHwHal = inHwDev->hwDevice();
-    audio_stream_in_t *inStream = NULL;
-    status_t status = inHwHal->open_input_stream(inHwHal, *input, devices, &halconfig,
-                                        &inStream, flags, address.string(), source);
-    ALOGV("openInput_l() openInputStream returned input %p, SamplingRate %d"
+    sp<DeviceHalInterface> inHwHal = inHwDev->hwDevice();
+    sp<StreamInHalInterface> inStream;
+    status_t status = inHwHal->openInputStream(
+            *input, devices, &halconfig, flags, address.string(), source, &inStream);
+    ALOGV("openInput_l() openInputStream returned input %p, devices %x, SamplingRate %d"
            ", Format %#x, Channels %x, flags %#x, status %d addr %s",
-            inStream,
+            inStream.get(),
+            devices,
             halconfig.sample_rate,
             halconfig.format,
             halconfig.channel_mask,
@@ -2151,13 +2112,13 @@
         (audio_channel_count_from_in_mask(config->channel_mask) <= FCC_8)) {
         // FIXME describe the change proposed by HAL (save old values so we can log them here)
         ALOGV("openInput_l() reopening with proposed sampling rate and channel mask");
-        inStream = NULL;
-        status = inHwHal->open_input_stream(inHwHal, *input, devices, &halconfig,
-                                            &inStream, flags, address.string(), source);
+        inStream.clear();
+        status = inHwHal->openInputStream(
+                *input, devices, &halconfig, flags, address.string(), source, &inStream);
         // FIXME log this new status; HAL should not propose any further changes
     }
 
-    if (status == NO_ERROR && inStream != NULL) {
+    if (status == NO_ERROR && inStream != 0) {
 
 #ifdef TEE_SINK
         // Try to re-use most recently used Pipe to archive a copy of input for dumpsys,
@@ -2304,7 +2265,6 @@
     AudioStreamIn *in = thread->clearInput();
     ALOG_ASSERT(in != NULL, "in shouldn't be NULL");
     // from now on thread->mInput is NULL
-    in->hwDev()->close_input_stream(in->hwDev(), in->stream);
     delete in;
 }
 
@@ -2608,24 +2568,39 @@
 //  Effect management
 // ----------------------------------------------------------------------------
 
+sp<EffectsFactoryHalInterface> AudioFlinger::getEffectsFactory() {
+    return mEffectsFactoryHal;
+}
 
 status_t AudioFlinger::queryNumberEffects(uint32_t *numEffects) const
 {
     Mutex::Autolock _l(mLock);
-    return EffectQueryNumberEffects(numEffects);
+    if (mEffectsFactoryHal.get()) {
+        return mEffectsFactoryHal->queryNumberEffects(numEffects);
+    } else {
+        return -ENODEV;
+    }
 }
 
 status_t AudioFlinger::queryEffect(uint32_t index, effect_descriptor_t *descriptor) const
 {
     Mutex::Autolock _l(mLock);
-    return EffectQueryEffect(index, descriptor);
+    if (mEffectsFactoryHal.get()) {
+        return mEffectsFactoryHal->getDescriptor(index, descriptor);
+    } else {
+        return -ENODEV;
+    }
 }
 
 status_t AudioFlinger::getEffectDescriptor(const effect_uuid_t *pUuid,
         effect_descriptor_t *descriptor) const
 {
     Mutex::Autolock _l(mLock);
-    return EffectGetDescriptor(pUuid, descriptor);
+    if (mEffectsFactoryHal.get()) {
+        return mEffectsFactoryHal->getDescriptor(pUuid, descriptor);
+    } else {
+        return -ENODEV;
+    }
 }
 
 
@@ -2636,6 +2611,7 @@
         audio_io_handle_t io,
         audio_session_t sessionId,
         const String16& opPackageName,
+        pid_t pid,
         status_t *status,
         int *id,
         int *enabled)
@@ -2644,9 +2620,17 @@
     sp<EffectHandle> handle;
     effect_descriptor_t desc;
 
-    pid_t pid = IPCThreadState::self()->getCallingPid();
-    ALOGV("createEffect pid %d, effectClient %p, priority %d, sessionId %d, io %d",
-            pid, effectClient.get(), priority, sessionId, io);
+    const uid_t callingUid = IPCThreadState::self()->getCallingUid();
+    if (pid == -1 || !isTrustedCallingUid(callingUid)) {
+        const pid_t callingPid = IPCThreadState::self()->getCallingPid();
+        ALOGW_IF(pid != -1 && pid != callingPid,
+                 "%s uid %d pid %d tried to pass itself off as pid %d",
+                 __func__, callingUid, callingPid, pid);
+        pid = callingPid;
+    }
+
+    ALOGV("createEffect pid %d, effectClient %p, priority %d, sessionId %d, io %d, factory %p",
+            pid, effectClient.get(), priority, sessionId, io, mEffectsFactoryHal.get());
 
     if (pDesc == NULL) {
         lStatus = BAD_VALUE;
@@ -2666,10 +2650,15 @@
         goto Exit;
     }
 
+    if (mEffectsFactoryHal == 0) {
+        lStatus = NO_INIT;
+        goto Exit;
+    }
+
     {
-        if (!EffectIsNullUuid(&pDesc->uuid)) {
+        if (!EffectsFactoryHalInterface::isNullUuid(&pDesc->uuid)) {
             // if uuid is specified, request effect descriptor
-            lStatus = EffectGetDescriptor(&pDesc->uuid, &desc);
+            lStatus = mEffectsFactoryHal->getDescriptor(&pDesc->uuid, &desc);
             if (lStatus < 0) {
                 ALOGW("createEffect() error %d from EffectGetDescriptor", lStatus);
                 goto Exit;
@@ -2677,7 +2666,7 @@
         } else {
             // if uuid is not specified, look for an available implementation
             // of the required type in effect factory
-            if (EffectIsNullUuid(&pDesc->type)) {
+            if (EffectsFactoryHalInterface::isNullUuid(&pDesc->type)) {
                 ALOGW("createEffect() no effect type");
                 lStatus = BAD_VALUE;
                 goto Exit;
@@ -2687,13 +2676,13 @@
             d.flags = 0; // prevent compiler warning
             bool found = false;
 
-            lStatus = EffectQueryNumberEffects(&numEffects);
+            lStatus = mEffectsFactoryHal->queryNumberEffects(&numEffects);
             if (lStatus < 0) {
                 ALOGW("createEffect() error %d from EffectQueryNumberEffects", lStatus);
                 goto Exit;
             }
             for (uint32_t i = 0; i < numEffects; i++) {
-                lStatus = EffectQueryEffect(i, &desc);
+                lStatus = mEffectsFactoryHal->getDescriptor(i, &desc);
                 if (lStatus < 0) {
                     ALOGW("createEffect() error %d from EffectQueryEffect", lStatus);
                     continue;
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 8120a29..e97d1ed 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -20,11 +20,13 @@
 
 #include "Configuration.h"
 #include <deque>
+#include <map>
 #include <stdint.h>
 #include <sys/types.h>
 #include <limits.h>
 
 #include <cutils/compiler.h>
+#include <cutils/properties.h>
 
 #include <media/IAudioFlinger.h>
 #include <media/IAudioFlingerClient.h>
@@ -44,21 +46,22 @@
 #include <binder/MemoryDealer.h>
 
 #include <system/audio.h>
-#include <hardware/audio.h>
-#include <hardware/audio_policy.h>
+#include <system/audio_policy.h>
 
+#include <media/audiohal/EffectBufferHalInterface.h>
+#include <media/audiohal/StreamHalInterface.h>
 #include <media/AudioBufferProvider.h>
+#include <media/AudioMixer.h>
 #include <media/ExtendedAudioBufferProvider.h>
+#include <media/LinearMap.h>
 
 #include "FastCapture.h"
 #include "FastMixer.h"
 #include <media/nbaio/NBAIO.h>
 #include "AudioWatchdog.h"
-#include "AudioMixer.h"
 #include "AudioStreamOut.h"
 #include "SpdifStreamOut.h"
 #include "AudioHwDevice.h"
-#include "LinearMap.h"
 
 #include <powermanager/IPowerManager.h>
 
@@ -72,8 +75,12 @@
 class AudioMixer;
 class AudioBuffer;
 class AudioResampler;
+class DeviceHalInterface;
+class DevicesFactoryHalInterface;
+class EffectsFactoryHalInterface;
 class FastMixer;
 class PassthruBufferProvider;
+class RecordBufferConverter;
 class ServerProxy;
 
 // ----------------------------------------------------------------------------
@@ -112,7 +119,8 @@
                                 pid_t tid,
                                 audio_session_t *sessionId,
                                 int clientUid,
-                                status_t *status /*non-NULL*/);
+                                status_t *status /*non-NULL*/,
+                                audio_port_handle_t portId);
 
     virtual sp<IAudioRecord> openRecord(
                                 audio_io_handle_t input,
@@ -129,7 +137,8 @@
                                 size_t *notificationFrames,
                                 sp<IMemory>& cblk,
                                 sp<IMemory>& buffers,
-                                status_t *status /*non-NULL*/);
+                                status_t *status /*non-NULL*/,
+                                audio_port_handle_t portId);
 
     virtual     uint32_t    sampleRate(audio_io_handle_t ioHandle) const;
     virtual     audio_format_t format(audio_io_handle_t output) const;
@@ -221,6 +230,7 @@
                         audio_io_handle_t io,
                         audio_session_t sessionId,
                         const String16& opPackageName,
+                        pid_t pid,
                         status_t *status /*non-NULL*/,
                         int *id,
                         int *enabled);
@@ -272,6 +282,7 @@
 
     sp<NBLog::Writer>   newWriter_l(size_t size, const char *name);
     void                unregisterWriter(const sp<NBLog::Writer>& writer);
+    sp<EffectsFactoryHalInterface> getEffectsFactory();
 private:
     static const size_t kLogMemorySize = 40 * 1024;
     sp<MemoryDealer>    mLogMemoryDealer;   // == 0 when NBLog is disabled
@@ -613,12 +624,12 @@
 
     struct AudioStreamIn {
         AudioHwDevice* const audioHwDev;
-        audio_stream_in_t* const stream;
+        sp<StreamInHalInterface> stream;
         audio_input_flags_t flags;
 
-        audio_hw_device_t* hwDev() const { return audioHwDev->hwDevice(); }
+        sp<DeviceHalInterface> hwDev() const { return audioHwDev->hwDevice(); }
 
-        AudioStreamIn(AudioHwDevice *dev, audio_stream_in_t *in, audio_input_flags_t flags) :
+        AudioStreamIn(AudioHwDevice *dev, sp<StreamInHalInterface> in, audio_input_flags_t flags) :
             audioHwDev(dev), stream(in), flags(flags) {}
     };
 
@@ -647,6 +658,8 @@
                 AudioHwDevice*                      mPrimaryHardwareDev; // mAudioHwDevs[0] or NULL
                 DefaultKeyedVector<audio_module_handle_t, AudioHwDevice*>  mAudioHwDevs;
 
+                sp<DevicesFactoryHalInterface> mDevicesFactoryHal;
+
     // for dump, indicates which hardware operation is currently in progress (but not stream ops)
     enum hardware_call_state {
         AUDIO_HW_IDLE = 0,              // no operation in progress
@@ -762,16 +775,17 @@
     nsecs_t mGlobalEffectEnableTime;  // when a global effect was last enabled
 
     sp<PatchPanel> mPatchPanel;
+    sp<EffectsFactoryHalInterface> mEffectsFactoryHal;
 
     bool        mSystemReady;
 };
 
 #undef INCLUDING_FROM_AUDIOFLINGER_H
 
-const char *formatToString(audio_format_t format);
-String8 inputFlagsToString(audio_input_flags_t flags);
-String8 outputFlagsToString(audio_output_flags_t flags);
-String8 devicesToString(audio_devices_t devices);
+std::string formatToString(audio_format_t format);
+std::string inputFlagsToString(audio_input_flags_t flags);
+std::string outputFlagsToString(audio_output_flags_t flags);
+std::string devicesToString(audio_devices_t devices);
 const char *sourceToString(audio_source_t source);
 
 // ----------------------------------------------------------------------------
diff --git a/services/audioflinger/AudioHwDevice.cpp b/services/audioflinger/AudioHwDevice.cpp
index 7494930..b109d06 100644
--- a/services/audioflinger/AudioHwDevice.cpp
+++ b/services/audioflinger/AudioHwDevice.cpp
@@ -18,7 +18,7 @@
 #define LOG_TAG "AudioHwDevice"
 //#define LOG_NDEBUG 0
 
-#include <hardware/audio.h>
+#include <system/audio.h>
 #include <utils/Log.h>
 
 #include <audio_utils/spdif/SPDIFEncoder.h>
@@ -93,5 +93,10 @@
     return status;
 }
 
+bool AudioHwDevice::supportsAudioPatches() const {
+    bool result;
+    return mHwDevice->supportsAudioPatches(&result) == OK ? result : false;
+}
+
 
 }; // namespace android
diff --git a/services/audioflinger/AudioHwDevice.h b/services/audioflinger/AudioHwDevice.h
index b9f65c1..eb826c6 100644
--- a/services/audioflinger/AudioHwDevice.h
+++ b/services/audioflinger/AudioHwDevice.h
@@ -22,11 +22,10 @@
 #include <stdlib.h>
 #include <sys/types.h>
 
-#include <hardware/audio.h>
+#include <media/audiohal/DeviceHalInterface.h>
 #include <utils/Errors.h>
 #include <system/audio.h>
 
-
 namespace android {
 
 class AudioStreamOut;
@@ -40,7 +39,7 @@
 
     AudioHwDevice(audio_module_handle_t handle,
                   const char *moduleName,
-                  audio_hw_device_t *hwDevice,
+                  sp<DeviceHalInterface> hwDevice,
                   Flags flags)
         : mHandle(handle)
         , mModuleName(strdup(moduleName))
@@ -58,8 +57,7 @@
 
     audio_module_handle_t handle() const { return mHandle; }
     const char *moduleName() const { return mModuleName; }
-    audio_hw_device_t *hwDevice() const { return mHwDevice; }
-    uint32_t version() const { return mHwDevice->common.version; }
+    sp<DeviceHalInterface> hwDevice() const { return mHwDevice; }
 
     /** This method creates and opens the audio hardware output stream.
      * The "address" parameter qualifies the "devices" audio device type if needed.
@@ -76,10 +74,12 @@
             struct audio_config *config,
             const char *address);
 
+    bool supportsAudioPatches() const;
+
 private:
     const audio_module_handle_t mHandle;
     const char * const          mModuleName;
-    audio_hw_device_t * const   mHwDevice;
+    sp<DeviceHalInterface>      mHwDevice;
     const Flags                 mFlags;
 };
 
diff --git a/services/audioflinger/AudioStreamOut.cpp b/services/audioflinger/AudioStreamOut.cpp
index 6026bbb..1d4b3fe 100644
--- a/services/audioflinger/AudioStreamOut.cpp
+++ b/services/audioflinger/AudioStreamOut.cpp
@@ -18,7 +18,9 @@
 #define LOG_TAG "AudioFlinger"
 //#define LOG_NDEBUG 0
 
-#include <hardware/audio.h>
+#include <media/audiohal/DeviceHalInterface.h>
+#include <media/audiohal/StreamHalInterface.h>
+#include <system/audio.h>
 #include <utils/Log.h>
 
 #include "AudioHwDevice.h"
@@ -40,19 +42,23 @@
 {
 }
 
-audio_hw_device_t *AudioStreamOut::hwDev() const
+AudioStreamOut::~AudioStreamOut()
+{
+}
+
+sp<DeviceHalInterface> AudioStreamOut::hwDev() const
 {
     return audioHwDev->hwDevice();
 }
 
 status_t AudioStreamOut::getRenderPosition(uint64_t *frames)
 {
-    if (stream == NULL) {
+    if (stream == 0) {
         return NO_INIT;
     }
 
     uint32_t halPosition = 0;
-    status_t status = stream->get_render_position(stream, &halPosition);
+    status_t status = stream->getRenderPosition(&halPosition);
     if (status != NO_ERROR) {
         return status;
     }
@@ -84,12 +90,12 @@
 
 status_t AudioStreamOut::getPresentationPosition(uint64_t *frames, struct timespec *timestamp)
 {
-    if (stream == NULL) {
+    if (stream == 0) {
         return NO_INIT;
     }
 
     uint64_t halPosition = 0;
-    status_t status = stream->get_presentation_position(stream, &halPosition, timestamp);
+    status_t status = stream->getPresentationPosition(&halPosition, timestamp);
     if (status != NO_ERROR) {
         return status;
     }
@@ -115,24 +121,23 @@
         struct audio_config *config,
         const char *address)
 {
-    audio_stream_out_t *outStream;
+    sp<StreamOutHalInterface> outStream;
 
     audio_output_flags_t customFlags = (config->format == AUDIO_FORMAT_IEC61937)
                 ? (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO)
                 : flags;
 
-    int status = hwDev()->open_output_stream(
-            hwDev(),
+    int status = hwDev()->openOutputStream(
             handle,
             devices,
             customFlags,
             config,
-            &outStream,
-            address);
+            address,
+            &outStream);
     ALOGV("AudioStreamOut::open(), HAL returned "
             " stream %p, sampleRate %d, Format %#x, "
             "channelMask %#x, status %d",
-            outStream,
+            outStream.get(),
             config->sample_rate,
             config->format,
             config->channel_mask,
@@ -144,21 +149,20 @@
         struct audio_config customConfig = *config;
         customConfig.format = AUDIO_FORMAT_PCM_16_BIT;
 
-        status = hwDev()->open_output_stream(
-                hwDev(),
+        status = hwDev()->openOutputStream(
                 handle,
                 devices,
                 customFlags,
                 &customConfig,
-                &outStream,
-                address);
+                address,
+                &outStream);
         ALOGV("AudioStreamOut::open(), treat IEC61937 as PCM, status = %d", status);
     }
 
     if (status == NO_ERROR) {
         stream = outStream;
         mHalFormatHasProportionalFrames = audio_has_proportional_frames(config->format);
-        mHalFrameSize = audio_stream_out_frame_size(stream);
+        status = stream->getFrameSize(&mHalFrameSize);
     }
 
     return status;
@@ -166,47 +170,46 @@
 
 audio_format_t AudioStreamOut::getFormat() const
 {
-    return stream->common.get_format(&stream->common);
+    audio_format_t result;
+    return stream->getFormat(&result) == OK ? result : AUDIO_FORMAT_INVALID;
 }
 
 uint32_t AudioStreamOut::getSampleRate() const
 {
-    return stream->common.get_sample_rate(&stream->common);
+    uint32_t result;
+    return stream->getSampleRate(&result) == OK ? result : 0;
 }
 
 audio_channel_mask_t AudioStreamOut::getChannelMask() const
 {
-    return stream->common.get_channels(&stream->common);
+    audio_channel_mask_t result;
+    return stream->getChannelMask(&result) == OK ? result : AUDIO_CHANNEL_INVALID;
 }
 
 int AudioStreamOut::flush()
 {
-    ALOG_ASSERT(stream != NULL);
     mRenderPosition = 0;
     mFramesWritten = 0;
     mFramesWrittenAtStandby = 0;
-    if (stream->flush != NULL) {
-        return stream->flush(stream);
-    }
-    return NO_ERROR;
+    status_t result = stream->flush();
+    return result != INVALID_OPERATION ? result : NO_ERROR;
 }
 
 int AudioStreamOut::standby()
 {
-    ALOG_ASSERT(stream != NULL);
     mRenderPosition = 0;
     mFramesWrittenAtStandby = mFramesWritten;
-    return stream->common.standby(&stream->common);
+    return stream->standby();
 }
 
 ssize_t AudioStreamOut::write(const void *buffer, size_t numBytes)
 {
-    ALOG_ASSERT(stream != NULL);
-    ssize_t bytesWritten = stream->write(stream, buffer, numBytes);
-    if (bytesWritten > 0 && mHalFrameSize > 0) {
+    size_t bytesWritten;
+    status_t result = stream->write(buffer, numBytes, &bytesWritten);
+    if (result == OK && bytesWritten > 0 && mHalFrameSize > 0) {
         mFramesWritten += bytesWritten / mHalFrameSize;
     }
-    return bytesWritten;
+    return result == OK ? bytesWritten : result;
 }
 
 } // namespace android
diff --git a/services/audioflinger/AudioStreamOut.h b/services/audioflinger/AudioStreamOut.h
index 768f537..b16b1af 100644
--- a/services/audioflinger/AudioStreamOut.h
+++ b/services/audioflinger/AudioStreamOut.h
@@ -23,11 +23,11 @@
 
 #include <system/audio.h>
 
-#include "AudioStreamOut.h"
-
 namespace android {
 
 class AudioHwDevice;
+class DeviceHalInterface;
+class StreamOutHalInterface;
 
 /**
  * Managed access to a HAL output stream.
@@ -38,10 +38,10 @@
 // For emphasis, we could also make all pointers to them be "const *",
 // but that would clutter the code unnecessarily.
     AudioHwDevice * const audioHwDev;
-    audio_stream_out_t *stream;
+    sp<StreamOutHalInterface> stream;
     const audio_output_flags_t flags;
 
-    audio_hw_device_t *hwDev() const;
+    sp<DeviceHalInterface> hwDev() const;
 
     AudioStreamOut(AudioHwDevice *dev, audio_output_flags_t flags);
 
@@ -51,7 +51,7 @@
             struct audio_config *config,
             const char *address);
 
-    virtual ~AudioStreamOut() { }
+    virtual ~AudioStreamOut();
 
     // Get the bottom 32-bits of the 64-bit render position.
     status_t getRenderPosition(uint32_t *frames);
diff --git a/services/audioflinger/BufLog.cpp b/services/audioflinger/BufLog.cpp
new file mode 100644
index 0000000..9680eb5
--- /dev/null
+++ b/services/audioflinger/BufLog.cpp
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "BufLog.h"
+#define LOG_TAG "BufLog"
+//#define LOG_NDEBUG 0
+
+#include <errno.h>
+#include "log/log.h"
+#include <pthread.h>
+#include <stdio.h>
+#include <string.h>
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+
+// ------------------------------
+// BufLogSingleton
+// ------------------------------
+pthread_once_t onceControl = PTHREAD_ONCE_INIT;
+
+BufLog *BufLogSingleton::mInstance = NULL;
+
+void BufLogSingleton::initOnce() {
+    mInstance = new BufLog();
+    ALOGW("=====================================\n" \
+            "Warning: BUFLOG is defined in some part of your code.\n" \
+            "This will create large audio dumps in %s.\n" \
+            "=====================================\n", BUFLOG_BASE_PATH);
+}
+
+BufLog *BufLogSingleton::instance() {
+    pthread_once(&onceControl, initOnce);
+    return mInstance;
+}
+
+bool BufLogSingleton::instanceExists() {
+    return mInstance != NULL;
+}
+
+// ------------------------------
+// BufLog
+// ------------------------------
+
+BufLog::BufLog() {
+    memset(mStreams, 0, sizeof(mStreams));
+}
+
+BufLog::~BufLog() {
+    android::Mutex::Autolock autoLock(mLock);
+
+    for (unsigned int id = 0; id < BUFLOG_MAXSTREAMS; id++) {
+        BufLogStream *pBLStream = mStreams[id];
+        if (pBLStream != NULL) {
+            delete pBLStream;
+            mStreams[id] = NULL;
+        }
+    }
+}
+
+size_t BufLog::write(int streamid, const char *tag, int format, int channels,
+        int samplingRate, size_t maxBytes, const void *buf, size_t size) {
+    unsigned int id = streamid % BUFLOG_MAXSTREAMS;
+    android::Mutex::Autolock autoLock(mLock);
+
+    BufLogStream *pBLStream = mStreams[id];
+
+    if (pBLStream == NULL) {
+        pBLStream = mStreams[id] = new BufLogStream(id, tag, format, channels,
+                samplingRate, maxBytes);
+        ALOG_ASSERT(pBLStream != NULL, "BufLogStream Failed to be created");
+    }
+
+    return pBLStream->write(buf, size);
+}
+
+void BufLog::reset() {
+    android::Mutex::Autolock autoLock(mLock);
+    ALOGV("Resetting all BufLogs");
+    int count = 0;
+
+    for (unsigned int id = 0; id < BUFLOG_MAXSTREAMS; id++) {
+        BufLogStream *pBLStream = mStreams[id];
+        if (pBLStream != NULL) {
+            delete pBLStream;
+            mStreams[id] = NULL;
+            count++;
+        }
+    }
+    ALOGV("Reset %d BufLogs", count);
+}
+
+// ------------------------------
+// BufLogStream
+// ------------------------------
+
+BufLogStream::BufLogStream(unsigned int id,
+        const char *tag,
+        unsigned int format,
+        unsigned int channels,
+        unsigned int samplingRate,
+        size_t maxBytes = 0) : mId(id), mFormat(format), mChannels(channels),
+                mSamplingRate(samplingRate), mMaxBytes(maxBytes) {
+    mByteCount = 0l;
+    mPaused = false;
+    if (tag != NULL) {
+        strncpy(mTag, tag, BUFLOGSTREAM_MAX_TAGSIZE);
+    } else {
+        mTag[0] = 0;
+    }
+    ALOGV("Creating BufLogStream id:%d tag:%s format:%d ch:%d sr:%d maxbytes:%zu", mId, mTag,
+            mFormat, mChannels, mSamplingRate, mMaxBytes);
+
+    // open file(s), info about tag, format, etc.
+    //timestamp
+    char timeStr[16];   //size 16: format %Y%m%d%H%M%S 14 chars + string null terminator
+    struct timeval tv;
+    gettimeofday(&tv, NULL);
+    struct tm tm;
+    localtime_r(&tv.tv_sec, &tm);
+    strftime(timeStr, sizeof(timeStr), "%Y%m%d%H%M%S", &tm);
+    char logPath[BUFLOG_MAX_PATH_SIZE];
+    snprintf(logPath, BUFLOG_MAX_PATH_SIZE, "%s/%s_%d_%s_%d_%d_%d.raw", BUFLOG_BASE_PATH, timeStr,
+            mId, mTag, mFormat, mChannels, mSamplingRate);
+    ALOGV("data output: %s", logPath);
+
+    mFile = fopen(logPath, "wb");
+    if (mFile != NULL) {
+        ALOGV("Success creating file at: %p", mFile);
+    } else {
+        ALOGE("Error: could not create file BufLogStream %s", strerror(errno));
+    }
+}
+
+void BufLogStream::closeStream_l() {
+    ALOGV("Closing BufLogStream id:%d tag:%s", mId, mTag);
+    if (mFile != NULL) {
+        fclose(mFile);
+        mFile = NULL;
+    }
+}
+
+BufLogStream::~BufLogStream() {
+    ALOGV("Destroying BufLogStream id:%d tag:%s", mId, mTag);
+    android::Mutex::Autolock autoLock(mLock);
+    closeStream_l();
+}
+
+size_t BufLogStream::write(const void *buf, size_t size) {
+
+    size_t bytes = 0;
+    if (!mPaused && mFile != NULL) {
+        if (size > 0 && buf != NULL) {
+            android::Mutex::Autolock autoLock(mLock);
+            if (mMaxBytes > 0) {
+                size = MIN(size, mMaxBytes - mByteCount);
+            }
+            bytes = fwrite(buf, 1, size, mFile);
+            mByteCount += bytes;
+            if (mMaxBytes > 0 && mMaxBytes == mByteCount) {
+                closeStream_l();
+            }
+        }
+        ALOGV("wrote %zu/%zu bytes to BufLogStream %d tag:%s. Total Bytes: %zu", bytes, size, mId,
+                mTag, mByteCount);
+    } else {
+        ALOGV("Warning: trying to write to %s BufLogStream id:%d tag:%s",
+                mPaused ? "paused" : "closed", mId, mTag);
+    }
+    return bytes;
+}
+
+bool BufLogStream::setPause(bool pause) {
+    bool old = mPaused;
+    mPaused = pause;
+    return old;
+}
+
+void BufLogStream::finalize() {
+    android::Mutex::Autolock autoLock(mLock);
+    closeStream_l();
+}
diff --git a/services/audioflinger/BufLog.h b/services/audioflinger/BufLog.h
new file mode 100644
index 0000000..1b402f4
--- /dev/null
+++ b/services/audioflinger/BufLog.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_BUFLOG_H
+#define ANDROID_AUDIO_BUFLOG_H
+
+/*
+ * BUFLOG creates up to BUFLOG_MAXSTREAMS simultaneous streams [0:15] of audio buffer data
+ * and saves them to disk. The files are stored in the path specified in BUFLOG_BASE_PATH and
+ * are named following this format:
+ *   YYYYMMDDHHMMSS_id_format_channels_samplingrate.raw
+ *
+ * Normally we strip BUFLOG dumps from release builds.
+ * You can modify this (for example with "#define BUFLOG_NDEBUG 0"
+ * at the top of your source file) to change that behavior.
+ *
+ * usage:
+ * - Add this to the top of the source file you want to debug:
+ *   #define BUFLOG_NDEBUG 0
+ *   #include "BufLog.h"
+ *
+ * - dump an audio buffer
+ *  BUFLOG(buff_id, buff_tag, format, channels, sampling_rate, max_bytes, buff_pointer, buff_size);
+ *
+ *  buff_id:   int [0:15]   buffer id. If a buffer doesn't exist, it is created the first time.
+ *  buff_tag:  char*        string tag used on stream filename and logs
+ *  format:    int          Audio format (audio_format_t see audio.h)
+ *  channels:  int          Channel Count
+ *  sampling_rate:  int     Sampling rate in Hz. e.g. 8000, 16000, 44100, 48000, etc
+ *  max_bytes: int [0 or positive number]
+ *                          Maximum size of the file (in bytes) to be output.
+ *                          If the value is 0, no limit.
+ *  buff_pointer: void *    Pointer to audio buffer.
+ *  buff_size:  int         Size (in bytes) of the current audio buffer to be stored.
+ *
+ *
+ *  Example usage:
+ *    int format       = mConfig.outputCfg.format;
+ *    int channels     = audio_channel_count_from_out_mask(mConfig.outputCfg.channels);
+ *    int samplingRate = mConfig.outputCfg.samplingRate;
+ *    int frameCount   = mConfig.outputCfg.buffer.frameCount;
+ *    int frameSize    = audio_bytes_per_sample((audio_format_t)format) * channels;
+ *    int buffSize     = frameCount * frameSize;
+ *    long maxBytes = 10 * samplingRate * frameSize; //10 seconds max
+ *  BUFLOG(11, "loudnes_enhancer_out", format, channels, samplingRate, maxBytes,
+ *                               mConfig.outputCfg.buffer.raw, buffSize);
+ *
+ *  Other macros:
+ *  BUFLOG_EXISTS       returns true if there is an instance of BufLog
+ *
+ *  BUFLOG_RESET        If an instance of BufLog exists, it stops the capture and closes all
+ *                      streams.
+ *                      If a new call to BUFLOG(..) is done, new streams are created.
+ */
+
+#ifndef BUFLOG_NDEBUG
+#ifdef NDEBUG
+#define BUFLOG_NDEBUG 1
+#else
+#define BUFLOG_NDEBUG 0
+#endif
+#endif
+
+/*
+ * Simplified macro to send a buffer.
+ */
+#ifndef BUFLOG
+#define __BUFLOG(STREAMID, TAG, FORMAT, CHANNELS, SAMPLINGRATE, MAXBYTES, BUF, SIZE) \
+    BufLogSingleton::instance()->write(STREAMID, TAG, FORMAT, CHANNELS, SAMPLINGRATE, MAXBYTES, \
+            BUF, SIZE)
+#if BUFLOG_NDEBUG
+#define BUFLOG(STREAMID, TAG, FORMAT, CHANNELS, SAMPLINGRATE, MAXBYTES, BUF, SIZE) \
+    do { if (0) {  } } while (0)
+#else
+#define BUFLOG(STREAMID, TAG, FORMAT, CHANNELS, SAMPLINGRATE, MAXBYTES, BUF, SIZE) \
+    __BUFLOG(STREAMID, TAG, FORMAT, CHANNELS, SAMPLINGRATE, MAXBYTES, BUF, SIZE)
+#endif
+#endif
+
+#ifndef BUFLOG_EXISTS
+#define BUFLOG_EXISTS BufLogSingleton::instanceExists()
+#endif
+
+#ifndef BUFLOG_RESET
+#define BUFLOG_RESET do { if (BufLogSingleton::instanceExists()) { \
+    BufLogSingleton::instance()->reset(); } } while (0)
+#endif
+
+
+#include <stdint.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <utils/Mutex.h>
+
+//BufLog configuration
+#define BUFLOGSTREAM_MAX_TAGSIZE    32
+#define BUFLOG_BASE_PATH            "/data/misc/audioserver"
+#define BUFLOG_MAX_PATH_SIZE        300
+
+class BufLogStream {
+public:
+    BufLogStream(unsigned int id,
+            const char *tag,
+            unsigned int format,
+            unsigned int channels,
+            unsigned int samplingRate,
+            size_t maxBytes);
+    ~BufLogStream();
+
+    // write buffer to stream
+    //  buf:  pointer to buffer
+    //  size: number of bytes to write
+    size_t          write(const void *buf, size_t size);
+
+    // pause/resume stream
+    //  pause: true = paused, false = not paused
+    //  return value: previous state of stream (paused or not).
+    bool            setPause(bool pause);
+
+    // will stop the stream and close any open file
+    // the stream can't be reopened. Instead, a new stream (and file) should be created.
+    void            finalize();
+
+private:
+    bool                mPaused;
+    const unsigned int  mId;
+    char                mTag[BUFLOGSTREAM_MAX_TAGSIZE + 1];
+    const unsigned int  mFormat;
+    const unsigned int  mChannels;
+    const unsigned int  mSamplingRate;
+    const size_t        mMaxBytes;
+    size_t              mByteCount;
+    FILE                *mFile;
+    mutable android::Mutex mLock;
+
+    void            closeStream_l();
+};
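
A hedged sketch of driving BufLogStream directly through the interface declared
above; the id, tag, PCM parameters, and the pcmBuffer/pcmBytes variables are
illustrative, and the BUFLOG() macro path via BufLog is the intended entry point.

    // Hypothetical direct use of one capture stream.
    BufLogStream stream(0 /* id */, "mixer_out", AUDIO_FORMAT_PCM_16_BIT,
                        2 /* channels */, 48000 /* Hz */, 0 /* no byte limit */);
    size_t written = stream.write(pcmBuffer, pcmBytes);   // append one buffer to the file
    bool wasPaused = stream.setPause(true);                // suspend capture
    stream.setPause(wasPaused);                            // restore the previous state
    stream.finalize();                                     // close the file; the stream can't be reused
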
+
+
+class BufLog {
+public:
+    BufLog();
+    ~BufLog();
+    BufLog(BufLog const&) {};
+
+    //  streamid:      int [0:BUFLOG_MAXSTREAMS-1]   buffer id.
+    //                  If a buffer doesn't exist, it is created the first time it is referenced
+    //  tag:           char*  string tag used on stream filename and logs
+    //  format:        int Audio format (audio_format_t see audio.h)
+    //  channels:      int          Channel Count
+    //  samplingRate:  int Sampling rate in Hz. e.g. 8000, 16000, 44100, 48000, etc
+    //  maxBytes:      int [0 or positive number]
+    //                  Maximum size of the file (in bytes) to be output.
+    //                  If the value is 0, no limit.
+    //  size:          int Size (in bytes) of the current audio buffer to be written.
+    //  buf:           void *    Pointer to audio buffer.
+    size_t          write(int streamid,
+                        const char *tag,
+                        int format,
+                        int channels,
+                        int samplingRate,
+                        size_t maxBytes,
+                        const void *buf,
+                        size_t size);
+
+    // reset will stop and close all active streams, thus finalizing any open file.
+    //  New streams will be created if write() is called again.
+    void            reset();
+
+protected:
+    static const unsigned int BUFLOG_MAXSTREAMS = 16;
+    BufLogStream    *mStreams[BUFLOG_MAXSTREAMS];
+    mutable android::Mutex mLock;
+};
+
+class BufLogSingleton {
+public:
+    static BufLog   *instance();
+    static bool     instanceExists();
+
+private:
+    static void     initOnce();
+    static BufLog   *mInstance;
+};
+
+#endif //ANDROID_AUDIO_BUFLOG_H
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 09e7fd8..343ad25 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -21,10 +21,11 @@
 
 #include "Configuration.h"
 #include <utils/Log.h>
-#include <audio_effects/effect_visualizer.h>
 #include <audio_utils/primitives.h>
 #include <private/media/AudioEffectShared.h>
-#include <media/EffectsFactoryApi.h>
+#include <media/audiohal/EffectHalInterface.h>
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+#include <system/audio_effects/effect_visualizer.h>
 
 #include "AudioFlinger.h"
 #include "ServiceUtilities.h"
@@ -65,7 +66,6 @@
       mThread(thread), mChain(chain), mId(id), mSessionId(sessionId),
       mDescriptor(*desc),
       // mConfig is set by configure() and not used before then
-      mEffectInterface(NULL),
       mStatus(NO_INIT), mState(IDLE),
       // mMaxDisableWaitCnt is set by configure() and not used before then
       // mDisableWaitCnt is set by process() and updateState() and not used before then
@@ -76,7 +76,15 @@
     int lStatus;
 
     // create effect engine from effect factory
-    mStatus = EffectCreate(&desc->uuid, sessionId, thread->id(), &mEffectInterface);
+    mStatus = -ENODEV;
+    sp<AudioFlinger> audioFlinger = mAudioFlinger.promote();
+    if (audioFlinger != 0) {
+        sp<EffectsFactoryHalInterface> effectsFactory = audioFlinger->getEffectsFactory();
+        if (effectsFactory != 0) {
+            mStatus = effectsFactory->createEffect(
+                    &desc->uuid, sessionId, thread->id(), &mEffectInterface);
+        }
+    }
 
     if (mStatus != NO_ERROR) {
         return;
@@ -88,22 +96,22 @@
     }
 
     setOffloaded(thread->type() == ThreadBase::OFFLOAD, thread->id());
+    ALOGV("Constructor success name %s, Interface %p", mDescriptor.name, mEffectInterface.get());
 
-    ALOGV("Constructor success name %s, Interface %p", mDescriptor.name, mEffectInterface);
     return;
 Error:
-    EffectRelease(mEffectInterface);
-    mEffectInterface = NULL;
+    mEffectInterface.clear();
     ALOGV("Constructor Error %d", mStatus);
 }
 
 AudioFlinger::EffectModule::~EffectModule()
 {
     ALOGV("Destructor %p", this);
-    if (mEffectInterface != NULL) {
+    if (mEffectInterface != 0) {
         ALOGW("EffectModule %p destructor called with unreleased interface", this);
         release_l();
     }
+
 }
 
 status_t AudioFlinger::EffectModule::addHandle(EffectHandle *handle)
@@ -180,6 +188,7 @@
     // this object is released which can happen after next process is called.
     if (mHandles.size() == 0 && !mPinned) {
         mState = DESTROYED;
+        mEffectInterface->close();
     }
 
     return mHandles.size();
@@ -267,9 +276,7 @@
 {
     Mutex::Autolock _l(mLock);
 
-    if (mState == DESTROYED || mEffectInterface == NULL ||
-            mConfig.inputCfg.buffer.raw == NULL ||
-            mConfig.outputCfg.buffer.raw == NULL) {
+    if (mState == DESTROYED || mEffectInterface == 0 || mInBuffer == 0 || mOutBuffer == 0) {
         return;
     }
 
@@ -283,9 +290,7 @@
         int ret;
         if (isProcessImplemented()) {
             // do the actual processing in the effect engine
-            ret = (*mEffectInterface)->process(mEffectInterface,
-                                                   &mConfig.inputCfg.buffer,
-                                                   &mConfig.outputCfg.buffer);
+            ret = mEffectInterface->process();
         } else {
             if (mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw) {
                 size_t frameCnt = mConfig.inputCfg.buffer.frameCount * FCC_2;  //always stereo here
@@ -331,10 +336,10 @@
 
 void AudioFlinger::EffectModule::reset_l()
 {
-    if (mStatus != NO_ERROR || mEffectInterface == NULL) {
+    if (mStatus != NO_ERROR || mEffectInterface == 0) {
         return;
     }
-    (*mEffectInterface)->command(mEffectInterface, EFFECT_CMD_RESET, 0, NULL, 0, NULL);
+    mEffectInterface->command(EFFECT_CMD_RESET, 0, NULL, 0, NULL);
 }
 
 status_t AudioFlinger::EffectModule::configure()
@@ -344,7 +349,7 @@
     uint32_t size;
     audio_channel_mask_t channelMask;
 
-    if (mEffectInterface == NULL) {
+    if (mEffectInterface == 0) {
         status = NO_INIT;
         goto exit;
     }
@@ -403,18 +408,23 @@
     mConfig.outputCfg.mask = EFFECT_CONFIG_ALL;
     mConfig.inputCfg.buffer.frameCount = thread->frameCount();
     mConfig.outputCfg.buffer.frameCount = mConfig.inputCfg.buffer.frameCount;
+    if (mInBuffer != 0) {
+        mInBuffer->setFrameCount(mConfig.inputCfg.buffer.frameCount);
+    }
+    if (mOutBuffer != 0) {
+        mOutBuffer->setFrameCount(mConfig.outputCfg.buffer.frameCount);
+    }
 
     ALOGV("configure() %p thread %p buffer %p framecount %zu",
             this, thread.get(), mConfig.inputCfg.buffer.raw, mConfig.inputCfg.buffer.frameCount);
 
     status_t cmdStatus;
     size = sizeof(int);
-    status = (*mEffectInterface)->command(mEffectInterface,
-                                                   EFFECT_CMD_SET_CONFIG,
-                                                   sizeof(effect_config_t),
-                                                   &mConfig,
-                                                   &size,
-                                                   &cmdStatus);
+    status = mEffectInterface->command(EFFECT_CMD_SET_CONFIG,
+                                       sizeof(effect_config_t),
+                                       &mConfig,
+                                       &size,
+                                       &cmdStatus);
     if (status == 0) {
         status = cmdStatus;
     }
@@ -436,12 +446,11 @@
         }
 
         *((int32_t *)p->data + 1)= latency;
-        (*mEffectInterface)->command(mEffectInterface,
-                                     EFFECT_CMD_SET_PARAM,
-                                     sizeof(effect_param_t) + 8,
-                                     &buf32,
-                                     &size,
-                                     &cmdStatus);
+        mEffectInterface->command(EFFECT_CMD_SET_PARAM,
+                                  sizeof(effect_param_t) + 8,
+                                  &buf32,
+                                  &size,
+                                  &cmdStatus);
     }
 
     mMaxDisableWaitCnt = (MAX_DISABLE_TIME_MS * mConfig.outputCfg.samplingRate) /
@@ -455,17 +464,16 @@
 status_t AudioFlinger::EffectModule::init()
 {
     Mutex::Autolock _l(mLock);
-    if (mEffectInterface == NULL) {
+    if (mEffectInterface == 0) {
         return NO_INIT;
     }
     status_t cmdStatus;
     uint32_t size = sizeof(status_t);
-    status_t status = (*mEffectInterface)->command(mEffectInterface,
-                                                   EFFECT_CMD_INIT,
-                                                   0,
-                                                   NULL,
-                                                   &size,
-                                                   &cmdStatus);
+    status_t status = mEffectInterface->command(EFFECT_CMD_INIT,
+                                                0,
+                                                NULL,
+                                                &size,
+                                                &cmdStatus);
     if (status == 0) {
         status = cmdStatus;
     }
@@ -478,9 +486,10 @@
          (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
         sp<ThreadBase> thread = mThread.promote();
         if (thread != 0) {
-            audio_stream_t *stream = thread->stream();
-            if (stream != NULL) {
-                stream->add_audio_effect(stream, mEffectInterface);
+            sp<StreamHalInterface> stream = thread->stream();
+            if (stream != 0) {
+                status_t result = stream->addEffect(mEffectInterface);
+                ALOGE_IF(result != OK, "Error when adding effect: %d", result);
             }
         }
     }
@@ -506,7 +515,7 @@
 
 status_t AudioFlinger::EffectModule::start_l()
 {
-    if (mEffectInterface == NULL) {
+    if (mEffectInterface == 0) {
         return NO_INIT;
     }
     if (mStatus != NO_ERROR) {
@@ -514,12 +523,11 @@
     }
     status_t cmdStatus;
     uint32_t size = sizeof(status_t);
-    status_t status = (*mEffectInterface)->command(mEffectInterface,
-                                                   EFFECT_CMD_ENABLE,
-                                                   0,
-                                                   NULL,
-                                                   &size,
-                                                   &cmdStatus);
+    status_t status = mEffectInterface->command(EFFECT_CMD_ENABLE,
+                                                0,
+                                                NULL,
+                                                &size,
+                                                &cmdStatus);
     if (status == 0) {
         status = cmdStatus;
     }
@@ -537,7 +545,7 @@
 
 status_t AudioFlinger::EffectModule::stop_l()
 {
-    if (mEffectInterface == NULL) {
+    if (mEffectInterface == 0) {
         return NO_INIT;
     }
     if (mStatus != NO_ERROR) {
@@ -545,12 +553,11 @@
     }
     status_t cmdStatus = NO_ERROR;
     uint32_t size = sizeof(status_t);
-    status_t status = (*mEffectInterface)->command(mEffectInterface,
-                                                   EFFECT_CMD_DISABLE,
-                                                   0,
-                                                   NULL,
-                                                   &size,
-                                                   &cmdStatus);
+    status_t status = mEffectInterface->command(EFFECT_CMD_DISABLE,
+                                                0,
+                                                NULL,
+                                                &size,
+                                                &cmdStatus);
     if (status == NO_ERROR) {
         status = cmdStatus;
     }
@@ -563,11 +570,11 @@
 // must be called with EffectChain::mLock held
 void AudioFlinger::EffectModule::release_l()
 {
-    if (mEffectInterface != NULL) {
+    if (mEffectInterface != 0) {
         remove_effect_from_hal_l();
         // release effect engine
-        EffectRelease(mEffectInterface);
-        mEffectInterface = NULL;
+        mEffectInterface->close();
+        mEffectInterface.clear();
     }
 }
 
@@ -577,9 +584,10 @@
              (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
         sp<ThreadBase> thread = mThread.promote();
         if (thread != 0) {
-            audio_stream_t *stream = thread->stream();
-            if (stream != NULL) {
-                stream->remove_audio_effect(stream, mEffectInterface);
+            sp<StreamHalInterface> stream = thread->stream();
+            if (stream != 0) {
+                status_t result = stream->removeEffect(mEffectInterface);
+                ALOGE_IF(result != OK, "Error when removing effect: %d", result);
             }
         }
     }
@@ -600,25 +608,26 @@
                                              void *pReplyData)
 {
     Mutex::Autolock _l(mLock);
-    ALOGVV("command(), cmdCode: %d, mEffectInterface: %p", cmdCode, mEffectInterface);
+    ALOGVV("command(), cmdCode: %d, mEffectInterface: %p", cmdCode, mEffectInterface.get());
 
-    if (mState == DESTROYED || mEffectInterface == NULL) {
+    if (mState == DESTROYED || mEffectInterface == 0) {
         return NO_INIT;
     }
     if (mStatus != NO_ERROR) {
         return mStatus;
     }
     if (cmdCode == EFFECT_CMD_GET_PARAM &&
-            (*replySize < sizeof(effect_param_t) ||
-                    ((effect_param_t *)pCmdData)->psize > *replySize - sizeof(effect_param_t))) {
-        android_errorWriteLog(0x534e4554, "29251553");
-        return -EINVAL;
-    }
-    if (cmdCode == EFFECT_CMD_GET_PARAM &&
             (sizeof(effect_param_t) > cmdSize ||
                     ((effect_param_t *)pCmdData)->psize > cmdSize
                                                           - sizeof(effect_param_t))) {
         android_errorWriteLog(0x534e4554, "32438594");
+        android_errorWriteLog(0x534e4554, "33003822");
+        return -EINVAL;
+    }
+    if (cmdCode == EFFECT_CMD_GET_PARAM &&
+            (*replySize < sizeof(effect_param_t) ||
+                    ((effect_param_t *)pCmdData)->psize > *replySize - sizeof(effect_param_t))) {
+        android_errorWriteLog(0x534e4554, "29251553");
         return -EINVAL;
     }
     if (cmdCode == EFFECT_CMD_GET_PARAM &&
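
The reordered EFFECT_CMD_GET_PARAM checks above all validate that the client-supplied
effect_param_t::psize fits inside the buffer it arrived in; a hypothetical helper
expressing the same bound (not part of this change):

    // Returns true if an effect_param_t header plus its psize bytes fit in bufSize.
    static inline bool paramSizeFits(const effect_param_t *p, size_t bufSize) {
        return bufSize >= sizeof(effect_param_t) &&
               p->psize <= bufSize - sizeof(effect_param_t);
    }
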
@@ -653,12 +662,11 @@
         android_errorWriteLog(0x534e4554, "30204301");
         return -EINVAL;
     }
-    status_t status = (*mEffectInterface)->command(mEffectInterface,
-                                                   cmdCode,
-                                                   cmdSize,
-                                                   pCmdData,
-                                                   replySize,
-                                                   pReplyData);
+    status_t status = mEffectInterface->command(cmdCode,
+                                                cmdSize,
+                                                pCmdData,
+                                                replySize,
+                                                pReplyData);
     if (cmdCode != EFFECT_CMD_GET_PARAM && status == NO_ERROR) {
         uint32_t size = (replySize == NULL) ? 0 : *replySize;
         for (size_t i = 1; i < mHandles.size(); i++) {
@@ -760,6 +768,28 @@
     }
 }
 
+void AudioFlinger::EffectModule::setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
+    if (buffer != 0) {
+        mConfig.inputCfg.buffer.raw = buffer->audioBuffer()->raw;
+        buffer->setFrameCount(mConfig.inputCfg.buffer.frameCount);
+    } else {
+        mConfig.inputCfg.buffer.raw = NULL;
+    }
+    mInBuffer = buffer;
+    mEffectInterface->setInBuffer(buffer);
+}
+
+void AudioFlinger::EffectModule::setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
+    if (buffer != 0) {
+        mConfig.outputCfg.buffer.raw = buffer->audioBuffer()->raw;
+        buffer->setFrameCount(mConfig.outputCfg.buffer.frameCount);
+    } else {
+        mConfig.outputCfg.buffer.raw = NULL;
+    }
+    mOutBuffer = buffer;
+    mEffectInterface->setOutBuffer(buffer);
+}
+
 status_t AudioFlinger::EffectModule::setVolume(uint32_t *left, uint32_t *right, bool controller)
 {
     Mutex::Autolock _l(mLock);
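
The setInBuffer()/setOutBuffer() pair added above mirrors the HAL buffer's raw pointer
into the legacy effect_config_t and keeps frame counts in sync. A hedged sketch of how a
caller might attach an allocated HAL buffer, using only the EffectBufferHalInterface calls
visible in this change; frameCount, the 16-bit sample size, and the in-place choice are
assumptions:

    sp<EffectBufferHalInterface> halBuffer;
    status_t status = EffectBufferHalInterface::allocate(
            frameCount * sizeof(int16_t), &halBuffer);   // frameCount is hypothetical
    if (status == OK) {
        effect->setInBuffer(halBuffer);    // also sets mConfig.inputCfg.buffer.raw
        effect->setOutBuffer(halBuffer);   // in-place processing in this sketch
        effect->configure();               // propagates the thread's frame count to the buffer
    }
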
@@ -780,12 +810,11 @@
         if (controller) {
             pVolume = volume;
         }
-        status = (*mEffectInterface)->command(mEffectInterface,
-                                              EFFECT_CMD_SET_VOLUME,
-                                              size,
-                                              volume,
-                                              &size,
-                                              pVolume);
+        status = mEffectInterface->command(EFFECT_CMD_SET_VOLUME,
+                                           size,
+                                           volume,
+                                           &size,
+                                           pVolume);
         if (controller && status == NO_ERROR && size == sizeof(volume)) {
             *left = volume[0];
             *right = volume[1];
@@ -810,12 +839,11 @@
         uint32_t size = sizeof(status_t);
         uint32_t cmd = audio_is_output_devices(device) ? EFFECT_CMD_SET_DEVICE :
                             EFFECT_CMD_SET_INPUT_DEVICE;
-        status = (*mEffectInterface)->command(mEffectInterface,
-                                              cmd,
-                                              sizeof(uint32_t),
-                                              &device,
-                                              &size,
-                                              &cmdStatus);
+        status = mEffectInterface->command(cmd,
+                                           sizeof(uint32_t),
+                                           &device,
+                                           &size,
+                                           &cmdStatus);
     }
     return status;
 }
@@ -830,12 +858,11 @@
     if ((mDescriptor.flags & EFFECT_FLAG_AUDIO_MODE_MASK) == EFFECT_FLAG_AUDIO_MODE_IND) {
         status_t cmdStatus;
         uint32_t size = sizeof(status_t);
-        status = (*mEffectInterface)->command(mEffectInterface,
-                                              EFFECT_CMD_SET_AUDIO_MODE,
-                                              sizeof(audio_mode_t),
-                                              &mode,
-                                              &size,
-                                              &cmdStatus);
+        status = mEffectInterface->command(EFFECT_CMD_SET_AUDIO_MODE,
+                                           sizeof(audio_mode_t),
+                                           &mode,
+                                           &size,
+                                           &cmdStatus);
         if (status == NO_ERROR) {
             status = cmdStatus;
         }
@@ -852,12 +879,11 @@
     status_t status = NO_ERROR;
     if ((mDescriptor.flags & EFFECT_FLAG_AUDIO_SOURCE_MASK) == EFFECT_FLAG_AUDIO_SOURCE_IND) {
         uint32_t size = 0;
-        status = (*mEffectInterface)->command(mEffectInterface,
-                                              EFFECT_CMD_SET_AUDIO_SOURCE,
-                                              sizeof(audio_source_t),
-                                              &source,
-                                              &size,
-                                              NULL);
+        status = mEffectInterface->command(EFFECT_CMD_SET_AUDIO_SOURCE,
+                                           sizeof(audio_source_t),
+                                           &source,
+                                           &size,
+                                           NULL);
     }
     return status;
 }
@@ -903,12 +929,11 @@
 
         cmd.isOffload = offloaded;
         cmd.ioHandle = io;
-        status = (*mEffectInterface)->command(mEffectInterface,
-                                              EFFECT_CMD_OFFLOAD,
-                                              sizeof(effect_offload_param_t),
-                                              &cmd,
-                                              &size,
-                                              &cmdStatus);
+        status = mEffectInterface->command(EFFECT_CMD_OFFLOAD,
+                                           sizeof(effect_offload_param_t),
+                                           &cmd,
+                                           &size,
+                                           &cmdStatus);
         if (status == NO_ERROR) {
             status = cmdStatus;
         }
@@ -1051,7 +1076,7 @@
 
     result.append("\t\tSession Status State Engine:\n");
     snprintf(buffer, SIZE, "\t\t%05d   %03d    %03d   %p\n",
-            mSessionId, mStatus, mState, mEffectInterface);
+            mSessionId, mStatus, mState, mEffectInterface.get());
     result.append(buffer);
 
     result.append("\t\tDescriptor:\n");
@@ -1087,7 +1112,7 @@
             mConfig.inputCfg.samplingRate,
             mConfig.inputCfg.channels,
             mConfig.inputCfg.format,
-            formatToString((audio_format_t)mConfig.inputCfg.format),
+            formatToString((audio_format_t)mConfig.inputCfg.format).c_str(),
             mConfig.inputCfg.buffer.raw);
     result.append(buffer);
 
@@ -1099,7 +1124,7 @@
             mConfig.outputCfg.samplingRate,
             mConfig.outputCfg.channels,
             mConfig.outputCfg.format,
-            formatToString((audio_format_t)mConfig.outputCfg.format));
+            formatToString((audio_format_t)mConfig.outputCfg.format).c_str());
     result.append(buffer);
 
     snprintf(buffer, SIZE, "\t\t%zu Clients:\n", mHandles.size());
@@ -1485,7 +1510,7 @@
 AudioFlinger::EffectChain::EffectChain(ThreadBase *thread,
                                         audio_session_t sessionId)
     : mThread(thread), mSessionId(sessionId), mActiveTrackCnt(0), mTrackCnt(0), mTailBufferCount(0),
-      mOwnInBuffer(false), mVolumeCtrlIdx(-1), mLeftVolume(UINT_MAX), mRightVolume(UINT_MAX),
+      mVolumeCtrlIdx(-1), mLeftVolume(UINT_MAX), mRightVolume(UINT_MAX),
       mNewLeftVolume(UINT_MAX), mNewRightVolume(UINT_MAX)
 {
     mStrategy = AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
@@ -1498,9 +1523,6 @@
 
 AudioFlinger::EffectChain::~EffectChain()
 {
-    if (mOwnInBuffer) {
-        delete mInBuffer;
-    }
 }
 
 // getEffectFromDesc_l() must be called with ThreadBase::mLock held
@@ -1565,7 +1587,8 @@
     // (4 bytes frame size)
     const size_t frameSize =
             audio_bytes_per_sample(AUDIO_FORMAT_PCM_16_BIT) * min(FCC_2, thread->channelCount());
-    memset(mInBuffer, 0, thread->frameCount() * frameSize);
+    memset(mInBuffer->audioBuffer()->raw, 0, thread->frameCount() * frameSize);
+    mInBuffer->commit();
 }
 
 // Must be called with EffectChain::mLock locked
@@ -1603,9 +1626,15 @@
 
     size_t size = mEffects.size();
     if (doProcess) {
+        // Only the input and output buffers of the chain can be external,
+        // and 'update' / 'commit' do nothing for allocated buffers, so
+        // no other buffers need to be considered here.
+        mInBuffer->update();
+        mOutBuffer->update();
         for (size_t i = 0; i < size; i++) {
             mEffects[i]->process();
         }
+        mOutBuffer->commit();
     }
     bool doResetVolume = false;
     for (size_t i = 0; i < size; i++) {
@@ -1665,9 +1694,11 @@
         // accumulation stage. Saturation is done in EffectModule::process() before
         // calling the process in effect engine
         size_t numSamples = thread->frameCount();
-        int32_t *buffer = new int32_t[numSamples];
-        memset(buffer, 0, numSamples * sizeof(int32_t));
-        effect->setInBuffer((int16_t *)buffer);
+        sp<EffectBufferHalInterface> halBuffer;
+        status_t result = EffectBufferHalInterface::allocate(
+                numSamples * sizeof(int32_t), &halBuffer);
+        if (result != OK) return result;
+        effect->setInBuffer(halBuffer);
         // auxiliary effects output samples to chain input buffer for further processing
         // by insert effects
         effect->setOutBuffer(mInBuffer);
@@ -1778,9 +1809,7 @@
                 mEffects[i]->release_l();
             }
 
-            if (type == EFFECT_FLAG_TYPE_AUXILIARY) {
-                delete[] effect->inBuffer();
-            } else {
+            if (type != EFFECT_FLAG_TYPE_AUXILIARY) {
                 if (i == size - 1 && i != 0) {
                     mEffects[i - 1]->setOutBuffer(mOutBuffer);
                     mEffects[i - 1]->configure();
@@ -1925,8 +1954,8 @@
 
         result.append("\tIn buffer   Out buffer   Active tracks:\n");
         snprintf(buffer, SIZE, "\t%p  %p   %d\n",
-                mInBuffer,
-                mOutBuffer,
+                mInBuffer->audioBuffer(),
+                mOutBuffer->audioBuffer(),
                 mActiveTrackCnt);
         result.append(buffer);
         write(fd, result.string(), result.size());
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 8fe0b96..0755c52 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -25,10 +25,11 @@
 // state changes or resource modifications. Always respect the following order
 // if multiple mutexes must be acquired to avoid cross deadlock:
 // AudioFlinger -> ThreadBase -> EffectChain -> EffectModule
+// AudioHandle -> ThreadBase -> EffectChain -> EffectModule
 // In addition, methods that lock the AudioPolicyService mutex (getOutputForEffect(),
-// startOutput()...) should never be called with AudioFlinger or Threadbase mutex locked
-// to avoid cross deadlock with other clients calling AudioPolicyService methods that in turn
-// call AudioFlinger thus locking the same mutexes in the reverse order.
+// startOutput(), getInputForAttr(), releaseInput()...) should never be called with AudioFlinger or
+// Threadbase mutex locked to avoid cross deadlock with other clients calling AudioPolicyService
+// methods that in turn call AudioFlinger thus locking the same mutexes in the reverse order.
 
 // The EffectModule class is a wrapper object controlling the effect engine implementation
 // in the effect library. It prevents concurrent calls to process() and command() functions
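
The ordering rule above is a locking protocol; a small hypothetical sketch that respects
it. The mutex accessors and the work function are assumptions, only the acquisition order
comes from the comment:

    void doWorkOnEffect(const sp<ThreadBase>& thread,
                        const sp<EffectChain>& chain,
                        const sp<EffectModule>& effect) {
        Mutex::Autolock threadLock(thread->mutex());  // ThreadBase first (accessor assumed)
        Mutex::Autolock chainLock(chain->mutex());    // then EffectChain (accessor assumed)
        effect->doWork_l();                           // hypothetical; EffectModule locks last, internally
        // Never call AudioPolicyService methods (getOutputForEffect(), startOutput(),
        // getInputForAttr(), releaseInput(), ...) while holding these mutexes.
    }
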
@@ -85,10 +86,14 @@
     bool isEnabled() const;
     bool isProcessEnabled() const;
 
-    void        setInBuffer(int16_t *buffer) { mConfig.inputCfg.buffer.s16 = buffer; }
-    int16_t     *inBuffer() { return mConfig.inputCfg.buffer.s16; }
-    void        setOutBuffer(int16_t *buffer) { mConfig.outputCfg.buffer.s16 = buffer; }
-    int16_t     *outBuffer() { return mConfig.outputCfg.buffer.s16; }
+    void        setInBuffer(const sp<EffectBufferHalInterface>& buffer);
+    int16_t     *inBuffer() const {
+        return mInBuffer != 0 ? reinterpret_cast<int16_t*>(mInBuffer->ptr()) : NULL;
+    }
+    void        setOutBuffer(const sp<EffectBufferHalInterface>& buffer);
+    int16_t     *outBuffer() const {
+        return mOutBuffer != 0 ? reinterpret_cast<int16_t*>(mOutBuffer->ptr()) : NULL;
+    }
     void        setChain(const wp<EffectChain>& chain) { mChain = chain; }
     void        setThread(const wp<ThreadBase>& thread) { mThread = thread; }
     const wp<ThreadBase>& thread() { return mThread; }
@@ -151,7 +156,9 @@
     const audio_session_t mSessionId; // audio session ID
     const effect_descriptor_t mDescriptor;// effect descriptor received from effect engine
     effect_config_t     mConfig;    // input and output audio configuration
-    effect_handle_t  mEffectInterface; // Effect module C API
+    sp<EffectHalInterface> mEffectInterface; // Effect module HAL
+    sp<EffectBufferHalInterface> mInBuffer;  // Buffers for interacting with HAL
+    sp<EffectBufferHalInterface> mOutBuffer;
     status_t            mStatus;    // initialization status
     effect_state        mState;     // current activation state
     Vector<EffectHandle *> mHandles;    // list of client handles
@@ -300,18 +307,17 @@
     void setMode_l(audio_mode_t mode);
     void setAudioSource_l(audio_source_t source);
 
-    void setInBuffer(int16_t *buffer, bool ownsBuffer = false) {
+    void setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
         mInBuffer = buffer;
-        mOwnInBuffer = ownsBuffer;
     }
     int16_t *inBuffer() const {
-        return mInBuffer;
+        return mInBuffer != 0 ? reinterpret_cast<int16_t*>(mInBuffer->ptr()) : NULL;
     }
-    void setOutBuffer(int16_t *buffer) {
+    void setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
         mOutBuffer = buffer;
     }
     int16_t *outBuffer() const {
-        return mOutBuffer;
+        return mOutBuffer != 0 ? reinterpret_cast<int16_t*>(mOutBuffer->ptr()) : NULL;
     }
 
     void incTrackCnt() { android_atomic_inc(&mTrackCnt); }
@@ -393,8 +399,8 @@
     mutable  Mutex mLock;        // mutex protecting effect list
              Vector< sp<EffectModule> > mEffects; // list of effect modules
              audio_session_t mSessionId; // audio session ID
-             int16_t *mInBuffer;         // chain input buffer
-             int16_t *mOutBuffer;        // chain output buffer
+             sp<EffectBufferHalInterface> mInBuffer;  // chain input buffer
+             sp<EffectBufferHalInterface> mOutBuffer; // chain output buffer
 
     // 'volatile' here means these are accessed with atomic operations instead of mutex
     volatile int32_t mActiveTrackCnt;    // number of active tracks connected
@@ -402,7 +408,6 @@
 
              int32_t mTailBufferCount;   // current effect tail buffer count
              int32_t mMaxTailBuffers;    // maximum effect tail buffers
-             bool mOwnInBuffer;          // true if the chain owns its input buffer
              int mVolumeCtrlIdx;         // index of insert effect having control over volume
              uint32_t mLeftVolume;       // previous volume on left channel
              uint32_t mRightVolume;      // previous volume on right channel
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 93f7ce5..7182f32 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -39,7 +39,7 @@
 #endif
 #include <audio_utils/conversion.h>
 #include <audio_utils/format.h>
-#include "AudioMixer.h"
+#include <media/AudioMixer.h>
 #include "FastMixer.h"
 
 namespace android {
diff --git a/services/audioflinger/FastThreadDumpState.cpp b/services/audioflinger/FastThreadDumpState.cpp
index 9df5c4c..964a725 100644
--- a/services/audioflinger/FastThreadDumpState.cpp
+++ b/services/audioflinger/FastThreadDumpState.cpp
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include <audio_utils/roundup.h>
 #include "FastThreadDumpState.h"
 
 namespace android {
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index bee17fd..591a49e 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -202,9 +202,9 @@
                     if (hwModule != AUDIO_MODULE_HANDLE_NONE) {
                         ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(hwModule);
                         if (index >= 0) {
-                            audio_hw_device_t *hwDevice =
+                            sp<DeviceHalInterface> hwDevice =
                                     audioflinger->mAudioHwDevs.valueAt(index)->hwDevice();
-                            hwDevice->release_audio_patch(hwDevice, halHandle);
+                            hwDevice->releaseAudioPatch(halHandle);
                         }
                     }
                 }
@@ -247,11 +247,11 @@
             // - special patch request with 2 sources (reuse one existing output mix) OR
             // - Device to device AND
             //    - source HW module != destination HW module OR
-            //    - audio HAL version < 3.0
+            //    - audio HAL does not support audio patches creation
             if ((patch->num_sources == 2) ||
                 ((patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) &&
                  ((patch->sinks[0].ext.device.hw_module != srcModule) ||
-                  (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0)))) {
+                  !audioHwDevice->supportsAudioPatches()))) {
                 if (patch->num_sources == 2) {
                     if (patch->sources[1].type != AUDIO_PORT_TYPE_MIX ||
                             (patch->num_sinks != 0 && patch->sinks[0].ext.device.hw_module !=
@@ -339,18 +339,13 @@
                     }
                     status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
                 } else {
-                    if (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0) {
-                        status = INVALID_OPERATION;
-                        goto exit;
-                    }
-
-                    audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
-                    status = hwDevice->create_audio_patch(hwDevice,
-                                                           patch->num_sources,
-                                                           patch->sources,
-                                                           patch->num_sinks,
-                                                           patch->sinks,
-                                                           &halHandle);
+                    sp<DeviceHalInterface> hwDevice = audioHwDevice->hwDevice();
+                    status = hwDevice->createAudioPatch(patch->num_sources,
+                                                        patch->sources,
+                                                        patch->num_sinks,
+                                                        patch->sinks,
+                                                        &halHandle);
+                    if (status == INVALID_OPERATION) goto exit;
                 }
             }
         } break;
@@ -388,7 +383,7 @@
             }
             if (thread == audioflinger->primaryPlaybackThread_l()) {
                 AudioParameter param = AudioParameter();
-                param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), (int)type);
+                param.addInt(String8(AudioParameter::keyRouting), (int)type);
 
                 audioflinger->broacastParametersToRecordThreads_l(param.toString());
             }
@@ -619,12 +614,8 @@
                 status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
             } else {
                 AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
-                if (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0) {
-                    status = INVALID_OPERATION;
-                    break;
-                }
-                audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
-                status = hwDevice->release_audio_patch(hwDevice, removedPatch->mHalHandle);
+                sp<DeviceHalInterface> hwDevice = audioHwDevice->hwDevice();
+                status = hwDevice->releaseAudioPatch(removedPatch->mHalHandle);
             }
         } break;
         case AUDIO_PORT_TYPE_MIX: {
@@ -687,13 +678,7 @@
     }
 
     AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
-    if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-        audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
-        return hwDevice->set_audio_port_config(hwDevice, config);
-    } else {
-        return INVALID_OPERATION;
-    }
-    return NO_ERROR;
+    return audioHwDevice->hwDevice()->setAudioPortConfig(config);
 }
 
 } // namespace android
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 5601bde..27e4627 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -32,9 +32,10 @@
                                 void *buffer,
                                 const sp<IMemory>& sharedBuffer,
                                 audio_session_t sessionId,
-                                int uid,
+                                uid_t uid,
                                 audio_output_flags_t flags,
-                                track_type type);
+                                track_type type,
+                                audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
     virtual             ~Track();
     virtual status_t    initCheck() const;
 
@@ -166,7 +167,7 @@
                                         // 'volatile' means accessed without lock or
                                         // barrier, but is read/written atomically
     bool                mIsInvalid; // non-resettable latch, set by invalidate()
-    AudioTrackServerProxy*  mAudioTrackServerProxy;
+    sp<AudioTrackServerProxy>  mAudioTrackServerProxy;
     bool                mResumeToStopping; // track was paused in stopping state.
     bool                mFlushHwPending; // track requests for thread flush
     audio_output_flags_t mFlags;
@@ -188,7 +189,7 @@
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
                                 size_t frameCount,
-                                int uid);
+                                uid_t uid);
     virtual             ~OutputTrack();
 
     virtual status_t    start(AudioSystem::sync_event_t event =
@@ -214,8 +215,8 @@
     Vector < Buffer* >          mBufferQueue;
     AudioBufferProvider::Buffer mOutBuffer;
     bool                        mActive;
-    DuplicatingThread* const mSourceThread; // for waitTimeMs() in write()
-    AudioTrackClientProxy*      mClientProxy;
+    DuplicatingThread* const    mSourceThread; // for waitTimeMs() in write()
+    sp<AudioTrackClientProxy>   mClientProxy;
 };  // end of OutputTrack
 
 // playback track, used by PatchPanel
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 123e033..848e531 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -30,9 +30,10 @@
                                 size_t frameCount,
                                 void *buffer,
                                 audio_session_t sessionId,
-                                int uid,
+                                uid_t uid,
                                 audio_input_flags_t flags,
-                                track_type type);
+                                track_type type,
+                                audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
     virtual             ~RecordTrack();
     virtual status_t    initCheck() const;
 
diff --git a/services/audioflinger/SpdifStreamOut.cpp b/services/audioflinger/SpdifStreamOut.cpp
index 004a068..a44ab2a 100644
--- a/services/audioflinger/SpdifStreamOut.cpp
+++ b/services/audioflinger/SpdifStreamOut.cpp
@@ -17,13 +17,12 @@
 
 #define LOG_TAG "AudioFlinger"
 //#define LOG_NDEBUG 0
-#include <hardware/audio.h>
+#include <system/audio.h>
 #include <utils/Log.h>
 
 #include <audio_utils/spdif/SPDIFEncoder.h>
 
 #include "AudioHwDevice.h"
-#include "AudioStreamOut.h"
 #include "SpdifStreamOut.h"
 
 namespace android {
diff --git a/services/audioflinger/SpdifStreamOut.h b/services/audioflinger/SpdifStreamOut.h
index c870250..fc9bb6e 100644
--- a/services/audioflinger/SpdifStreamOut.h
+++ b/services/audioflinger/SpdifStreamOut.h
@@ -23,9 +23,7 @@
 
 #include <system/audio.h>
 
-#include "AudioHwDevice.h"
 #include "AudioStreamOut.h"
-#include "SpdifStreamOut.h"
 
 #include <audio_utils/spdif/SPDIFEncoder.h>
 
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 4e17f12..b1ede30 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -29,17 +29,20 @@
 #include <cutils/properties.h>
 #include <media/AudioParameter.h>
 #include <media/AudioResamplerPublic.h>
+#include <media/RecordBufferConverter.h>
+#include <media/TypeConverter.h>
 #include <utils/Log.h>
 #include <utils/Trace.h>
 
 #include <private/media/AudioTrackShared.h>
-#include <hardware/audio.h>
-#include <audio_effects/effect_ns.h>
-#include <audio_effects/effect_aec.h>
+#include <private/android_filesystem_config.h>
 #include <audio_utils/conversion.h>
 #include <audio_utils/primitives.h>
 #include <audio_utils/format.h>
 #include <audio_utils/minifloat.h>
+#include <system/audio_effects/effect_ns.h>
+#include <system/audio_effects/effect_aec.h>
+#include <system/audio.h>
 
 // NBAIO implementations
 #include <media/nbaio/AudioStreamInSource.h>
@@ -54,8 +57,6 @@
 #include <powermanager/PowerManager.h>
 
 #include "AudioFlinger.h"
-#include "AudioMixer.h"
-#include "BufferProviders.h"
 #include "FastMixer.h"
 #include "FastCapture.h"
 #include "ServiceUtilities.h"
@@ -143,6 +144,12 @@
 // Direct output thread minimum sleep time in idle or active(underrun) state
 static const nsecs_t kDirectMinSleepTimeUs = 10000;
 
+// The universal constant for the ubiquitous 20ms value. The value of 20ms seems to provide a good
+// balance between power consumption and latency, and allows threads to be scheduled reliably
+// by the CFS scheduler.
+// FIXME Express other hardcoded references to 20ms with references to this constant and move
+// it appropriately.
+#define FMS_20 20
 
 // Whether to use fast mixer
 static const enum {
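
A worked example of the constant, assuming common mix-thread sample rates (illustrative
numbers, not part of this change): 20 ms is 48,000 * 20 / 1000 = 960 frames at 48 kHz and
882 frames at 44.1 kHz.

    // Hypothetical helper expressing the same arithmetic.
    static inline size_t framesFor20Ms(uint32_t sampleRateHz) {
        return (sampleRateHz * FMS_20) / 1000;  // FMS_20 is in milliseconds
    }
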
@@ -442,168 +449,28 @@
     }
 }
 
-String8 devicesToString(audio_devices_t devices)
+std::string devicesToString(audio_devices_t devices)
 {
-    static const struct mapping {
-        audio_devices_t mDevices;
-        const char *    mString;
-    } mappingsOut[] = {
-        {AUDIO_DEVICE_OUT_EARPIECE,         "EARPIECE"},
-        {AUDIO_DEVICE_OUT_SPEAKER,          "SPEAKER"},
-        {AUDIO_DEVICE_OUT_WIRED_HEADSET,    "WIRED_HEADSET"},
-        {AUDIO_DEVICE_OUT_WIRED_HEADPHONE,  "WIRED_HEADPHONE"},
-        {AUDIO_DEVICE_OUT_BLUETOOTH_SCO,    "BLUETOOTH_SCO"},
-        {AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET,    "BLUETOOTH_SCO_HEADSET"},
-        {AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT,     "BLUETOOTH_SCO_CARKIT"},
-        {AUDIO_DEVICE_OUT_BLUETOOTH_A2DP,           "BLUETOOTH_A2DP"},
-        {AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES,"BLUETOOTH_A2DP_HEADPHONES"},
-        {AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER,   "BLUETOOTH_A2DP_SPEAKER"},
-        {AUDIO_DEVICE_OUT_AUX_DIGITAL,      "AUX_DIGITAL"},
-        {AUDIO_DEVICE_OUT_HDMI,             "HDMI"},
-        {AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET,"ANLG_DOCK_HEADSET"},
-        {AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET,"DGTL_DOCK_HEADSET"},
-        {AUDIO_DEVICE_OUT_USB_ACCESSORY,    "USB_ACCESSORY"},
-        {AUDIO_DEVICE_OUT_USB_DEVICE,       "USB_DEVICE"},
-        {AUDIO_DEVICE_OUT_TELEPHONY_TX,     "TELEPHONY_TX"},
-        {AUDIO_DEVICE_OUT_LINE,             "LINE"},
-        {AUDIO_DEVICE_OUT_HDMI_ARC,         "HDMI_ARC"},
-        {AUDIO_DEVICE_OUT_SPDIF,            "SPDIF"},
-        {AUDIO_DEVICE_OUT_FM,               "FM"},
-        {AUDIO_DEVICE_OUT_AUX_LINE,         "AUX_LINE"},
-        {AUDIO_DEVICE_OUT_SPEAKER_SAFE,     "SPEAKER_SAFE"},
-        {AUDIO_DEVICE_OUT_IP,               "IP"},
-        {AUDIO_DEVICE_OUT_BUS,              "BUS"},
-        {AUDIO_DEVICE_NONE,                 "NONE"},       // must be last
-    }, mappingsIn[] = {
-        {AUDIO_DEVICE_IN_COMMUNICATION,     "COMMUNICATION"},
-        {AUDIO_DEVICE_IN_AMBIENT,           "AMBIENT"},
-        {AUDIO_DEVICE_IN_BUILTIN_MIC,       "BUILTIN_MIC"},
-        {AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET, "BLUETOOTH_SCO_HEADSET"},
-        {AUDIO_DEVICE_IN_WIRED_HEADSET,     "WIRED_HEADSET"},
-        {AUDIO_DEVICE_IN_AUX_DIGITAL,       "AUX_DIGITAL"},
-        {AUDIO_DEVICE_IN_VOICE_CALL,        "VOICE_CALL"},
-        {AUDIO_DEVICE_IN_TELEPHONY_RX,      "TELEPHONY_RX"},
-        {AUDIO_DEVICE_IN_BACK_MIC,          "BACK_MIC"},
-        {AUDIO_DEVICE_IN_REMOTE_SUBMIX,     "REMOTE_SUBMIX"},
-        {AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET, "ANLG_DOCK_HEADSET"},
-        {AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET, "DGTL_DOCK_HEADSET"},
-        {AUDIO_DEVICE_IN_USB_ACCESSORY,     "USB_ACCESSORY"},
-        {AUDIO_DEVICE_IN_USB_DEVICE,        "USB_DEVICE"},
-        {AUDIO_DEVICE_IN_FM_TUNER,          "FM_TUNER"},
-        {AUDIO_DEVICE_IN_TV_TUNER,          "TV_TUNER"},
-        {AUDIO_DEVICE_IN_LINE,              "LINE"},
-        {AUDIO_DEVICE_IN_SPDIF,             "SPDIF"},
-        {AUDIO_DEVICE_IN_BLUETOOTH_A2DP,    "BLUETOOTH_A2DP"},
-        {AUDIO_DEVICE_IN_LOOPBACK,          "LOOPBACK"},
-        {AUDIO_DEVICE_IN_IP,                "IP"},
-        {AUDIO_DEVICE_IN_BUS,               "BUS"},
-        {AUDIO_DEVICE_NONE,                 "NONE"},        // must be last
-    };
-    String8 result;
-    audio_devices_t allDevices = AUDIO_DEVICE_NONE;
-    const mapping *entry;
+    std::string result;
     if (devices & AUDIO_DEVICE_BIT_IN) {
-        devices &= ~AUDIO_DEVICE_BIT_IN;
-        entry = mappingsIn;
+        InputDeviceConverter::maskToString(devices, result);
     } else {
-        entry = mappingsOut;
-    }
-    for ( ; entry->mDevices != AUDIO_DEVICE_NONE; entry++) {
-        allDevices = (audio_devices_t) (allDevices | entry->mDevices);
-        if (devices & entry->mDevices) {
-            if (!result.isEmpty()) {
-                result.append("|");
-            }
-            result.append(entry->mString);
-        }
-    }
-    if (devices & ~allDevices) {
-        if (!result.isEmpty()) {
-            result.append("|");
-        }
-        result.appendFormat("0x%X", devices & ~allDevices);
-    }
-    if (result.isEmpty()) {
-        result.append(entry->mString);
+        OutputDeviceConverter::maskToString(devices, result);
     }
     return result;
 }
 
-String8 inputFlagsToString(audio_input_flags_t flags)
+std::string inputFlagsToString(audio_input_flags_t flags)
 {
-    static const struct mapping {
-        audio_input_flags_t     mFlag;
-        const char *            mString;
-    } mappings[] = {
-        {AUDIO_INPUT_FLAG_FAST,             "FAST"},
-        {AUDIO_INPUT_FLAG_HW_HOTWORD,       "HW_HOTWORD"},
-        {AUDIO_INPUT_FLAG_RAW,              "RAW"},
-        {AUDIO_INPUT_FLAG_SYNC,             "SYNC"},
-        {AUDIO_INPUT_FLAG_NONE,             "NONE"},        // must be last
-    };
-    String8 result;
-    audio_input_flags_t allFlags = AUDIO_INPUT_FLAG_NONE;
-    const mapping *entry;
-    for (entry = mappings; entry->mFlag != AUDIO_INPUT_FLAG_NONE; entry++) {
-        allFlags = (audio_input_flags_t) (allFlags | entry->mFlag);
-        if (flags & entry->mFlag) {
-            if (!result.isEmpty()) {
-                result.append("|");
-            }
-            result.append(entry->mString);
-        }
-    }
-    if (flags & ~allFlags) {
-        if (!result.isEmpty()) {
-            result.append("|");
-        }
-        result.appendFormat("0x%X", flags & ~allFlags);
-    }
-    if (result.isEmpty()) {
-        result.append(entry->mString);
-    }
+    std::string result;
+    InputFlagConverter::maskToString(flags, result);
     return result;
 }
 
-String8 outputFlagsToString(audio_output_flags_t flags)
+std::string outputFlagsToString(audio_output_flags_t flags)
 {
-    static const struct mapping {
-        audio_output_flags_t    mFlag;
-        const char *            mString;
-    } mappings[] = {
-        {AUDIO_OUTPUT_FLAG_DIRECT,          "DIRECT"},
-        {AUDIO_OUTPUT_FLAG_PRIMARY,         "PRIMARY"},
-        {AUDIO_OUTPUT_FLAG_FAST,            "FAST"},
-        {AUDIO_OUTPUT_FLAG_DEEP_BUFFER,     "DEEP_BUFFER"},
-        {AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD,"COMPRESS_OFFLOAD"},
-        {AUDIO_OUTPUT_FLAG_NON_BLOCKING,    "NON_BLOCKING"},
-        {AUDIO_OUTPUT_FLAG_HW_AV_SYNC,      "HW_AV_SYNC"},
-        {AUDIO_OUTPUT_FLAG_RAW,             "RAW"},
-        {AUDIO_OUTPUT_FLAG_SYNC,            "SYNC"},
-        {AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO, "IEC958_NONAUDIO"},
-        {AUDIO_OUTPUT_FLAG_NONE,            "NONE"},        // must be last
-    };
-    String8 result;
-    audio_output_flags_t allFlags = AUDIO_OUTPUT_FLAG_NONE;
-    const mapping *entry;
-    for (entry = mappings; entry->mFlag != AUDIO_OUTPUT_FLAG_NONE; entry++) {
-        allFlags = (audio_output_flags_t) (allFlags | entry->mFlag);
-        if (flags & entry->mFlag) {
-            if (!result.isEmpty()) {
-                result.append("|");
-            }
-            result.append(entry->mString);
-        }
-    }
-    if (flags & ~allFlags) {
-        if (!result.isEmpty()) {
-            result.append("|");
-        }
-        result.appendFormat("0x%X", flags & ~allFlags);
-    }
-    if (result.isEmpty()) {
-        result.append(entry->mString);
-    }
+    std::string result;
+    OutputFlagConverter::maskToString(flags, result);
     return result;
 }
 
@@ -640,8 +507,7 @@
         mAudioSource(AUDIO_SOURCE_DEFAULT), mId(id),
         // mName will be set by concrete (non-virtual) subclass
         mDeathRecipient(new PMDeathRecipient(this)),
-        mSystemReady(systemReady),
-        mNotifiedBatteryStart(false)
+        mSystemReady(systemReady)
 {
     memset(&mPatch, 0, sizeof(struct audio_patch));
 }
@@ -763,12 +629,12 @@
     sp<ConfigEvent> configEvent;
     AudioParameter param(keyValuePair);
     int value;
-    if (param.getInt(String8(AUDIO_PARAMETER_MONO_OUTPUT), value) == NO_ERROR) {
+    if (param.getInt(String8(AudioParameter::keyMonoOutput), value) == NO_ERROR) {
         setMasterMono_l(value != 0);
         if (param.size() == 1) {
             return NO_ERROR; // should be a solo parameter - we don't pass down
         }
-        param.remove(String8(AUDIO_PARAMETER_MONO_OUTPUT));
+        param.remove(String8(AudioParameter::keyMonoOutput));
         configEvent = new SetParameterConfigEvent(param.toString());
     } else {
         configEvent = new SetParameterConfigEvent(keyValuePair);
@@ -937,12 +803,12 @@
     dprintf(fd, "  Standby: %s\n", mStandby ? "yes" : "no");
     dprintf(fd, "  Sample rate: %u Hz\n", mSampleRate);
     dprintf(fd, "  HAL frame count: %zu\n", mFrameCount);
-    dprintf(fd, "  HAL format: 0x%x (%s)\n", mHALFormat, formatToString(mHALFormat));
+    dprintf(fd, "  HAL format: 0x%x (%s)\n", mHALFormat, formatToString(mHALFormat).c_str());
     dprintf(fd, "  HAL buffer size: %zu bytes\n", mBufferSize);
     dprintf(fd, "  Channel count: %u\n", mChannelCount);
     dprintf(fd, "  Channel mask: 0x%08x (%s)\n", mChannelMask,
             channelMaskToString(mChannelMask, mType != RECORD).string());
-    dprintf(fd, "  Processing format: 0x%x (%s)\n", mFormat, formatToString(mFormat));
+    dprintf(fd, "  Processing format: 0x%x (%s)\n", mFormat, formatToString(mFormat).c_str());
     dprintf(fd, "  Processing frame size: %zu bytes\n", mFrameSize);
     dprintf(fd, "  Pending config events:");
     size_t numConfig = mConfigEvents.size();
@@ -955,8 +821,8 @@
     } else {
         dprintf(fd, " none\n");
     }
-    dprintf(fd, "  Output device: %#x (%s)\n", mOutDevice, devicesToString(mOutDevice).string());
-    dprintf(fd, "  Input device: %#x (%s)\n", mInDevice, devicesToString(mInDevice).string());
+    dprintf(fd, "  Output device: %#x (%s)\n", mOutDevice, devicesToString(mOutDevice).c_str());
+    dprintf(fd, "  Input device: %#x (%s)\n", mInDevice, devicesToString(mInDevice).c_str());
     dprintf(fd, "  Audio source: %d (%s)\n", mAudioSource, sourceToString(mAudioSource));
 
     if (locked) {
@@ -982,10 +848,10 @@
     }
 }
 
-void AudioFlinger::ThreadBase::acquireWakeLock(int uid)
+void AudioFlinger::ThreadBase::acquireWakeLock()
 {
     Mutex::Autolock _l(mLock);
-    acquireWakeLock_l(uid);
+    acquireWakeLock_l();
 }
 
 String16 AudioFlinger::ThreadBase::getWakeLockTag()
@@ -1007,36 +873,23 @@
     }
 }
 
-void AudioFlinger::ThreadBase::acquireWakeLock_l(int uid)
+void AudioFlinger::ThreadBase::acquireWakeLock_l()
 {
     getPowerManager_l();
     if (mPowerManager != 0) {
         sp<IBinder> binder = new BBinder();
-        status_t status;
-        if (uid >= 0) {
-            status = mPowerManager->acquireWakeLockWithUid(POWERMANAGER_PARTIAL_WAKE_LOCK,
-                    binder,
-                    getWakeLockTag(),
-                    String16("audioserver"),
-                    uid,
-                    true /* FIXME force oneway contrary to .aidl */);
-        } else {
-            status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
+        // Uses AID_AUDIOSERVER for wakelock.  updateWakeLockUids_l() updates with client uids.
+        status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
                     binder,
                     getWakeLockTag(),
                     String16("audioserver"),
                     true /* FIXME force oneway contrary to .aidl */);
-        }
         if (status == NO_ERROR) {
             mWakeLockToken = binder;
         }
         ALOGV("acquireWakeLock_l() %s status %d", mThreadName, status);
     }
 
-    if (!mNotifiedBatteryStart) {
-        BatteryNotifier::getInstance().noteStartAudio();
-        mNotifiedBatteryStart = true;
-    }
     gBoottime.acquire(mWakeLockToken);
     mTimestamp.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_BOOTTIME] =
             gBoottime.getBoottimeOffset();
@@ -1059,16 +912,6 @@
         }
         mWakeLockToken.clear();
     }
-
-    if (mNotifiedBatteryStart) {
-        BatteryNotifier::getInstance().noteStopAudio();
-        mNotifiedBatteryStart = false;
-    }
-}
-
-void AudioFlinger::ThreadBase::updateWakeLockUids(const SortedVector<int> &uids) {
-    Mutex::Autolock _l(mLock);
-    updateWakeLockUids_l(uids);
 }
 
 void AudioFlinger::ThreadBase::getPowerManager_l() {
@@ -1085,8 +928,17 @@
     }
 }
 
-void AudioFlinger::ThreadBase::updateWakeLockUids_l(const SortedVector<int> &uids) {
+void AudioFlinger::ThreadBase::updateWakeLockUids_l(const SortedVector<uid_t> &uids) {
     getPowerManager_l();
+
+#if !LOG_NDEBUG
+    std::stringstream s;
+    for (uid_t uid : uids) {
+        s << uid << " ";
+    }
+    ALOGD("updateWakeLockUids_l %s uids:%s", mThreadName, s.str().c_str());
+#endif
+
     if (mWakeLockToken == NULL) { // token may be NULL if AudioFlinger::systemReady() not called.
         if (mSystemReady) {
             ALOGE("no wake lock to update, but system ready!");
@@ -1096,10 +948,10 @@
         return;
     }
     if (mPowerManager != 0) {
-        sp<IBinder> binder = new BBinder();
-        status_t status;
-        status = mPowerManager->updateWakeLockUids(mWakeLockToken, uids.size(), uids.array(),
-                    true /* FIXME force oneway contrary to .aidl */);
+        std::vector<int> uidsAsInt(uids.begin(), uids.end()); // powermanager expects uids as ints
+        status_t status = mPowerManager->updateWakeLockUids(
+                mWakeLockToken, uidsAsInt.size(), uidsAsInt.data(),
+                true /* FIXME force oneway contrary to .aidl */);
         ALOGV("updateWakeLockUids_l() %s status %d", mThreadName, status);
     }
 }
@@ -1412,6 +1264,7 @@
     bool chainCreated = false;
     bool effectCreated = false;
     bool effectRegistered = false;
+    audio_unique_id_t effectId = AUDIO_UNIQUE_ID_USE_UNSPECIFIED;
 
     lStatus = initCheck();
     if (lStatus != NO_ERROR) {
@@ -1445,15 +1298,16 @@
         ALOGV("createEffect_l() got effect %p on chain %p", effect.get(), chain.get());
 
         if (effect == 0) {
-            audio_unique_id_t id = mAudioFlinger->nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT);
+            effectId = mAudioFlinger->nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT);
             // Check CPU and memory usage
-            lStatus = AudioSystem::registerEffect(desc, mId, chain->strategy(), sessionId, id);
+            lStatus = AudioSystem::registerEffect(
+                    desc, mId, chain->strategy(), sessionId, effectId);
             if (lStatus != NO_ERROR) {
                 goto Exit;
             }
             effectRegistered = true;
             // create a new effect module if none present in the chain
-            lStatus = chain->createEffect_l(effect, this, desc, id, sessionId, pinned);
+            lStatus = chain->createEffect_l(effect, this, desc, effectId, sessionId, pinned);
             if (lStatus != NO_ERROR) {
                 goto Exit;
             }
@@ -1482,7 +1336,7 @@
             chain->removeEffect_l(effect);
         }
         if (effectRegistered) {
-            AudioSystem::unregisterEffect(effect->id());
+            AudioSystem::unregisterEffect(effectId);
         }
         if (chainCreated) {
             removeEffectChain_l(chain);
@@ -1669,6 +1523,73 @@
     mPendingConfigEvents.clear();
 }
 
+template <typename T>
+ssize_t AudioFlinger::ThreadBase::ActiveTracks<T>::add(const sp<T> &track) {
+    ssize_t index = mActiveTracks.indexOf(track);
+    if (index >= 0) {
+        ALOGW("ActiveTracks<T>::add track %p already there", track.get());
+        return index;
+    }
+    mActiveTracksGeneration++;
+    mLatestActiveTrack = track;
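+    // mBatteryCounter[uid].second is the current number of active tracks for this uid;
+    // BatteryNotifier start/stop notifications are deferred to updatePowerState().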
+    ++mBatteryCounter[track->uid()].second;
+    return mActiveTracks.add(track);
+}
+
+template <typename T>
+ssize_t AudioFlinger::ThreadBase::ActiveTracks<T>::remove(const sp<T> &track) {
+    ssize_t index = mActiveTracks.remove(track);
+    if (index < 0) {
+        ALOGW("ActiveTracks<T>::remove nonexistent track %p", track.get());
+        return index;
+    }
+    mActiveTracksGeneration++;
+    --mBatteryCounter[track->uid()].second;
+    // mLatestActiveTrack is not cleared even if it is the same as track.
+    return index;
+}
+
+template <typename T>
+void AudioFlinger::ThreadBase::ActiveTracks<T>::clear() {
+    for (const sp<T> &track : mActiveTracks) {
+        BatteryNotifier::getInstance().noteStopAudio(track->uid());
+    }
+    mLastActiveTracksGeneration = mActiveTracksGeneration;
+    mActiveTracks.clear();
+    mLatestActiveTrack.clear();
+    mBatteryCounter.clear();
+}
+
+template <typename T>
+void AudioFlinger::ThreadBase::ActiveTracks<T>::updatePowerState(
+        sp<ThreadBase> thread, bool force) {
+    // Pushes the ActiveTracks client uids to the thread wakelock.
+    if (mActiveTracksGeneration != mLastActiveTracksGeneration || force) {
+        thread->updateWakeLockUids_l(getWakeLockUids());
+        mLastActiveTracksGeneration = mActiveTracksGeneration;
+    }
+
+    // Updates BatteryNotifier uids
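+    // For each uid, "previous" is the count last reported to BatteryNotifier and
+    // "current" is the live count maintained by add()/remove().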
+    for (auto it = mBatteryCounter.begin(); it != mBatteryCounter.end();) {
+        const uid_t uid = it->first;
+        ssize_t &previous = it->second.first;
+        ssize_t &current = it->second.second;
+        if (current > 0) {
+            if (previous == 0) {
+                BatteryNotifier::getInstance().noteStartAudio(uid);
+            }
+            previous = current;
+            ++it;
+        } else if (current == 0) {
+            if (previous > 0) {
+                BatteryNotifier::getInstance().noteStopAudio(uid);
+            }
+            it = mBatteryCounter.erase(it); // std::map::erase() returns the next iterator; other entries stay valid.
+        } else /* (current < 0) */ {
+            LOG_ALWAYS_FATAL("negative battery count %zd", current);
+        }
+    }
+}
 
 // ----------------------------------------------------------------------------
 //      Playback
@@ -1695,7 +1616,6 @@
         mSuspended(0), mBytesWritten(0),
         mFramesWritten(0),
         mSuspendedFrames(0),
-        mActiveTracksGeneration(0),
         // mStreamTypes[] initialized in constructor body
         mOutput(output),
         mLastWriteTime(-1), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
@@ -1815,8 +1735,8 @@
         result.append(buffer);
         Track::appendDumpHeader(result);
         for (size_t i = 0; i < numactive; ++i) {
-            sp<Track> track = mActiveTracks[i].promote();
-            if (track != 0 && mTracks.indexOf(track) < 0) {
+            sp<Track> track = mActiveTracks[i];
+            if (mTracks.indexOf(track) < 0) {
                 track->dump(buffer, SIZE, true);
                 result.append(buffer);
             }
@@ -1846,8 +1766,8 @@
     dprintf(fd, "  Standby delay ns=%lld\n", (long long)mStandbyDelayNs);
     AudioStreamOut *output = mOutput;
     audio_output_flags_t flags = output != NULL ? output->flags : AUDIO_OUTPUT_FLAG_NONE;
-    String8 flagsAsString = outputFlagsToString(flags);
-    dprintf(fd, "  AudioStreamOut: %p flags %#x (%s)\n", output, flags, flagsAsString.string());
+    dprintf(fd, "  AudioStreamOut: %p flags %#x (%s)\n",
+            output, flags, outputFlagsToString(flags).c_str());
     dprintf(fd, "  Frames written: %lld\n", (long long)mFramesWritten);
     dprintf(fd, "  Suspended frames: %lld\n", (long long)mSuspendedFrames);
     if (mPipeSink.get() != nullptr) {
@@ -1855,7 +1775,7 @@
     }
     if (output != nullptr) {
         dprintf(fd, "  Hal stream dump:\n");
-        (void)output->stream->common.dump(&output->stream->common, fd);
+        (void)output->stream->dump(fd);
     }
 }
 
@@ -1872,7 +1792,8 @@
     ALOGV("  preExit()");
     // FIXME this is using hard-coded strings but in the future, this functionality will be
     //       converted to use audio HAL extensions required to support tunneling
-    mOutput->stream->common.set_parameters(&mOutput->stream->common, "exiting=1");
+    status_t result = mOutput->stream->setParameters(String8("exiting=1"));
+    ALOGE_IF(result != OK, "Error when setting parameters on exit: %d", result);
 }
 
 // PlaybackThread::createTrack_l() must be called with AudioFlinger::mLock held
@@ -1887,8 +1808,9 @@
         audio_session_t sessionId,
         audio_output_flags_t *flags,
         pid_t tid,
-        int uid,
-        status_t *status)
+        uid_t uid,
+        status_t *status,
+        audio_port_handle_t portId)
 {
     size_t frameCount = *pFrameCount;
     sp<Track> track;
@@ -1979,7 +1901,12 @@
             && audio_has_proportional_frames(format) && sharedBuffer == 0) {
         // this must match AudioTrack.cpp calculateMinFrameCount().
         // TODO: Move to a common library
-        uint32_t latencyMs = mOutput->stream->get_latency(mOutput->stream);
+        uint32_t latencyMs = 0;
+        lStatus = mOutput->stream->getLatency(&latencyMs);
+        if (lStatus != OK) {
+            ALOGE("Error when retrieving output stream latency: %d", lStatus);
+            goto Exit;
+        }
         uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate);
         if (minBufCount < 2) {
             minBufCount = 2;
@@ -2065,7 +1992,7 @@
 
         track = new Track(this, client, streamType, sampleRate, format,
                           channelMask, frameCount, NULL, sharedBuffer,
-                          sessionId, uid, *flags, TrackBase::TYPE_DEFAULT);
+                          sessionId, uid, *flags, TrackBase::TYPE_DEFAULT, portId);
 
         lStatus = track != 0 ? track->initCheck() : (status_t) NO_MEMORY;
         if (lStatus != NO_ERROR) {
@@ -2110,11 +2037,11 @@
 }
 uint32_t AudioFlinger::PlaybackThread::latency_l() const
 {
-    if (initCheck() == NO_ERROR) {
-        return correctLatency_l(mOutput->stream->get_latency(mOutput->stream));
-    } else {
-        return 0;
+    uint32_t latency;
+    if (initCheck() == NO_ERROR && mOutput->stream->getLatency(&latency) == OK) {
+        return correctLatency_l(latency);
     }
+    return 0;
 }
 
 void AudioFlinger::PlaybackThread::setMasterVolume(float value)
@@ -2213,9 +2140,6 @@
         track->mResetDone = false;
         track->mPresentationCompleteFrames = 0;
         mActiveTracks.add(track);
-        mWakeLockUids.add(track->uid());
-        mActiveTracksGeneration++;
-        mLatestActiveTrack = track;
         sp<EffectChain> chain = getEffectChain_l(track->sessionId());
         if (chain != 0) {
             ALOGV("addTrack_l() starting track on chain %p for session %d", chain.get(),
@@ -2288,14 +2212,11 @@
 String8 AudioFlinger::PlaybackThread::getParameters(const String8& keys)
 {
     Mutex::Autolock _l(mLock);
-    if (initCheck() != NO_ERROR) {
-        return String8();
+    String8 out_s8;
+    if (initCheck() == NO_ERROR && mOutput->stream->getParameters(keys, &out_s8) == OK) {
+        return out_s8;
     }
-
-    char *s = mOutput->stream->common.get_parameters(&mOutput->stream->common, keys.string());
-    const String8 out_s8(s);
-    free(s);
-    return out_s8;
+    return String8();
 }
 
 void AudioFlinger::PlaybackThread::ioConfigChanged(audio_io_config_event event, pid_t pid) {
@@ -2324,21 +2245,18 @@
     mAudioFlinger->ioConfigChanged(event, desc, pid);
 }
 
-void AudioFlinger::PlaybackThread::writeCallback()
+void AudioFlinger::PlaybackThread::onWriteReady()
 {
-    ALOG_ASSERT(mCallbackThread != 0);
     mCallbackThread->resetWriteBlocked();
 }
 
-void AudioFlinger::PlaybackThread::drainCallback()
+void AudioFlinger::PlaybackThread::onDrainReady()
 {
-    ALOG_ASSERT(mCallbackThread != 0);
     mCallbackThread->resetDraining();
 }
 
-void AudioFlinger::PlaybackThread::errorCallback()
+void AudioFlinger::PlaybackThread::onError()
 {
-    ALOG_ASSERT(mCallbackThread != 0);
     mCallbackThread->setAsyncError();
 }
 
@@ -2362,30 +2280,6 @@
     }
 }
 
-// static
-int AudioFlinger::PlaybackThread::asyncCallback(stream_callback_event_t event,
-                                                void *param __unused,
-                                                void *cookie)
-{
-    AudioFlinger::PlaybackThread *me = (AudioFlinger::PlaybackThread *)cookie;
-    ALOGV("asyncCallback() event %d", event);
-    switch (event) {
-    case STREAM_CBK_EVENT_WRITE_READY:
-        me->writeCallback();
-        break;
-    case STREAM_CBK_EVENT_DRAIN_READY:
-        me->drainCallback();
-        break;
-    case STREAM_CBK_EVENT_ERROR:
-        me->errorCallback();
-        break;
-    default:
-        ALOGW("asyncCallback() unknown event %d", event);
-        break;
-    }
-    return 0;
-}
-
 void AudioFlinger::PlaybackThread::readOutputParameters_l()
 {
     // unfortunately we have no way of recovering from errors here, hence the LOG_ALWAYS_FATAL
@@ -2402,7 +2296,8 @@
     mChannelCount = audio_channel_count_from_out_mask(mChannelMask);
 
     // Get actual HAL format.
-    mHALFormat = mOutput->stream->common.get_format(&mOutput->stream->common);
+    status_t result = mOutput->stream->getFormat(&mHALFormat);
+    LOG_ALWAYS_FATAL_IF(result != OK, "Error when retrieving output stream format: %d", result);
     // Get format from the shim, which will be different than the HAL format
     // if playing compressed audio over HDMI passthrough.
     mFormat = mOutput->getFormat();
@@ -2415,17 +2310,17 @@
                 mFormat);
     }
     mFrameSize = mOutput->getFrameSize();
-    mBufferSize = mOutput->stream->common.get_buffer_size(&mOutput->stream->common);
+    result = mOutput->stream->getBufferSize(&mBufferSize);
+    LOG_ALWAYS_FATAL_IF(result != OK,
+            "Error when retrieving output stream buffer size: %d", result);
     mFrameCount = mBufferSize / mFrameSize;
     if (mFrameCount & 15) {
         ALOGW("HAL output buffer size is %zu frames but AudioMixer requires multiples of 16 frames",
                 mFrameCount);
     }
 
-    if ((mOutput->flags & AUDIO_OUTPUT_FLAG_NON_BLOCKING) &&
-            (mOutput->stream->set_callback != NULL)) {
-        if (mOutput->stream->set_callback(mOutput->stream,
-                                      AudioFlinger::PlaybackThread::asyncCallback, this) == 0) {
+    if (mOutput->flags & AUDIO_OUTPUT_FLAG_NON_BLOCKING) {
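+        // The thread registers itself as the stream callback, so the stream invokes
+        // onWriteReady()/onDrainReady()/onError() directly instead of the former asyncCallback().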
+        if (mOutput->stream->setCallback(this) == OK) {
             mUseAsyncWrite = true;
             mCallbackThread = new AudioFlinger::AsyncCallbackThread(this);
         }
@@ -2433,14 +2328,15 @@
 
     mHwSupportsPause = false;
     if (mOutput->flags & AUDIO_OUTPUT_FLAG_DIRECT) {
-        if (mOutput->stream->pause != NULL) {
-            if (mOutput->stream->resume != NULL) {
+        bool supportsPause = false, supportsResume = false;
+        if (mOutput->stream->supportsPauseAndResume(&supportsPause, &supportsResume) == OK) {
+            if (supportsPause && supportsResume) {
                 mHwSupportsPause = true;
-            } else {
+            } else if (supportsPause) {
                 ALOGW("direct output implements pause but not resume");
+            } else if (supportsResume) {
+                ALOGW("direct output implements resume but not pause");
             }
-        } else if (mOutput->stream->resume != NULL) {
-            ALOGW("direct output implements resume but not pause");
         }
     }
     if (!mHwSupportsPause && mOutput->flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) {
@@ -2630,12 +2526,12 @@
 }
 
 // this method must always be called either with ThreadBase mLock held or inside the thread loop
-audio_stream_t* AudioFlinger::PlaybackThread::stream() const
+sp<StreamHalInterface> AudioFlinger::PlaybackThread::stream() const
 {
     if (mOutput == NULL) {
         return NULL;
     }
-    return &mOutput->stream->common;
+    return mOutput->stream;
 }
 
 uint32_t AudioFlinger::PlaybackThread::activeSleepTimeUs() const
@@ -2773,7 +2669,8 @@
 
 void AudioFlinger::PlaybackThread::threadLoop_drain()
 {
-    if (mOutput->stream->drain) {
+    bool supportsDrain = false;
+    if (mOutput->stream->supportsDrain(&supportsDrain) == OK && supportsDrain) {
         ALOGV("draining %s", (mMixerStatus == MIXER_DRAIN_TRACK) ? "early" : "full");
         if (mUseAsyncWrite) {
             ALOGW_IF(mDrainSequence & 1, "threadLoop_drain(): out of sequence drain request");
@@ -2781,9 +2678,8 @@
             ALOG_ASSERT(mCallbackThread != 0);
             mCallbackThread->setDraining(mDrainSequence);
         }
-        mOutput->stream->drain(mOutput->stream,
-            (mMixerStatus == MIXER_DRAIN_TRACK) ? AUDIO_DRAIN_EARLY_NOTIFY
-                                                : AUDIO_DRAIN_ALL);
+        status_t result = mOutput->stream->drain(mMixerStatus == MIXER_DRAIN_TRACK);
+        ALOGE_IF(result != OK, "Error when draining stream: %d", result);
     }
 }
 
@@ -2795,6 +2691,11 @@
             sp<Track> track = mTracks[i];
             track->invalidate();
         }
+        // Clear ActiveTracks to update BatteryNotifier in case active tracks remain.
+        // After we exit, no more track changes are sent to BatteryNotifier
+        // because that requires an active threadLoop.
+        // TODO: should we call decActiveTrackCnt() on the cleared tracks' effect chains?
+        mActiveTracks.clear();
     }
 }
 
@@ -2859,9 +2760,14 @@
 status_t AudioFlinger::PlaybackThread::addEffectChain_l(const sp<EffectChain>& chain)
 {
     audio_session_t session = chain->sessionId();
-    int16_t* buffer = reinterpret_cast<int16_t*>(mEffectBufferEnabled
-            ? mEffectBuffer : mSinkBuffer);
-    bool ownsBuffer = false;
+    sp<EffectBufferHalInterface> halInBuffer, halOutBuffer;
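+    // By default the chain processes in place on the thread's sink (or effect) buffer,
+    // so the input and output HAL buffers both mirror it; a dedicated input buffer is
+    // allocated below for chains attached to a specific session.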
+    status_t result = EffectBufferHalInterface::mirror(
+            mEffectBufferEnabled ? mEffectBuffer : mSinkBuffer,
+            mEffectBufferEnabled ? mEffectBufferSize : mSinkBufferSize,
+            &halInBuffer);
+    if (result != OK) return result;
+    halOutBuffer = halInBuffer;
+    int16_t *buffer = reinterpret_cast<int16_t*>(halInBuffer->externalData());
 
     ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
     if (session > AUDIO_SESSION_OUTPUT_MIX) {
@@ -2869,10 +2775,13 @@
         // the sink buffer as input
         if (mType != DIRECT) {
             size_t numSamples = mNormalFrameCount * mChannelCount;
-            buffer = new int16_t[numSamples];
-            memset(buffer, 0, numSamples * sizeof(int16_t));
-            ALOGV("addEffectChain_l() creating new input buffer %p session %d", buffer, session);
-            ownsBuffer = true;
+            status_t result = EffectBufferHalInterface::allocate(
+                    numSamples * sizeof(int16_t),
+                    &halInBuffer);
+            if (result != OK) return result;
+            buffer = halInBuffer->audioBuffer()->s16;
+            ALOGV("addEffectChain_l() creating new input buffer %p session %d",
+                    buffer, session);
         }
 
         // Attach all tracks with same session ID to this chain.
@@ -2887,11 +2796,7 @@
         }
 
         // indicate all active tracks in the chain
-        for (size_t i = 0 ; i < mActiveTracks.size() ; ++i) {
-            sp<Track> track = mActiveTracks[i].promote();
-            if (track == 0) {
-                continue;
-            }
+        for (const sp<Track> &track : mActiveTracks) {
             if (session == track->sessionId()) {
                 ALOGV("addEffectChain_l() activating track %p on session %d", track.get(), session);
                 chain->incActiveTrackCnt();
@@ -2899,9 +2804,8 @@
         }
     }
     chain->setThread(this);
-    chain->setInBuffer(buffer, ownsBuffer);
-    chain->setOutBuffer(reinterpret_cast<int16_t*>(mEffectBufferEnabled
-            ? mEffectBuffer : mSinkBuffer));
+    chain->setInBuffer(halInBuffer);
+    chain->setOutBuffer(halOutBuffer);
     // Effect chain for session AUDIO_SESSION_OUTPUT_STAGE is inserted at end of effect
     // chains list in order to be processed last as it contains output stage effects.
     // Effect chain for session AUDIO_SESSION_OUTPUT_MIX is inserted before
@@ -2938,11 +2842,7 @@
         if (chain == mEffectChains[i]) {
             mEffectChains.removeAt(i);
             // detach all active tracks from the chain
-            for (size_t i = 0 ; i < mActiveTracks.size() ; ++i) {
-                sp<Track> track = mActiveTracks[i].promote();
-                if (track == 0) {
-                    continue;
-                }
+            for (const sp<Track> &track : mActiveTracks) {
                 if (session == track->sessionId()) {
                     ALOGV("removeEffectChain_l(): stopping track on chain %p for session Id: %d",
                             chain.get(), session);
@@ -3019,8 +2919,6 @@
     // FIXME could this be made local to while loop?
     writeFrames = 0;
 
-    int lastGeneration = 0;
-
     cacheParameters_l();
     mSleepTimeUs = mIdleSleepTimeUs;
 
@@ -3118,10 +3016,9 @@
                     mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = mLastWriteTime == -1
                             ? systemTime() : mLastWriteTime;
                 }
-                const size_t size = mActiveTracks.size();
-                for (size_t i = 0; i < size; ++i) {
-                    sp<Track> t = mActiveTracks[i].promote();
-                    if (t != 0 && !t->isFastTrack()) {
+
+                for (const sp<Track> &t : mActiveTracks) {
+                    if (!t->isFastTrack()) {
                         t->updateTrackFrameInfo(
                                 t->mAudioTrackServerProxy->framesReleased(),
                                 mFramesWritten,
@@ -3142,8 +3039,6 @@
                 if (!keepWakeLock()) {
                     releaseWakeLock_l();
                     released = true;
-                    mWakeLockUids.clear();
-                    mActiveTracksGeneration++;
                 }
                 ALOGV("wait async completion");
                 mWaitWorkCV.wait(mLock);
@@ -3177,8 +3072,6 @@
                     }
 
                     releaseWakeLock_l();
-                    mWakeLockUids.clear();
-                    mActiveTracksGeneration++;
                     // wait until we have something to do...
                     ALOGV("%s going to sleep", myName.string());
                     mWaitWorkCV.wait(mLock);
@@ -3203,12 +3096,7 @@
             // mMixerStatusIgnoringFastTracks is also updated internally
             mMixerStatus = prepareTracks_l(&tracksToRemove);
 
-            // compare with previously applied list
-            if (lastGeneration != mActiveTracksGeneration) {
-                // update wakelock
-                updateWakeLockUids_l(mWakeLockUids);
-                lastGeneration = mActiveTracksGeneration;
-            }
+            mActiveTracks.updatePowerState(this);
 
             // prevent any changes in effect chain list and in each effect chain
             // during mixing and effect process as the audio buffers could be deleted
@@ -3426,8 +3314,6 @@
     }
 
     releaseWakeLock();
-    mWakeLockUids.clear();
-    mActiveTracksGeneration++;
 
     ALOGV("Thread %p type %d exiting", this, mType);
     return false;
@@ -3441,8 +3327,6 @@
         for (size_t i=0 ; i<count ; i++) {
             const sp<Track>& track = tracksToRemove.itemAt(i);
             mActiveTracks.remove(track);
-            mWakeLockUids.remove(track->uid());
-            mActiveTracksGeneration++;
             ALOGV("removeTracks_l removing track on session %d", track->sessionId());
             sp<EffectChain> chain = getEffectChain_l(track->sessionId());
             if (chain != 0) {
@@ -3472,11 +3356,9 @@
         }
         return status;
     }
-    if ((mType == OFFLOAD || mType == DIRECT)
-            && mOutput != NULL && mOutput->stream->get_presentation_position) {
+    if ((mType == OFFLOAD || mType == DIRECT) && mOutput != NULL) {
         uint64_t position64;
-        int ret = mOutput->getPresentationPosition(&position64, &timestamp.mTime);
-        if (ret == 0) {
+        if (mOutput->getPresentationPosition(&position64, &timestamp.mTime) == OK) {
             timestamp.mPosition = (uint32_t)position64;
             return NO_ERROR;
         }
@@ -3543,14 +3425,13 @@
     mOutDevice = type;
     mPatch = *patch;
 
-    if (mOutput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-        audio_hw_device_t *hwDevice = mOutput->audioHwDev->hwDevice();
-        status = hwDevice->create_audio_patch(hwDevice,
-                                               patch->num_sources,
-                                               patch->sources,
-                                               patch->num_sinks,
-                                               patch->sinks,
-                                               handle);
+    if (mOutput->audioHwDev->supportsAudioPatches()) {
+        sp<DeviceHalInterface> hwDevice = mOutput->audioHwDev->hwDevice();
+        status = hwDevice->createAudioPatch(patch->num_sources,
+                                            patch->sources,
+                                            patch->num_sinks,
+                                            patch->sinks,
+                                            handle);
     } else {
         char *address;
         if (strcmp(patch->sinks[0].ext.device.address, "") != 0) {
@@ -3563,9 +3444,8 @@
         }
         AudioParameter param = AudioParameter(String8(address));
         free(address);
-        param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), (int)type);
-        status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
-                param.toString().string());
+        param.addInt(String8(AudioParameter::keyRouting), (int)type);
+        status = mOutput->stream->setParameters(param.toString());
         *handle = AUDIO_PATCH_HANDLE_NONE;
     }
     if (configChanged) {
@@ -3595,14 +3475,13 @@
 
     mOutDevice = AUDIO_DEVICE_NONE;
 
-    if (mOutput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-        audio_hw_device_t *hwDevice = mOutput->audioHwDev->hwDevice();
-        status = hwDevice->release_audio_patch(hwDevice, handle);
+    if (mOutput->audioHwDev->supportsAudioPatches()) {
+        sp<DeviceHalInterface> hwDevice = mOutput->audioHwDev->hwDevice();
+        status = hwDevice->releaseAudioPatch(handle);
     } else {
         AudioParameter param;
-        param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), 0);
-        status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
-                param.toString().string());
+        param.addInt(String8(AudioParameter::keyRouting), 0);
+        status = mOutput->stream->setParameters(param.toString());
     }
     return status;
 }
@@ -3778,6 +3657,7 @@
         mFastMixer->run("FastMixer", PRIORITY_URGENT_AUDIO);
         pid_t tid = mFastMixer->getTid();
         sendPrioConfigEvent(getpid_cached, tid, kPriorityFastMixer);
+        stream()->setHalThreadPriority(kPriorityFastMixer);
 
 #ifdef AUDIO_WATCHDOG
         // create and start the watchdog
@@ -4079,10 +3959,7 @@
     mEffectBufferValid = false; // mEffectBuffer has no valid data until tracks found.
 
     for (size_t i=0 ; i<count ; i++) {
-        const sp<Track> t = mActiveTracks[i].promote();
-        if (t == 0) {
-            continue;
-        }
+        const sp<Track> t = mActiveTracks[i];
 
         // this const just means the local variable doesn't change
         Track* const track = t.get();
@@ -4177,8 +4054,11 @@
                 // We have consumed all the buffers of this track.
                 // This would be incomplete if we auto-paused on underrun
                 {
-                    size_t audioHALFrames =
-                            (mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000;
+                    uint32_t latency = 0;
+                    status_t result = mOutput->stream->getLatency(&latency);
+                    ALOGE_IF(result != OK,
+                            "Error when retrieving output stream latency: %d", result);
+                    size_t audioHALFrames = (latency * mSampleRate) / 1000;
                     int64_t framesWritten = mBytesWritten / mFrameSize;
                     if (!(mStandby || track->presentationComplete(framesWritten, audioHALFrames))) {
                         // track stays in active list until presentation is complete
@@ -4346,7 +4226,7 @@
                 // read original volumes with volume control
                 float typeVolume = mStreamTypes[track->streamType()].volume;
                 float v = masterVolume * typeVolume;
-                AudioTrackServerProxy *proxy = track->mAudioTrackServerProxy;
+                sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
                 gain_minifloat_packed_t vlr = proxy->getVolumeLR();
                 vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
                 vrf = float_from_gain(gain_minifloat_unpack_right(vlr));
@@ -4575,11 +4455,7 @@
         size_t i = __builtin_ctz(resetMask);
         ALOG_ASSERT(i < count);
         resetMask &= ~(1 << i);
-        sp<Track> t = mActiveTracks[i].promote();
-        if (t == 0) {
-            continue;
-        }
-        Track* track = t.get();
+        sp<Track> track = mActiveTracks[i];
         ALOG_ASSERT(track->isFastTrack() && track->isStopped());
         track->reset();
     }
@@ -4631,7 +4507,7 @@
 {
     uint32_t trackCount = 0;
     for (size_t i = 0; i < mTracks.size() ; i++) {
-        if (mTracks[i]->uid() == (int)uid) {
+        if (mTracks[i]->uid() == uid) {
             trackCount++;
         }
     }
@@ -4734,14 +4610,12 @@
     }
 
     if (status == NO_ERROR) {
-        status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
-                                                keyValuePair.string());
+        status = mOutput->stream->setParameters(keyValuePair);
         if (!mStandby && status == INVALID_OPERATION) {
             mOutput->standby();
             mStandby = true;
             mBytesWritten = 0;
-            status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
-                                                   keyValuePair.string());
+            status = mOutput->stream->setParameters(keyValuePair);
         }
         if (status == NO_ERROR && reconfig) {
             readOutputParameters_l();
@@ -4851,7 +4725,7 @@
     } else {
         float typeVolume = mStreamTypes[track->streamType()].volume;
         float v = mMasterVolume * typeVolume;
-        AudioTrackServerProxy *proxy = track->mAudioTrackServerProxy;
+        sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
         gain_minifloat_packed_t vlr = proxy->getVolumeLR();
         left = float_from_gain(gain_minifloat_unpack_left(vlr));
         if (left > GAIN_FLOAT_UNITY) {
@@ -4882,9 +4756,8 @@
                 left = (float)vl / (1 << 24);
                 right = (float)vr / (1 << 24);
             }
-            if (mOutput->stream->set_volume) {
-                mOutput->stream->set_volume(mOutput->stream, left, right);
-            }
+            status_t result = mOutput->stream->setVolume(left, right);
+            ALOGE_IF(result != OK, "Error when setting output stream volume: %d", result);
         }
     }
 }
@@ -4892,7 +4765,7 @@
 void AudioFlinger::DirectOutputThread::onAddNewTrack_l()
 {
     sp<Track> previousTrack = mPreviousTrack.promote();
-    sp<Track> latestTrack = mLatestActiveTrack.promote();
+    sp<Track> latestTrack = mActiveTracks.getLatest();
 
     if (previousTrack != 0 && latestTrack != 0) {
         if (mType == DIRECT) {
@@ -4918,13 +4791,7 @@
     bool doHwResume = false;
 
     // find out which tracks need to be processed
-    for (size_t i = 0; i < count; i++) {
-        sp<Track> t = mActiveTracks[i].promote();
-        // The track died recently
-        if (t == 0) {
-            continue;
-        }
-
+    for (const sp<Track> &t : mActiveTracks) {
         if (t->isInvalid()) {
             ALOGW("An invalidated track shouldn't be in active list");
             tracksToRemove->add(t);
@@ -4939,7 +4806,7 @@
         // In theory an older track could underrun and restart after the new one starts
         // but as we only care about the transition phase between two tracks on a
         // direct output, it is not a problem to ignore the underrun case.
-        sp<Track> l = mLatestActiveTrack.promote();
+        sp<Track> l = mActiveTracks.getLatest();
         bool last = l.get() == track;
 
         if (track->isPausing()) {
@@ -5094,13 +4961,15 @@
     // if resume is received before pause is executed.
     if (mHwSupportsPause && !mStandby &&
             (doHwPause || (mFlushPending && !mHwPaused && (count != 0)))) {
-        mOutput->stream->pause(mOutput->stream);
+        status_t result = mOutput->stream->pause();
+        ALOGE_IF(result != OK, "Error when pausing output stream: %d", result);
     }
     if (mFlushPending) {
         flushHw_l();
     }
     if (mHwSupportsPause && !mStandby && doHwResume) {
-        mOutput->stream->resume(mOutput->stream);
+        status_t result = mOutput->stream->resume();
+        ALOGE_IF(result != OK, "Error when resuming output stream: %d", result);
     }
     // remove all the tracks that need to be...
     removeTracks_l(*tracksToRemove);
@@ -5241,14 +5110,12 @@
         }
     }
     if (status == NO_ERROR) {
-        status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
-                                                keyValuePair.string());
+        status = mOutput->stream->setParameters(keyValuePair);
         if (!mStandby && status == INVALID_OPERATION) {
             mOutput->standby();
             mStandby = true;
             mBytesWritten = 0;
-            status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
-                                                   keyValuePair.string());
+            status = mOutput->stream->setParameters(keyValuePair);
         }
         if (status == NO_ERROR && reconfig) {
             readOutputParameters_l();
@@ -5473,12 +5340,7 @@
     ALOGV("OffloadThread::prepareTracks_l active tracks %zu", count);
 
     // find out which tracks need to be processed
-    for (size_t i = 0; i < count; i++) {
-        sp<Track> t = mActiveTracks[i].promote();
-        // The track died recently
-        if (t == 0) {
-            continue;
-        }
+    for (const sp<Track> &t : mActiveTracks) {
         Track* const track = t.get();
 #ifdef VERY_VERY_VERBOSE_LOGGING
         audio_track_cblk_t* cblk = track->cblk();
@@ -5487,7 +5349,7 @@
         // In theory an older track could underrun and restart after the new one starts
         // but as we only care about the transition phase between two tracks on a
         // direct output, it is not a problem to ignore the underrun case.
-        sp<Track> l = mLatestActiveTrack.promote();
+        sp<Track> l = mActiveTracks.getLatest();
         bool last = l.get() == track;
 
         if (track->isInvalid()) {
@@ -5635,8 +5497,11 @@
                 // Drain has completed or we are in standby, signal presentation complete
                 if (!(mDrainSequence & 1) || !last || mStandby) {
                     track->mState = TrackBase::STOPPED;
-                    size_t audioHALFrames =
-                            (mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000;
+                    uint32_t latency = 0;
+                    status_t result = mOutput->stream->getLatency(&latency);
+                    ALOGE_IF(result != OK,
+                            "Error when retrieving output stream latency: %d", result);
+                    size_t audioHALFrames = (latency * mSampleRate) / 1000;
                     int64_t framesWritten =
                             mBytesWritten / mOutput->getFrameSize();
                     track->presentationComplete(framesWritten, audioHALFrames);
@@ -5648,16 +5513,15 @@
                 // fill a buffer, then remove it from active list.
                 if (--(track->mRetryCount) <= 0) {
                     bool running = false;
-                    if (mOutput->stream->get_presentation_position != nullptr) {
-                        uint64_t position = 0;
-                        struct timespec unused;
-                        // The running check restarts the retry counter at least once.
-                        int ret = mOutput->stream->get_presentation_position(
-                                mOutput->stream, &position, &unused);
-                        if (ret == NO_ERROR && position != mOffloadUnderrunPosition) {
-                            running = true;
-                            mOffloadUnderrunPosition = position;
-                        }
+                    uint64_t position = 0;
+                    struct timespec unused;
+                    // The running check restarts the retry counter at least once.
+                    status_t ret = mOutput->stream->getPresentationPosition(&position, &unused);
+                    if (ret == NO_ERROR && position != mOffloadUnderrunPosition) {
+                        running = true;
+                        mOffloadUnderrunPosition = position;
+                    }
+                    if (ret == NO_ERROR) {
                         ALOGVV("underrun counter, running(%d): %lld vs %lld", running,
                                 (long long)position, (long long)mOffloadUnderrunPosition);
                     }
@@ -5685,13 +5549,15 @@
     // before flush and then resume HW. This can happen in case of pause/flush/resume
     // if resume is received before pause is executed.
     if (!mStandby && (doHwPause || (mFlushPending && !mHwPaused && (count != 0)))) {
-        mOutput->stream->pause(mOutput->stream);
+        status_t result = mOutput->stream->pause();
+        ALOGE_IF(result != OK, "Error when pausing output stream: %d", result);
     }
     if (mFlushPending) {
         flushHw_l();
     }
     if (!mStandby && doHwResume) {
-        mOutput->stream->resume(mOutput->stream);
+        status_t result = mOutput->stream->resume();
+        ALOGE_IF(result != OK, "Error when resuming output stream: %d", result);
     }
 
     // remove all the tracks that need to be...
@@ -5942,8 +5808,8 @@
 #endif
                                          ) :
     ThreadBase(audioFlinger, id, outDevice, inDevice, RECORD, systemReady),
-    mInput(input), mActiveTracksGen(0), mRsmpInBuffer(NULL),
-    // mRsmpInFrames and mRsmpInFramesP2 are set by readInputParameters_l()
+    mInput(input), mRsmpInBuffer(NULL),
+    // mRsmpInFrames, mRsmpInFramesP2, and mRsmpInFramesOA are set by readInputParameters_l()
     mRsmpInRear(0)
 #ifdef TEE_SINK
     , mTeeSink(teeSink)
@@ -5995,7 +5861,8 @@
     if (initFastCapture) {
         // create a Pipe for FastCapture to write to, and for us and fast tracks to read from
         NBAIO_Format format = mInputSource->format();
-        size_t pipeFramesP2 = roundup(mSampleRate / 25);    // double-buffering of 20 ms each
+        // quadruple-buffering of 20 ms each; this ensures we can sleep for 20ms in RecordThread
+        size_t pipeFramesP2 = roundup(4 * FMS_20 * mSampleRate / 1000);
         size_t pipeSize = pipeFramesP2 * Format_frameSize(format);
         void *pipeBuffer;
         const sp<MemoryDealer> roHeap(readOnlyHeap());
@@ -6053,6 +5920,7 @@
         mFastCapture->run("FastCapture", ANDROID_PRIORITY_URGENT_AUDIO);
         pid_t tid = mFastCapture->getTid();
         sendPrioConfigEvent(getpid_cached, tid, kPriorityFastCapture);
+        stream()->setHalThreadPriority(kPriorityFastCapture);
 #ifdef AUDIO_WATCHDOG
         // FIXME
 #endif
@@ -6099,25 +5967,9 @@
 
 reacquire_wakelock:
     sp<RecordTrack> activeTrack;
-    int activeTracksGen;
     {
         Mutex::Autolock _l(mLock);
-        size_t size = mActiveTracks.size();
-        activeTracksGen = mActiveTracksGen;
-        if (size > 0) {
-            // FIXME an arbitrary choice
-            activeTrack = mActiveTracks[0];
-            acquireWakeLock_l(activeTrack->uid());
-            if (size > 1) {
-                SortedVector<int> tmp;
-                for (size_t i = 0; i < size; i++) {
-                    tmp.add(mActiveTracks[i]->uid());
-                }
-                updateWakeLockUids_l(tmp);
-            }
-        } else {
-            acquireWakeLock_l(-1);
-        }
+        acquireWakeLock_l();
     }
 
     // used to request a deferred sleep, to be executed later while mutex is unlocked
@@ -6169,15 +6021,6 @@
                 goto reacquire_wakelock;
             }
 
-            if (mActiveTracksGen != activeTracksGen) {
-                activeTracksGen = mActiveTracksGen;
-                SortedVector<int> tmp;
-                for (size_t i = 0; i < size; i++) {
-                    tmp.add(mActiveTracks[i]->uid());
-                }
-                updateWakeLockUids_l(tmp);
-            }
-
             bool doBroadcast = false;
             bool allStopped = true;
             for (size_t i = 0; i < size; ) {
@@ -6190,7 +6033,6 @@
                     }
                     removeTrack_l(activeTrack);
                     mActiveTracks.remove(activeTrack);
-                    mActiveTracksGen++;
                     size--;
                     continue;
                 }
@@ -6200,7 +6042,6 @@
 
                 case TrackBase::PAUSING:
                     mActiveTracks.remove(activeTrack);
-                    mActiveTracksGen++;
                     doBroadcast = true;
                     size--;
                     continue;
@@ -6240,6 +6081,8 @@
                 }
             }
 
+            mActiveTracks.updatePowerState(this);
+
             if (allStopped) {
                 standbyIfNotAlreadyInStandby();
             }
@@ -6325,20 +6168,41 @@
         // If an NBAIO source is present, use it to read the normal capture's data
         if (mPipeSource != 0) {
             size_t framesToRead = mBufferSize / mFrameSize;
+            framesToRead = min(mRsmpInFramesOA - rear, mRsmpInFramesP2 / 2);
             framesRead = mPipeSource->read((uint8_t*)mRsmpInBuffer + rear * mFrameSize,
                     framesToRead);
-            if (framesRead == 0) {
-                // since pipe is non-blocking, simulate blocking input
-                sleepUs = (framesToRead * 1000000LL) / mSampleRate;
+            // Since the pipe is non-blocking, simulate blocking input by waiting for 1/2 of
+            // the buffer size or at least for 20 ms.
+            size_t sleepFrames = max(
+                    min(mPipeFramesP2, mRsmpInFramesP2) / 2, FMS_20 * mSampleRate / 1000);
+            if (framesRead <= (ssize_t) sleepFrames) {
+                sleepUs = (sleepFrames * 1000000LL) / mSampleRate;
+            }
+            if (framesRead < 0) {
+                status_t status = (status_t) framesRead;
+                switch (status) {
+                case OVERRUN:
+                    ALOGW("overrun on read from pipe");
+                    framesRead = 0;
+                    break;
+                case NEGOTIATE:
+                    ALOGE("re-negotiation is needed");
+                    framesRead = -1;  // Will cause an attempt to recover.
+                    break;
+                default:
+                    ALOGE("unknown error %d on read from pipe", status);
+                    break;
+                }
             }
         // otherwise use the HAL / AudioStreamIn directly
         } else {
             ATRACE_BEGIN("read");
-            ssize_t bytesRead = mInput->stream->read(mInput->stream,
-                    (uint8_t*)mRsmpInBuffer + rear * mFrameSize, mBufferSize);
+            size_t bytesRead;
+            status_t result = mInput->stream->read(
+                    (uint8_t*)mRsmpInBuffer + rear * mFrameSize, mBufferSize, &bytesRead);
             ATRACE_END();
-            if (bytesRead < 0) {
-                framesRead = bytesRead;
+            if (result < 0) {
+                framesRead = result;
             } else {
                 framesRead = bytesRead / mFrameSize;
             }
@@ -6350,10 +6214,9 @@
         mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = systemTime();
 
         // Update server timestamp with kernel stats
-        if (mInput->stream->get_capture_position != nullptr
-                && mPipeSource.get() == nullptr /* don't obtain for FastCapture, could block */) {
+        if (mPipeSource.get() == nullptr /* don't obtain for FastCapture, could block */) {
             int64_t position, time;
-            int ret = mInput->stream->get_capture_position(mInput->stream, &position, &time);
+            int ret = mInput->stream->getCapturePosition(&position, &time);
             if (ret == NO_ERROR) {
                 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = position;
                 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = time;
@@ -6518,7 +6381,6 @@
             track->invalidate();
         }
         mActiveTracks.clear();
-        mActiveTracksGen++;
         mStartStopCond.broadcast();
     }
 
@@ -6562,7 +6424,18 @@
             sq->end(false /*didModify*/);
         }
     }
-    mInput->stream->common.standby(&mInput->stream->common);
+    status_t result = mInput->stream->standby();
+    ALOGE_IF(result != OK, "Error when putting input stream into standby: %d", result);
+
+    // If going into standby, flush the pipe source.
+    if (mPipeSource.get() != nullptr) {
+        const ssize_t flushed = mPipeSource->flush();
+        if (flushed > 0) {
+            ALOGV("Input standby flushed PipeSource %zd frames", flushed);
+            mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] += flushed;
+            mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = systemTime();
+        }
+    }
 }
 
 // RecordThread::createRecordTrack_l() must be called with AudioFlinger::mLock held
@@ -6574,10 +6447,11 @@
         size_t *pFrameCount,
         audio_session_t sessionId,
         size_t *notificationFrames,
-        int uid,
+        uid_t uid,
         audio_input_flags_t *flags,
         pid_t tid,
-        status_t *status)
+        status_t *status,
+        audio_port_handle_t portId)
 {
     size_t frameCount = *pFrameCount;
     sp<RecordTrack> track;
@@ -6685,7 +6559,7 @@
 
         track = new RecordTrack(this, client, sampleRate,
                       format, channelMask, frameCount, NULL, sessionId, uid,
-                      *flags, TrackBase::TYPE_DEFAULT);
+                      *flags, TrackBase::TYPE_DEFAULT, portId);
 
         lStatus = track->initCheck();
         if (lStatus != NO_ERROR) {
@@ -6761,7 +6635,6 @@
         //      or using a separate command thread
         recordTrack->mState = TrackBase::STARTING_1;
         mActiveTracks.add(recordTrack);
-        mActiveTracksGen++;
         status_t status = NO_ERROR;
         if (recordTrack->isExternalTrack()) {
             mLock.unlock();
@@ -6770,7 +6643,6 @@
             // FIXME should verify that recordTrack is still in mActiveTracks
             if (status != NO_ERROR) {
                 mActiveTracks.remove(recordTrack);
-                mActiveTracksGen++;
                 recordTrack->clearSyncStartEvent();
                 ALOGV("RecordThread::start error %d", status);
                 return status;
@@ -6905,6 +6777,10 @@
 
     dumpBase(fd, args);
 
+    AudioStreamIn *input = mInput;
+    audio_input_flags_t flags = input != NULL ? input->flags : AUDIO_INPUT_FLAG_NONE;
+    dprintf(fd, "  AudioStreamIn: %p flags %#x (%s)\n",
+            input, flags, inputFlagsToString(flags).c_str());
     if (mActiveTracks.size() == 0) {
         dprintf(fd, "  No active record clients\n");
     }
@@ -7063,252 +6939,6 @@
     buffer->frameCount = 0;
 }
 
-AudioFlinger::RecordThread::RecordBufferConverter::RecordBufferConverter(
-        audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
-        uint32_t srcSampleRate,
-        audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
-        uint32_t dstSampleRate) :
-            mSrcChannelMask(AUDIO_CHANNEL_INVALID), // updateParameters will set following vars
-            // mSrcFormat
-            // mSrcSampleRate
-            // mDstChannelMask
-            // mDstFormat
-            // mDstSampleRate
-            // mSrcChannelCount
-            // mDstChannelCount
-            // mDstFrameSize
-            mBuf(NULL), mBufFrames(0), mBufFrameSize(0),
-            mResampler(NULL),
-            mIsLegacyDownmix(false),
-            mIsLegacyUpmix(false),
-            mRequiresFloat(false),
-            mInputConverterProvider(NULL)
-{
-    (void)updateParameters(srcChannelMask, srcFormat, srcSampleRate,
-            dstChannelMask, dstFormat, dstSampleRate);
-}
-
-AudioFlinger::RecordThread::RecordBufferConverter::~RecordBufferConverter() {
-    free(mBuf);
-    delete mResampler;
-    delete mInputConverterProvider;
-}
-
-size_t AudioFlinger::RecordThread::RecordBufferConverter::convert(void *dst,
-        AudioBufferProvider *provider, size_t frames)
-{
-    if (mInputConverterProvider != NULL) {
-        mInputConverterProvider->setBufferProvider(provider);
-        provider = mInputConverterProvider;
-    }
-
-    if (mResampler == NULL) {
-        ALOGVV("NO RESAMPLING sampleRate:%u mSrcFormat:%#x mDstFormat:%#x",
-                mSrcSampleRate, mSrcFormat, mDstFormat);
-
-        AudioBufferProvider::Buffer buffer;
-        for (size_t i = frames; i > 0; ) {
-            buffer.frameCount = i;
-            status_t status = provider->getNextBuffer(&buffer);
-            if (status != OK || buffer.frameCount == 0) {
-                frames -= i; // cannot fill request.
-                break;
-            }
-            // format convert to destination buffer
-            convertNoResampler(dst, buffer.raw, buffer.frameCount);
-
-            dst = (int8_t*)dst + buffer.frameCount * mDstFrameSize;
-            i -= buffer.frameCount;
-            provider->releaseBuffer(&buffer);
-        }
-    } else {
-         ALOGVV("RESAMPLING mSrcSampleRate:%u mDstSampleRate:%u mSrcFormat:%#x mDstFormat:%#x",
-                 mSrcSampleRate, mDstSampleRate, mSrcFormat, mDstFormat);
-
-         // reallocate buffer if needed
-         if (mBufFrameSize != 0 && mBufFrames < frames) {
-             free(mBuf);
-             mBufFrames = frames;
-             (void)posix_memalign(&mBuf, 32, mBufFrames * mBufFrameSize);
-         }
-        // resampler accumulates, but we only have one source track
-        memset(mBuf, 0, frames * mBufFrameSize);
-        frames = mResampler->resample((int32_t*)mBuf, frames, provider);
-        // format convert to destination buffer
-        convertResampler(dst, mBuf, frames);
-    }
-    return frames;
-}
-
-status_t AudioFlinger::RecordThread::RecordBufferConverter::updateParameters(
-        audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
-        uint32_t srcSampleRate,
-        audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
-        uint32_t dstSampleRate)
-{
-    // quick evaluation if there is any change.
-    if (mSrcFormat == srcFormat
-            && mSrcChannelMask == srcChannelMask
-            && mSrcSampleRate == srcSampleRate
-            && mDstFormat == dstFormat
-            && mDstChannelMask == dstChannelMask
-            && mDstSampleRate == dstSampleRate) {
-        return NO_ERROR;
-    }
-
-    ALOGV("RecordBufferConverter updateParameters srcMask:%#x dstMask:%#x"
-            "  srcFormat:%#x dstFormat:%#x  srcRate:%u dstRate:%u",
-            srcChannelMask, dstChannelMask, srcFormat, dstFormat, srcSampleRate, dstSampleRate);
-    const bool valid =
-            audio_is_input_channel(srcChannelMask)
-            && audio_is_input_channel(dstChannelMask)
-            && audio_is_valid_format(srcFormat) && audio_is_linear_pcm(srcFormat)
-            && audio_is_valid_format(dstFormat) && audio_is_linear_pcm(dstFormat)
-            && (srcSampleRate <= dstSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX)
-            ; // no upsampling checks for now
-    if (!valid) {
-        return BAD_VALUE;
-    }
-
-    mSrcFormat = srcFormat;
-    mSrcChannelMask = srcChannelMask;
-    mSrcSampleRate = srcSampleRate;
-    mDstFormat = dstFormat;
-    mDstChannelMask = dstChannelMask;
-    mDstSampleRate = dstSampleRate;
-
-    // compute derived parameters
-    mSrcChannelCount = audio_channel_count_from_in_mask(srcChannelMask);
-    mDstChannelCount = audio_channel_count_from_in_mask(dstChannelMask);
-    mDstFrameSize = mDstChannelCount * audio_bytes_per_sample(mDstFormat);
-
-    // do we need to resample?
-    delete mResampler;
-    mResampler = NULL;
-    if (mSrcSampleRate != mDstSampleRate) {
-        mResampler = AudioResampler::create(AUDIO_FORMAT_PCM_FLOAT,
-                mSrcChannelCount, mDstSampleRate);
-        mResampler->setSampleRate(mSrcSampleRate);
-        mResampler->setVolume(AudioMixer::UNITY_GAIN_FLOAT, AudioMixer::UNITY_GAIN_FLOAT);
-    }
-
-    // are we running legacy channel conversion modes?
-    mIsLegacyDownmix = (mSrcChannelMask == AUDIO_CHANNEL_IN_STEREO
-                            || mSrcChannelMask == AUDIO_CHANNEL_IN_FRONT_BACK)
-                   && mDstChannelMask == AUDIO_CHANNEL_IN_MONO;
-    mIsLegacyUpmix = mSrcChannelMask == AUDIO_CHANNEL_IN_MONO
-                   && (mDstChannelMask == AUDIO_CHANNEL_IN_STEREO
-                            || mDstChannelMask == AUDIO_CHANNEL_IN_FRONT_BACK);
-
-    // do we need to process in float?
-    mRequiresFloat = mResampler != NULL || mIsLegacyDownmix || mIsLegacyUpmix;
-
-    // do we need a staging buffer to convert for destination (we can still optimize this)?
-    // we use mBufFrameSize > 0 to indicate both frame size as well as buffer necessity
-    if (mResampler != NULL) {
-        mBufFrameSize = max(mSrcChannelCount, FCC_2)
-                * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT);
-    } else if (mIsLegacyUpmix || mIsLegacyDownmix) { // legacy modes always float
-        mBufFrameSize = mDstChannelCount * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT);
-    } else if (mSrcChannelMask != mDstChannelMask && mDstFormat != mSrcFormat) {
-        mBufFrameSize = mDstChannelCount * audio_bytes_per_sample(mSrcFormat);
-    } else {
-        mBufFrameSize = 0;
-    }
-    mBufFrames = 0; // force the buffer to be resized.
-
-    // do we need an input converter buffer provider to give us float?
-    delete mInputConverterProvider;
-    mInputConverterProvider = NULL;
-    if (mRequiresFloat && mSrcFormat != AUDIO_FORMAT_PCM_FLOAT) {
-        mInputConverterProvider = new ReformatBufferProvider(
-                audio_channel_count_from_in_mask(mSrcChannelMask),
-                mSrcFormat,
-                AUDIO_FORMAT_PCM_FLOAT,
-                256 /* provider buffer frame count */);
-    }
-
-    // do we need a remixer to do channel mask conversion
-    if (!mIsLegacyDownmix && !mIsLegacyUpmix && mSrcChannelMask != mDstChannelMask) {
-        (void) memcpy_by_index_array_initialization_from_channel_mask(
-                mIdxAry, ARRAY_SIZE(mIdxAry), mDstChannelMask, mSrcChannelMask);
-    }
-    return NO_ERROR;
-}
-
-void AudioFlinger::RecordThread::RecordBufferConverter::convertNoResampler(
-        void *dst, const void *src, size_t frames)
-{
-    // src is native type unless there is legacy upmix or downmix, whereupon it is float.
-    if (mBufFrameSize != 0 && mBufFrames < frames) {
-        free(mBuf);
-        mBufFrames = frames;
-        (void)posix_memalign(&mBuf, 32, mBufFrames * mBufFrameSize);
-    }
-    // do we need to do legacy upmix and downmix?
-    if (mIsLegacyUpmix || mIsLegacyDownmix) {
-        void *dstBuf = mBuf != NULL ? mBuf : dst;
-        if (mIsLegacyUpmix) {
-            upmix_to_stereo_float_from_mono_float((float *)dstBuf,
-                    (const float *)src, frames);
-        } else /*mIsLegacyDownmix */ {
-            downmix_to_mono_float_from_stereo_float((float *)dstBuf,
-                    (const float *)src, frames);
-        }
-        if (mBuf != NULL) {
-            memcpy_by_audio_format(dst, mDstFormat, mBuf, AUDIO_FORMAT_PCM_FLOAT,
-                    frames * mDstChannelCount);
-        }
-        return;
-    }
-    // do we need to do channel mask conversion?
-    if (mSrcChannelMask != mDstChannelMask) {
-        void *dstBuf = mBuf != NULL ? mBuf : dst;
-        memcpy_by_index_array(dstBuf, mDstChannelCount,
-                src, mSrcChannelCount, mIdxAry, audio_bytes_per_sample(mSrcFormat), frames);
-        if (dstBuf == dst) {
-            return; // format is the same
-        }
-    }
-    // convert to destination buffer
-    const void *convertBuf = mBuf != NULL ? mBuf : src;
-    memcpy_by_audio_format(dst, mDstFormat, convertBuf, mSrcFormat,
-            frames * mDstChannelCount);
-}
-
-void AudioFlinger::RecordThread::RecordBufferConverter::convertResampler(
-        void *dst, /*not-a-const*/ void *src, size_t frames)
-{
-    // src buffer format is ALWAYS float when entering this routine
-    if (mIsLegacyUpmix) {
-        ; // mono to stereo already handled by resampler
-    } else if (mIsLegacyDownmix
-            || (mSrcChannelMask == mDstChannelMask && mSrcChannelCount == 1)) {
-        // the resampler outputs stereo for mono input channel (a feature?)
-        // must convert to mono
-        downmix_to_mono_float_from_stereo_float((float *)src,
-                (const float *)src, frames);
-    } else if (mSrcChannelMask != mDstChannelMask) {
-        // convert to mono channel again for channel mask conversion (could be skipped
-        // with further optimization).
-        if (mSrcChannelCount == 1) {
-            downmix_to_mono_float_from_stereo_float((float *)src,
-                (const float *)src, frames);
-        }
-        // convert to destination format (in place, OK as float is larger than other types)
-        if (mDstFormat != AUDIO_FORMAT_PCM_FLOAT) {
-            memcpy_by_audio_format(src, mDstFormat, src, AUDIO_FORMAT_PCM_FLOAT,
-                    frames * mSrcChannelCount);
-        }
-        // channel convert and save to dst
-        memcpy_by_index_array(dst, mDstChannelCount,
-                src, mSrcChannelCount, mIdxAry, audio_bytes_per_sample(mDstFormat), frames);
-        return;
-    }
-    // convert to destination format and save to dst
-    memcpy_by_audio_format(dst, mDstFormat, src, AUDIO_FORMAT_PCM_FLOAT,
-            frames * mDstChannelCount);
-}
 
 bool AudioFlinger::RecordThread::checkForNewParameter_l(const String8& keyValuePair,
                                                         status_t& status)
@@ -7405,22 +7035,22 @@
     }
 
     if (status == NO_ERROR) {
-        status = mInput->stream->common.set_parameters(&mInput->stream->common,
-                keyValuePair.string());
+        status = mInput->stream->setParameters(keyValuePair);
         if (status == INVALID_OPERATION) {
             inputStandBy();
-            status = mInput->stream->common.set_parameters(&mInput->stream->common,
-                    keyValuePair.string());
+            status = mInput->stream->setParameters(keyValuePair);
         }
         if (reconfig) {
-            if (status == BAD_VALUE &&
-                audio_is_linear_pcm(mInput->stream->common.get_format(&mInput->stream->common)) &&
-                audio_is_linear_pcm(reqFormat) &&
-                (mInput->stream->common.get_sample_rate(&mInput->stream->common)
-                        <= (AUDIO_RESAMPLER_DOWN_RATIO_MAX * samplingRate)) &&
-                audio_channel_count_from_in_mask(
-                        mInput->stream->common.get_channels(&mInput->stream->common)) <= FCC_8) {
-                status = NO_ERROR;
+            if (status == BAD_VALUE) {
+                uint32_t sRate;
+                audio_channel_mask_t channelMask;
+                audio_format_t format;
+                if (mInput->stream->getAudioProperties(&sRate, &channelMask, &format) == OK &&
+                        audio_is_linear_pcm(format) && audio_is_linear_pcm(reqFormat) &&
+                        sRate <= (AUDIO_RESAMPLER_DOWN_RATIO_MAX * samplingRate) &&
+                        audio_channel_count_from_in_mask(channelMask) <= FCC_8) {
+                    status = NO_ERROR;
+                }
             }
             if (status == NO_ERROR) {
                 readInputParameters_l();
@@ -7435,14 +7065,13 @@
 String8 AudioFlinger::RecordThread::getParameters(const String8& keys)
 {
     Mutex::Autolock _l(mLock);
-    if (initCheck() != NO_ERROR) {
-        return String8();
+    if (initCheck() == NO_ERROR) {
+        String8 out_s8;
+        if (mInput->stream->getParameters(keys, &out_s8) == OK) {
+            return out_s8;
+        }
     }
-
-    char *s = mInput->stream->common.get_parameters(&mInput->stream->common, keys.string());
-    const String8 out_s8(s);
-    free(s);
-    return out_s8;
+    return String8();
 }
 
 void AudioFlinger::RecordThread::ioConfigChanged(audio_io_config_event event, pid_t pid) {
@@ -7471,19 +7100,16 @@
 
 void AudioFlinger::RecordThread::readInputParameters_l()
 {
-    mSampleRate = mInput->stream->common.get_sample_rate(&mInput->stream->common);
-    mChannelMask = mInput->stream->common.get_channels(&mInput->stream->common);
+    status_t result = mInput->stream->getAudioProperties(&mSampleRate, &mChannelMask, &mHALFormat);
+    LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving audio properties from HAL: %d", result);
     mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
-    if (mChannelCount > FCC_8) {
-        ALOGE("HAL channel count %d > %d", mChannelCount, FCC_8);
-    }
-    mHALFormat = mInput->stream->common.get_format(&mInput->stream->common);
+    LOG_ALWAYS_FATAL_IF(mChannelCount > FCC_8, "HAL channel count %d > %d", mChannelCount, FCC_8);
     mFormat = mHALFormat;
-    if (!audio_is_linear_pcm(mFormat)) {
-        ALOGE("HAL format %#x is not linear pcm", mFormat);
-    }
-    mFrameSize = audio_stream_in_frame_size(mInput->stream);
-    mBufferSize = mInput->stream->common.get_buffer_size(&mInput->stream->common);
+    LOG_ALWAYS_FATAL_IF(!audio_is_linear_pcm(mFormat), "HAL format %#x is not linear pcm", mFormat);
+    result = mInput->stream->getFrameSize(&mFrameSize);
+    LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving frame size from HAL: %d", result);
+    result = mInput->stream->getBufferSize(&mBufferSize);
+    LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving buffer size from HAL: %d", result);
     mFrameCount = mBufferSize / mFrameSize;
     // This is the formula for calculating the temporary buffer size.
     // With 7 HAL buffers, we can guarantee ability to down-sample the input by ratio of 6:1 to
@@ -7506,9 +7132,9 @@
     // The current value is higher than necessary.  However it should not add to latency.
 
     // Over-allocate beyond mRsmpInFramesP2 to permit a HAL read past end of buffer
-    size_t bufferSize = (mRsmpInFramesP2 + mFrameCount - 1) * mFrameSize;
-    (void)posix_memalign(&mRsmpInBuffer, 32, bufferSize);
-    memset(mRsmpInBuffer, 0, bufferSize); // if posix_memalign fails, will segv here.
+    mRsmpInFramesOA = mRsmpInFramesP2 + mFrameCount - 1;
+    (void)posix_memalign(&mRsmpInBuffer, 32, mRsmpInFramesOA * mFrameSize);
+    memset(mRsmpInBuffer, 0, mRsmpInFramesOA * mFrameSize); // if posix_memalign fails, will segv here.
 
     // AudioRecord mSampleRate and mChannelCount are constant due to AudioRecord API constraints.
     // But if thread's mSampleRate or mChannelCount changes, how will that affect active tracks?
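A small worked example of the over-allocation computed just above; the 320-frame HAL buffer and 2048-frame resampler size are illustrative assumptions, not values from this change.

    // Worked example (illustrative numbers only):
    size_t frameCount      = 320;                              // HAL buffer size in frames (assumed)
    size_t rsmpInFramesP2  = 2048;                             // power-of-2 rounded resampler input (assumed)
    size_t rsmpInFramesOA  = rsmpInFramesP2 + frameCount - 1;  // 2367 frames over-allocated
    size_t allocationBytes = rsmpInFramesOA * 4;               // 9468 bytes for a 16-bit stereo frame size of 4
    // A HAL read that begins near the end of the circular buffer can therefore still
    // deposit one full HAL buffer without wrapping.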
@@ -7517,11 +7143,11 @@
 uint32_t AudioFlinger::RecordThread::getInputFramesLost()
 {
     Mutex::Autolock _l(mLock);
-    if (initCheck() != NO_ERROR) {
-        return 0;
+    uint32_t result;
+    if (initCheck() == NO_ERROR && mInput->stream->getInputFramesLost(&result) == OK) {
+        return result;
     }
-
-    return mInput->stream->get_input_frames_lost(mInput->stream);
+    return 0;
 }
 
 // hasAudioSession_l() must be called with ThreadBase::mLock held
@@ -7568,12 +7194,12 @@
 }
 
 // this method must always be called either with ThreadBase mLock held or inside the thread loop
-audio_stream_t* AudioFlinger::RecordThread::stream() const
+sp<StreamHalInterface> AudioFlinger::RecordThread::stream() const
 {
     if (mInput == NULL) {
         return NULL;
     }
-    return &mInput->stream->common;
+    return mInput->stream;
 }
 
 status_t AudioFlinger::RecordThread::addEffectChain_l(const sp<EffectChain>& chain)
@@ -7643,14 +7269,13 @@
         }
     }
 
-    if (mInput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-        audio_hw_device_t *hwDevice = mInput->audioHwDev->hwDevice();
-        status = hwDevice->create_audio_patch(hwDevice,
-                                               patch->num_sources,
-                                               patch->sources,
-                                               patch->num_sinks,
-                                               patch->sinks,
-                                               handle);
+    if (mInput->audioHwDev->supportsAudioPatches()) {
+        sp<DeviceHalInterface> hwDevice = mInput->audioHwDev->hwDevice();
+        status = hwDevice->createAudioPatch(patch->num_sources,
+                                            patch->sources,
+                                            patch->num_sinks,
+                                            patch->sinks,
+                                            handle);
     } else {
         char *address;
         if (strcmp(patch->sources[0].ext.device.address, "") != 0) {
@@ -7662,12 +7287,11 @@
         }
         AudioParameter param = AudioParameter(String8(address));
         free(address);
-        param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING),
+        param.addInt(String8(AudioParameter::keyRouting),
                      (int)patch->sources[0].ext.device.type);
-        param.addInt(String8(AUDIO_PARAMETER_STREAM_INPUT_SOURCE),
+        param.addInt(String8(AudioParameter::keyInputSource),
                                          (int)patch->sinks[0].ext.mix.usecase.source);
-        status = mInput->stream->common.set_parameters(&mInput->stream->common,
-                param.toString().string());
+        status = mInput->stream->setParameters(param.toString());
         *handle = AUDIO_PATCH_HANDLE_NONE;
     }
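For context on the fallback branch above, a minimal sketch (not part of this change) of the key/value form a HAL without audio patch support receives; the built-in mic device and MIC source are illustrative choices.

    // Sketch only: AudioParameter serializes to semicolon-separated "key=value" pairs.
    AudioParameter example;
    example.addInt(String8(AudioParameter::keyRouting), (int)AUDIO_DEVICE_IN_BUILTIN_MIC);
    example.addInt(String8(AudioParameter::keyInputSource), (int)AUDIO_SOURCE_MIC);
    // example.toString() is approximately "routing=<deviceInt>;input_source=<sourceInt>",
    // which is the string setParameters() passes down in the legacy branch.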
 
@@ -7685,14 +7309,13 @@
 
     mInDevice = AUDIO_DEVICE_NONE;
 
-    if (mInput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-        audio_hw_device_t *hwDevice = mInput->audioHwDev->hwDevice();
-        status = hwDevice->release_audio_patch(hwDevice, handle);
+    if (mInput->audioHwDev->supportsAudioPatches()) {
+        sp<DeviceHalInterface> hwDevice = mInput->audioHwDev->hwDevice();
+        status = hwDevice->releaseAudioPatch(handle);
     } else {
         AudioParameter param;
-        param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), 0);
-        status = mInput->stream->common.set_parameters(&mInput->stream->common,
-                param.toString().string());
+        param.addInt(String8(AudioParameter::keyRouting), 0);
+        status = mInput->stream->setParameters(param.toString());
     }
     return status;
 }
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 49aa984..3fb0b07 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -286,7 +286,7 @@
                 audio_devices_t outDevice() const { return mOutDevice; }
                 audio_devices_t inDevice() const { return mInDevice; }
 
-    virtual     audio_stream_t* stream() const = 0;
+    virtual     sp<StreamHalInterface> stream() const = 0;
 
                 sp<EffectHandle> createEffect_l(
                                     const sp<AudioFlinger::Client>& client,
@@ -403,12 +403,11 @@
                     effect_uuid_t mType;    // effect type UUID
                 };
 
-                void        acquireWakeLock(int uid = -1);
-                virtual void acquireWakeLock_l(int uid = -1);
+                void        acquireWakeLock();
+                virtual void acquireWakeLock_l();
                 void        releaseWakeLock();
                 void        releaseWakeLock_l();
-                void        updateWakeLockUids(const SortedVector<int> &uids);
-                void        updateWakeLockUids_l(const SortedVector<int> &uids);
+                void        updateWakeLockUids_l(const SortedVector<uid_t> &uids);
                 void        getPowerManager_l();
                 void setEffectSuspended_l(const effect_uuid_t *type,
                                           bool suspend,
@@ -482,12 +481,96 @@
                 static const size_t     kLogSize = 4 * 1024;
                 sp<NBLog::Writer>       mNBLogWriter;
                 bool                    mSystemReady;
-                bool                    mNotifiedBatteryStart;
                 ExtendedTimestamp       mTimestamp;
+
+                // ActiveTracks is a sorted vector of track type T representing the
+                // active tracks of threadLoop() to be considered by the locked prepare portion.
+                // ActiveTracks should be accessed with the ThreadBase lock held.
+                //
+                // During processing and I/O, the threadLoop does not hold the lock;
+                // hence it does not directly use ActiveTracks.  Care should be taken
+                // to hold local strong references or defer removal of tracks
+                // if the threadLoop may still be accessing those tracks due to mix, etc.
+                //
+                // This class updates power information appropriately.
+                //
+
+                template <typename T>
+                class ActiveTracks {
+                public:
+                    ActiveTracks()
+                        : mActiveTracksGeneration(0)
+                        , mLastActiveTracksGeneration(0)
+                    { }
+
+                    ~ActiveTracks() {
+                        ALOGW_IF(!mActiveTracks.isEmpty(),
+                                "ActiveTracks should be empty in destructor");
+                    }
+                    // returns the last track added (even though it may have been
+                    // subsequently removed from ActiveTracks).
+                    //
+                    // Used for DirectOutputThread to ensure a flush is called when transitioning
+                    // to a new track (even though it may be on the same session).
+                    // Used for OffloadThread to ensure that volume and mixer state is
+                    // taken from the latest track added.
+                    //
+                    // The latest track is saved with a weak pointer to prevent keeping an
+                    // otherwise useless track alive. Thus the function will return nullptr
+                    // if the latest track has subsequently been removed and destroyed.
+                    sp<T> getLatest() {
+                        return mLatestActiveTrack.promote();
+                    }
+
+                    // SortedVector methods
+                    ssize_t         add(const sp<T> &track);
+                    ssize_t         remove(const sp<T> &track);
+                    size_t          size() const {
+                        return mActiveTracks.size();
+                    }
+                    ssize_t         indexOf(const sp<T>& item) {
+                        return mActiveTracks.indexOf(item);
+                    }
+                    sp<T>           operator[](size_t index) const {
+                        return mActiveTracks[index];
+                    }
+                    typename SortedVector<sp<T>>::iterator begin() {
+                        return mActiveTracks.begin();
+                    }
+                    typename SortedVector<sp<T>>::iterator end() {
+                        return mActiveTracks.end();
+                    }
+
+                    // Due to Binder recursion optimization, clear() and updatePowerState()
+                    // cannot be called from a Binder thread because they may call back into
+                    // the original calling process (system server) for BatteryNotifier
+                    // (which requires a Java environment that may not be present).
+                    // Hence, call clear() and updatePowerState() only from the
+                    // ThreadBase thread.
+                    void            clear();
+                    // periodically called in the threadLoop() to update power state uids.
+                    void            updatePowerState(sp<ThreadBase> thread, bool force = false);
+
+                private:
+                    SortedVector<uid_t> getWakeLockUids() {
+                        SortedVector<uid_t> wakeLockUids;
+                        for (const sp<T> &track : mActiveTracks) {
+                            wakeLockUids.add(track->uid());
+                        }
+                        return wakeLockUids; // moved by underlying SharedBuffer
+                    }
+
+                    std::map<uid_t, std::pair<ssize_t /* previous */, ssize_t /* current */>>
+                                        mBatteryCounter;
+                    SortedVector<sp<T>> mActiveTracks;
+                    int                 mActiveTracksGeneration;
+                    int                 mLastActiveTracksGeneration;
+                    wp<T>               mLatestActiveTrack; // latest track added to ActiveTracks
+                };
 };
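A minimal sketch (not part of this change) of how a thread loop might honor the ActiveTracks contract documented above; RecordThreadSketch is a hypothetical class assumed to derive from ThreadBase and to own mLock and an ActiveTracks<RecordTrack> member, as in the surrounding code.

    // Sketch only: copy strong references under the lock, then do I/O unlocked.
    void RecordThreadSketch::threadLoopIteration()
    {
        Vector< sp<RecordTrack> > tracks;
        {
            Mutex::Autolock _l(mLock);                // ActiveTracks is only read with the lock held
            for (const sp<RecordTrack> &t : mActiveTracks) {
                tracks.add(t);                        // strong refs keep tracks alive during unlocked I/O
            }
            mActiveTracks.updatePowerState(this);     // refreshes wakelock uids / battery attribution
        }
        // ... read from the HAL and deliver buffers to 'tracks' without holding mLock ...
    }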
 
 // --- PlaybackThread ---
-class PlaybackThread : public ThreadBase {
+class PlaybackThread : public ThreadBase, public StreamOutHalInterfaceCallback {
 public:
 
 #include "PlaybackTracks.h"
@@ -544,13 +627,13 @@
     virtual     mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove) = 0;
                 void        removeTracks_l(const Vector< sp<Track> >& tracksToRemove);
 
-                void        writeCallback();
-                void        resetWriteBlocked(uint32_t sequence);
-                void        drainCallback();
-                void        resetDraining(uint32_t sequence);
-                void        errorCallback();
+    // StreamOutHalInterfaceCallback implementation
+    virtual     void        onWriteReady();
+    virtual     void        onDrainReady();
+    virtual     void        onError();
 
-    static      int         asyncCallback(stream_callback_event_t event, void *param, void *cookie);
+                void        resetWriteBlocked(uint32_t sequence);
+                void        resetDraining(uint32_t sequence);
 
     virtual     bool        waitingAsyncCallback();
     virtual     bool        waitingAsyncCallback_l();
@@ -562,6 +645,10 @@
     virtual     void        preExit();
 
     virtual     bool        keepWakeLock() const { return true; }
+    virtual     void        acquireWakeLock_l() {
+                                ThreadBase::acquireWakeLock_l();
+                                mActiveTracks.updatePowerState(this, true /* force */);
+                            }
 
 public:
 
@@ -591,12 +678,13 @@
                                 audio_session_t sessionId,
                                 audio_output_flags_t *flags,
                                 pid_t tid,
-                                int uid,
-                                status_t *status /*non-NULL*/);
+                                uid_t uid,
+                                status_t *status /*non-NULL*/,
+                                audio_port_handle_t portId);
 
                 AudioStreamOut* getOutput() const;
                 AudioStreamOut* clearOutput();
-                virtual audio_stream_t* stream() const;
+                virtual sp<StreamHalInterface> stream() const;
 
                 // a very large number of suspend() will eventually wraparound, but unlikely
                 void        suspend() { (void) android_atomic_inc(&mSuspended); }
@@ -734,10 +822,7 @@
     bool                            mMasterMute;
                 void        setMasterMute_l(bool muted) { mMasterMute = muted; }
 protected:
-    SortedVector< wp<Track> >       mActiveTracks;  // FIXME check if this could be sp<>
-    SortedVector<int>               mWakeLockUids;
-    int                             mActiveTracksGeneration;
-    wp<Track>                       mLatestActiveTrack; // latest track added to mActiveTracks
+    ActiveTracks<Track>     mActiveTracks;
 
     // Allocate a track name for a given channel mask.
     //   Returns name >= 0 if successful, -1 on failure.
@@ -909,8 +994,12 @@
 
                 dprintf(fd, "Local Log:\n");
                 auto it = mLog.begin();
-                if (!dumpAll && mLog.size() > kLogPrint) {
-                    it += (mLog.size() - kLogPrint);
+                if (!dumpAll) {
+                    const size_t lines =
+                            (size_t)property_get_int32("audio.locallog.lines", kLogPrint);
+                    if (mLog.size() > lines) {
+                        it += (mLog.size() - lines);
+                    }
                 }
                 for (; it != mLog.end(); ++it) {
                     const int64_t ns = it->first;
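A short usage note on the property read above: property_get_int32() from <cutils/properties.h> falls back to the supplied default (here kLogPrint) when "audio.locallog.lines" is unset, so the shortened dump length can be raised at runtime, for example with "adb shell setprop audio.locallog.lines 200" (the value 200 is only an example). A minimal sketch:

    #include <cutils/properties.h>
    // Sketch only: returns the configured local-log line count or the default.
    static size_t localLogLines(size_t defaultLines)
    {
        return (size_t)property_get_int32("audio.locallog.lines", (int32_t)defaultLines);
    }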
@@ -975,8 +1064,8 @@
     virtual     uint32_t    suspendSleepTimeUs() const;
     virtual     void        cacheParameters_l();
 
-    virtual void acquireWakeLock_l(int uid = -1) {
-        PlaybackThread::acquireWakeLock_l(uid);
+    virtual void acquireWakeLock_l() {
+        PlaybackThread::acquireWakeLock_l();
         if (hasFastMixer()) {
             mFastMixer->setBoottimeOffset(
                     mTimestamp.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_BOOTTIME]);
@@ -1234,92 +1323,6 @@
                                             // rolling counter that is never cleared
     };
 
-    /* The RecordBufferConverter is used for format, channel, and sample rate
-     * conversion for a RecordTrack.
-     *
-     * TODO: Self contained, so move to a separate file later.
-     *
-     * RecordBufferConverter uses the convert() method rather than exposing a
-     * buffer provider interface; this is to save a memory copy.
-     */
-    class RecordBufferConverter
-    {
-    public:
-        RecordBufferConverter(
-                audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
-                uint32_t srcSampleRate,
-                audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
-                uint32_t dstSampleRate);
-
-        ~RecordBufferConverter();
-
-        /* Converts input data from an AudioBufferProvider by format, channelMask,
-         * and sampleRate to a destination buffer.
-         *
-         * Parameters
-         *      dst:  buffer to place the converted data.
-         * provider:  buffer provider to obtain source data.
-         *   frames:  number of frames to convert
-         *
-         * Returns the number of frames converted.
-         */
-        size_t convert(void *dst, AudioBufferProvider *provider, size_t frames);
-
-        // returns NO_ERROR if constructor was successful
-        status_t initCheck() const {
-            // mSrcChannelMask set on successful updateParameters
-            return mSrcChannelMask != AUDIO_CHANNEL_INVALID ? NO_ERROR : NO_INIT;
-        }
-
-        // allows dynamic reconfigure of all parameters
-        status_t updateParameters(
-                audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
-                uint32_t srcSampleRate,
-                audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
-                uint32_t dstSampleRate);
-
-        // called to reset resampler buffers on record track discontinuity
-        void reset() {
-            if (mResampler != NULL) {
-                mResampler->reset();
-            }
-        }
-
-    private:
-        // format conversion when not using resampler
-        void convertNoResampler(void *dst, const void *src, size_t frames);
-
-        // format conversion when using resampler; modifies src in-place
-        void convertResampler(void *dst, /*not-a-const*/ void *src, size_t frames);
-
-        // user provided information
-        audio_channel_mask_t mSrcChannelMask;
-        audio_format_t       mSrcFormat;
-        uint32_t             mSrcSampleRate;
-        audio_channel_mask_t mDstChannelMask;
-        audio_format_t       mDstFormat;
-        uint32_t             mDstSampleRate;
-
-        // derived information
-        uint32_t             mSrcChannelCount;
-        uint32_t             mDstChannelCount;
-        size_t               mDstFrameSize;
-
-        // format conversion buffer
-        void                *mBuf;
-        size_t               mBufFrames;
-        size_t               mBufFrameSize;
-
-        // resampler info
-        AudioResampler      *mResampler;
-
-        bool                 mIsLegacyDownmix;  // legacy stereo to mono conversion needed
-        bool                 mIsLegacyUpmix;    // legacy mono to stereo conversion needed
-        bool                 mRequiresFloat;    // data processing requires float (e.g. resampler)
-        PassthruBufferProvider *mInputConverterProvider;    // converts input to float
-        int8_t               mIdxAry[sizeof(uint32_t) * 8]; // used for channel mask conversion
-    };
-
 #include "RecordTracks.h"
 
             RecordThread(const sp<AudioFlinger>& audioFlinger,
@@ -1361,10 +1364,11 @@
                     size_t *pFrameCount,
                     audio_session_t sessionId,
                     size_t *notificationFrames,
-                    int uid,
+                    uid_t uid,
                     audio_input_flags_t *flags,
                     pid_t tid,
-                    status_t *status /*non-NULL*/);
+                    status_t *status /*non-NULL*/,
+                    audio_port_handle_t portId);
 
             status_t    start(RecordTrack* recordTrack,
                               AudioSystem::sync_event_t event,
@@ -1376,7 +1380,7 @@
 
             void        dump(int fd, const Vector<String16>& args);
             AudioStreamIn* clearInput();
-            virtual audio_stream_t* stream() const;
+            virtual sp<StreamHalInterface> stream() const;
 
 
     virtual bool        checkForNewParameter_l(const String8& keyValuePair,
@@ -1415,6 +1419,11 @@
     virtual status_t    checkEffectCompatibility_l(const effect_descriptor_t *desc,
                                                    audio_session_t sessionId);
 
+    virtual void        acquireWakeLock_l() {
+                            ThreadBase::acquireWakeLock_l();
+                            mActiveTracks.updatePowerState(this, true /* force */);
+                        }
+
 private:
             // Enter standby if not already in standby, and set mStandby flag
             void    standbyIfNotAlreadyInStandby();
@@ -1426,15 +1435,15 @@
             SortedVector < sp<RecordTrack> >    mTracks;
             // mActiveTracks has dual roles:  it indicates the current active track(s), and
             // is used together with mStartStopCond to indicate start()/stop() progress
-            SortedVector< sp<RecordTrack> >     mActiveTracks;
-            // generation counter for mActiveTracks
-            int                                 mActiveTracksGen;
+            ActiveTracks<RecordTrack>           mActiveTracks;
+
             Condition                           mStartStopCond;
 
             // resampler converts input at HAL Hz to output at AudioRecord client Hz
-            void                               *mRsmpInBuffer; //
+            void                               *mRsmpInBuffer;  // size = mRsmpInFramesOA
             size_t                              mRsmpInFrames;  // size of resampler input in frames
             size_t                              mRsmpInFramesP2;// size rounded up to a power-of-2
+            size_t                              mRsmpInFramesOA;// mRsmpInFramesP2 + over-allocation
 
             // rolling index that is never cleared
             int32_t                             mRsmpInRear;    // last filled frame + 1
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 6b97246..9ca2d63 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -62,10 +62,11 @@
                                 size_t frameCount,
                                 void *buffer,
                                 audio_session_t sessionId,
-                                int uid,
+                                uid_t uid,
                                 bool isOut,
                                 alloc_type alloc = ALLOC_CBLK,
-                                track_type type = TYPE_DEFAULT);
+                                track_type type = TYPE_DEFAULT,
+                                audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
     virtual             ~TrackBase();
     virtual status_t    initCheck() const;
 
@@ -75,7 +76,7 @@
             sp<IMemory> getCblk() const { return mCblkMemory; }
             audio_track_cblk_t* cblk() const { return mCblk; }
             audio_session_t sessionId() const { return mSessionId; }
-            int         uid() const { return mUid; }
+            uid_t       uid() const { return mUid; }
     virtual status_t    setSyncEvent(const sp<SyncEvent>& event);
 
             sp<IMemory> getBuffers() const { return mBufferMemory; }
@@ -153,16 +154,17 @@
                                     // openRecord(), and then adjusted as needed
 
     const audio_session_t mSessionId;
-    int                 mUid;
+    uid_t               mUid;
     Vector < sp<SyncEvent> >mSyncEvents;
     const bool          mIsOut;
-    ServerProxy*        mServerProxy;
+    sp<ServerProxy>     mServerProxy;
     const int           mId;
     sp<NBAIO_Sink>      mTeeSink;
     sp<NBAIO_Source>    mTeeSource;
     bool                mTerminated;
     track_type          mType;      // must be one of TYPE_DEFAULT, TYPE_OUTPUT, TYPE_PATCH ...
     audio_io_handle_t   mThreadIoHandle; // I/O handle of the thread the track is attached to
+    audio_port_handle_t mPortId; // unique ID for this track used by audio policy
 };
 
 // PatchProxyBufferProvider interface is implemented by PatchTrack and PatchRecord.
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 348cac4..f2dd884 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -27,12 +27,12 @@
 
 #include <private/media/AudioTrackShared.h>
 
-#include "AudioMixer.h"
 #include "AudioFlinger.h"
 #include "ServiceUtilities.h"
 
 #include <media/nbaio/Pipe.h>
 #include <media/nbaio/PipeReader.h>
+#include <media/RecordBufferConverter.h>
 #include <audio_utils/minifloat.h>
 
 // ----------------------------------------------------------------------------
@@ -52,7 +52,7 @@
 
 // TODO move to a common header  (Also shared with AudioTrack.cpp)
 #define NANOS_PER_SECOND    1000000000
-#define TIME_TO_NANOS(time) ((uint64_t)time.tv_sec * NANOS_PER_SECOND + time.tv_nsec)
+#define TIME_TO_NANOS(time) ((uint64_t)(time).tv_sec * NANOS_PER_SECOND + (time).tv_nsec)
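A brief illustration of why the macro argument is now parenthesized; the pointer-argument helper below is hypothetical and only shows the expansion difference.

    // Illustration only: with a pointer argument the old expansion "(uint64_t)time.tv_sec"
    // would become "(uint64_t)*tsp.tv_sec", which does not compile ('.' binds tighter than
    // unary '*'); the new "(time).tv_sec" form expands to the well-formed "(*tsp).tv_sec".
    static uint64_t exampleTimeToNanos(const struct timespec *tsp)
    {
        return TIME_TO_NANOS(*tsp);
    }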
 
 namespace android {
 
@@ -72,10 +72,11 @@
             size_t frameCount,
             void *buffer,
             audio_session_t sessionId,
-            int clientUid,
+            uid_t clientUid,
             bool isOut,
             alloc_type alloc,
-            track_type type)
+            track_type type,
+            audio_port_handle_t portId)
     :   RefBase(),
         mThread(thread),
         mClient(client),
@@ -93,17 +94,17 @@
         mFrameCount(frameCount),
         mSessionId(sessionId),
         mIsOut(isOut),
-        mServerProxy(NULL),
         mId(android_atomic_inc(&nextTrackId)),
         mTerminated(false),
         mType(type),
-        mThreadIoHandle(thread->id())
+        mThreadIoHandle(thread->id()),
+        mPortId(portId)
 {
     const uid_t callingUid = IPCThreadState::self()->getCallingUid();
-    if (!isTrustedCallingUid(callingUid) || clientUid == -1) {
-        ALOGW_IF(clientUid != -1 && clientUid != (int)callingUid,
+    if (!isTrustedCallingUid(callingUid) || clientUid == AUDIO_UID_INVALID) {
+        ALOGW_IF(clientUid != AUDIO_UID_INVALID && clientUid != callingUid,
                 "%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, clientUid);
-        clientUid = (int)callingUid;
+        clientUid = callingUid;
     }
     // clientUid contains the uid of the app that is responsible for this track, so we can blame
     // battery usage on it.
@@ -218,7 +219,7 @@
     dumpTee(-1, mTeeSource, mId);
 #endif
     // delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
-    delete mServerProxy;
+    mServerProxy.clear();
     if (mCblk != NULL) {
         if (mClient == 0) {
             delete mCblk;
@@ -342,14 +343,15 @@
             void *buffer,
             const sp<IMemory>& sharedBuffer,
             audio_session_t sessionId,
-            int uid,
+            uid_t uid,
             audio_output_flags_t flags,
-            track_type type)
+            track_type type,
+            audio_port_handle_t portId)
     :   TrackBase(thread, client, sampleRate, format, channelMask, frameCount,
                   (sharedBuffer != 0) ? sharedBuffer->pointer() : buffer,
                   sessionId, uid, true /*isOut*/,
                   (type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
-                  type),
+                  type, portId),
     mFillingUpStatus(FS_INVALID),
     // mRetryCount initialized later when needed
     mSharedBuffer(sharedBuffer),
@@ -364,7 +366,6 @@
     mFastIndex(-1),
     mCachedVolume(1.0),
     mIsInvalid(false),
-    mAudioTrackServerProxy(NULL),
     mResumeToStopping(false),
     mFlushHwPending(false),
     mFlags(flags)
@@ -1141,12 +1142,12 @@
             audio_format_t format,
             audio_channel_mask_t channelMask,
             size_t frameCount,
-            int uid)
+            uid_t uid)
     :   Track(playbackThread, NULL, AUDIO_STREAM_PATCH,
               sampleRate, format, channelMask, frameCount,
               NULL, 0, AUDIO_SESSION_NONE, uid, AUDIO_OUTPUT_FLAG_NONE,
               TYPE_OUTPUT),
-    mActive(false), mSourceThread(sourceThread), mClientProxy(NULL)
+    mActive(false), mSourceThread(sourceThread)
 {
 
     if (mCblk != NULL) {
@@ -1171,7 +1172,6 @@
 AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack()
 {
     clearBufferQueue();
-    delete mClientProxy;
     // superclass destructor will now delete the server proxy and shared memory both refer to
 }
 
@@ -1478,15 +1478,16 @@
             size_t frameCount,
             void *buffer,
             audio_session_t sessionId,
-            int uid,
+            uid_t uid,
             audio_input_flags_t flags,
-            track_type type)
+            track_type type,
+            audio_port_handle_t portId)
     :   TrackBase(thread, client, sampleRate, format,
                   channelMask, frameCount, buffer, sessionId, uid, false /*isOut*/,
                   (type == TYPE_DEFAULT) ?
                           ((flags & AUDIO_INPUT_FLAG_FAST) ? ALLOC_PIPE : ALLOC_CBLK) :
                           ((buffer == NULL) ? ALLOC_LOCAL : ALLOC_NONE),
-                  type),
+                  type, portId),
         mOverflow(false),
         mFramesToDrop(0),
         mResamplerBufferProvider(NULL), // initialize in case of early constructor exit
diff --git a/services/audioflinger/tests/Android.mk b/services/audioflinger/tests/Android.mk
deleted file mode 100644
index 3505e0f..0000000
--- a/services/audioflinger/tests/Android.mk
+++ /dev/null
@@ -1,68 +0,0 @@
-# Build the unit tests for audioflinger
-
-#
-# resampler unit test
-#
-LOCAL_PATH:= $(call my-dir)
-include $(CLEAR_VARS)
-
-LOCAL_SHARED_LIBRARIES := \
-	liblog \
-	libutils \
-	libcutils \
-	libaudioutils \
-	libaudioresampler
-
-LOCAL_C_INCLUDES := \
-	$(call include-path-for, audio-utils) \
-	frameworks/av/services/audioflinger
-
-LOCAL_SRC_FILES := \
-	resampler_tests.cpp
-
-LOCAL_MODULE := resampler_tests
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_CFLAGS := -Werror -Wall
-
-include $(BUILD_NATIVE_TEST)
-
-#
-# audio mixer test tool
-#
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
-	test-mixer.cpp \
-	../AudioMixer.cpp.arm \
-	../BufferProviders.cpp
-
-LOCAL_C_INCLUDES := \
-	$(call include-path-for, audio-effects) \
-	$(call include-path-for, audio-utils) \
-	frameworks/av/services/audioflinger \
-	external/sonic
-
-LOCAL_STATIC_LIBRARIES := \
-	libsndfile
-
-LOCAL_SHARED_LIBRARIES := \
-	libeffects \
-	libnbaio \
-	libaudioresampler \
-	libaudioutils \
-	libdl \
-	libcutils \
-	libutils \
-	liblog \
-	libsonic
-
-LOCAL_MODULE:= test-mixer
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_CXX_STL := libc++
-
-LOCAL_CFLAGS := -Werror -Wall
-
-include $(BUILD_EXECUTABLE)
diff --git a/services/audioflinger/tests/build_and_run_all_unit_tests.sh b/services/audioflinger/tests/build_and_run_all_unit_tests.sh
deleted file mode 100755
index 7f4d456..0000000
--- a/services/audioflinger/tests/build_and_run_all_unit_tests.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-if [ -z "$ANDROID_BUILD_TOP" ]; then
-    echo "Android build environment not set"
-    exit -1
-fi
-
-# ensure we have mm
-. $ANDROID_BUILD_TOP/build/envsetup.sh
-
-pushd $ANDROID_BUILD_TOP/frameworks/av/services/audioflinger/
-pwd
-mm
-
-echo "waiting for device"
-adb root && adb wait-for-device remount
-adb push $OUT/system/lib/libaudioresampler.so /system/lib
-adb push $OUT/data/nativetest/resampler_tests /system/bin
-
-sh $ANDROID_BUILD_TOP/frameworks/av/services/audioflinger/tests/run_all_unit_tests.sh
-
-popd
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index 69d22c0..7cd2b81 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -4,23 +4,12 @@
 
 LOCAL_SRC_FILES:= \
     service/AudioPolicyService.cpp \
-    service/AudioPolicyEffects.cpp
-
-ifeq ($(USE_LEGACY_AUDIO_POLICY), 1)
-LOCAL_SRC_FILES += \
-    service/AudioPolicyInterfaceImplLegacy.cpp \
-    service/AudioPolicyClientImplLegacy.cpp
-
-    LOCAL_CFLAGS += -DUSE_LEGACY_AUDIO_POLICY
-else
-LOCAL_SRC_FILES += \
+    service/AudioPolicyEffects.cpp \
     service/AudioPolicyInterfaceImpl.cpp \
     service/AudioPolicyClientImpl.cpp
-endif
 
 LOCAL_C_INCLUDES := \
     $(TOPDIR)frameworks/av/services/audioflinger \
-    $(call include-path-for, audio-effects) \
     $(call include-path-for, audio-utils) \
     $(TOPDIR)frameworks/av/services/audiopolicy/common/include \
     $(TOPDIR)frameworks/av/services/audiopolicy/engine/interface \
@@ -33,15 +22,11 @@
     libbinder \
     libaudioclient \
     libhardware_legacy \
-    libserviceutility
-
-ifneq ($(USE_LEGACY_AUDIO_POLICY), 1)
-LOCAL_SHARED_LIBRARIES += \
-    libaudiopolicymanager
-endif
+    libserviceutility \
+    libaudiopolicymanager \
+    libmedia_helper
 
 LOCAL_STATIC_LIBRARIES := \
-    libmedia_helper \
     libaudiopolicycomponents
 
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
@@ -53,8 +38,6 @@
 
 include $(BUILD_SHARED_LIBRARY)
 
-ifneq ($(USE_LEGACY_AUDIO_POLICY), 1)
-
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:= managerdefault/AudioPolicyManager.cpp
@@ -91,13 +74,12 @@
     $(TOPDIR)frameworks/av/services/audiopolicy/utilities
 
 LOCAL_STATIC_LIBRARIES := \
-    libmedia_helper \
     libaudiopolicycomponents
 
-ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
-LOCAL_STATIC_LIBRARIES += libxml2
+LOCAL_SHARED_LIBRARIES += libmedia_helper
 
-LOCAL_SHARED_LIBRARIES += libicuuc
+ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
+LOCAL_SHARED_LIBRARIES += libicuuc libxml2
 
 LOCAL_CFLAGS += -DUSE_XML_AUDIO_POLICY_CONF
 endif #ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
@@ -136,7 +118,6 @@
 include $(BUILD_SHARED_LIBRARY)
 
 endif
-endif
 
 #######################################################################
 # Recursive call sub-folder Android.mk
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index d1b86da..60ed1d6 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -21,8 +21,6 @@
 #include <media/AudioPolicy.h>
 #include <utils/String8.h>
 
-#include <hardware/audio_policy.h>
-
 namespace android {
 
 // ----------------------------------------------------------------------------
@@ -67,6 +65,16 @@
         API_INPUT_TELEPHONY_RX, // used for capture from telephony RX path
     } input_type_t;
 
+   enum {
+        API_INPUT_CONCURRENCY_NONE = 0,
+        API_INPUT_CONCURRENCY_CALL = (1 << 0),      // Concurrency with a call
+        API_INPUT_CONCURRENCY_CAPTURE = (1 << 1),   // Concurrency with another capture
+
+        API_INPUT_CONCURRENCY_ALL = (API_INPUT_CONCURRENCY_CALL | API_INPUT_CONCURRENCY_CAPTURE),
+   };
+
+   typedef uint32_t concurrency_type__mask_t;
+
 public:
     virtual ~AudioPolicyInterface() {}
     //
@@ -112,12 +120,10 @@
                                         audio_session_t session,
                                         audio_stream_type_t *stream,
                                         uid_t uid,
-                                        uint32_t samplingRate,
-                                        audio_format_t format,
-                                        audio_channel_mask_t channelMask,
+                                        const audio_config_t *config,
                                         audio_output_flags_t flags,
                                         int selectedDeviceId,
-                                        const audio_offload_info_t *offloadInfo) = 0;
+                                        audio_port_handle_t *portId) = 0;
     // indicates to the audio policy manager that the output starts being used by corresponding stream.
     virtual status_t startOutput(audio_io_handle_t output,
                                  audio_stream_type_t stream,
@@ -136,15 +142,15 @@
                                      audio_io_handle_t *input,
                                      audio_session_t session,
                                      uid_t uid,
-                                     uint32_t samplingRate,
-                                     audio_format_t format,
-                                     audio_channel_mask_t channelMask,
+                                     const audio_config_base_t *config,
                                      audio_input_flags_t flags,
                                      audio_port_handle_t selectedDeviceId,
-                                     input_type_t *inputType) = 0;
+                                     input_type_t *inputType,
+                                     audio_port_handle_t *portId) = 0;
     // indicates to the audio policy manager that the input starts being used.
     virtual status_t startInput(audio_io_handle_t input,
-                                audio_session_t session) = 0;
+                                audio_session_t session,
+                                concurrency_type__mask_t *concurrency) = 0;
     // indicates to the audio policy manager that the input stops being used.
     virtual status_t stopInput(audio_io_handle_t input,
                                audio_session_t session) = 0;
@@ -229,9 +235,9 @@
 
     virtual status_t startAudioSource(const struct audio_port_config *source,
                                       const audio_attributes_t *attributes,
-                                      audio_io_handle_t *handle,
+                                      audio_patch_handle_t *handle,
                                       uid_t uid) = 0;
-    virtual status_t stopAudioSource(audio_io_handle_t handle) = 0;
+    virtual status_t stopAudioSource(audio_patch_handle_t handle) = 0;
 
     virtual status_t setMasterMono(bool mono) = 0;
     virtual status_t getMasterMono(bool *mono) = 0;
diff --git a/services/audiopolicy/common/include/Volume.h b/services/audiopolicy/common/include/Volume.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
old mode 100755
new mode 100644
index 55ee91f..31f0550
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -33,9 +33,9 @@
 
 /**
  * A device mask for all audio input devices that are considered "virtual" when evaluating
- * active inputs in getActiveInput()
+ * active inputs in getActiveInputs()
  */
-#define APM_AUDIO_IN_DEVICE_VIRTUAL_ALL  (AUDIO_DEVICE_IN_REMOTE_SUBMIX|AUDIO_DEVICE_IN_FM_TUNER)
+#define APM_AUDIO_IN_DEVICE_VIRTUAL_ALL  (AUDIO_DEVICE_IN_REMOTE_SUBMIX)
 
 
 /**
@@ -47,16 +47,6 @@
 #define APM_AUDIO_DEVICE_IN_MATCH_ADDRESS_ALL (AUDIO_DEVICE_IN_REMOTE_SUBMIX|AUDIO_DEVICE_IN_BUS)
 
 /**
- * Stub audio output device. Used in policy configuration file on platforms without audio outputs.
- * This alias value to AUDIO_DEVICE_OUT_DEFAULT is only used in the audio policy context.
- */
-#define AUDIO_DEVICE_OUT_STUB AUDIO_DEVICE_OUT_DEFAULT
-/**
- * Stub audio input device. Used in policy configuration file on platforms without audio inputs.
- * This alias value to AUDIO_DEVICE_IN_DEFAULT is only used in the audio policy context.
- */
-#define AUDIO_DEVICE_IN_STUB AUDIO_DEVICE_IN_DEFAULT
-/**
  * Alias to AUDIO_DEVICE_OUT_DEFAULT defined for clarification when this value is used by volume
 * control APIs (e.g. setStreamVolumeIndex()).
  */
@@ -109,6 +99,44 @@
             ((device & APM_AUDIO_DEVICE_OUT_MATCH_ADDRESS_ALL) != 0));
 }
 
+/**
+ * Returns the priority of a given audio source for capture. The priority is used when more than one
+ * capture session is active on a given input stream to determine which session drives routing and
+ * effect configuration.
+ *
+ * @param[in] inputSource to consider. Valid sources are:
+ * - AUDIO_SOURCE_VOICE_COMMUNICATION
+ * - AUDIO_SOURCE_CAMCORDER
+ * - AUDIO_SOURCE_MIC
+ * - AUDIO_SOURCE_FM_TUNER
+ * - AUDIO_SOURCE_VOICE_RECOGNITION
+ * - AUDIO_SOURCE_HOTWORD
+ *
+ * @return the corresponding input source priority or 0 if priority is irrelevant for this source.
+ *      This happens when the specified source cannot share a given input stream (e.g remote submix)
+ *      The higher the value, the higher the priority.
+ */
+static inline int32_t source_priority(audio_source_t inputSource)
+{
+    switch (inputSource) {
+    case AUDIO_SOURCE_VOICE_COMMUNICATION:
+        return 6;
+    case AUDIO_SOURCE_CAMCORDER:
+        return 5;
+    case AUDIO_SOURCE_MIC:
+        return 4;
+    case AUDIO_SOURCE_FM_TUNER:
+        return 3;
+    case AUDIO_SOURCE_VOICE_RECOGNITION:
+        return 2;
+    case AUDIO_SOURCE_HOTWORD:
+        return 1;
+    default:
+        break;
+    }
+    return 0;
+}
+
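A minimal sketch (not part of this change) of how source_priority() can drive the selection described in the comment above; pickDrivingSource() and its Vector argument are hypothetical.

    // Sketch only: choose the active source that should drive routing on a shared input.
    static audio_source_t pickDrivingSource(const Vector<audio_source_t> &activeSources)
    {
        audio_source_t best = AUDIO_SOURCE_DEFAULT;
        int32_t bestPriority = 0;
        for (size_t i = 0; i < activeSources.size(); i++) {
            const int32_t priority = source_priority(activeSources[i]);
            if (priority > bestPriority) {   // higher value wins; 0 means the source does not share
                bestPriority = priority;
                best = activeSources[i];
            }
        }
        return best;
    }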
 /* Indicates if audio formats are equivalent when considering a match between
  * audio HAL supported formats and client requested formats
  */
diff --git a/services/audiopolicy/common/managerdefinitions/Android.mk b/services/audiopolicy/common/managerdefinitions/Android.mk
index 02118c4..7968103 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.mk
+++ b/services/audiopolicy/common/managerdefinitions/Android.mk
@@ -41,9 +41,7 @@
 
 LOCAL_SRC_FILES += src/Serializer.cpp
 
-LOCAL_STATIC_LIBRARIES += libxml2
-
-LOCAL_SHARED_LIBRARIES += libicuuc
+LOCAL_SHARED_LIBRARIES += libicuuc libxml2
 
 LOCAL_C_INCLUDES += \
     $(TOPDIR)external/libxml2/include \
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index e689320..5445413 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -63,7 +63,9 @@
                              const sp<AudioSession>& audioSession);
     status_t removeAudioSession(audio_session_t session);
     sp<AudioSession> getAudioSession(audio_session_t session) const;
-    AudioSessionCollection getActiveAudioSessions() const;
+    AudioSessionCollection getAudioSessions(bool activeOnly) const;
+    size_t getAudioSessionCount(bool activeOnly) const;
+    audio_source_t getHighestPrioritySource(bool activeOnly) const;
 
     // implementation of AudioSessionInfoProvider
     virtual audio_config_base_t getConfig() const;
@@ -102,7 +104,7 @@
      * Only considers inputs from physical devices (e.g. main mic, headset mic) when
      * ignoreVirtualInputs is true.
      */
-    audio_io_handle_t getActiveInput(bool ignoreVirtualInputs = true);
+    Vector<sp <AudioInputDescriptor> > getActiveInputs(bool ignoreVirtualInputs = true);
 
     audio_devices_t getSupportedDevices(audio_io_handle_t handle) const;
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
index c9652de..0bacef7 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
@@ -19,7 +19,7 @@
 #include <utils/RefBase.h>
 #include <media/AudioPolicy.h>
 #include <utils/KeyedVector.h>
-#include <hardware/audio.h>
+#include <system/audio.h>
 #include <utils/String8.h>
 
 namespace android {
@@ -43,6 +43,8 @@
 
     void setMix(AudioMix &mix);
 
+    status_t dump(int fd, int spaces, int index) const;
+
 private:
     AudioMix    mMix;                   // Audio policy mix descriptor
     sp<SwAudioOutputDescriptor> mOutput;  // Corresponding output stream
@@ -77,6 +79,8 @@
                                                   AudioMix **policyMix);
 
     status_t getInputMixForAttr(audio_attributes_t attr, AudioMix **policyMix);
+
+    status_t dump(int fd) const;
 };
 
 }; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
index d00d49f..ded2285 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -118,7 +118,7 @@
                                         audio_format_t targetFormat);
 
     audio_module_handle_t getModuleHandle() const;
-    uint32_t getModuleVersion() const;
+    uint32_t getModuleVersionMajor() const;
     const char *getModuleName() const;
 
     bool useInputChannelMask() const
@@ -166,6 +166,10 @@
     virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
                                    const struct audio_port_config *srcConfig = NULL) const = 0;
     virtual sp<AudioPort> getAudioPort() const = 0;
+    virtual bool hasSameHwModuleAs(const sp<AudioPortConfig>& other) const {
+        return (other != 0) &&
+                (other->getAudioPort()->getModuleHandle() == getAudioPort()->getModuleHandle());
+    }
     uint32_t mSamplingRate;
     audio_format_t mFormat;
     audio_channel_mask_t mChannelMask;
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
index 388c25d..18fba25 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
@@ -91,8 +91,10 @@
     uint32_t getOpenCount() const;
 
     AudioSessionCollection getActiveSessions() const;
+    size_t getActiveSessionCount() const;
     bool hasActiveSession() const;
     bool isSourceActive(audio_source_t source) const;
+    audio_source_t getHighestPrioritySource(bool activeOnly) const;
 
     // implementation of AudioSessionInfoUpdateListener
     virtual void onSessionInfoUpdate() const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h
index 4ab7cf0..7e1e24d 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h
@@ -50,7 +50,7 @@
 };
 
 class AudioSourceCollection :
-        public DefaultKeyedVector< audio_io_handle_t, sp<AudioSourceDescriptor> >
+        public DefaultKeyedVector< audio_patch_handle_t, sp<AudioSourceDescriptor> >
 {
 public:
     status_t dump(int fd) const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
index ab650c0..9ea0aea 100644
--- a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
@@ -17,7 +17,7 @@
 #pragma once
 
 #include <RoutingStrategy.h>
-#include <hardware/audio_effect.h>
+#include <system/audio_effect.h>
 #include <utils/KeyedVector.h>
 #include <utils/RefBase.h>
 #include <utils/Errors.h>
diff --git a/services/audiopolicy/common/managerdefinitions/include/HwModule.h b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
index 3a31672..29b6b9c 100644
--- a/services/audiopolicy/common/managerdefinitions/include/HwModule.h
+++ b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
@@ -18,7 +18,6 @@
 
 #include "DeviceDescriptor.h"
 #include "AudioRoute.h"
-#include <hardware/audio.h>
 #include <utils/RefBase.h>
 #include <utils/String8.h>
 #include <utils/Errors.h>
@@ -40,7 +39,7 @@
 class HwModule : public RefBase
 {
 public:
-    explicit HwModule(const char *name, uint32_t halVersion = AUDIO_DEVICE_API_VERSION_MIN);
+    explicit HwModule(const char *name, uint32_t halVersionMajor = 0, uint32_t halVersionMinor = 0);
     ~HwModule();
 
     const char *getName() const { return mName.string(); }
@@ -55,8 +54,11 @@
 
     void setProfiles(const IOProfileCollection &profiles);
 
-    void setHalVersion(uint32_t halVersion) { mHalVersion = halVersion; }
-    uint32_t getHalVersion() const { return mHalVersion; }
+    void setHalVersion(uint32_t major, uint32_t minor) {
+        mHalVersion = (major << 8) | (minor & 0xff);
+    }
+    uint32_t getHalVersionMajor() const { return mHalVersion >> 8; }
+    uint32_t getHalVersionMinor() const { return mHalVersion & 0xff; }
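A small worked example of the packing used by these accessors; the 3.0 version number is only an illustration.

    #include <cassert>
    #include <cstdint>
    // Illustration only: setHalVersion(3, 0) packs to 0x0300 and round-trips through the getters.
    static void halVersionPackingExample()
    {
        const uint32_t halVersion = (3u << 8) | (0u & 0xff);   // same packing as setHalVersion(3, 0)
        assert((halVersion >> 8) == 3u);                        // getHalVersionMajor() would return 3
        assert((halVersion & 0xff) == 0u);                      // getHalVersionMinor() would return 0
    }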
 
     sp<DeviceDescriptor> getRouteSinkDevice(const sp<AudioRoute> &route) const;
     DeviceVector getRouteSourceDevices(const sp<AudioRoute> &route) const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h
index 424df84..8822927 100644
--- a/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h
@@ -20,7 +20,7 @@
 #include <utils/KeyedVector.h>
 #include <utils/StrongPointer.h>
 #include <utils/SortedVector.h>
-#include <hardware/audio.h>
+#include <system/audio.h>
 
 namespace android {
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
index 1612714..9e705aa 100644
--- a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
+++ b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
@@ -16,190 +16,37 @@
 
 #pragma once
 
+#include <media/TypeConverter.h>
+
 #include "policy.h"
 #include <Volume.h>
-#include <system/audio.h>
-#include <convert/convert.h>
-#include <utils/Log.h>
-#include <string>
-#include <utils/Vector.h>
-#include <utils/SortedVector.h>
 
 namespace android {
 
-struct SampleRateTraits
-{
-    typedef uint32_t Type;
-    typedef SortedVector<Type> Collection;
-};
-struct DeviceTraits
-{
-    typedef audio_devices_t Type;
-    typedef Vector<Type> Collection;
-};
-struct OutputFlagTraits
-{
-    typedef audio_output_flags_t Type;
-    typedef Vector<Type> Collection;
-};
-struct InputFlagTraits
-{
-    typedef audio_input_flags_t Type;
-    typedef Vector<Type> Collection;
-};
-struct FormatTraits
-{
-    typedef audio_format_t Type;
-    typedef Vector<Type> Collection;
-};
-struct ChannelTraits
-{
-    typedef audio_channel_mask_t Type;
-    typedef SortedVector<Type> Collection;
-};
-struct OutputChannelTraits : public ChannelTraits {};
-struct InputChannelTraits : public ChannelTraits {};
-struct ChannelIndexTraits : public ChannelTraits {};
-struct GainModeTraits
-{
-    typedef audio_gain_mode_t Type;
-    typedef Vector<Type> Collection;
-};
-struct StreamTraits
-{
-  typedef audio_stream_type_t Type;
-  typedef Vector<Type> Collection;
-};
 struct DeviceCategoryTraits
 {
-  typedef device_category Type;
-  typedef Vector<Type> Collection;
+    typedef device_category Type;
+    typedef Vector<Type> Collection;
 };
-template <typename T>
-struct DefaultTraits
+struct MixTypeTraits
 {
-  typedef T Type;
-  typedef Vector<Type> Collection;
+    typedef int32_t Type;
+    typedef Vector<Type> Collection;
+};
+struct RouteFlagTraits
+{
+    typedef uint32_t Type;
+    typedef Vector<Type> Collection;
+};
+struct RuleTraits
+{
+    typedef uint32_t Type;
+    typedef Vector<Type> Collection;
 };
 
-template <class Traits>
-static void collectionFromString(const std::string &str, typename Traits::Collection &collection,
-                                 const char *del = "|")
-{
-    char *literal = strdup(str.c_str());
-    for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
-        typename Traits::Type value;
-        if (utilities::convertTo<std::string, typename Traits::Type >(cstr, value)) {
-            collection.add(value);
-        }
-    }
-    free(literal);
-}
-
-template <class Traits>
-class TypeConverter
-{
-public:
-    static bool toString(const typename Traits::Type &value, std::string &str);
-
-    static bool fromString(const std::string &str, typename Traits::Type &result);
-
-    static void collectionFromString(const std::string &str,
-                                     typename Traits::Collection &collection,
-                                     const char *del = "|");
-
-    static uint32_t maskFromString(const std::string &str, const char *del = "|");
-
-protected:
-    struct Table {
-        const char *literal;
-        typename Traits::Type value;
-    };
-
-    static const Table mTable[];
-    static const size_t mSize;
-};
-
-typedef TypeConverter<DeviceTraits> DeviceConverter;
-typedef TypeConverter<OutputFlagTraits> OutputFlagConverter;
-typedef TypeConverter<InputFlagTraits> InputFlagConverter;
-typedef TypeConverter<FormatTraits> FormatConverter;
-typedef TypeConverter<OutputChannelTraits> OutputChannelConverter;
-typedef TypeConverter<InputChannelTraits> InputChannelConverter;
-typedef TypeConverter<ChannelIndexTraits> ChannelIndexConverter;
-typedef TypeConverter<GainModeTraits> GainModeConverter;
-typedef TypeConverter<StreamTraits> StreamTypeConverter;
 typedef TypeConverter<DeviceCategoryTraits> DeviceCategoryConverter;
-
-inline
-static SampleRateTraits::Collection samplingRatesFromString(const std::string &samplingRates,
-                                                            const char *del = "|")
-{
-    SampleRateTraits::Collection samplingRateCollection;
-    collectionFromString<SampleRateTraits>(samplingRates, samplingRateCollection, del);
-    return samplingRateCollection;
-}
-
-inline
-static FormatTraits::Collection formatsFromString(const std::string &formats, const char *del = "|")
-{
-    FormatTraits::Collection formatCollection;
-    FormatConverter::collectionFromString(formats, formatCollection, del);
-    return formatCollection;
-}
-
-inline
-static audio_format_t formatFromString(const std::string &literalFormat)
-{
-    audio_format_t format;
-    if (literalFormat.empty()) {
-        return gDynamicFormat;
-    }
-    FormatConverter::fromString(literalFormat, format);
-    return format;
-}
-
-inline
-static audio_channel_mask_t channelMaskFromString(const std::string &literalChannels)
-{
-    audio_channel_mask_t channels;
-    if (!OutputChannelConverter::fromString(literalChannels, channels) ||
-            !InputChannelConverter::fromString(literalChannels, channels)) {
-        return AUDIO_CHANNEL_INVALID;
-    }
-    return channels;
-}
-
-inline
-static ChannelTraits::Collection channelMasksFromString(const std::string &channels,
-                                                        const char *del = "|")
-{
-    ChannelTraits::Collection channelMaskCollection;
-    OutputChannelConverter::collectionFromString(channels, channelMaskCollection, del);
-    InputChannelConverter::collectionFromString(channels, channelMaskCollection, del);
-    ChannelIndexConverter::collectionFromString(channels, channelMaskCollection, del);
-    return channelMaskCollection;
-}
-
-inline
-static InputChannelTraits::Collection inputChannelMasksFromString(const std::string &inChannels,
-                                                                  const char *del = "|")
-{
-    InputChannelTraits::Collection inputChannelMaskCollection;
-    InputChannelConverter::collectionFromString(inChannels, inputChannelMaskCollection, del);
-    ChannelIndexConverter::collectionFromString(inChannels, inputChannelMaskCollection, del);
-    return inputChannelMaskCollection;
-}
-
-inline
-static OutputChannelTraits::Collection outputChannelMasksFromString(const std::string &outChannels,
-                                                                    const char *del = "|")
-{
-    OutputChannelTraits::Collection outputChannelMaskCollection;
-    OutputChannelConverter::collectionFromString(outChannels, outputChannelMaskCollection, del);
-    ChannelIndexConverter::collectionFromString(outChannels, outputChannelMaskCollection, del);
-    return outputChannelMaskCollection;
-}
+typedef TypeConverter<MixTypeTraits> MixTypeConverter;
+typedef TypeConverter<RouteFlagTraits> RouteFlagTypeConverter;
+typedef TypeConverter<RuleTraits> RuleTypeConverter;
 
 }; // namespace android
-
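
[Illustrative sketch, not part of the patch] The converters kept in this header are used the same way as the shared ones in media/TypeConverter.h; assuming toString() and maskToString() keep the signatures exercised later in this patch:

    // Convert a single enum value to its literal name.
    std::string mixTypeLiteral;
    if (android::MixTypeConverter::toString(MIX_TYPE_PLAYERS, mixTypeLiteral)) {
        // mixTypeLiteral now holds "MIX_TYPE_PLAYERS"
    }
    // Convert a bit mask into a '|'-separated list of flag names.
    std::string routeFlagsLiteral;
    android::RouteFlagTypeConverter::maskToString(MIX_ROUTE_FLAG_ALL, routeFlagsLiteral);
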
diff --git a/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h b/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h
index 10f0766..e7fcefc 100644
--- a/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h
+++ b/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h
@@ -18,7 +18,6 @@
 
 #include "IVolumeCurvesCollection.h"
 #include <policy.h>
-#include <hardware/audio.h>
 #include <utils/RefBase.h>
 #include <utils/String8.h>
 #include <utils/SortedVector.h>
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index c7d2ee4..44f9637 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -132,6 +132,12 @@
     return mSessions.isSourceActive(source);
 }
 
+audio_source_t AudioInputDescriptor::getHighestPrioritySource(bool activeOnly) const
+{
+
+    return mSessions.getHighestPrioritySource(activeOnly);
+}
+
 bool AudioInputDescriptor::isSoundTrigger() const {
     // sound trigger and non sound trigger sessions are not mixed
     // on a given input
@@ -143,9 +149,22 @@
     return mSessions.valueFor(session);
 }
 
-AudioSessionCollection AudioInputDescriptor::getActiveAudioSessions() const
+AudioSessionCollection AudioInputDescriptor::getAudioSessions(bool activeOnly) const
 {
-    return mSessions.getActiveSessions();
+    if (activeOnly) {
+        return mSessions.getActiveSessions();
+    } else {
+        return mSessions;
+    }
+}
+
+size_t AudioInputDescriptor::getAudioSessionCount(bool activeOnly) const
+{
+    if (activeOnly) {
+        return mSessions.getActiveSessionCount();
+    } else {
+        return mSessions.size();
+    }
 }
 
 status_t AudioInputDescriptor::addAudioSession(audio_session_t session,
@@ -236,17 +255,19 @@
     return count;
 }
 
-audio_io_handle_t AudioInputCollection::getActiveInput(bool ignoreVirtualInputs)
+Vector<sp <AudioInputDescriptor> > AudioInputCollection::getActiveInputs(bool ignoreVirtualInputs)
 {
+    Vector<sp <AudioInputDescriptor> > activeInputs;
+
     for (size_t i = 0; i < size(); i++) {
         const sp<AudioInputDescriptor>  inputDescriptor = valueAt(i);
         if ((inputDescriptor->isActive())
                 && (!ignoreVirtualInputs ||
                     !is_virtual_input_device(inputDescriptor->mDevice))) {
-            return keyAt(i);
+            activeInputs.add(inputDescriptor);
         }
     }
-    return 0;
+    return activeInputs;
 }
 
 audio_devices_t AudioInputCollection::getSupportedDevices(audio_io_handle_t handle) const
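
[Illustrative sketch, not part of the patch] getActiveInput() used to return at most one handle; getActiveInputs() returns every active descriptor, so callers iterate. A hypothetical caller, mirroring the AudioPolicyManager changes further down in this patch:

    Vector<sp<AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
    for (size_t i = 0; i < activeInputs.size(); i++) {
        const sp<AudioInputDescriptor>& desc = activeInputs[i];
        // Each descriptor carries its own io handle, so per-input handling no
        // longer assumes a single active input.
        setInputDevice(desc->mIoHandle, getNewInputDevice(desc));
    }
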
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 1dbc3d0..93b7f47 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -81,7 +81,7 @@
         return sharesHwModuleWith(outputDesc->subOutput1()) ||
                     sharesHwModuleWith(outputDesc->subOutput2());
     } else {
-        return (getModuleHandle() == outputDesc->getModuleHandle());
+        return hasSameHwModuleAs(outputDesc);
     }
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
index 6059009..c2981a1 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -55,7 +55,7 @@
     for (size_t i = 0; i < mPatch.num_sources; i++) {
         if (mPatch.sources[i].type == AUDIO_PORT_TYPE_DEVICE) {
             std::string device;
-            DeviceConverter::toString(mPatch.sources[i].ext.device.type, device);
+            deviceToString(mPatch.sources[i].ext.device.type, device);
             snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
                      mPatch.sources[i].id,
                      device.c_str());
@@ -70,7 +70,7 @@
     for (size_t i = 0; i < mPatch.num_sinks; i++) {
         if (mPatch.sinks[i].type == AUDIO_PORT_TYPE_DEVICE) {
             std::string device;
-            DeviceConverter::toString(mPatch.sinks[i].ext.device.type, device);
+            deviceToString(mPatch.sinks[i].ext.device.type, device);
             snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
                      mPatch.sinks[i].id,
                      device.c_str());
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index 02833a9..08930f1 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -18,6 +18,7 @@
 //#define LOG_NDEBUG 0
 
 #include "AudioPolicyMix.h"
+#include "TypeConverter.h"
 #include "HwModule.h"
 #include "AudioPort.h"
 #include "IOProfile.h"
@@ -51,6 +52,66 @@
     return &mMix;
 }
 
+status_t AudioPolicyMix::dump(int fd, int spaces, int index) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, "%*sAudio Policy Mix %d:\n", spaces, "", index+1);
+    result.append(buffer);
+    std::string mixTypeLiteral;
+    if (!MixTypeConverter::toString(mMix.mMixType, mixTypeLiteral)) {
+        ALOGE("%s: failed to convert mix type %d", __FUNCTION__, mMix.mMixType);
+        return BAD_VALUE;
+    }
+    snprintf(buffer, SIZE, "%*s- mix type: %s\n", spaces, "", mixTypeLiteral.c_str());
+    result.append(buffer);
+    std::string routeFlagLiteral;
+    RouteFlagTypeConverter::maskToString(mMix.mRouteFlags, routeFlagLiteral);
+    snprintf(buffer, SIZE, "%*s- Route Flags: %s\n", spaces, "", routeFlagLiteral.c_str());
+    result.append(buffer);
+    std::string deviceLiteral;
+    deviceToString(mMix.mDeviceType, deviceLiteral);
+    snprintf(buffer, SIZE, "%*s- device type: %s\n", spaces, "", deviceLiteral.c_str());
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- device address: %s\n", spaces, "", mMix.mDeviceAddress.string());
+    result.append(buffer);
+
+    int indexCriterion = 0;
+    for (const auto &criterion : mMix.mCriteria) {
+        snprintf(buffer, SIZE, "%*s- Criterion %d:\n", spaces + 2, "", indexCriterion++);
+        result.append(buffer);
+        std::string usageLiteral;
+        if (!UsageTypeConverter::toString(criterion.mValue.mUsage, usageLiteral)) {
+            ALOGE("%s: failed to convert usage %d", __FUNCTION__, criterion.mValue.mUsage);
+            return BAD_VALUE;
+        }
+        snprintf(buffer, SIZE, "%*s- Usage:%s\n", spaces + 4, "", usageLiteral.c_str());
+        result.append(buffer);
+        if (mMix.mMixType == MIX_TYPE_RECORDERS) {
+            std::string sourceLiteral;
+            if (!SourceTypeConverter::toString(criterion.mValue.mSource, sourceLiteral)) {
+                ALOGE("%s: failed to convert source %d", __FUNCTION__, criterion.mValue.mSource);
+                return BAD_VALUE;
+            }
+            snprintf(buffer, SIZE, "%*s- Source:%s\n", spaces + 4, "", sourceLiteral.c_str());
+            result.append(buffer);
+        }
+        snprintf(buffer, SIZE, "%*s- Uid:%d\n", spaces + 4, "", criterion.mValue.mUid);
+        result.append(buffer);
+        std::string ruleLiteral;
+        if (!RuleTypeConverter::toString(criterion.mRule, ruleLiteral)) {
+            ALOGE("%s: failed to convert source %d", __FUNCTION__,criterion.mRule);
+            return BAD_VALUE;
+        }
+        snprintf(buffer, SIZE, "%*s- Rule:%s\n", spaces + 4, "", ruleLiteral.c_str());
+        result.append(buffer);
+    }
+    write(fd, result.string(), result.size());
+    return NO_ERROR;
+}
+
 status_t AudioPolicyMixCollection::registerMix(const String8& address, AudioMix mix,
                                                sp<SwAudioOutputDescriptor> desc)
 {
@@ -288,4 +349,14 @@
     return NO_ERROR;
 }
 
+status_t AudioPolicyMixCollection::dump(int fd) const
+{
+    std::string log("\nAudio Policy Mix:\n");
+    write(fd, log.c_str(), log.size());
+    for (size_t i = 0; i < size(); i++) {
+        valueAt(i)->dump(fd, 2, i);
+    }
+    return NO_ERROR;
+}
+
 }; //namespace android
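
[Illustrative output, hypothetical values] With the new dump() methods, a registered dynamic mix appears in the audio policy dump roughly as:

    Audio Policy Mix:
      Audio Policy Mix 1:
      - mix type: MIX_TYPE_PLAYERS
      - Route Flags: MIX_ROUTE_FLAG_LOOP_BACK
      - device type: AUDIO_DEVICE_OUT_REMOTE_SUBMIX
      - device address: <address>
        - Criterion 0:
          - Usage:AUDIO_USAGE_MEDIA
          - Uid:<uid>
          - Rule:RULE_MATCH_ATTRIBUTE_USAGE
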
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index 31bf95c..aac23b4 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -50,12 +50,12 @@
     return mModule->mHandle;
 }
 
-uint32_t AudioPort::getModuleVersion() const
+uint32_t AudioPort::getModuleVersionMajor() const
 {
     if (mModule == 0) {
         return 0;
     }
-    return mModule->getHalVersion();
+    return mModule->getHalVersionMajor();
 }
 
 const char *AudioPort::getModuleName() const
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
index a246c3d..dbdcca7 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
@@ -18,6 +18,7 @@
 //#define LOG_NDEBUG 0
 
 #include <AudioPolicyInterface.h>
+#include "policy.h"
 #include "AudioSession.h"
 #include "AudioGain.h"
 #include "TypeConverter.h"
@@ -215,9 +216,20 @@
     return activeSessions;
 }
 
+size_t AudioSessionCollection::getActiveSessionCount() const
+{
+    size_t activeCount = 0;
+    for (size_t i = 0; i < size(); i++) {
+        if (valueAt(i)->activeCount() != 0) {
+            activeCount++;
+        }
+    }
+    return activeCount;
+}
+
 bool AudioSessionCollection::hasActiveSession() const
 {
-    return getActiveSessions().size() != 0;
+    return getActiveSessionCount() != 0;
 }
 
 bool AudioSessionCollection::isSourceActive(audio_source_t source) const
@@ -237,6 +249,25 @@
     return false;
 }
 
+audio_source_t AudioSessionCollection::getHighestPrioritySource(bool activeOnly) const
+{
+    audio_source_t source = AUDIO_SOURCE_DEFAULT;
+    int32_t priority = -1;
+
+    for (size_t i = 0; i < size(); i++) {
+        const sp<AudioSession>  audioSession = valueAt(i);
+        if (activeOnly && audioSession->activeCount() == 0) {
+            continue;
+        }
+        int32_t curPriority = source_priority(audioSession->inputSource());
+        if (curPriority > priority) {
+            priority = curPriority;
+            source = audioSession->inputSource();
+        }
+    }
+    return source;
+}
+
 void AudioSessionCollection::onSessionInfoUpdate() const
 {
     for (size_t i = 0; i < size(); i++) {
@@ -244,7 +275,6 @@
     }
 }
 
-
 status_t AudioSessionCollection::dump(int fd, int spaces) const
 {
     const size_t SIZE = 256;
diff --git a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
index a3536e5..e5888e2 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
@@ -18,11 +18,11 @@
 //#define LOG_NDEBUG 0
 
 #include "ConfigParsingUtils.h"
-#include <convert/convert.h>
 #include "AudioGain.h"
 #include "IOProfile.h"
-#include "TypeConverter.h"
-#include <hardware/audio.h>
+#include <system/audio.h>
+#include <media/AudioParameter.h>
+#include <media/TypeConverter.h>
 #include <utils/Log.h>
 #include <cutils/misc.h>
 
@@ -105,7 +105,7 @@
     audio_devices_t type = AUDIO_DEVICE_NONE;
     while (node) {
         if (strcmp(node->name, APM_DEVICE_TYPE) == 0) {
-            DeviceConverter::fromString(node->value, type);
+            deviceFromString(node->value, type);
             break;
         }
         node = node->next;
@@ -289,11 +289,11 @@
                                             const DeviceVector &declaredDevices)
 {
     char *tagLiteral = strndup(tag, strlen(tag));
-    char *devTag = strtok(tagLiteral, "|");
+    char *devTag = strtok(tagLiteral, AudioParameter::valueListSeparator);
     while (devTag != NULL) {
         if (strlen(devTag) != 0) {
             audio_devices_t type;
-            if (DeviceConverter::fromString(devTag, type)) {
+            if (deviceFromString(devTag, type)) {
                 uint32_t inBit = type & AUDIO_DEVICE_BIT_IN;
                 type &= ~AUDIO_DEVICE_BIT_IN;
                 while (type) {
@@ -311,7 +311,7 @@
                 }
             }
         }
-        devTag = strtok(NULL, "|");
+        devTag = strtok(NULL, AudioParameter::valueListSeparator);
     }
     free(tagLiteral);
 }
@@ -340,7 +340,7 @@
             config.addAvailableOutputDevices(availableOutputDevices);
         } else if (strcmp(DEFAULT_OUTPUT_DEVICE_TAG, node->name) == 0) {
             audio_devices_t device = AUDIO_DEVICE_NONE;
-            DeviceConverter::fromString(node->value, device);
+            deviceFromString(node->value, device);
             if (device != AUDIO_DEVICE_NONE) {
                 sp<DeviceDescriptor> defaultOutputDevice = new DeviceDescriptor(device);
                 config.setDefaultOutputDevice(defaultOutputDevice);
@@ -356,9 +356,8 @@
         } else if (strcmp(AUDIO_HAL_VERSION_TAG, node->name) == 0) {
             uint32_t major, minor;
             sscanf((char *)node->value, "%u.%u", &major, &minor);
-            module->setHalVersion(HARDWARE_DEVICE_API_VERSION(major, minor));
-            ALOGV("loadGlobalConfig() mHalVersion = %04x major %u minor %u",
-                  module->getHalVersion(), major, minor);
+            module->setHalVersion(major, minor);
+            ALOGV("loadGlobalConfig() mHalVersion = major %u minor %u", major, minor);
         }
         node = node->next;
     }
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index ba2b9e3..f0e48b6 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -285,7 +285,7 @@
         result.append(buffer);
     }
     std::string deviceLiteral;
-    if (DeviceConverter::toString(mDeviceType, deviceLiteral)) {
+    if (deviceToString(mDeviceType, deviceLiteral)) {
         snprintf(buffer, SIZE, "%*s- type: %-48s\n", spaces, "", deviceLiteral.c_str());
         result.append(buffer);
     }
@@ -302,7 +302,7 @@
 void DeviceDescriptor::log() const
 {
     std::string device;
-    DeviceConverter::toString(mDeviceType, device);
+    deviceToString(mDeviceType, device);
     ALOGI("Device id:%d type:0x%X:%s, addr:%s", mId,  mDeviceType, device.c_str(),
           mAddress.string());
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index 7a942cd..cc56fb8 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -20,16 +20,16 @@
 #include "HwModule.h"
 #include "IOProfile.h"
 #include "AudioGain.h"
-#include <hardware/audio.h>
 #include <policy.h>
+#include <system/audio.h>
 
 namespace android {
 
-HwModule::HwModule(const char *name, uint32_t halVersion)
+HwModule::HwModule(const char *name, uint32_t halVersionMajor, uint32_t halVersionMinor)
     : mName(String8(name)),
-      mHandle(AUDIO_MODULE_HANDLE_NONE),
-      mHalVersion(halVersion)
+      mHandle(AUDIO_MODULE_HANDLE_NONE)
 {
+    setHalVersion(halVersionMajor, halVersionMinor);
 }
 
 HwModule::~HwModule()
@@ -42,8 +42,8 @@
     }
 }
 
-status_t HwModule::addOutputProfile(const String8 &name, const audio_config_t *config,
-                                    audio_devices_t device, const String8 &address)
+status_t HwModule::addOutputProfile(const String8& name, const audio_config_t *config,
+                                    audio_devices_t device, const String8& address)
 {
     sp<IOProfile> profile = new OutputProfile(name);
 
@@ -227,7 +227,7 @@
     result.append(buffer);
     snprintf(buffer, SIZE, "  - handle: %d\n", mHandle);
     result.append(buffer);
-    snprintf(buffer, SIZE, "  - version: %u.%u\n", mHalVersion >> 8, mHalVersion & 0xFF);
+    snprintf(buffer, SIZE, "  - version: %u.%u\n", getHalVersionMajor(), getHalVersionMinor());
     result.append(buffer);
     write(fd, result.string(), result.size());
     if (mOutputProfiles.size()) {
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index 57f2534..74ef4ec 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -108,8 +108,18 @@
 
     AudioPort::dump(fd, 4);
 
-    snprintf(buffer, SIZE, "    - flags: 0x%04x\n", getFlags());
+    snprintf(buffer, SIZE, "    - flags: 0x%04x", getFlags());
     result.append(buffer);
+    std::string flagsLiteral;
+    if (getRole() == AUDIO_PORT_ROLE_SINK) {
+        InputFlagConverter::maskToString(getFlags(), flagsLiteral);
+    } else if (getRole() == AUDIO_PORT_ROLE_SOURCE) {
+        OutputFlagConverter::maskToString(getFlags(), flagsLiteral);
+    }
+    if (!flagsLiteral.empty()) {
+        result.appendFormat(" (%s)", flagsLiteral.c_str());
+    }
+    result.append("\n");
     write(fd, result.string(), result.size());
     mSupportedDevices.dump(fd, String8("Supported"), 4, false);
 }
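
[Illustrative output] The flags line now carries the decoded names next to the raw mask; for an output profile opened with PRIMARY|FAST it would read roughly:

    - flags: 0x0006 (AUDIO_OUTPUT_FLAG_PRIMARY|AUDIO_OUTPUT_FLAG_FAST)

(name order follows the converter tables in media/TypeConverter.h).
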
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 2ecd6b1..a224004 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -18,7 +18,7 @@
 //#define LOG_NDEBUG 0
 
 #include "Serializer.h"
-#include <convert/convert.h>
+#include <media/convert.h>
 #include "TypeConverter.h"
 #include <libxml/parser.h>
 #include <libxml/xinclude.h>
@@ -199,7 +199,8 @@
     string format = getXmlAttribute(root, Attributes::format);
     string channels = getXmlAttribute(root, Attributes::channelMasks);
 
-    profile = new Element(formatFromString(format), channelMasksFromString(channels, ","),
+    profile = new Element(formatFromString(format, gDynamicFormat),
+                          channelMasksFromString(channels, ","),
                           samplingRatesFromString(samplingRates, ","));
 
     profile->setDynamicFormat(profile->getFormat() == gDynamicFormat);
@@ -300,7 +301,7 @@
                 AUDIO_PORT_ROLE_SOURCE : AUDIO_PORT_ROLE_SINK;
 
     audio_devices_t type = AUDIO_DEVICE_NONE;
-    if (!DeviceConverter::fromString(typeName, type) ||
+    if (!deviceFromString(typeName, type) ||
             (!audio_is_input_device(type) && portRole == AUDIO_PORT_ROLE_SOURCE) ||
             (!audio_is_output_devices(type) && portRole == AUDIO_PORT_ROLE_SINK)) {
         ALOGW("%s: bad type %08x", __FUNCTION__, type);
@@ -419,19 +420,17 @@
         ALOGE("%s: No %s found", __FUNCTION__, Attributes::name);
         return BAD_VALUE;
     }
-    uint32_t version = AUDIO_DEVICE_API_VERSION_MIN;
+    uint32_t versionMajor = 0, versionMinor = 0;
     string versionLiteral = getXmlAttribute(root, Attributes::version);
     if (!versionLiteral.empty()) {
-        uint32_t major, minor;
-        sscanf(versionLiteral.c_str(), "%u.%u", &major, &minor);
-        version = HARDWARE_DEVICE_API_VERSION(major, minor);
-        ALOGV("%s: mHalVersion = %04x major %u minor %u",  __FUNCTION__,
-              version, major, minor);
+        sscanf(versionLiteral.c_str(), "%u.%u", &versionMajor, &versionMinor);
+        ALOGV("%s: mHalVersion = major %u minor %u",  __FUNCTION__,
+              versionMajor, versionMinor);
     }
 
     ALOGV("%s: %s %s=%s", __FUNCTION__, tag, Attributes::name, name.c_str());
 
-    module = new Element(name.c_str(), version);
+    module = new Element(name.c_str(), versionMajor, versionMinor);
 
     // Deserialize childrens: Audio Mix Port, Audio Device Ports (Source/Sink), Audio Routes
     MixPortTraits::Collection mixPorts;
diff --git a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
index 48bfd79..0362037 100644
--- a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
@@ -14,290 +14,55 @@
  * limitations under the License.
  */
 
+#include <media/AudioPolicy.h>
+
 #include "TypeConverter.h"
 
 namespace android {
 
 #define MAKE_STRING_FROM_ENUM(string) { #string, string }
-
-template <>
-const DeviceConverter::Table DeviceConverter::mTable[] = {
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_EARPIECE),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_SPEAKER),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_SPEAKER_SAFE),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADSET),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADPHONE),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_SCO),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_A2DP),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_AUX_DIGITAL),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_HDMI),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_USB_ACCESSORY),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_USB_DEVICE),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_USB),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_REMOTE_SUBMIX),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_TELEPHONY_TX),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_LINE),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_HDMI_ARC),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_SPDIF),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_FM),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_AUX_LINE),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_IP),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BUS),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_STUB),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_AMBIENT),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BUILTIN_MIC),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_ALL_SCO),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_WIRED_HEADSET),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_AUX_DIGITAL),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_HDMI),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_TELEPHONY_RX),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_VOICE_CALL),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BACK_MIC),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_REMOTE_SUBMIX),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_USB_ACCESSORY),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_USB_DEVICE),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_FM_TUNER),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_TV_TUNER),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_LINE),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_SPDIF),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_A2DP),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_LOOPBACK),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_IP),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BUS),
-        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_STUB),
-};
-
-template<>
-const size_t DeviceConverter::mSize = sizeof(DeviceConverter::mTable) /
-        sizeof(DeviceConverter::mTable[0]);
-
-
-template <>
-const OutputFlagConverter::Table OutputFlagConverter::mTable[] = {
-    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_DIRECT),
-    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_PRIMARY),
-    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_FAST),
-    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_DEEP_BUFFER),
-    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD),
-    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_NON_BLOCKING),
-    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_HW_AV_SYNC),
-    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_TTS),
-    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_RAW),
-    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_SYNC),
-};
-template<>
-const size_t OutputFlagConverter::mSize = sizeof(OutputFlagConverter::mTable) /
-        sizeof(OutputFlagConverter::mTable[0]);
-
-
-template <>
-const InputFlagConverter::Table InputFlagConverter::mTable[] = {
-    MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_FAST),
-    MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_HW_HOTWORD),
-    MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_RAW),
-    MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_SYNC),
-};
-template<>
-const size_t InputFlagConverter::mSize = sizeof(InputFlagConverter::mTable) /
-        sizeof(InputFlagConverter::mTable[0]);
-
-
-template <>
-const FormatConverter::Table FormatConverter::mTable[] = {
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_16_BIT),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_8_BIT),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_32_BIT),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_8_24_BIT),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_FLOAT),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_24_BIT_PACKED),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_MP3),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_MAIN),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LC),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_SSR),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LTP),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_HE_V1),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_SCALABLE),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ERLC),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LD),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_HE_V2),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ELD),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_VORBIS),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_HE_AAC_V1),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_HE_AAC_V2),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_OPUS),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AC3),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_E_AC3),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DTS),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DTS_HD),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_IEC61937),
-    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DOLBY_TRUEHD),
-};
-template<>
-const size_t FormatConverter::mSize = sizeof(FormatConverter::mTable) /
-        sizeof(FormatConverter::mTable[0]);
-
-
-template <>
-const OutputChannelConverter::Table OutputChannelConverter::mTable[] = {
-    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_MONO),
-    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_STEREO),
-    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_QUAD),
-    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_5POINT1),
-    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_7POINT1),
-};
-template<>
-const size_t OutputChannelConverter::mSize = sizeof(OutputChannelConverter::mTable) /
-        sizeof(OutputChannelConverter::mTable[0]);
-
-
-template <>
-const InputChannelConverter::Table InputChannelConverter::mTable[] = {
-    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_MONO),
-    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_STEREO),
-    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_FRONT_BACK),
-};
-template<>
-const size_t InputChannelConverter::mSize = sizeof(InputChannelConverter::mTable) /
-        sizeof(InputChannelConverter::mTable[0]);
-
-template <>
-const ChannelIndexConverter::Table ChannelIndexConverter::mTable[] = {
-    {"AUDIO_CHANNEL_INDEX_MASK_1", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_1)},
-    {"AUDIO_CHANNEL_INDEX_MASK_2", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_2)},
-    {"AUDIO_CHANNEL_INDEX_MASK_3", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_3)},
-    {"AUDIO_CHANNEL_INDEX_MASK_4", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_4)},
-    {"AUDIO_CHANNEL_INDEX_MASK_5", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_5)},
-    {"AUDIO_CHANNEL_INDEX_MASK_6", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_6)},
-    {"AUDIO_CHANNEL_INDEX_MASK_7", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_7)},
-    {"AUDIO_CHANNEL_INDEX_MASK_8", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_8)},
-};
-template<>
-const size_t ChannelIndexConverter::mSize = sizeof(ChannelIndexConverter::mTable) /
-        sizeof(ChannelIndexConverter::mTable[0]);
-
-
-template <>
-const GainModeConverter::Table GainModeConverter::mTable[] = {
-    MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_JOINT),
-    MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_CHANNELS),
-    MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_RAMP),
-};
-
-template<>
-const size_t GainModeConverter::mSize = sizeof(GainModeConverter::mTable) /
-        sizeof(GainModeConverter::mTable[0]);
+#define TERMINATOR { .literal = nullptr }
 
 template <>
 const DeviceCategoryConverter::Table DeviceCategoryConverter::mTable[] = {
     MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_HEADSET),
     MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_SPEAKER),
     MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_EARPIECE),
-    MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_EXT_MEDIA)
+    MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_EXT_MEDIA),
+    TERMINATOR
 };
 
-template<>
-const size_t DeviceCategoryConverter::mSize = sizeof(DeviceCategoryConverter::mTable) /
-        sizeof(DeviceCategoryConverter::mTable[0]);
-
 template <>
-const StreamTypeConverter::Table StreamTypeConverter::mTable[] = {
-    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_VOICE_CALL),
-    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_SYSTEM),
-    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_RING),
-    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_MUSIC),
-    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ALARM),
-    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_NOTIFICATION),
-    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_BLUETOOTH_SCO ),
-    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ENFORCED_AUDIBLE),
-    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_DTMF),
-    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_TTS),
-    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ACCESSIBILITY),
-    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_REROUTING),
-    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_PATCH),
+const MixTypeConverter::Table MixTypeConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(MIX_TYPE_INVALID),
+    MAKE_STRING_FROM_ENUM(MIX_TYPE_PLAYERS),
+    MAKE_STRING_FROM_ENUM(MIX_TYPE_RECORDERS),
+    TERMINATOR
 };
 
-template<>
-const size_t StreamTypeConverter::mSize = sizeof(StreamTypeConverter::mTable) /
-        sizeof(StreamTypeConverter::mTable[0]);
+template <>
+const RouteFlagTypeConverter::Table RouteFlagTypeConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(MIX_ROUTE_FLAG_RENDER),
+    MAKE_STRING_FROM_ENUM(MIX_ROUTE_FLAG_LOOP_BACK),
+    MAKE_STRING_FROM_ENUM(MIX_ROUTE_FLAG_ALL),
+    TERMINATOR
+};
 
-template <class Traits>
-bool TypeConverter<Traits>::toString(const typename Traits::Type &value, std::string &str)
-{
-    for (size_t i = 0; i < mSize; i++) {
-        if (mTable[i].value == value) {
-            str = mTable[i].literal;
-            return true;
-        }
-    }
-    return false;
-}
+template <>
+const RuleTypeConverter::Table RuleTypeConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(RULE_EXCLUSION_MASK),
+    MAKE_STRING_FROM_ENUM(RULE_MATCH_ATTRIBUTE_USAGE),
+    MAKE_STRING_FROM_ENUM(RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET),
+    MAKE_STRING_FROM_ENUM(RULE_MATCH_UID),
+    MAKE_STRING_FROM_ENUM(RULE_EXCLUDE_ATTRIBUTE_USAGE),
+    MAKE_STRING_FROM_ENUM(RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET),
+    MAKE_STRING_FROM_ENUM(RULE_EXCLUDE_UID),
+    TERMINATOR
+};
 
-template <class Traits>
-bool TypeConverter<Traits>::fromString(const std::string &str, typename Traits::Type &result)
-{
-    for (size_t i = 0; i < mSize; i++) {
-        if (strcmp(mTable[i].literal, str.c_str()) == 0) {
-            ALOGV("stringToEnum() found %s", mTable[i].literal);
-            result = mTable[i].value;
-            return true;
-        }
-    }
-    return false;
-}
-
-template <class Traits>
-void TypeConverter<Traits>::collectionFromString(const std::string &str,
-                                                 typename Traits::Collection &collection,
-                                                 const char *del)
-{
-    char *literal = strdup(str.c_str());
-
-    for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
-        typename Traits::Type value;
-        if (fromString(cstr, value)) {
-            collection.add(value);
-        }
-    }
-    free(literal);
-}
-
-template <class Traits>
-uint32_t TypeConverter<Traits>::maskFromString(const std::string &str, const char *del)
-{
-    char *literal = strdup(str.c_str());
-    uint32_t value = 0;
-    for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
-        typename Traits::Type type;
-        if (fromString(cstr, type)) {
-            value |= static_cast<uint32_t>(type);
-        }
-    }
-    free(literal);
-    return value;
-}
-
-template class TypeConverter<DeviceTraits>;
-template class TypeConverter<OutputFlagTraits>;
-template class TypeConverter<InputFlagTraits>;
-template class TypeConverter<FormatTraits>;
-template class TypeConverter<OutputChannelTraits>;
-template class TypeConverter<InputChannelTraits>;
-template class TypeConverter<ChannelIndexTraits>;
-template class TypeConverter<GainModeTraits>;
-template class TypeConverter<StreamTraits>;
 template class TypeConverter<DeviceCategoryTraits>;
+template class TypeConverter<MixTypeTraits>;
+template class TypeConverter<RouteFlagTraits>;
+template class TypeConverter<RuleTraits>;
 
 }; // namespace android
-
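
[Illustrative sketch, not part of the patch] The local template implementation is removed because the tables are now nullptr-terminated (TERMINATOR) and the shared converter in libmedia can walk them without a per-table mSize. Assuming the shared implementation iterates until the sentinel, the lookup amounts to:

    template <class Traits>
    bool TypeConverter<Traits>::toString(const typename Traits::Type &value, std::string &str)
    {
        // Stop at the TERMINATOR entry ({ .literal = nullptr }) instead of
        // relying on an explicit element count.
        for (size_t i = 0; mTable[i].literal != nullptr; i++) {
            if (mTable[i].value == value) {
                str = mTable[i].literal;
                return true;
            }
        }
        return false;
    }
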
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h b/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/Android.mk b/services/audiopolicy/engineconfigurable/Android.mk
old mode 100755
new mode 100644
index 116077d..5b43347
--- a/services/audiopolicy/engineconfigurable/Android.mk
+++ b/services/audiopolicy/engineconfigurable/Android.mk
@@ -39,18 +39,19 @@
 
 LOCAL_MODULE := libaudiopolicyengineconfigurable
 LOCAL_MODULE_TAGS := optional
+
 LOCAL_STATIC_LIBRARIES := \
-    libmedia_helper \
     libaudiopolicypfwwrapper \
-    libaudiopolicycomponents \
-    libxml2
+    libaudiopolicycomponents
 
 LOCAL_SHARED_LIBRARIES := \
     liblog \
     libcutils \
     libutils \
     libaudioutils \
-    libparameter
+    libparameter \
+    libmedia_helper \
+    libxml2
 
 include $(BUILD_SHARED_LIBRARY)
 
diff --git a/services/audiopolicy/engineconfigurable/include/AudioPolicyEngineInstance.h b/services/audiopolicy/engineconfigurable/include/AudioPolicyEngineInstance.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/include/EngineDefinition.h b/services/audiopolicy/engineconfigurable/include/EngineDefinition.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h b/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/ParameterFrameworkConfigurationPolicy.xml.in b/services/audiopolicy/engineconfigurable/parameter-framework/examples/ParameterFrameworkConfigurationPolicy.xml.in
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_stream.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_stream.pfw
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicyClass.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicyClass.xml
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem-CommonTypes.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem-CommonTypes.xml
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/policy_criteria.txt b/services/audiopolicy/engineconfigurable/parameter-framework/examples/policy_criteria.txt
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicyMappingKeys.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicyMappingKeys.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystemBuilder.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystemBuilder.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/Collection.h b/services/audiopolicy/engineconfigurable/src/Collection.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/Element.h b/services/audiopolicy/engineconfigurable/src/Element.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.cpp b/services/audiopolicy/engineconfigurable/src/Engine.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.h b/services/audiopolicy/engineconfigurable/src/Engine.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/EngineInstance.cpp b/services/audiopolicy/engineconfigurable/src/EngineInstance.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/InputSource.cpp b/services/audiopolicy/engineconfigurable/src/InputSource.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/InputSource.h b/services/audiopolicy/engineconfigurable/src/InputSource.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/Strategy.cpp b/services/audiopolicy/engineconfigurable/src/Strategy.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/Strategy.h b/services/audiopolicy/engineconfigurable/src/Strategy.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/Stream.cpp b/services/audiopolicy/engineconfigurable/src/Stream.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/Stream.h b/services/audiopolicy/engineconfigurable/src/Stream.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/Usage.cpp b/services/audiopolicy/engineconfigurable/src/Usage.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/Usage.h b/services/audiopolicy/engineconfigurable/src/Usage.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/wrapper/Android.mk b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
index c5990ac..066ee0d 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/Android.mk
+++ b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
@@ -14,11 +14,9 @@
 
 LOCAL_SRC_FILES:= ParameterManagerWrapper.cpp
 
-LOCAL_STATIC_LIBRARIES := \
-    libmedia_helper \
-
 LOCAL_SHARED_LIBRARIES := \
     libparameter \
+    libmedia_helper
 
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
diff --git a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
old mode 100755
new mode 100644
index fe15d86..9b0442e
--- a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
+++ b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
@@ -22,7 +22,7 @@
 #include <ParameterMgrPlatformConnector.h>
 #include <SelectionCriterionTypeInterface.h>
 #include <SelectionCriterionInterface.h>
-#include <convert.h>
+#include <media/convert.h>
 #include <algorithm>
 #include <cutils/config_utils.h>
 #include <cutils/misc.h>
diff --git a/services/audiopolicy/engineconfigurable/wrapper/audio_policy_criteria_conf.h b/services/audiopolicy/engineconfigurable/wrapper/audio_policy_criteria_conf.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/wrapper/config/audio_policy_criteria.conf b/services/audiopolicy/engineconfigurable/wrapper/config/audio_policy_criteria.conf
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h b/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/enginedefault/Android.mk b/services/audiopolicy/enginedefault/Android.mk
old mode 100755
new mode 100644
index e6de8ae..b37b379
--- a/services/audiopolicy/enginedefault/Android.mk
+++ b/services/audiopolicy/enginedefault/Android.mk
@@ -35,13 +35,12 @@
 LOCAL_MODULE_TAGS := optional
 
 LOCAL_STATIC_LIBRARIES := \
-    libmedia_helper \
     libaudiopolicycomponents \
-    libxml2
 
 LOCAL_SHARED_LIBRARIES += \
     liblog \
     libcutils \
     libutils \
+    libmedia_helper
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audiopolicy/enginedefault/include/AudioPolicyEngineInstance.h b/services/audiopolicy/enginedefault/include/AudioPolicyEngineInstance.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
old mode 100755
new mode 100644
index d31429c..895818c
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -202,6 +202,7 @@
 
     case AUDIO_USAGE_MEDIA:
     case AUDIO_USAGE_GAME:
+    case AUDIO_USAGE_ASSISTANT:
     case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
     case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
         return STRATEGY_MEDIA;
@@ -320,8 +321,7 @@
             if (((availableInputDevices.types() &
                     AUDIO_DEVICE_IN_TELEPHONY_RX & ~AUDIO_DEVICE_BIT_IN) == 0) ||
                     (((txDevice & availPrimaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
-                         (primaryOutput->getAudioPort()->getModuleVersion() <
-                             AUDIO_DEVICE_API_VERSION_3_0))) {
+                         (primaryOutput->getAudioPort()->getModuleVersionMajor() < 3))) {
                 availableOutputDevicesType = availPrimaryOutputDevices;
             }
         }
diff --git a/services/audiopolicy/enginedefault/src/Engine.h b/services/audiopolicy/enginedefault/src/Engine.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/enginedefault/src/EngineInstance.cpp b/services/audiopolicy/enginedefault/src/EngineInstance.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 8cb5e0f..e71bb01 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -33,11 +33,11 @@
 #include <AudioPolicyEngineInstance.h>
 #include <cutils/properties.h>
 #include <utils/Log.h>
-#include <hardware/audio.h>
-#include <hardware/audio_effect.h>
 #include <media/AudioParameter.h>
 #include <media/AudioPolicyHelper.h>
 #include <soundtrigger/SoundTrigger.h>
+#include <system/audio.h>
+#include <audio_policy_conf.h>
 #include "AudioPolicyManager.h"
 #ifndef USE_XML_AUDIO_POLICY_CONF
 #include <ConfigParsingUtils.h>
@@ -70,7 +70,7 @@
 {
     AudioParameter param(device_address);
     const String8 key(state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE ?
-                AUDIO_PARAMETER_DEVICE_CONNECT : AUDIO_PARAMETER_DEVICE_DISCONNECT);
+                AudioParameter::keyStreamConnect : AudioParameter::keyStreamDisconnect);
     param.addInt(key, device);
     mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
 }
@@ -492,15 +492,17 @@
         // FIXME: would be better to refine to only inputs whose profile connects to the
         // call TX device but this information is not in the audio patch and logic here must be
         // symmetric to the one in startInput()
-        audio_io_handle_t activeInput = mInputs.getActiveInput();
-        if (activeInput != 0) {
-            sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
-            if (activeDesc->getModuleHandle() == txSourceDeviceDesc->getModuleHandle()) {
-                //FIXME: consider all active sessions
-                AudioSessionCollection activeSessions = activeDesc->getActiveAudioSessions();
-                audio_session_t activeSession = activeSessions.keyAt(0);
-                stopInput(activeInput, activeSession);
-                releaseInput(activeInput, activeSession);
+        Vector<sp <AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
+        for (size_t i = 0; i < activeInputs.size(); i++) {
+            sp<AudioInputDescriptor> activeDesc = activeInputs[i];
+            if (activeDesc->hasSameHwModuleAs(txSourceDeviceDesc)) {
+                AudioSessionCollection activeSessions =
+                        activeDesc->getAudioSessions(true /*activeOnly*/);
+                for (size_t j = 0; j < activeSessions.size(); j++) {
+                    audio_session_t activeSession = activeSessions.keyAt(j);
+                    stopInput(activeDesc->mIoHandle, activeSession);
+                    releaseInput(activeDesc->mIoHandle, activeSession);
+                }
             }
         }
 
@@ -671,15 +673,16 @@
         }
     }
 
-    audio_io_handle_t activeInput = mInputs.getActiveInput();
-    if (activeInput != 0) {
-        sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
-        audio_devices_t newDevice = getNewInputDevice(activeInput);
+    Vector<sp <AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
+    for (size_t i = 0; i < activeInputs.size(); i++) {
+        sp<AudioInputDescriptor> activeDesc = activeInputs[i];
+        audio_devices_t newDevice = getNewInputDevice(activeDesc);
         // Force new input selection if the new device can not be reached via current input
-        if (activeDesc->mProfile->getSupportedDevices().types() & (newDevice & ~AUDIO_DEVICE_BIT_IN)) {
-            setInputDevice(activeInput, newDevice);
+        if (activeDesc->mProfile->getSupportedDevices().types() &
+                (newDevice & ~AUDIO_DEVICE_BIT_IN)) {
+            setInputDevice(activeDesc->mIoHandle, newDevice);
         } else {
-            closeInput(activeInput);
+            closeInput(activeDesc->mIoHandle);
         }
     }
 }
@@ -759,12 +762,10 @@
                                               audio_session_t session,
                                               audio_stream_type_t *stream,
                                               uid_t uid,
-                                              uint32_t samplingRate,
-                                              audio_format_t format,
-                                              audio_channel_mask_t channelMask,
+                                              const audio_config_t *config,
                                               audio_output_flags_t flags,
                                               audio_port_handle_t selectedDeviceId,
-                                              const audio_offload_info_t *offloadInfo)
+                                              audio_port_handle_t *portId)
 {
     audio_attributes_t attributes;
     if (attr != NULL) {
@@ -782,10 +783,16 @@
         }
         stream_type_to_audio_attributes(*stream, &attributes);
     }
+
+    // TODO: check for existing client for this port ID
+    if (*portId == AUDIO_PORT_HANDLE_NONE) {
+        *portId = AudioPort::getNextUniqueId();
+    }
+
     sp<SwAudioOutputDescriptor> desc;
     if (mPolicyMixes.getOutputForAttr(attributes, uid, desc) == NO_ERROR) {
         ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
-        if (!audio_has_proportional_frames(format)) {
+        if (!audio_has_proportional_frames(config->format)) {
             return BAD_VALUE;
         }
         *stream = streamTypefromAttributesInt(&attributes);
@@ -823,11 +830,11 @@
     }
 
     ALOGV("getOutputForAttr() device 0x%x, samplingRate %d, format %x, channelMask %x, flags %x",
-          device, samplingRate, format, channelMask, flags);
+          device, config->sample_rate, config->format, config->channel_mask, flags);
 
     *output = getOutputForDevice(device, session, *stream,
-                                 samplingRate, format, channelMask,
-                                 flags, offloadInfo);
+                                 config->sample_rate, config->format, config->channel_mask,
+                                 flags, &config->offload_info);
     if (*output == AUDIO_IO_HANDLE_NONE) {
         mOutputRoutes.removeRoute(session);
         return INVALID_OPERATION;
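
[Illustrative sketch, hypothetical caller] The sample rate, format, channel mask and offload info are folded into a single audio_config_t, and a port ID is now allocated per request. Assuming the leading (attr, output) parameters are unchanged from the previous signature, and with attr, session and uid in scope, a call looks like:

    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
    config.sample_rate = 48000;
    config.format = AUDIO_FORMAT_PCM_16_BIT;
    config.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
    // AUDIO_PORT_HANDLE_NONE asks the policy manager to allocate a new port ID.
    audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
    audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
    audio_stream_type_t stream = AUDIO_STREAM_DEFAULT;
    status_t status = getOutputForAttr(&attr, &output, session, &stream, uid,
                                       &config, AUDIO_OUTPUT_FLAG_NONE,
                                       AUDIO_PORT_HANDLE_NONE /*selectedDeviceId*/,
                                       &portId);
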
@@ -1452,19 +1459,19 @@
                                              audio_io_handle_t *input,
                                              audio_session_t session,
                                              uid_t uid,
-                                             uint32_t samplingRate,
-                                             audio_format_t format,
-                                             audio_channel_mask_t channelMask,
+                                             const audio_config_base_t *config,
                                              audio_input_flags_t flags,
                                              audio_port_handle_t selectedDeviceId,
-                                             input_type_t *inputType)
+                                             input_type_t *inputType,
+                                             audio_port_handle_t *portId)
 {
     ALOGV("getInputForAttr() source %d, samplingRate %d, format %d, channelMask %x,"
             "session %d, flags %#x",
-          attr->source, samplingRate, format, channelMask, session, flags);
+          attr->source, config->sample_rate, config->format, config->channel_mask, session, flags);
 
     *input = AUDIO_IO_HANDLE_NONE;
     *inputType = API_INPUT_INVALID;
+
     audio_devices_t device;
     // handle legacy remote submix case where the address was not always specified
     String8 address = String8("");
@@ -1477,6 +1484,11 @@
     }
     halInputSource = inputSource;
 
+    // TODO: check for existing client for this port ID
+    if (*portId == AUDIO_PORT_HANDLE_NONE) {
+        *portId = AudioPort::getNextUniqueId();
+    }
+
     // Explicit routing?
     sp<DeviceDescriptor> deviceDesc;
     for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
@@ -1526,12 +1538,13 @@
     }
 
     *input = getInputForDevice(device, address, session, uid, inputSource,
-                               samplingRate, format, channelMask, flags,
+                               config->sample_rate, config->format, config->channel_mask, flags,
                                policyMix);
     if (*input == AUDIO_IO_HANDLE_NONE) {
         mInputRoutes.removeRoute(session);
         return INVALID_OPERATION;
     }
+
     ALOGV("getInputForAttr() returns input type = %d", *inputType);
     return NO_ERROR;
 }
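
Both getOutputForAttr() and getInputForAttr() now take a single config struct (audio_config_t / audio_config_base_t) instead of separate samplingRate, format and channelMask arguments. The sketch below shows the consolidation from a caller's point of view; the struct here is a reduced stand-in for the system/audio.h definition, not the real type.

#include <cstdint>
#include <cstdio>

// Simplified stand-in for audio_config_base_t (fields reduced for illustration).
struct audio_config_base_t {
    uint32_t sample_rate;
    uint32_t channel_mask;
    int      format;
};

// Old style: every stream property is its own parameter.
static void getInputForAttrLegacy(uint32_t samplingRate, int format, uint32_t channelMask) {
    std::printf("legacy: %u Hz, format %d, mask %#x\n", samplingRate, format, channelMask);
}

// New style: one pointer to a config describing the requested stream.
static void getInputForAttrNew(const audio_config_base_t* config) {
    std::printf("new: %u Hz, format %d, mask %#x\n",
                config->sample_rate, config->format, config->channel_mask);
}

int main() {
    audio_config_base_t config{48000, 0x3, 1};
    getInputForAttrLegacy(config.sample_rate, config.format, config.channel_mask);
    getInputForAttrNew(&config);
    return 0;
}
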
@@ -1608,14 +1621,22 @@
                                                               isSoundTrigger,
                                                               policyMix, mpClientInterface);
 
-// TODO enable input reuse
-#if 0
+
     // reuse an open input if possible
     for (size_t i = 0; i < mInputs.size(); i++) {
         sp<AudioInputDescriptor> desc = mInputs.valueAt(i);
-        // reuse input if it shares the same profile and same sound trigger attribute
-        if (profile == desc->mProfile &&
-            isSoundTrigger == desc->isSoundTrigger()) {
+        // reuse input if:
+        // - it shares the same profile
+        //      AND
+        // - it is not a reroute submix input
+        //      AND
+        // - it is: not used for sound trigger
+        //                OR
+        //          used for sound trigger and all clients use the same session ID
+        //
+        if ((profile == desc->mProfile) &&
+            (isSoundTrigger == desc->isSoundTrigger()) &&
+            !is_virtual_input_device(device)) {
 
             sp<AudioSession> as = desc->getAudioSession(session);
             if (as != 0) {
@@ -1625,16 +1646,33 @@
                 } else {
                     ALOGW("getInputForDevice() record with different attributes"
                           " exists for session %d", session);
-                    return input;
+                    break;
                 }
+            } else if (isSoundTrigger) {
+                break;
+            }
+            // force close input if current source is now the highest priority request on this input
+            // and current input properties are not exactly as requested.
+            if ((desc->mSamplingRate != samplingRate ||
+                    desc->mChannelMask != channelMask ||
+                    !audio_formats_match(desc->mFormat, format)) &&
+                    (source_priority(desc->getHighestPrioritySource(false /*activeOnly*/)) <
+                     source_priority(inputSource))) {
+                ALOGV("%s: ", __FUNCTION__);
+                AudioSessionCollection sessions = desc->getAudioSessions(false /*activeOnly*/);
+                for (size_t j = 0; j < sessions.size(); j++) {
+                    audio_session_t currentSession = sessions.keyAt(j);
+                    stopInput(desc->mIoHandle, currentSession);
+                    releaseInput(desc->mIoHandle, currentSession);
+                }
+                break;
             } else {
                 desc->addAudioSession(session, audioSession);
+                ALOGV("%s: reusing input %d", __FUNCTION__, mInputs.keyAt(i));
+                return mInputs.keyAt(i);
             }
-            ALOGV("getInputForDevice() reusing input %d", mInputs.keyAt(i));
-            return mInputs.keyAt(i);
         }
     }
-#endif
 
     audio_config_t config = AUDIO_CONFIG_INITIALIZER;
     config.sample_rate = profileSamplingRate;
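
The re-enabled reuse path above decides, per existing input, whether a new capture request can share it, must force it closed and reopened, or must fall through to a new input. Below is a compact stand-alone sketch of that decision; the types, priority ordering and field names are simplified assumptions, not the framework structures.

#include <cstdint>
#include <cstdio>

enum class ReuseDecision { Reuse, ForceClose, OpenNew };

struct InputState {
    int      profileId;
    bool     isSoundTrigger;
    bool     isVirtualDevice;       // e.g. remote submix / reroute inputs are never reused
    uint32_t sampleRate;
    uint32_t channelMask;
    int      format;
    int      highestPrioritySource; // larger value == higher priority (assumption)
};

struct Request {
    int      profileId;
    bool     isSoundTrigger;
    uint32_t sampleRate;
    uint32_t channelMask;
    int      format;
    int      sourcePriority;
};

static ReuseDecision decide(const InputState& in, const Request& req) {
    if (in.profileId != req.profileId ||
        in.isSoundTrigger != req.isSoundTrigger ||
        in.isVirtualDevice) {
        return ReuseDecision::OpenNew;
    }
    bool sameConfig = in.sampleRate == req.sampleRate &&
                      in.channelMask == req.channelMask &&
                      in.format == req.format;
    // Close and reopen only when the new request outranks everything already
    // active on the input and the current configuration does not match it.
    if (!sameConfig && req.sourcePriority > in.highestPrioritySource) {
        return ReuseDecision::ForceClose;
    }
    return ReuseDecision::Reuse;
}

int main() {
    InputState in{1, false, false, 48000, 0x3, 1, /*priority*/ 2};
    Request   hi{1, false, 16000, 0x1, 1, /*priority*/ 5};
    std::printf("decision: %d\n", static_cast<int>(decide(in, hi))); // ForceClose
    return 0;
}
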
@@ -1677,10 +1715,50 @@
     return input;
 }
 
+bool AudioPolicyManager::isConcurentCaptureAllowed(const sp<AudioInputDescriptor>& inputDesc,
+        const sp<AudioSession>& audioSession)
+{
+    // Do not allow capture if an active voice call is using a software patch and
+    // the call TX source device is on the same HW module.
+    // FIXME: would be better to refine to only inputs whose profile connects to the
+    // call TX device but this information is not in the audio patch
+    if (mCallTxPatch != 0 &&
+        inputDesc->getModuleHandle() == mCallTxPatch->mPatch.sources[0].ext.device.hw_module) {
+        return false;
+    }
+
+    // starting concurrent capture is enabled if:
+    // 1) capturing for re-routing
+    // 2) capturing for HOTWORD source
+    // 3) capturing for FM TUNER source
+    // 4) all other active captures are for re-routing, HOTWORD or FM TUNER
+
+    if (is_virtual_input_device(inputDesc->mDevice) ||
+            audioSession->inputSource() == AUDIO_SOURCE_HOTWORD ||
+            audioSession->inputSource() == AUDIO_SOURCE_FM_TUNER) {
+        return true;
+    }
+
+    Vector< sp<AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
+    for (size_t i = 0; i <  activeInputs.size(); i++) {
+        sp<AudioInputDescriptor> activeInput = activeInputs[i];
+        if ((activeInput->inputSource() != AUDIO_SOURCE_HOTWORD) &&
+                (activeInput->inputSource() != AUDIO_SOURCE_FM_TUNER) &&
+                !is_virtual_input_device(activeInput->mDevice)) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+
 status_t AudioPolicyManager::startInput(audio_io_handle_t input,
-                                        audio_session_t session)
+                                        audio_session_t session,
+                                        concurrency_type__mask_t *concurrency)
 {
     ALOGV("startInput() input %d", input);
+    *concurrency = API_INPUT_CONCURRENCY_NONE;
     ssize_t index = mInputs.indexOfKey(input);
     if (index < 0) {
         ALOGW("startInput() unknown input %d", input);
@@ -1694,86 +1772,66 @@
         return BAD_VALUE;
     }
 
-    // virtual input devices are compatible with other input devices
-    if (!is_virtual_input_device(inputDesc->mDevice)) {
-
-        // for a non-virtual input device, check if there is another (non-virtual) active input
-        audio_io_handle_t activeInput = mInputs.getActiveInput();
-        if (activeInput != 0 && activeInput != input) {
-
-            // If the already active input uses AUDIO_SOURCE_HOTWORD then it is closed,
-            // otherwise the active input continues and the new input cannot be started.
-            sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
-            if ((activeDesc->inputSource() == AUDIO_SOURCE_HOTWORD) &&
-                    !activeDesc->hasPreemptedSession(session)) {
-                ALOGW("startInput(%d) preempting low-priority input %d", input, activeInput);
-                //FIXME: consider all active sessions
-                AudioSessionCollection activeSessions = activeDesc->getActiveAudioSessions();
-                audio_session_t activeSession = activeSessions.keyAt(0);
-                SortedVector<audio_session_t> sessions =
-                                           activeDesc->getPreemptedSessions();
-                sessions.add(activeSession);
-                inputDesc->setPreemptedSessions(sessions);
-                stopInput(activeInput, activeSession);
-                releaseInput(activeInput, activeSession);
-            } else {
-                ALOGE("startInput(%d) failed: other input %d already started", input, activeInput);
-                return INVALID_OPERATION;
-            }
-        }
-
-        // Do not allow capture if an active voice call is using a software patch and
-        // the call TX source device is on the same HW module.
-        // FIXME: would be better to refine to only inputs whose profile connects to the
-        // call TX device but this information is not in the audio patch
-        if (mCallTxPatch != 0 &&
-            inputDesc->getModuleHandle() == mCallTxPatch->mPatch.sources[0].ext.device.hw_module) {
-            return INVALID_OPERATION;
-        }
+    if (!isConcurentCaptureAllowed(inputDesc, audioSession)) {
+        ALOGW("startInput(%d) failed: other input already started", input);
+        return INVALID_OPERATION;
     }
 
+    if (isInCall()) {
+        *concurrency |= API_INPUT_CONCURRENCY_CALL;
+    }
+    if (mInputs.activeInputsCountOnDevices() != 0) {
+        *concurrency |= API_INPUT_CONCURRENCY_CAPTURE;
+    }
+
+    // increment activity count before calling getNewInputDevice() below as only active sessions
+    // are considered for device selection
+    audioSession->changeActiveCount(1);
+
     // Routing?
     mInputRoutes.incRouteActivity(session);
 
-    if (!inputDesc->isActive() || mInputRoutes.hasRouteChanged(session)) {
-        // if input maps to a dynamic policy with an activity listener, notify of state change
-        if ((inputDesc->mPolicyMix != NULL)
-                && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
-            mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
-                    MIX_STATE_MIXING);
-        }
-
+    if (audioSession->activeCount() == 1 || mInputRoutes.hasRouteChanged(session)) {
         // indicate active capture to sound trigger service if starting capture from a mic on
         // primary HW module
-        audio_devices_t device = getNewInputDevice(input);
-        audio_devices_t primaryInputDevices = availablePrimaryInputDevices();
-        if (((device & primaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
-                mInputs.activeInputsCountOnDevices(primaryInputDevices) == 0) {
-            SoundTrigger::setCaptureState(true);
-        }
+        audio_devices_t device = getNewInputDevice(inputDesc);
         setInputDevice(input, device, true /* force */);
 
-        // automatically enable the remote submix output when input is started if not
-        // used by a policy mix of type MIX_TYPE_RECORDERS
-        // For remote submix (a virtual device), we open only one input per capture request.
-        if (audio_is_remote_submix_device(inputDesc->mDevice)) {
-            String8 address = String8("");
-            if (inputDesc->mPolicyMix == NULL) {
-                address = String8("0");
-            } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
-                address = inputDesc->mPolicyMix->mDeviceAddress;
+        if (inputDesc->getAudioSessionCount(true/*activeOnly*/) == 1) {
+            // if input maps to a dynamic policy with an activity listener, notify of state change
+            if ((inputDesc->mPolicyMix != NULL)
+                    && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
+                mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
+                        MIX_STATE_MIXING);
             }
-            if (address != "") {
-                setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
-                        AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
-                        address, "remote-submix");
+
+            audio_devices_t primaryInputDevices = availablePrimaryInputDevices();
+            if (((device & primaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
+                    mInputs.activeInputsCountOnDevices(primaryInputDevices) == 1) {
+                SoundTrigger::setCaptureState(true);
+            }
+
+            // automatically enable the remote submix output when input is started if not
+            // used by a policy mix of type MIX_TYPE_RECORDERS
+            // For remote submix (a virtual device), we open only one input per capture request.
+            if (audio_is_remote_submix_device(inputDesc->mDevice)) {
+                String8 address = String8("");
+                if (inputDesc->mPolicyMix == NULL) {
+                    address = String8("0");
+                } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
+                    address = inputDesc->mPolicyMix->mDeviceAddress;
+                }
+                if (address != "") {
+                    setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+                            AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+                            address, "remote-submix");
+                }
             }
         }
     }
 
     ALOGV("AudioPolicyManager::startInput() input source = %d", audioSession->inputSource());
 
-    audioSession->changeActiveCount(1);
     return NO_ERROR;
 }
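
startInput() now reports back, through a bitmask out-parameter, which kinds of concurrency the new capture runs into (an active call and/or other active captures) so the service layer can enforce permissions per flag. Below is a minimal sketch of composing and validating such a mask; the flag names mirror the patch but the values and the free function are illustrative, not the AudioPolicyInterface definitions.

#include <cstdint>
#include <cstdio>

using concurrency_mask_t = uint32_t;
constexpr concurrency_mask_t API_INPUT_CONCURRENCY_NONE    = 0x0;
constexpr concurrency_mask_t API_INPUT_CONCURRENCY_CALL    = 0x1;
constexpr concurrency_mask_t API_INPUT_CONCURRENCY_CAPTURE = 0x2;
constexpr concurrency_mask_t API_INPUT_CONCURRENCY_ALL =
        API_INPUT_CONCURRENCY_CALL | API_INPUT_CONCURRENCY_CAPTURE;

static concurrency_mask_t reportConcurrency(bool inCall, int otherActiveInputs) {
    concurrency_mask_t mask = API_INPUT_CONCURRENCY_NONE;
    if (inCall)                 mask |= API_INPUT_CONCURRENCY_CALL;
    if (otherActiveInputs != 0) mask |= API_INPUT_CONCURRENCY_CAPTURE;
    return mask;
}

int main() {
    concurrency_mask_t mask = reportConcurrency(/*inCall=*/true, /*otherActiveInputs=*/1);
    if (mask & ~API_INPUT_CONCURRENCY_ALL) {
        std::printf("invalid concurrency mask %#x\n", mask);
        return 1;
    }
    if (mask & API_INPUT_CONCURRENCY_CALL)    std::printf("would check in-call capture permission\n");
    if (mask & API_INPUT_CONCURRENCY_CAPTURE) std::printf("would check concurrent capture permission\n");
    return 0;
}
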
 
@@ -1804,41 +1862,46 @@
     // Routing?
     mInputRoutes.decRouteActivity(session);
 
-    if (!inputDesc->isActive()) {
-        // if input maps to a dynamic policy with an activity listener, notify of state change
-        if ((inputDesc->mPolicyMix != NULL)
-                && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
-            mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
-                    MIX_STATE_IDLE);
-        }
+    if (audioSession->activeCount() == 0) {
 
-        // automatically disable the remote submix output when input is stopped if not
-        // used by a policy mix of type MIX_TYPE_RECORDERS
-        if (audio_is_remote_submix_device(inputDesc->mDevice)) {
-            String8 address = String8("");
-            if (inputDesc->mPolicyMix == NULL) {
-                address = String8("0");
-            } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
-                address = inputDesc->mPolicyMix->mDeviceAddress;
+        if (inputDesc->isActive()) {
+            setInputDevice(input, getNewInputDevice(inputDesc), false /* force */);
+        } else {
+            // if input maps to a dynamic policy with an activity listener, notify of state change
+            if ((inputDesc->mPolicyMix != NULL)
+                    && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
+                mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
+                        MIX_STATE_IDLE);
             }
-            if (address != "") {
-                setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
-                                         AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
-                                         address, "remote-submix");
+
+            // automatically disable the remote submix output when input is stopped if not
+            // used by a policy mix of type MIX_TYPE_RECORDERS
+            if (audio_is_remote_submix_device(inputDesc->mDevice)) {
+                String8 address = String8("");
+                if (inputDesc->mPolicyMix == NULL) {
+                    address = String8("0");
+                } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
+                    address = inputDesc->mPolicyMix->mDeviceAddress;
+                }
+                if (address != "") {
+                    setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+                                             AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+                                             address, "remote-submix");
+                }
             }
-        }
 
-        audio_devices_t device = inputDesc->mDevice;
-        resetInputDevice(input);
+            audio_devices_t device = inputDesc->mDevice;
+            resetInputDevice(input);
 
-        // indicate inactive capture to sound trigger service if stopping capture from a mic on
-        // primary HW module
-        audio_devices_t primaryInputDevices = availablePrimaryInputDevices();
-        if (((device & primaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
-                mInputs.activeInputsCountOnDevices(primaryInputDevices) == 0) {
-            SoundTrigger::setCaptureState(false);
+            // indicate inactive capture to sound trigger service if stopping capture from a mic on
+            // primary HW module
+            audio_devices_t primaryInputDevices = availablePrimaryInputDevices();
+            if (((device & primaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
+                    mInputs.activeInputsCountOnDevices(primaryInputDevices) == 0) {
+                SoundTrigger::setCaptureState(false);
+            }
+            inputDesc->clearPreemptedSessions();
         }
-        inputDesc->clearPreemptedSessions();
     }
     return NO_ERROR;
 }
@@ -2320,7 +2383,9 @@
     snprintf(buffer, SIZE, " Primary Output: %d\n",
              hasPrimaryOutput() ? mPrimaryOutput->mIoHandle : AUDIO_IO_HANDLE_NONE);
     result.append(buffer);
-    snprintf(buffer, SIZE, " Phone state: %d\n", mEngine->getPhoneState());
+    std::string stateLiteral;
+    AudioModeConverter::toString(mEngine->getPhoneState(), stateLiteral);
+    snprintf(buffer, SIZE, " Phone state: %s\n", stateLiteral.c_str());
     result.append(buffer);
     snprintf(buffer, SIZE, " Force use for communications %d\n",
              mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION));
@@ -2354,6 +2419,7 @@
     mVolumeCurves->dump(fd);
     mEffects.dump(fd);
     mAudioPatches.dump(fd);
+    mPolicyMixes.dump(fd);
 
     return NO_ERROR;
 }
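
dump() now prints the phone state as a name instead of a raw integer, via AudioModeConverter::toString(). The sketch below shows the same enum-to-string idea in isolation; the enum and converter here are a hypothetical lookup, not the framework's TypeConverter machinery.

#include <cstdio>
#include <string>

// Hypothetical subset of audio modes for illustration.
enum class AudioMode { Normal, Ringtone, InCall, InCommunication };

static bool audioModeToString(AudioMode mode, std::string& out) {
    switch (mode) {
        case AudioMode::Normal:          out = "AUDIO_MODE_NORMAL";           return true;
        case AudioMode::Ringtone:        out = "AUDIO_MODE_RINGTONE";         return true;
        case AudioMode::InCall:          out = "AUDIO_MODE_IN_CALL";          return true;
        case AudioMode::InCommunication: out = "AUDIO_MODE_IN_COMMUNICATION"; return true;
    }
    return false;
}

int main() {
    std::string literal;
    if (audioModeToString(AudioMode::InCall, literal)) {
        std::printf(" Phone state: %s\n", literal.c_str()); // instead of " Phone state: 2"
    }
    return 0;
}
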
@@ -2709,8 +2775,8 @@
                 // create a software bridge in PatchPanel if:
                 // - source and sink devices are on different HW modules OR
                 // - audio HAL version is < 3.0
-                if ((srcDeviceDesc->getModuleHandle() != sinkDeviceDesc->getModuleHandle()) ||
-                        (srcDeviceDesc->mModule->getHalVersion() < AUDIO_DEVICE_API_VERSION_3_0)) {
+                if (!srcDeviceDesc->hasSameHwModuleAs(sinkDeviceDesc) ||
+                        (srcDeviceDesc->mModule->getHalVersionMajor() < 3)) {
                     // support only one sink device for now to simplify output selection logic
                     if (patch->num_sinks > 1) {
                         return INVALID_OPERATION;
@@ -2809,7 +2875,7 @@
                 return BAD_VALUE;
             }
             setInputDevice(inputDesc->mIoHandle,
-                           getNewInputDevice(inputDesc->mIoHandle),
+                           getNewInputDevice(inputDesc),
                            true,
                            NULL);
         } else if (patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) {
@@ -3014,7 +3080,7 @@
 
 status_t AudioPolicyManager::startAudioSource(const struct audio_port_config *source,
                                   const audio_attributes_t *attributes,
-                                  audio_io_handle_t *handle,
+                                  audio_patch_handle_t *handle,
                                   uid_t uid)
 {
     ALOGV("%s source %p attributes %p handle %p", __FUNCTION__, source, attributes, handle);
@@ -3022,7 +3088,7 @@
         return BAD_VALUE;
     }
 
-    *handle = AUDIO_IO_HANDLE_NONE;
+    *handle = AUDIO_PATCH_HANDLE_NONE;
 
     if (source->role != AUDIO_PORT_ROLE_SOURCE ||
             source->type != AUDIO_PORT_TYPE_DEVICE) {
@@ -3072,7 +3138,7 @@
 
     if (srcDeviceDesc->getAudioPort()->mModule->getHandle() ==
             sinkDeviceDesc->getAudioPort()->mModule->getHandle() &&
-            srcDeviceDesc->getAudioPort()->mModule->getHalVersion() >= AUDIO_DEVICE_API_VERSION_3_0 &&
+            srcDeviceDesc->getAudioPort()->mModule->getHalVersionMajor() >= 3 &&
             srcDeviceDesc->getAudioPort()->mGains.size() > 0) {
         ALOGV("%s AUDIO_DEVICE_API_VERSION_3_0", __FUNCTION__);
         //   create patch between src device and output device
@@ -3129,7 +3195,7 @@
     return NO_ERROR;
 }
 
-status_t AudioPolicyManager::stopAudioSource(audio_io_handle_t handle __unused)
+status_t AudioPolicyManager::stopAudioSource(audio_patch_handle_t handle __unused)
 {
     sp<AudioSourceDescriptor> sourceDesc = mAudioSources.valueFor(handle);
     ALOGV("%s handle %d", __FUNCTION__, handle);
@@ -3583,7 +3649,7 @@
                         mTestFormat = format;
                     } else if (mTestOutputs[mCurOutput] != 0) {
                         AudioParameter outputParam = AudioParameter();
-                        outputParam.addInt(String8("format"), format);
+                        outputParam.addInt(String8(AudioParameter::keyStreamSupportedFormats), format);
                         mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
                     }
                 }
@@ -3602,7 +3668,7 @@
                         mTestChannels = channels;
                     } else if (mTestOutputs[mCurOutput] != 0) {
                         AudioParameter outputParam = AudioParameter();
-                        outputParam.addInt(String8("channels"), channels);
+                        outputParam.addInt(String8(AudioParameter::keyStreamSupportedChannels), channels);
                         mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
                     }
                 }
@@ -3615,7 +3681,7 @@
                         mTestSamplingRate = samplingRate;
                     } else if (mTestOutputs[mCurOutput] != 0) {
                         AudioParameter outputParam = AudioParameter();
-                        outputParam.addInt(String8("sampling_rate"), samplingRate);
+                        outputParam.addInt(String8(AudioParameter::keyStreamSupportedSamplingRates), samplingRate);
                         mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
                     }
                 }
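
The raw "format"/"channels"/"sampling_rate" string literals are replaced by the shared constants on AudioParameter, so producers and consumers of these key/value pairs cannot drift apart. Below is a minimal sketch of that pattern with a tiny stand-in key/value builder; the class and key names are illustrative, not the real AudioParameter API.

#include <cstdio>
#include <string>

// Tiny stand-in for a key/value parameter bundle.
class Params {
public:
    // Shared key constants, analogous to AudioParameter::keyStreamSupportedFormats etc.
    static constexpr const char* keyFormat       = "format";
    static constexpr const char* keyChannels     = "channels";
    static constexpr const char* keySamplingRate = "sampling_rate";

    void addInt(const std::string& key, int value) {
        if (!mEncoded.empty()) mEncoded += ';';
        mEncoded += key + '=' + std::to_string(value);
    }
    const std::string& toString() const { return mEncoded; }

private:
    std::string mEncoded;
};

int main() {
    Params p;
    p.addInt(Params::keySamplingRate, 48000); // instead of addInt("sampling_rate", ...)
    p.addInt(Params::keyChannels, 2);
    std::printf("%s\n", p.toString().c_str()); // sampling_rate=48000;channels=2
    return 0;
}
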
@@ -4339,33 +4405,36 @@
             ((mAvailableInputDevices.types() & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET &
                     ~AUDIO_DEVICE_BIT_IN) != 0) ||
             ((mAvailableOutputDevices.types() & AUDIO_DEVICE_OUT_ALL_SCO) != 0);
-    // suspend A2DP output if:
-    //      (NOT already suspended) &&
-    //      ((SCO device is connected &&
-    //       (forced usage for communication || for record is SCO))) ||
-    //      (phone state is ringing || in call)
+
+    // if suspended, restore A2DP output if:
+    //      ((SCO device is NOT connected) ||
+    //       ((forced usage communication is NOT SCO) && (forced usage for record is NOT SCO) &&
+    //        (phone state is NOT in call) && (phone state is NOT ringing)))
     //
-    // restore A2DP output if:
-    //      (Already suspended) &&
-    //      ((SCO device is NOT connected ||
-    //       (forced usage NOT for communication && NOT for record is SCO))) &&
-    //      (phone state is NOT ringing && NOT in call)
+    // if not suspended, suspend A2DP output if:
+    //      (SCO device is connected) &&
+    //       ((forced usage for communication is SCO) || (forced usage for record is SCO) ||
+    //       ((phone state is in call) || (phone state is ringing)))
     //
     if (mA2dpSuspended) {
-        if ((!isScoConnected ||
-             ((mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION) != AUDIO_POLICY_FORCE_BT_SCO) &&
-              (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) != AUDIO_POLICY_FORCE_BT_SCO))) &&
-             ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) &&
+        if (!isScoConnected ||
+             ((mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION) !=
+                     AUDIO_POLICY_FORCE_BT_SCO) &&
+              (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) !=
+                      AUDIO_POLICY_FORCE_BT_SCO) &&
+              (mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) &&
               (mEngine->getPhoneState() != AUDIO_MODE_RINGTONE))) {
 
             mpClientInterface->restoreOutput(a2dpOutput);
             mA2dpSuspended = false;
         }
     } else {
-        if ((isScoConnected &&
-             ((mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION) == AUDIO_POLICY_FORCE_BT_SCO) ||
-              (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) == AUDIO_POLICY_FORCE_BT_SCO))) ||
-             ((mEngine->getPhoneState() == AUDIO_MODE_IN_CALL) ||
+        if (isScoConnected &&
+             ((mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION) ==
+                     AUDIO_POLICY_FORCE_BT_SCO) ||
+              (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) ==
+                      AUDIO_POLICY_FORCE_BT_SCO) ||
+              (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL) ||
               (mEngine->getPhoneState() == AUDIO_MODE_RINGTONE))) {
 
             mpClientInterface->suspendOutput(a2dpOutput);
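
The rewritten comment and condition make the suspend/restore rules symmetric: suspend A2DP when SCO is connected and either a forced SCO usage or an in-call/ringing phone state requires it; restore when SCO is gone or none of those conditions hold anymore. Below is a stand-alone sketch of the two predicates, with plain booleans standing in for the engine queries.

#include <cstdio>

struct ScoState {
    bool scoConnected;
    bool forceCommunicationToSco;
    bool forceRecordToSco;
    bool inCall;
    bool ringing;
};

static bool shouldSuspendA2dp(const ScoState& s) {
    return s.scoConnected &&
           (s.forceCommunicationToSco || s.forceRecordToSco || s.inCall || s.ringing);
}

static bool shouldRestoreA2dp(const ScoState& s) {
    return !s.scoConnected ||
           (!s.forceCommunicationToSco && !s.forceRecordToSco && !s.inCall && !s.ringing);
}

int main() {
    ScoState state{true, false, false, false, true};
    std::printf("suspend while ringing: %d\n", shouldSuspendA2dp(state)); // 1
    state.ringing = false;
    std::printf("restore after ringing: %d\n", shouldRestoreA2dp(state)); // 1
    return 0;
}
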
@@ -4436,9 +4505,9 @@
     return device;
 }
 
-audio_devices_t AudioPolicyManager::getNewInputDevice(audio_io_handle_t input)
+audio_devices_t AudioPolicyManager::getNewInputDevice(const sp<AudioInputDescriptor>& inputDesc)
 {
-    sp<AudioInputDescriptor> inputDesc = mInputs.valueFor(input);
+    audio_devices_t device = AUDIO_DEVICE_NONE;
 
     ssize_t index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
     if (index >= 0) {
@@ -4450,16 +4519,19 @@
         }
     }
 
-    audio_devices_t device = getDeviceAndMixForInputSource(inputDesc->inputSource());
+    audio_source_t source = inputDesc->getHighestPrioritySource(true /*activeOnly*/);
+    if (isInCall()) {
+        device = getDeviceAndMixForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
+    } else if (source != AUDIO_SOURCE_DEFAULT) {
+        device = getDeviceAndMixForInputSource(source);
+    }
 
     return device;
 }
 
 bool AudioPolicyManager::streamsMatchForvolume(audio_stream_type_t stream1,
                                                audio_stream_type_t stream2) {
-    return ((stream1 == stream2) ||
-            ((stream1 == AUDIO_STREAM_ACCESSIBILITY) && (stream2 == AUDIO_STREAM_MUSIC)) ||
-            ((stream1 == AUDIO_STREAM_MUSIC) && (stream2 == AUDIO_STREAM_ACCESSIBILITY)));
+    return (stream1 == stream2);
 }
 
 uint32_t AudioPolicyManager::getStrategyForStream(audio_stream_type_t stream) {
@@ -5268,6 +5340,7 @@
     switch (attr->usage) {
     case AUDIO_USAGE_MEDIA:
     case AUDIO_USAGE_GAME:
+    case AUDIO_USAGE_ASSISTANT:
     case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
         return AUDIO_STREAM_MUSIC;
     case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
@@ -5323,6 +5396,7 @@
     case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
     case AUDIO_USAGE_GAME:
     case AUDIO_USAGE_VIRTUAL_SOURCE:
+    case AUDIO_USAGE_ASSISTANT:
         break;
     default:
         return false;
@@ -5525,12 +5599,12 @@
 
     // Format MUST be checked first to update the list of AudioProfile
     if (profiles.hasDynamicFormat()) {
-        reply = mpClientInterface->getParameters(ioHandle,
-                                                 String8(AUDIO_PARAMETER_STREAM_SUP_FORMATS));
+        reply = mpClientInterface->getParameters(
+                ioHandle, String8(AudioParameter::keyStreamSupportedFormats));
         ALOGV("%s: supported formats %s", __FUNCTION__, reply.string());
         AudioParameter repliedParameters(reply);
         if (repliedParameters.get(
-                String8(AUDIO_PARAMETER_STREAM_SUP_FORMATS), reply) != NO_ERROR) {
+                String8(AudioParameter::keyStreamSupportedFormats), reply) != NO_ERROR) {
             ALOGE("%s: failed to retrieve format, bailing out", __FUNCTION__);
             return;
         }
@@ -5547,27 +5621,28 @@
         ChannelsVector channelMasks;
         SampleRateVector samplingRates;
         AudioParameter requestedParameters;
-        requestedParameters.addInt(String8(AUDIO_PARAMETER_STREAM_FORMAT), format);
+        requestedParameters.addInt(String8(AudioParameter::keyFormat), format);
 
         if (profiles.hasDynamicRateFor(format)) {
-            reply = mpClientInterface->getParameters(ioHandle,
-                                                     requestedParameters.toString() + ";" +
-                                                     AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES);
+            reply = mpClientInterface->getParameters(
+                    ioHandle,
+                    requestedParameters.toString() + ";" +
+                    AudioParameter::keyStreamSupportedSamplingRates);
             ALOGV("%s: supported sampling rates %s", __FUNCTION__, reply.string());
             AudioParameter repliedParameters(reply);
             if (repliedParameters.get(
-                    String8(AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES), reply) == NO_ERROR) {
+                    String8(AudioParameter::keyStreamSupportedSamplingRates), reply) == NO_ERROR) {
                 samplingRates = samplingRatesFromString(reply.string());
             }
         }
         if (profiles.hasDynamicChannelsFor(format)) {
             reply = mpClientInterface->getParameters(ioHandle,
                                                      requestedParameters.toString() + ";" +
-                                                     AUDIO_PARAMETER_STREAM_SUP_CHANNELS);
+                                                     AudioParameter::keyStreamSupportedChannels);
             ALOGV("%s: supported channel masks %s", __FUNCTION__, reply.string());
             AudioParameter repliedParameters(reply);
             if (repliedParameters.get(
-                    String8(AUDIO_PARAMETER_STREAM_SUP_CHANNELS), reply) == NO_ERROR) {
+                    String8(AudioParameter::keyStreamSupportedChannels), reply) == NO_ERROR) {
                 channelMasks = channelMasksFromString(reply.string());
                 if (device == AUDIO_DEVICE_OUT_HDMI) {
                     filterSurroundChannelMasks(&channelMasks);
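
The dynamic profile query asks the HAL, through getParameters(), for supported formats and then the supported sample rates and channel masks per format, parsing each reply into a vector. A hedged sketch of the parsing side follows, assuming a '|'-separated value list in the reply string; the delimiter and helper name are assumptions made for illustration, not the framework's parsing code.

#include <cstdio>
#include <sstream>
#include <string>
#include <vector>

// Parse a reply such as "sup_sampling_rates=8000|16000|48000" into integers.
// The '|' separator is an assumption made for this sketch.
static std::vector<unsigned> ratesFromReply(const std::string& reply) {
    std::vector<unsigned> rates;
    const std::string::size_type eq = reply.find('=');
    std::istringstream values(eq == std::string::npos ? reply : reply.substr(eq + 1));
    std::string token;
    while (std::getline(values, token, '|')) {
        if (!token.empty()) rates.push_back(static_cast<unsigned>(std::stoul(token)));
    }
    return rates;
}

int main() {
    for (unsigned rate : ratesFromReply("sup_sampling_rates=8000|16000|48000")) {
        std::printf("supported rate: %u\n", rate);
    }
    return 0;
}
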
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 3cfe508..3c5ed3a 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -114,12 +114,10 @@
                                           audio_session_t session,
                                           audio_stream_type_t *stream,
                                           uid_t uid,
-                                          uint32_t samplingRate,
-                                          audio_format_t format,
-                                          audio_channel_mask_t channelMask,
+                                          const audio_config_t *config,
                                           audio_output_flags_t flags,
                                           audio_port_handle_t selectedDeviceId,
-                                          const audio_offload_info_t *offloadInfo);
+                                          audio_port_handle_t *portId);
         virtual status_t startOutput(audio_io_handle_t output,
                                      audio_stream_type_t stream,
                                      audio_session_t session);
@@ -133,16 +131,16 @@
                                          audio_io_handle_t *input,
                                          audio_session_t session,
                                          uid_t uid,
-                                         uint32_t samplingRate,
-                                         audio_format_t format,
-                                         audio_channel_mask_t channelMask,
+                                         const audio_config_base_t *config,
                                          audio_input_flags_t flags,
                                          audio_port_handle_t selectedDeviceId,
-                                         input_type_t *inputType);
+                                         input_type_t *inputType,
+                                         audio_port_handle_t *portId);
 
         // indicates to the audio policy manager that the input starts being used.
         virtual status_t startInput(audio_io_handle_t input,
-                                    audio_session_t session);
+                                    audio_session_t session,
+                                    concurrency_type__mask_t *concurrency);
 
         // indicates to the audio policy manager that the input stops being used.
         virtual status_t stopInput(audio_io_handle_t input,
@@ -230,9 +228,9 @@
 
         virtual status_t startAudioSource(const struct audio_port_config *source,
                                           const audio_attributes_t *attributes,
-                                          audio_io_handle_t *handle,
+                                          audio_patch_handle_t *handle,
                                           uid_t uid);
-        virtual status_t stopAudioSource(audio_io_handle_t handle);
+        virtual status_t stopAudioSource(audio_patch_handle_t handle);
 
         virtual status_t setMasterMono(bool mono);
         virtual status_t getMasterMono(bool *mono);
@@ -408,7 +406,7 @@
         void updateDevicesAndOutputs();
 
         // selects the most appropriate device on input for current state
-        audio_devices_t getNewInputDevice(audio_io_handle_t input);
+        audio_devices_t getNewInputDevice(const sp<AudioInputDescriptor>& inputDesc);
 
         virtual uint32_t getMaxEffectsCpuLoad()
         {
@@ -509,6 +507,8 @@
 
         void clearAudioSources(uid_t uid);
 
+        bool isConcurentCaptureAllowed(const sp<AudioInputDescriptor>& inputDesc,
+                const sp<AudioSession>& audioSession);
 
         static bool streamsMatchForvolume(audio_stream_type_t stream1,
                                           audio_stream_type_t stream2);
@@ -662,7 +662,7 @@
                                                           const char *device_name);
         void updateMono(audio_io_handle_t output) {
             AudioParameter param;
-            param.addInt(String8(AUDIO_PARAMETER_MONO_OUTPUT), (int)mMasterMono);
+            param.addInt(String8(AudioParameter::keyMonoOutput), (int)mMasterMono);
             mpClientInterface->setParameters(output, param.toString());
         }
 };
diff --git a/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp b/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp
deleted file mode 100644
index dabffe6..0000000
--- a/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "AudioPolicyService"
-//#define LOG_NDEBUG 0
-
-#include "Configuration.h"
-#undef __STRICT_ANSI__
-#define __STDINT_LIMITS
-#define __STDC_LIMIT_MACROS
-#include <stdint.h>
-
-#include <sys/time.h>
-#include <binder/IServiceManager.h>
-#include <utils/Log.h>
-#include <cutils/properties.h>
-#include <binder/IPCThreadState.h>
-#include <utils/String16.h>
-#include <utils/threads.h>
-#include "AudioPolicyService.h"
-#include "ServiceUtilities.h"
-#include <hardware_legacy/power.h>
-#include <media/AudioEffect.h>
-#include <media/EffectsFactoryApi.h>
-//#include <media/IAudioFlinger.h>
-
-#include <hardware/hardware.h>
-#include <system/audio.h>
-#include <system/audio_policy.h>
-#include <hardware/audio_policy.h>
-#include <audio_effects/audio_effects_conf.h>
-#include <media/AudioParameter.h>
-
-
-namespace android {
-
-/* implementation of the interface to the policy manager */
-extern "C" {
-
-audio_module_handle_t aps_load_hw_module(void *service __unused,
-                                             const char *name)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        ALOGW("%s: could not get AudioFlinger", __func__);
-        return AUDIO_MODULE_HANDLE_NONE;
-    }
-
-    return af->loadHwModule(name);
-}
-
-static audio_io_handle_t open_output(audio_module_handle_t module,
-                                    audio_devices_t *pDevices,
-                                    uint32_t *pSamplingRate,
-                                    audio_format_t *pFormat,
-                                    audio_channel_mask_t *pChannelMask,
-                                    uint32_t *pLatencyMs,
-                                    audio_output_flags_t flags,
-                                    const audio_offload_info_t *offloadInfo)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        ALOGW("%s: could not get AudioFlinger", __func__);
-        return AUDIO_IO_HANDLE_NONE;
-    }
-
-    if (pSamplingRate == NULL || pFormat == NULL || pChannelMask == NULL ||
-            pDevices == NULL || pLatencyMs == NULL) {
-        return AUDIO_IO_HANDLE_NONE;
-    }
-    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
-    config.sample_rate = *pSamplingRate;
-    config.format = *pFormat;
-    config.channel_mask = *pChannelMask;
-    if (offloadInfo != NULL) {
-        config.offload_info = *offloadInfo;
-    }
-    audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
-    status_t status = af->openOutput(module, &output, &config, pDevices,
-                                     String8(""), pLatencyMs, flags);
-    if (status == NO_ERROR) {
-        *pSamplingRate = config.sample_rate;
-        *pFormat = config.format;
-        *pChannelMask = config.channel_mask;
-        if (offloadInfo != NULL) {
-            *((audio_offload_info_t *)offloadInfo) = config.offload_info;
-        }
-    }
-    return output;
-}
-
-// deprecated: replaced by aps_open_output_on_module()
-audio_io_handle_t aps_open_output(void *service __unused,
-                                         audio_devices_t *pDevices,
-                                         uint32_t *pSamplingRate,
-                                         audio_format_t *pFormat,
-                                         audio_channel_mask_t *pChannelMask,
-                                         uint32_t *pLatencyMs,
-                                         audio_output_flags_t flags)
-{
-    return open_output(AUDIO_MODULE_HANDLE_NONE, pDevices, pSamplingRate, pFormat, pChannelMask,
-                          pLatencyMs, flags, NULL);
-}
-
-audio_io_handle_t aps_open_output_on_module(void *service __unused,
-                                                   audio_module_handle_t module,
-                                                   audio_devices_t *pDevices,
-                                                   uint32_t *pSamplingRate,
-                                                   audio_format_t *pFormat,
-                                                   audio_channel_mask_t *pChannelMask,
-                                                   uint32_t *pLatencyMs,
-                                                   audio_output_flags_t flags,
-                                                   const audio_offload_info_t *offloadInfo)
-{
-    return open_output(module, pDevices, pSamplingRate, pFormat, pChannelMask,
-                          pLatencyMs, flags, offloadInfo);
-}
-
-audio_io_handle_t aps_open_dup_output(void *service __unused,
-                                                 audio_io_handle_t output1,
-                                                 audio_io_handle_t output2)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        ALOGW("%s: could not get AudioFlinger", __func__);
-        return 0;
-    }
-    return af->openDuplicateOutput(output1, output2);
-}
-
-int aps_close_output(void *service __unused, audio_io_handle_t output)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        return PERMISSION_DENIED;
-    }
-
-    return af->closeOutput(output);
-}
-
-int aps_suspend_output(void *service __unused, audio_io_handle_t output)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        ALOGW("%s: could not get AudioFlinger", __func__);
-        return PERMISSION_DENIED;
-    }
-
-    return af->suspendOutput(output);
-}
-
-int aps_restore_output(void *service __unused, audio_io_handle_t output)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        ALOGW("%s: could not get AudioFlinger", __func__);
-        return PERMISSION_DENIED;
-    }
-
-    return af->restoreOutput(output);
-}
-
-static audio_io_handle_t open_input(audio_module_handle_t module,
-                                    audio_devices_t *pDevices,
-                                    uint32_t *pSamplingRate,
-                                    audio_format_t *pFormat,
-                                    audio_channel_mask_t *pChannelMask)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        ALOGW("%s: could not get AudioFlinger", __func__);
-        return AUDIO_IO_HANDLE_NONE;
-    }
-
-    if (pSamplingRate == NULL || pFormat == NULL || pChannelMask == NULL || pDevices == NULL) {
-        return AUDIO_IO_HANDLE_NONE;
-    }
-
-    if (((*pDevices & AUDIO_DEVICE_IN_REMOTE_SUBMIX) == AUDIO_DEVICE_IN_REMOTE_SUBMIX)
-            && !captureAudioOutputAllowed(IPCThreadState::self()->getCallingPid(),
-                                          IPCThreadState::self()->getCallingUid())) {
-        ALOGE("open_input() permission denied: capture not allowed");
-        return AUDIO_IO_HANDLE_NONE;
-    }
-
-    audio_config_t config = AUDIO_CONFIG_INITIALIZER;;
-    config.sample_rate = *pSamplingRate;
-    config.format = *pFormat;
-    config.channel_mask = *pChannelMask;
-    audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
-    status_t status = af->openInput(module, &input, &config, pDevices,
-                                    String8(""), AUDIO_SOURCE_MIC, AUDIO_INPUT_FLAG_FAST /*FIXME*/);
-    if (status == NO_ERROR) {
-        *pSamplingRate = config.sample_rate;
-        *pFormat = config.format;
-        *pChannelMask = config.channel_mask;
-    }
-    return input;
-}
-
-
-// deprecated: replaced by aps_open_input_on_module(), and acoustics parameter is ignored
-audio_io_handle_t aps_open_input(void *service __unused,
-                                        audio_devices_t *pDevices,
-                                        uint32_t *pSamplingRate,
-                                        audio_format_t *pFormat,
-                                        audio_channel_mask_t *pChannelMask,
-                                        audio_in_acoustics_t acoustics __unused)
-{
-    return  open_input(AUDIO_MODULE_HANDLE_NONE, pDevices, pSamplingRate, pFormat, pChannelMask);
-}
-
-audio_io_handle_t aps_open_input_on_module(void *service __unused,
-                                                  audio_module_handle_t module,
-                                                  audio_devices_t *pDevices,
-                                                  uint32_t *pSamplingRate,
-                                                  audio_format_t *pFormat,
-                                                  audio_channel_mask_t *pChannelMask)
-{
-    return  open_input(module, pDevices, pSamplingRate, pFormat, pChannelMask);
-}
-
-int aps_close_input(void *service __unused, audio_io_handle_t input)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        return PERMISSION_DENIED;
-    }
-
-    return af->closeInput(input);
-}
-
-int aps_invalidate_stream(void *service __unused, audio_stream_type_t stream)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        return PERMISSION_DENIED;
-    }
-
-    return af->invalidateStream(stream);
-}
-
-int aps_move_effects(void *service __unused, audio_session_t session,
-                                audio_io_handle_t src_output,
-                                audio_io_handle_t dst_output)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        return PERMISSION_DENIED;
-    }
-
-    return af->moveEffects(session, src_output, dst_output);
-}
-
-char * aps_get_parameters(void *service __unused, audio_io_handle_t io_handle,
-                                     const char *keys)
-{
-    String8 result = AudioSystem::getParameters(io_handle, String8(keys));
-    return strdup(result.string());
-}
-
-void aps_set_parameters(void *service, audio_io_handle_t io_handle,
-                                   const char *kv_pairs, int delay_ms)
-{
-    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
-    audioPolicyService->setParameters(io_handle, kv_pairs, delay_ms);
-}
-
-int aps_set_stream_volume(void *service, audio_stream_type_t stream,
-                                     float volume, audio_io_handle_t output,
-                                     int delay_ms)
-{
-    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
-    return audioPolicyService->setStreamVolume(stream, volume, output,
-                                               delay_ms);
-}
-
-int aps_start_tone(void *service, audio_policy_tone_t tone,
-                              audio_stream_type_t stream)
-{
-    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
-    return audioPolicyService->startTone(tone, stream);
-}
-
-int aps_stop_tone(void *service)
-{
-    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
-    return audioPolicyService->stopTone();
-}
-
-int aps_set_voice_volume(void *service, float volume, int delay_ms)
-{
-    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
-    return audioPolicyService->setVoiceVolume(volume, delay_ms);
-}
-
-}; // extern "C"
-
-}; // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index b732b20..654465d 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -23,11 +23,11 @@
 #include <cutils/misc.h>
 #include <media/AudioEffect.h>
 #include <system/audio.h>
-#include <hardware/audio_effect.h>
-#include <audio_effects/audio_effects_conf.h>
+#include <system/audio_effects/audio_effects_conf.h>
 #include <utils/Vector.h>
 #include <utils/SortedVector.h>
 #include <cutils/config_utils.h>
+#include <binder/IPCThreadState.h>
 #include "AudioPolicyEffects.h"
 #include "ServiceUtilities.h"
 
@@ -57,11 +57,11 @@
     }
     mInputSources.clear();
 
-    for (i = 0; i < mInputs.size(); i++) {
-        mInputs.valueAt(i)->mEffects.clear();
-        delete mInputs.valueAt(i);
+    for (i = 0; i < mInputSessions.size(); i++) {
+        mInputSessions.valueAt(i)->mEffects.clear();
+        delete mInputSessions.valueAt(i);
     }
-    mInputs.clear();
+    mInputSessions.clear();
 
     // release audio output processing resources
     for (i = 0; i < mOutputStreams.size(); i++) {
@@ -93,19 +93,20 @@
         ALOGV("addInputEffects(): no processing needs to be attached to this source");
         return status;
     }
-    ssize_t idx = mInputs.indexOfKey(input);
-    EffectVector *inputDesc;
+    ssize_t idx = mInputSessions.indexOfKey(audioSession);
+    EffectVector *sessionDesc;
     if (idx < 0) {
-        inputDesc = new EffectVector(audioSession);
-        mInputs.add(input, inputDesc);
+        sessionDesc = new EffectVector(audioSession);
+        mInputSessions.add(audioSession, sessionDesc);
     } else {
         // EffectVector is existing and we just need to increase ref count
-        inputDesc = mInputs.valueAt(idx);
+        sessionDesc = mInputSessions.valueAt(idx);
     }
-    inputDesc->mRefCount++;
+    sessionDesc->mRefCount++;
 
-    ALOGV("addInputEffects(): input: %d, refCount: %d", input, inputDesc->mRefCount);
-    if (inputDesc->mRefCount == 1) {
+    ALOGV("addInputEffects(): input: %d, refCount: %d", input, sessionDesc->mRefCount);
+    if (sessionDesc->mRefCount == 1) {
+        int64_t token = IPCThreadState::self()->clearCallingIdentity();
         Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
         for (size_t i = 0; i < effects.size(); i++) {
             EffectDesc *effect = effects[i];
@@ -123,30 +124,32 @@
             }
             ALOGV("addInputEffects(): added Fx %s on source: %d",
                   effect->mName, (int32_t)aliasSource);
-            inputDesc->mEffects.add(fx);
+            sessionDesc->mEffects.add(fx);
         }
-        inputDesc->setProcessorEnabled(true);
+        sessionDesc->setProcessorEnabled(true);
+        IPCThreadState::self()->restoreCallingIdentity(token);
     }
     return status;
 }
 
 
-status_t AudioPolicyEffects::releaseInputEffects(audio_io_handle_t input)
+status_t AudioPolicyEffects::releaseInputEffects(audio_io_handle_t input,
+                                                 audio_session_t audioSession)
 {
     status_t status = NO_ERROR;
 
     Mutex::Autolock _l(mLock);
-    ssize_t index = mInputs.indexOfKey(input);
+    ssize_t index = mInputSessions.indexOfKey(audioSession);
     if (index < 0) {
         return status;
     }
-    EffectVector *inputDesc = mInputs.valueAt(index);
-    inputDesc->mRefCount--;
-    ALOGV("releaseInputEffects(): input: %d, refCount: %d", input, inputDesc->mRefCount);
-    if (inputDesc->mRefCount == 0) {
-        inputDesc->setProcessorEnabled(false);
-        delete inputDesc;
-        mInputs.removeItemsAt(index);
+    EffectVector *sessionDesc = mInputSessions.valueAt(index);
+    sessionDesc->mRefCount--;
+    ALOGV("releaseInputEffects(): input: %d, refCount: %d", input, sessionDesc->mRefCount);
+    if (sessionDesc->mRefCount == 0) {
+        sessionDesc->setProcessorEnabled(false);
+        delete sessionDesc;
+        mInputSessions.removeItemsAt(index);
         ALOGV("releaseInputEffects(): all effects released");
     }
     return status;
@@ -160,16 +163,16 @@
 
     Mutex::Autolock _l(mLock);
     size_t index;
-    for (index = 0; index < mInputs.size(); index++) {
-        if (mInputs.valueAt(index)->mSessionId == audioSession) {
+    for (index = 0; index < mInputSessions.size(); index++) {
+        if (mInputSessions.valueAt(index)->mSessionId == audioSession) {
             break;
         }
     }
-    if (index == mInputs.size()) {
+    if (index == mInputSessions.size()) {
         *count = 0;
         return BAD_VALUE;
     }
-    Vector< sp<AudioEffect> > effects = mInputs.valueAt(index)->mEffects;
+    Vector< sp<AudioEffect> > effects = mInputSessions.valueAt(index)->mEffects;
 
     for (size_t i = 0; i < effects.size(); i++) {
         effect_descriptor_t desc = effects[i]->descriptor();
@@ -251,6 +254,8 @@
     ALOGV("addOutputSessionEffects(): session: %d, refCount: %d",
           audioSession, procDesc->mRefCount);
     if (procDesc->mRefCount == 1) {
+        // make sure effects are associated to audio server even if we are executing a binder call
+        int64_t token = IPCThreadState::self()->clearCallingIdentity();
         Vector <EffectDesc *> effects = mOutputStreams.valueAt(index)->mEffects;
         for (size_t i = 0; i < effects.size(); i++) {
             EffectDesc *effect = effects[i];
@@ -269,6 +274,7 @@
         }
 
         procDesc->setProcessorEnabled(true);
+        IPCThreadState::self()->restoreCallingIdentity(token);
     }
     return status;
 }
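
Two things change in AudioPolicyEffects above: automatic input effects are now keyed and reference-counted per audio session rather than per input handle, and effect creation runs with the binder calling identity cleared so the effects belong to the audio server rather than the calling app. Below is a simplified sketch of both patterns; the identity guard, the session map and the effect handles are stand-ins, not the real IPCThreadState/KeyedVector/AudioEffect code.

#include <cstdio>
#include <map>
#include <vector>

// Stand-in for clearing/restoring the binder calling identity around effect creation.
struct CallingIdentityGuard {
    CallingIdentityGuard()  { std::printf("calling identity cleared\n"); }
    ~CallingIdentityGuard() { std::printf("calling identity restored\n"); }
};

struct SessionEffects {
    int refCount = 0;
    std::vector<int> effectIds; // placeholder for sp<AudioEffect> entries
};

static std::map<int /*session*/, SessionEffects> gInputSessions;

static void addInputEffects(int session) {
    SessionEffects& entry = gInputSessions[session];
    if (++entry.refCount == 1) {
        CallingIdentityGuard guard;    // create effects as the audio server
        entry.effectIds.push_back(42); // illustrative effect handle
    }
}

static void releaseInputEffects(int session) {
    auto it = gInputSessions.find(session);
    if (it == gInputSessions.end()) return;
    if (--it->second.refCount == 0) {
        gInputSessions.erase(it);      // last client gone: all effects released
    }
}

int main() {
    addInputEffects(/*session=*/7);
    addInputEffects(7);     // second client on the same session: refcount only
    releaseInputEffects(7);
    releaseInputEffects(7); // last release tears the effects down
    return 0;
}
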
diff --git a/services/audiopolicy/service/AudioPolicyEffects.h b/services/audiopolicy/service/AudioPolicyEffects.h
index afdaf98..0c74d87 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.h
+++ b/services/audiopolicy/service/AudioPolicyEffects.h
@@ -23,7 +23,6 @@
 #include <cutils/misc.h>
 #include <media/AudioEffect.h>
 #include <system/audio.h>
-#include <hardware/audio_effect.h>
 #include <utils/Vector.h>
 #include <utils/SortedVector.h>
 
@@ -62,7 +61,8 @@
                              audio_session_t audioSession);
 
     // Add all input effects associated to this input
-    status_t releaseInputEffects(audio_io_handle_t input);
+    status_t releaseInputEffects(audio_io_handle_t input,
+                                 audio_session_t audioSession);
 
 
     // Return a list of effect descriptors for default output effects
@@ -178,12 +178,12 @@
                          size_t *curSize,
                          size_t *totSize);
 
-    // protects access to mInputSources, mInputs, mOutputStreams, mOutputSessions
+    // protects access to mInputSources, mInputSessions, mOutputStreams, mOutputSessions
     Mutex mLock;
     // Automatic input effects are configured per audio_source_t
     KeyedVector< audio_source_t, EffectDescVector* > mInputSources;
     // Automatic input effects are unique for audio_io_handle_t
-    KeyedVector< audio_io_handle_t, EffectVector* > mInputs;
+    KeyedVector< audio_session_t, EffectVector* > mInputSessions;
 
     // Automatic output effects are organized per audio_stream_type_t
     KeyedVector< audio_stream_type_t, EffectDescVector* > mOutputStreams;
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 0387ee6..1e63a05 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -165,12 +165,10 @@
                                               audio_session_t session,
                                               audio_stream_type_t *stream,
                                               uid_t uid,
-                                              uint32_t samplingRate,
-                                              audio_format_t format,
-                                              audio_channel_mask_t channelMask,
+                                              const audio_config_t *config,
                                               audio_output_flags_t flags,
                                               audio_port_handle_t selectedDeviceId,
-                                              const audio_offload_info_t *offloadInfo)
+                                              audio_port_handle_t *portId)
 {
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
@@ -184,8 +182,9 @@
                 "%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, uid);
         uid = callingUid;
     }
-    return mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid, samplingRate,
-                                    format, channelMask, flags, selectedDeviceId, offloadInfo);
+    return mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid,
+                                                 config,
+                                                 flags, selectedDeviceId, portId);
 }
 
 status_t AudioPolicyService::startOutput(audio_io_handle_t output,
@@ -276,11 +275,10 @@
                                              audio_session_t session,
                                              pid_t pid,
                                              uid_t uid,
-                                             uint32_t samplingRate,
-                                             audio_format_t format,
-                                             audio_channel_mask_t channelMask,
+                                             const audio_config_base_t *config,
                                              audio_input_flags_t flags,
-                                             audio_port_handle_t selectedDeviceId)
+                                             audio_port_handle_t selectedDeviceId,
+                                             audio_port_handle_t *portId)
 {
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
@@ -319,9 +317,9 @@
         Mutex::Autolock _l(mLock);
         // the audio_in_acoustics_t parameter is ignored by get_input()
         status = mAudioPolicyManager->getInputForAttr(attr, input, session, uid,
-                                                     samplingRate, format, channelMask,
+                                                     config,
                                                      flags, selectedDeviceId,
-                                                     &inputType);
+                                                     &inputType, portId);
         audioPolicyEffects = mAudioPolicyEffects;
 
         if (status == NO_ERROR) {
@@ -375,8 +373,23 @@
         return NO_INIT;
     }
     Mutex::Autolock _l(mLock);
+    AudioPolicyInterface::concurrency_type__mask_t concurrency;
+    status_t status = mAudioPolicyManager->startInput(input, session, &concurrency);
 
-    return mAudioPolicyManager->startInput(input, session);
+    if (status == NO_ERROR) {
+        LOG_ALWAYS_FATAL_IF(concurrency & ~AudioPolicyInterface::API_INPUT_CONCURRENCY_ALL,
+                            "startInput(): invalid concurrency type %d", (int)concurrency);
+
+        // enforce permission (if any) required for each type of concurrency
+        if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CALL) {
+            //TODO: check incall capture permission
+        }
+        if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CAPTURE) {
+            //TODO: check concurrent capture permission
+        }
+    }
+
+    return status;
 }
 
 status_t AudioPolicyService::stopInput(audio_io_handle_t input,
@@ -403,7 +416,7 @@
     }
     if (audioPolicyEffects != 0) {
         // release audio processors from the input
-        status_t status = audioPolicyEffects->releaseInputEffects(input);
+        status_t status = audioPolicyEffects->releaseInputEffects(input, session);
         if(status != NO_ERROR) {
             ALOGW("Failed to release effects on input %d", input);
         }
@@ -583,7 +596,8 @@
         *count = 0;
         return NO_INIT;
     }
-    return audioPolicyEffects->queryDefaultInputEffects(audioSession, descriptors, count);
+    return audioPolicyEffects->queryDefaultInputEffects(
+            (audio_session_t)audioSession, descriptors, count);
 }
 
 bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info)
@@ -713,7 +727,7 @@
 
 status_t AudioPolicyService::startAudioSource(const struct audio_port_config *source,
                                   const audio_attributes_t *attributes,
-                                  audio_io_handle_t *handle)
+                                  audio_patch_handle_t *handle)
 {
     Mutex::Autolock _l(mLock);
     if (mAudioPolicyManager == NULL) {
@@ -724,7 +738,7 @@
                                                  IPCThreadState::self()->getCallingUid());
 }
 
-status_t AudioPolicyService::stopAudioSource(audio_io_handle_t handle)
+status_t AudioPolicyService::stopAudioSource(audio_patch_handle_t handle)
 {
     Mutex::Autolock _l(mLock);
     if (mAudioPolicyManager == NULL) {
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
deleted file mode 100644
index 946c380..0000000
--- a/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
+++ /dev/null
@@ -1,633 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "AudioPolicyService"
-//#define LOG_NDEBUG 0
-
-#include <utils/Log.h>
-#include "AudioPolicyService.h"
-#include "ServiceUtilities.h"
-
-#include <system/audio.h>
-#include <system/audio_policy.h>
-#include <hardware/audio_policy.h>
-#include <media/AudioPolicyHelper.h>
-
-namespace android {
-
-
-// ----------------------------------------------------------------------------
-
-status_t AudioPolicyService::setDeviceConnectionState(audio_devices_t device,
-                                                  audio_policy_dev_state_t state,
-                                                  const char *device_address,
-                                                  const char *device_name __unused)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    if (!settingsAllowed()) {
-        return PERMISSION_DENIED;
-    }
-    if (!audio_is_output_device(device) && !audio_is_input_device(device)) {
-        return BAD_VALUE;
-    }
-    if (state != AUDIO_POLICY_DEVICE_STATE_AVAILABLE &&
-            state != AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) {
-        return BAD_VALUE;
-    }
-
-    ALOGV("setDeviceConnectionState()");
-    Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->set_device_connection_state(mpAudioPolicy, device,
-                                                      state, device_address);
-}
-
-audio_policy_dev_state_t AudioPolicyService::getDeviceConnectionState(
-                                                              audio_devices_t device,
-                                                              const char *device_address)
-{
-    if (mpAudioPolicy == NULL) {
-        return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
-    }
-    return mpAudioPolicy->get_device_connection_state(mpAudioPolicy, device,
-                                                      device_address);
-}
-
-status_t AudioPolicyService::setPhoneState(audio_mode_t state)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    if (!settingsAllowed()) {
-        return PERMISSION_DENIED;
-    }
-    if (uint32_t(state) >= AUDIO_MODE_CNT) {
-        return BAD_VALUE;
-    }
-
-    ALOGV("setPhoneState()");
-
-    // TODO: check if it is more appropriate to do it in platform specific policy manager
-    AudioSystem::setMode(state);
-
-    Mutex::Autolock _l(mLock);
-    mpAudioPolicy->set_phone_state(mpAudioPolicy, state);
-    mPhoneState = state;
-    return NO_ERROR;
-}
-
-audio_mode_t AudioPolicyService::getPhoneState()
-{
-    Mutex::Autolock _l(mLock);
-    return mPhoneState;
-}
-
-status_t AudioPolicyService::setForceUse(audio_policy_force_use_t usage,
-                                         audio_policy_forced_cfg_t config)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    if (!settingsAllowed()) {
-        return PERMISSION_DENIED;
-    }
-    if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
-        return BAD_VALUE;
-    }
-    if (config < 0 || config >= AUDIO_POLICY_FORCE_CFG_CNT) {
-        return BAD_VALUE;
-    }
-    ALOGV("setForceUse()");
-    Mutex::Autolock _l(mLock);
-    mpAudioPolicy->set_force_use(mpAudioPolicy, usage, config);
-    return NO_ERROR;
-}
-
-audio_policy_forced_cfg_t AudioPolicyService::getForceUse(audio_policy_force_use_t usage)
-{
-    if (mpAudioPolicy == NULL) {
-        return AUDIO_POLICY_FORCE_NONE;
-    }
-    if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
-        return AUDIO_POLICY_FORCE_NONE;
-    }
-    return mpAudioPolicy->get_force_use(mpAudioPolicy, usage);
-}
-
-audio_io_handle_t AudioPolicyService::getOutput(audio_stream_type_t stream,
-                                    uint32_t samplingRate,
-                                    audio_format_t format,
-                                    audio_channel_mask_t channelMask,
-                                    audio_output_flags_t flags,
-                                    const audio_offload_info_t *offloadInfo)
-{
-    if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
-        return AUDIO_IO_HANDLE_NONE;
-    }
-    if (mpAudioPolicy == NULL) {
-        return AUDIO_IO_HANDLE_NONE;
-    }
-    ALOGV("getOutput()");
-    Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->get_output(mpAudioPolicy, stream, samplingRate,
-                                    format, channelMask, flags, offloadInfo);
-}
-
-status_t AudioPolicyService::startOutput(audio_io_handle_t output,
-                                         audio_stream_type_t stream,
-                                         audio_session_t session)
-{
-    if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
-        return BAD_VALUE;
-    }
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    ALOGV("startOutput()");
-    // create audio processors according to stream
-    sp<AudioPolicyEffects>audioPolicyEffects;
-    {
-        Mutex::Autolock _l(mLock);
-        audioPolicyEffects = mAudioPolicyEffects;
-    }
-    if (audioPolicyEffects != 0) {
-        status_t status = audioPolicyEffects->addOutputSessionEffects(output, stream, session);
-        if (status != NO_ERROR && status != ALREADY_EXISTS) {
-            ALOGW("Failed to add effects on session %d", session);
-        }
-    }
-
-    Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->start_output(mpAudioPolicy, output, stream, session);
-}
-
-status_t AudioPolicyService::stopOutput(audio_io_handle_t output,
-                                        audio_stream_type_t stream,
-                                        audio_session_t session)
-{
-    if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
-        return BAD_VALUE;
-    }
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    ALOGV("stopOutput()");
-    mOutputCommandThread->stopOutputCommand(output, stream, session);
-    return NO_ERROR;
-}
-
-status_t  AudioPolicyService::doStopOutput(audio_io_handle_t output,
-                                      audio_stream_type_t stream,
-                                      audio_session_t session)
-{
-    ALOGV("doStopOutput from tid %d", gettid());
-    // release audio processors from the stream
-    sp<AudioPolicyEffects>audioPolicyEffects;
-    {
-        Mutex::Autolock _l(mLock);
-        audioPolicyEffects = mAudioPolicyEffects;
-    }
-    if (audioPolicyEffects != 0) {
-        status_t status = audioPolicyEffects->releaseOutputSessionEffects(output, stream, session);
-        if (status != NO_ERROR && status != ALREADY_EXISTS) {
-            ALOGW("Failed to release effects on session %d", session);
-        }
-    }
-    Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->stop_output(mpAudioPolicy, output, stream, session);
-}
-
-void AudioPolicyService::releaseOutput(audio_io_handle_t output,
-                                       audio_stream_type_t stream,
-                                       audio_session_t session)
-{
-    if (mpAudioPolicy == NULL) {
-        return;
-    }
-    ALOGV("releaseOutput()");
-    mOutputCommandThread->releaseOutputCommand(output, stream, session);
-}
-
-void AudioPolicyService::doReleaseOutput(audio_io_handle_t output,
-                                         audio_stream_type_t stream __unused,
-                                         audio_session_t session __unused)
-{
-    ALOGV("doReleaseOutput from tid %d", gettid());
-    Mutex::Autolock _l(mLock);
-    mpAudioPolicy->release_output(mpAudioPolicy, output);
-}
-
-status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr,
-                                             audio_io_handle_t *input,
-                                             audio_session_t session,
-                                             pid_t pid __unused,
-                                             uid_t uid __unused,
-                                             uint32_t samplingRate,
-                                             audio_format_t format,
-                                             audio_channel_mask_t channelMask,
-                                             audio_input_flags_t flags __unused,
-                                             audio_port_handle_t selectedDeviceId __unused)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-
-    audio_source_t inputSource = attr->source;
-
-    // already checked by client, but double-check in case the client wrapper is bypassed
-    if (inputSource >= AUDIO_SOURCE_CNT && inputSource != AUDIO_SOURCE_HOTWORD &&
-        inputSource != AUDIO_SOURCE_FM_TUNER) {
-        return BAD_VALUE;
-    }
-
-    if (inputSource == AUDIO_SOURCE_DEFAULT) {
-        inputSource = AUDIO_SOURCE_MIC;
-    }
-
-    if ((inputSource == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) {
-        return BAD_VALUE;
-    }
-
-    sp<AudioPolicyEffects>audioPolicyEffects;
-    {
-        Mutex::Autolock _l(mLock);
-        // the audio_in_acoustics_t parameter is ignored by get_input()
-        *input = mpAudioPolicy->get_input(mpAudioPolicy, inputSource, samplingRate,
-                                             format, channelMask, (audio_in_acoustics_t) 0);
-        audioPolicyEffects = mAudioPolicyEffects;
-    }
-    if (*input == AUDIO_IO_HANDLE_NONE) {
-        return INVALID_OPERATION;
-    }
-
-    if (audioPolicyEffects != 0) {
-        // create audio pre processors according to input source
-        status_t status = audioPolicyEffects->addInputEffects(*input, inputSource, session);
-        if (status != NO_ERROR && status != ALREADY_EXISTS) {
-            ALOGW("Failed to add effects on input %d", input);
-        }
-    }
-    return NO_ERROR;
-}
-
-status_t AudioPolicyService::startInput(audio_io_handle_t input,
-                                        audio_session_t session __unused)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    Mutex::Autolock _l(mLock);
-
-    return mpAudioPolicy->start_input(mpAudioPolicy, input);
-}
-
-status_t AudioPolicyService::stopInput(audio_io_handle_t input,
-                                       audio_session_t session __unused)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    Mutex::Autolock _l(mLock);
-
-    return mpAudioPolicy->stop_input(mpAudioPolicy, input);
-}
-
-void AudioPolicyService::releaseInput(audio_io_handle_t input,
-                                      audio_session_t session __unused)
-{
-    if (mpAudioPolicy == NULL) {
-        return;
-    }
-
-    sp<AudioPolicyEffects>audioPolicyEffects;
-    {
-        Mutex::Autolock _l(mLock);
-        mpAudioPolicy->release_input(mpAudioPolicy, input);
-        audioPolicyEffects = mAudioPolicyEffects;
-    }
-    if (audioPolicyEffects != 0) {
-        // release audio processors from the input
-        status_t status = audioPolicyEffects->releaseInputEffects(input);
-        if(status != NO_ERROR) {
-            ALOGW("Failed to release effects on input %d", input);
-        }
-    }
-}
-
-status_t AudioPolicyService::initStreamVolume(audio_stream_type_t stream,
-                                            int indexMin,
-                                            int indexMax)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    if (!settingsAllowed()) {
-        return PERMISSION_DENIED;
-    }
-    if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
-        return BAD_VALUE;
-    }
-    Mutex::Autolock _l(mLock);
-    mpAudioPolicy->init_stream_volume(mpAudioPolicy, stream, indexMin, indexMax);
-    return NO_ERROR;
-}
-
-status_t AudioPolicyService::setStreamVolumeIndex(audio_stream_type_t stream,
-                                                  int index,
-                                                  audio_devices_t device)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    if (!settingsAllowed()) {
-        return PERMISSION_DENIED;
-    }
-    if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
-        return BAD_VALUE;
-    }
-    Mutex::Autolock _l(mLock);
-    if (mpAudioPolicy->set_stream_volume_index_for_device) {
-        return mpAudioPolicy->set_stream_volume_index_for_device(mpAudioPolicy,
-                                                                stream,
-                                                                index,
-                                                                device);
-    } else {
-        return mpAudioPolicy->set_stream_volume_index(mpAudioPolicy, stream, index);
-    }
-}
-
-status_t AudioPolicyService::getStreamVolumeIndex(audio_stream_type_t stream,
-                                                  int *index,
-                                                  audio_devices_t device)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
-        return BAD_VALUE;
-    }
-    Mutex::Autolock _l(mLock);
-    if (mpAudioPolicy->get_stream_volume_index_for_device) {
-        return mpAudioPolicy->get_stream_volume_index_for_device(mpAudioPolicy,
-                                                                stream,
-                                                                index,
-                                                                device);
-    } else {
-        return mpAudioPolicy->get_stream_volume_index(mpAudioPolicy, stream, index);
-    }
-}
-
-uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream)
-{
-    if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
-        return 0;
-    }
-    if (mpAudioPolicy == NULL) {
-        return 0;
-    }
-    return mpAudioPolicy->get_strategy_for_stream(mpAudioPolicy, stream);
-}
-
-//audio policy: use audio_device_t appropriately
-
-audio_devices_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stream)
-{
-    if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
-        return AUDIO_DEVICE_NONE;
-    }
-    if (mpAudioPolicy == NULL) {
-        return AUDIO_DEVICE_NONE;
-    }
-    return mpAudioPolicy->get_devices_for_stream(mpAudioPolicy, stream);
-}
-
-audio_io_handle_t AudioPolicyService::getOutputForEffect(const effect_descriptor_t *desc)
-{
-    // FIXME change return type to status_t, and return NO_INIT here
-    if (mpAudioPolicy == NULL) {
-        return 0;
-    }
-    Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->get_output_for_effect(mpAudioPolicy, desc);
-}
-
-status_t AudioPolicyService::registerEffect(const effect_descriptor_t *desc,
-                                audio_io_handle_t io,
-                                uint32_t strategy,
-                                audio_session_t session,
-                                int id)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    return mpAudioPolicy->register_effect(mpAudioPolicy, desc, io, strategy, session, id);
-}
-
-status_t AudioPolicyService::unregisterEffect(int id)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    return mpAudioPolicy->unregister_effect(mpAudioPolicy, id);
-}
-
-status_t AudioPolicyService::setEffectEnabled(int id, bool enabled)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    return mpAudioPolicy->set_effect_enabled(mpAudioPolicy, id, enabled);
-}
-
-bool AudioPolicyService::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
-{
-    if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
-        return false;
-    }
-    if (mpAudioPolicy == NULL) {
-        return false;
-    }
-    Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->is_stream_active(mpAudioPolicy, stream, inPastMs);
-}
-
-bool AudioPolicyService::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
-{
-    if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
-        return false;
-    }
-    if (mpAudioPolicy == NULL) {
-        return false;
-    }
-    Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->is_stream_active_remotely(mpAudioPolicy, stream, inPastMs);
-}
-
-bool AudioPolicyService::isSourceActive(audio_source_t source) const
-{
-    if (mpAudioPolicy == NULL) {
-        return false;
-    }
-    if (mpAudioPolicy->is_source_active == 0) {
-        return false;
-    }
-    Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->is_source_active(mpAudioPolicy, source);
-}
-
-status_t AudioPolicyService::queryDefaultPreProcessing(audio_session_t audioSession,
-                                                       effect_descriptor_t *descriptors,
-                                                       uint32_t *count)
-{
-    if (mpAudioPolicy == NULL) {
-        *count = 0;
-        return NO_INIT;
-    }
-    sp<AudioPolicyEffects>audioPolicyEffects;
-    {
-        Mutex::Autolock _l(mLock);
-        audioPolicyEffects = mAudioPolicyEffects;
-    }
-    if (audioPolicyEffects == 0) {
-        *count = 0;
-        return NO_INIT;
-    }
-    return audioPolicyEffects->queryDefaultInputEffects(audioSession, descriptors, count);
-}
-
-bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info)
-{
-    if (mpAudioPolicy == NULL) {
-        ALOGV("mpAudioPolicy == NULL");
-        return false;
-    }
-
-    if (mpAudioPolicy->is_offload_supported == NULL) {
-        ALOGV("HAL does not implement is_offload_supported");
-        return false;
-    }
-
-    return mpAudioPolicy->is_offload_supported(mpAudioPolicy, &info);
-}
-
-status_t AudioPolicyService::listAudioPorts(audio_port_role_t role __unused,
-                                            audio_port_type_t type __unused,
-                                            unsigned int *num_ports,
-                                            struct audio_port *ports __unused,
-                                            unsigned int *generation __unused)
-{
-    *num_ports = 0;
-    return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::getAudioPort(struct audio_port *port __unused)
-{
-    return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::createAudioPatch(const struct audio_patch *patch __unused,
-        audio_patch_handle_t *handle __unused)
-{
-    return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::releaseAudioPatch(audio_patch_handle_t handle __unused)
-{
-    return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::listAudioPatches(unsigned int *num_patches,
-        struct audio_patch *patches __unused,
-        unsigned int *generation __unused)
-{
-    *num_patches = 0;
-    return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::setAudioPortConfig(const struct audio_port_config *config __unused)
-{
-    return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::getOutputForAttr(const audio_attributes_t *attr,
-                                              audio_io_handle_t *output,
-                                              audio_session_t session __unused,
-                                              audio_stream_type_t *stream,
-                                              uid_t uid __unused,
-                                              uint32_t samplingRate,
-                                              audio_format_t format,
-                                              audio_channel_mask_t channelMask,
-                                              audio_output_flags_t flags,
-                                              audio_port_handle_t selectedDeviceId __unused,
-                                              const audio_offload_info_t *offloadInfo)
-{
-    if (attr != NULL) {
-        *stream = audio_attributes_to_stream_type(attr);
-    } else {
-        if (*stream == AUDIO_STREAM_DEFAULT) {
-            return BAD_VALUE;
-        }
-    }
-    *output = getOutput(*stream, samplingRate, format, channelMask,
-                                          flags, offloadInfo);
-    if (*output == AUDIO_IO_HANDLE_NONE) {
-        return INVALID_OPERATION;
-    }
-    return NO_ERROR;
-}
-
-status_t AudioPolicyService::acquireSoundTriggerSession(audio_session_t *session __unused,
-                                       audio_io_handle_t *ioHandle __unused,
-                                       audio_devices_t *device __unused)
-{
-    return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::releaseSoundTriggerSession(audio_session_t session __unused)
-{
-    return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::registerPolicyMixes(const Vector<AudioMix>& mixes __unused,
-                                                 bool registration __unused)
-{
-    return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::startAudioSource(const struct audio_port_config *source,
-                                  const audio_attributes_t *attributes,
-                                  audio_io_handle_t *handle)
-{
-    return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::stopAudioSource(audio_io_handle_t handle)
-{
-    return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::setMasterMono(bool mono)
-{
-    return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::getMasterMono(bool *mono)
-{
-    return INVALID_OPERATION;
-}
-
-}; // namespace android
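
The deleted legacy shim above reduced getOutputForAttr() to the stream-based getOutput() call; the attribute-to-stream fallback it relied on was essentially the following (condensed from the removed file, with audio_attributes_to_stream_type() coming from media/AudioPolicyHelper.h):

    #include <utils/Errors.h>
    #include <system/audio.h>
    #include <media/AudioPolicyHelper.h>  // audio_attributes_to_stream_type()

    // Condensed from the removed legacy getOutputForAttr(): derive a stream
    // type from the attributes when they are given, otherwise require a
    // usable explicit stream type from the caller.
    static status_t resolveStream(const audio_attributes_t *attr,
                                  audio_stream_type_t *stream) {
        if (attr != NULL) {
            *stream = audio_attributes_to_stream_type(attr);
            return NO_ERROR;
        }
        if (*stream == AUDIO_STREAM_DEFAULT) {
            return BAD_VALUE;  // neither attributes nor an explicit stream type
        }
        return NO_ERROR;
    }
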
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 52ed73e..c4f6367 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -34,13 +34,10 @@
 #include "ServiceUtilities.h"
 #include <hardware_legacy/power.h>
 #include <media/AudioEffect.h>
-#include <media/EffectsFactoryApi.h>
 #include <media/AudioParameter.h>
 
-#include <hardware/hardware.h>
 #include <system/audio.h>
 #include <system/audio_policy.h>
-#include <hardware/audio_policy.h>
 
 namespace android {
 
@@ -52,11 +49,6 @@
 
 static const nsecs_t kAudioCommandTimeoutNs = seconds(3); // 3 seconds
 
-#ifdef USE_LEGACY_AUDIO_POLICY
-namespace {
-    extern struct audio_policy_service_ops aps_ops;
-};
-#endif
 
 // ----------------------------------------------------------------------------
 
@@ -78,40 +70,8 @@
         // start output activity command thread
         mOutputCommandThread = new AudioCommandThread(String8("ApmOutput"), this);
 
-#ifdef USE_LEGACY_AUDIO_POLICY
-        ALOGI("AudioPolicyService CSTOR in legacy mode");
-
-        /* instantiate the audio policy manager */
-        const struct hw_module_t *module;
-        int rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module);
-        if (rc) {
-            return;
-        }
-        rc = audio_policy_dev_open(module, &mpAudioPolicyDev);
-        ALOGE_IF(rc, "couldn't open audio policy device (%s)", strerror(-rc));
-        if (rc) {
-            return;
-        }
-
-        rc = mpAudioPolicyDev->create_audio_policy(mpAudioPolicyDev, &aps_ops, this,
-                                                   &mpAudioPolicy);
-        ALOGE_IF(rc, "couldn't create audio policy (%s)", strerror(-rc));
-        if (rc) {
-            return;
-        }
-
-        rc = mpAudioPolicy->init_check(mpAudioPolicy);
-        ALOGE_IF(rc, "couldn't init_check the audio policy (%s)", strerror(-rc));
-        if (rc) {
-            return;
-        }
-        ALOGI("Loaded audio policy from %s (%s)", module->name, module->id);
-#else
-        ALOGI("AudioPolicyService CSTOR in new mode");
-
         mAudioPolicyClient = new AudioPolicyClient(this);
         mAudioPolicyManager = createAudioPolicyManager(mAudioPolicyClient);
-#endif
     }
     // load audio processing modules
     sp<AudioPolicyEffects>audioPolicyEffects = new AudioPolicyEffects();
@@ -127,17 +87,8 @@
     mAudioCommandThread->exit();
     mOutputCommandThread->exit();
 
-#ifdef USE_LEGACY_AUDIO_POLICY
-    if (mpAudioPolicy != NULL && mpAudioPolicyDev != NULL) {
-        mpAudioPolicyDev->destroy_audio_policy(mpAudioPolicyDev, mpAudioPolicy);
-    }
-    if (mpAudioPolicyDev != NULL) {
-        audio_policy_dev_close(mpAudioPolicyDev);
-    }
-#else
     destroyAudioPolicyManager(mAudioPolicyManager);
     delete mAudioPolicyClient;
-#endif
 
     mNotificationClients.clear();
     mAudioPolicyEffects.clear();
@@ -185,14 +136,12 @@
         Mutex::Autolock _l(mNotificationClientsLock);
         mNotificationClients.removeItem(uid);
     }
-#ifndef USE_LEGACY_AUDIO_POLICY
     {
         Mutex::Autolock _l(mLock);
         if (mAudioPolicyManager) {
             mAudioPolicyManager->releaseResourcesForUid(uid);
         }
     }
-#endif
 }
 
 void AudioPolicyService::onAudioPortListUpdate()
@@ -358,11 +307,7 @@
     char buffer[SIZE];
     String8 result;
 
-#ifdef USE_LEGACY_AUDIO_POLICY
-    snprintf(buffer, SIZE, "PolicyManager Interface: %p\n", mpAudioPolicy);
-#else
     snprintf(buffer, SIZE, "AudioPolicyManager: %p\n", mAudioPolicyManager);
-#endif
     result.append(buffer);
     snprintf(buffer, SIZE, "Command Thread: %p\n", mAudioCommandThread.get());
     result.append(buffer);
@@ -392,15 +337,9 @@
             mTonePlaybackThread->dump(fd);
         }
 
-#ifdef USE_LEGACY_AUDIO_POLICY
-        if (mpAudioPolicy) {
-            mpAudioPolicy->dump(mpAudioPolicy, fd);
-        }
-#else
         if (mAudioPolicyManager) {
             mAudioPolicyManager->dump(fd);
         }
-#endif
 
         if (locked) mLock.unlock();
     }
@@ -1208,29 +1147,4 @@
 int aps_set_voice_volume(void *service, float volume, int delay_ms);
 };
 
-#ifdef USE_LEGACY_AUDIO_POLICY
-namespace {
-    struct audio_policy_service_ops aps_ops = {
-        .open_output           = aps_open_output,
-        .open_duplicate_output = aps_open_dup_output,
-        .close_output          = aps_close_output,
-        .suspend_output        = aps_suspend_output,
-        .restore_output        = aps_restore_output,
-        .open_input            = aps_open_input,
-        .close_input           = aps_close_input,
-        .set_stream_volume     = aps_set_stream_volume,
-        .invalidate_stream     = aps_invalidate_stream,
-        .set_parameters        = aps_set_parameters,
-        .get_parameters        = aps_get_parameters,
-        .start_tone            = aps_start_tone,
-        .stop_tone             = aps_stop_tone,
-        .set_voice_volume      = aps_set_voice_volume,
-        .move_effects          = aps_move_effects,
-        .load_hw_module        = aps_load_hw_module,
-        .open_output_on_module = aps_open_output_on_module,
-        .open_input_on_module  = aps_open_input_on_module,
-    };
-}; // namespace <unnamed>
-#endif
-
 }; // namespace android
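
Related to this cleanup, the startInput() change in AudioPolicyInterfaceImpl.cpp above leaves two permission checks as TODOs against the returned concurrency mask. A hedged sketch of how that gating could look once filled in; checkCallCapturePermission() and checkConcurrentCapturePermission() are hypothetical helpers, not existing AOSP functions:

    // Hypothetical permission gating for the concurrency mask returned by
    // AudioPolicyManager::startInput(); both helpers below are placeholders.
    bool checkCallCapturePermission();        // hypothetical
    bool checkConcurrentCapturePermission();  // hypothetical

    status_t enforceInputConcurrency(
            AudioPolicyInterface::concurrency_type__mask_t concurrency) {
        if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CALL) {
            if (!checkCallCapturePermission()) return PERMISSION_DENIED;
        }
        if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CAPTURE) {
            if (!checkConcurrentCapturePermission()) return PERMISSION_DENIED;
        }
        return NO_ERROR;
    }
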
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 8c9b23c..9a083f4 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -26,14 +26,10 @@
 #include <binder/BinderService.h>
 #include <system/audio.h>
 #include <system/audio_policy.h>
-#include <hardware/audio_policy.h>
 #include <media/IAudioPolicyService.h>
 #include <media/ToneGenerator.h>
 #include <media/AudioEffect.h>
 #include <media/AudioPolicy.h>
-#ifdef USE_LEGACY_AUDIO_POLICY
-#include <hardware_legacy/AudioPolicyInterface.h>
-#endif
 #include "AudioPolicyEffects.h"
 #include "managerdefault/AudioPolicyManager.h"
 
@@ -84,12 +80,10 @@
                                       audio_session_t session,
                                       audio_stream_type_t *stream,
                                       uid_t uid,
-                                      uint32_t samplingRate = 0,
-                                      audio_format_t format = AUDIO_FORMAT_DEFAULT,
-                                      audio_channel_mask_t channelMask = 0,
-                                      audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
-                                      audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
-                                      const audio_offload_info_t *offloadInfo = NULL);
+                                      const audio_config_t *config,
+                                      audio_output_flags_t flags,
+                                      audio_port_handle_t selectedDeviceId,
+                                      audio_port_handle_t *portId);
     virtual status_t startOutput(audio_io_handle_t output,
                                  audio_stream_type_t stream,
                                  audio_session_t session);
@@ -104,11 +98,10 @@
                                      audio_session_t session,
                                      pid_t pid,
                                      uid_t uid,
-                                     uint32_t samplingRate,
-                                     audio_format_t format,
-                                     audio_channel_mask_t channelMask,
+                                     const audio_config_base_t *config,
                                      audio_input_flags_t flags,
-                                     audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
+                                     audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
+                                     audio_port_handle_t *portId = NULL);
     virtual status_t startInput(audio_io_handle_t input,
                                 audio_session_t session);
     virtual status_t stopInput(audio_io_handle_t input,
@@ -203,8 +196,8 @@
 
     virtual status_t startAudioSource(const struct audio_port_config *source,
                                       const audio_attributes_t *attributes,
-                                      audio_io_handle_t *handle);
-    virtual status_t stopAudioSource(audio_io_handle_t handle);
+                                      audio_patch_handle_t *handle);
+    virtual status_t stopAudioSource(audio_patch_handle_t handle);
 
     virtual status_t setMasterMono(bool mono);
     virtual status_t getMasterMono(bool *mono);
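
As the header now declares, startAudioSource()/stopAudioSource() identify the started source by an audio_patch_handle_t rather than an audio_io_handle_t. A minimal caller sketch; the port config, attributes, and the service handle aps are placeholders:

    // Placeholder values; a real caller fills these from the source device
    // and the desired attributes.
    struct audio_port_config sourceConfig = {};
    audio_attributes_t attributes = {};

    audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
    status_t res = aps->startAudioSource(&sourceConfig, &attributes, &handle);
    if (res == NO_ERROR) {
        // ... use the started source ...
        aps->stopAudioSource(handle);  // the handle names the patch, not an I/O stream
    }
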
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 7feed6b..5b4d10d 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -28,6 +28,7 @@
     common/Camera2ClientBase.cpp \
     common/CameraDeviceBase.cpp \
     common/CameraModule.cpp \
+    common/CameraProviderManager.cpp \
     common/FrameProcessorBase.cpp \
     api1/CameraClient.cpp \
     api1/Camera2Client.cpp \
@@ -47,8 +48,10 @@
     device3/Camera3OutputStream.cpp \
     device3/Camera3ZslStream.cpp \
     device3/Camera3DummyStream.cpp \
+    device3/Camera3SharedOutputStream.cpp \
     device3/StatusTracker.cpp \
     device3/Camera3BufferManager.cpp \
+    device3/Camera3StreamSplitter.cpp \
     gui/RingBufferConsumer.cpp \
     utils/CameraTraces.cpp \
     utils/AutoConditionLock.cpp \
@@ -63,24 +66,36 @@
     libmedia \
     libmediautils \
     libcamera_client \
+    libcamera_metadata \
     libgui \
     libhardware \
-    libcamera_metadata \
+    libhidlbase \
+    libhidltransport \
     libjpeg \
-    libmemunreachable
+    libmemunreachable \
+    android.hardware.camera.common@1.0 \
+    android.hardware.camera.provider@2.4 \
+    android.hardware.camera.device@1.0 \
+    android.hardware.camera.device@3.2 \
+    android.hidl.manager@1.0
 
 LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libbinder libcamera_client
 
 LOCAL_C_INCLUDES += \
     system/media/private/camera/include \
-    frameworks/native/include/media/openmax \
-    external/jpeg
+    frameworks/native/include/media/openmax
 
 LOCAL_EXPORT_C_INCLUDE_DIRS := \
     frameworks/av/services/camera/libcameraservice
 
 LOCAL_CFLAGS += -Wall -Wextra -Werror
 
+ifeq ($(ENABLE_TREBLE), true)
+
+  LOCAL_CFLAGS += -DENABLE_TREBLE
+
+endif # ENABLE_TREBLE
+
 LOCAL_MODULE:= libcameraservice
 
 include $(BUILD_SHARED_LIBRARY)
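
The new android.hardware.camera.*@x.y entries link the generated HIDL libraries; in the C++ sources those packages surface as versioned namespaces, as the using-declarations added to CameraService.cpp below show. An illustration of the mapping, assuming the standard HIDL package-to-namespace convention:

    // android.hardware.camera.common@1.0 maps to the C++ namespace
    // android::hardware::camera::common::V1_0
    using android::hardware::camera::common::V1_0::CameraDeviceStatus;
    using android::hardware::camera::common::V1_0::TorchModeStatus;
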
diff --git a/services/camera/libcameraservice/CameraFlashlight.cpp b/services/camera/libcameraservice/CameraFlashlight.cpp
index 6314ba5..07d88f6 100644
--- a/services/camera/libcameraservice/CameraFlashlight.cpp
+++ b/services/camera/libcameraservice/CameraFlashlight.cpp
@@ -36,10 +36,18 @@
 // CameraFlashlight implementation begins
 // used by camera service to control flashflight.
 /////////////////////////////////////////////////////////////////////
-CameraFlashlight::CameraFlashlight(CameraModule& cameraModule,
-        const camera_module_callbacks_t& callbacks) :
-        mCameraModule(&cameraModule),
-        mCallbacks(&callbacks),
+CameraFlashlight::CameraFlashlight(CameraModule* cameraModule,
+        camera_module_callbacks_t* callbacks) :
+        mCameraModule(cameraModule),
+        mCallbacks(callbacks),
+        mFlashlightMapInitialized(false) {
+}
+
+CameraFlashlight::CameraFlashlight(sp<CameraProviderManager> providerManager,
+        camera_module_callbacks_t* callbacks) :
+        mCameraModule(nullptr),
+        mProviderManager(providerManager),
+        mCallbacks(callbacks),
         mFlashlightMapInitialized(false) {
 }
 
@@ -55,8 +63,10 @@
 
     status_t res = OK;
 
-    if (mCameraModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_4) {
-        mFlashControl = new ModuleFlashControl(*mCameraModule, *mCallbacks);
+    if (mCameraModule == nullptr) {
+        mFlashControl = new ProviderFlashControl(mProviderManager);
+    } else if (mCameraModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_4) {
+        mFlashControl = new ModuleFlashControl(*mCameraModule);
         if (mFlashControl == NULL) {
             ALOGV("%s: cannot create flash control for module api v2.4+",
                      __FUNCTION__);
@@ -69,7 +79,7 @@
                     CAMERA_MODULE_API_VERSION_2_0) {
             camera_info info;
             res = mCameraModule->getCameraInfo(
-                    atoi(String8(cameraId).string()), &info);
+                    atoi(cameraId.string()), &info);
             if (res) {
                 ALOGE("%s: failed to get camera info for camera %s",
                         __FUNCTION__, cameraId.string());
@@ -157,15 +167,27 @@
 status_t CameraFlashlight::findFlashUnits() {
     Mutex::Autolock l(mLock);
     status_t res;
-    int32_t numCameras = mCameraModule->getNumberOfCameras();
+
+    std::vector<String8> cameraIds;
+    if (mCameraModule) {
+        cameraIds.resize(mCameraModule->getNumberOfCameras());
+        for (size_t i = 0; i < cameraIds.size(); i++) {
+            cameraIds[i] = String8::format("%zu", i);
+        }
+    } else {
+        // No module, must be provider
+        std::vector<std::string> ids = mProviderManager->getCameraDeviceIds();
+        cameraIds.resize(ids.size());
+        for (size_t i = 0; i < cameraIds.size(); i++) {
+            cameraIds[i] = String8(ids[i].c_str());
+        }
+    }
 
     mHasFlashlightMap.clear();
     mFlashlightMapInitialized = false;
 
-    for (int32_t i = 0; i < numCameras; i++) {
+    for (auto &id : cameraIds) {
         bool hasFlash = false;
-        String8 id = String8::format("%d", i);
-
         res = createFlashlightControl(id);
         if (res) {
             ALOGE("%s: failed to create flash control for %s", __FUNCTION__,
@@ -224,7 +246,7 @@
         return NO_INIT;
     }
 
-    if (mCameraModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_4) {
+    if (mCameraModule && mCameraModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_4) {
         // framework is going to open a camera device, all flash light control
         // should be closed for backward compatible support.
         mFlashControl.clear();
@@ -274,7 +296,7 @@
     if (mOpenedCameraIds.size() != 0)
         return OK;
 
-    if (mCameraModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_4) {
+    if (mCameraModule && mCameraModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_4) {
         // notify torch available for all cameras with a flash
         int numCameras = mCameraModule->getNumberOfCameras();
         for (int i = 0; i < numCameras; i++) {
@@ -298,10 +320,35 @@
-// ModuleFlashControl implementation begins
-// Flash control for camera module v2.4 and above.
+// ProviderFlashControl implementation begins
+// Flash control for camera provider v2.4 and above.
 /////////////////////////////////////////////////////////////////////
-ModuleFlashControl::ModuleFlashControl(CameraModule& cameraModule,
-        const camera_module_callbacks_t& callbacks) :
+ProviderFlashControl::ProviderFlashControl(sp<CameraProviderManager> providerManager) :
+        mProviderManager(providerManager) {
+}
+
+ProviderFlashControl::~ProviderFlashControl() {
+}
+
+status_t ProviderFlashControl::hasFlashUnit(const String8& cameraId, bool *hasFlash) {
+    if (!hasFlash) {
+        return BAD_VALUE;
+    }
+    *hasFlash = mProviderManager->hasFlashUnit(cameraId.string());
+    return OK;
+}
+
+status_t ProviderFlashControl::setTorchMode(const String8& cameraId, bool enabled) {
+    ALOGV("%s: set camera %s torch mode to %d", __FUNCTION__,
+            cameraId.string(), enabled);
+
+    return mProviderManager->setTorchMode(cameraId.string(), enabled);
+}
+// ProviderFlashControl implementation ends
+
+/////////////////////////////////////////////////////////////////////
+// ModuleFlashControl implementation begins
+// Flash control for camera module v2.4 and above.
+/////////////////////////////////////////////////////////////////////
+ModuleFlashControl::ModuleFlashControl(CameraModule& cameraModule) :
         mCameraModule(&cameraModule) {
-    (void) callbacks;
 }
 
 ModuleFlashControl::~ModuleFlashControl() {
@@ -477,7 +524,7 @@
     }
 
     sp<CameraDeviceBase> device =
-            new Camera3Device(atoi(cameraId.string()));
+            new Camera3Device(cameraId);
     if (device == NULL) {
         return NO_MEMORY;
     }
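
A brief usage sketch of the new provider-backed flash control path; providerManager stands for an already-initialized CameraProviderManager, and the camera ID "0" is illustrative:

    // Hedged sketch: torch control through the HIDL provider path.
    sp<FlashControlBase> control = new ProviderFlashControl(providerManager);

    bool hasFlash = false;
    if (control->hasFlashUnit(String8("0"), &hasFlash) == OK && hasFlash) {
        // setTorchMode() forwards to CameraProviderManager::setTorchMode().
        control->setTorchMode(String8("0"), /*enabled*/ true);
    }
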
diff --git a/services/camera/libcameraservice/CameraFlashlight.h b/services/camera/libcameraservice/CameraFlashlight.h
index 59fc87d..b7c7690 100644
--- a/services/camera/libcameraservice/CameraFlashlight.h
+++ b/services/camera/libcameraservice/CameraFlashlight.h
@@ -17,14 +17,17 @@
 #ifndef ANDROID_SERVERS_CAMERA_CAMERAFLASHLIGHT_H
 #define ANDROID_SERVERS_CAMERA_CAMERAFLASHLIGHT_H
 
-#include "hardware/camera_common.h"
-#include "utils/KeyedVector.h"
-#include "utils/SortedVector.h"
-#include "gui/GLConsumer.h"
-#include "gui/Surface.h"
+#include <gui/GLConsumer.h>
+#include <gui/Surface.h>
+#include <hardware/camera_common.h>
+#include <utils/KeyedVector.h>
+#include <utils/SortedVector.h>
+#include "common/CameraProviderManager.h"
+#include "common/CameraModule.h"
 #include "common/CameraDeviceBase.h"
 #include "device1/CameraHardwareInterface.h"
 
+
 namespace android {
 
 /**
@@ -52,8 +55,10 @@
  */
 class CameraFlashlight : public virtual VirtualLightRefBase {
     public:
-        CameraFlashlight(CameraModule& cameraModule,
-                const camera_module_callbacks_t& callbacks);
+        CameraFlashlight(CameraModule* cameraModule,
+                camera_module_callbacks_t* callbacks);
+        CameraFlashlight(sp<CameraProviderManager> providerManager,
+                camera_module_callbacks_t* callbacks);
         virtual ~CameraFlashlight();
 
         // Find all flash units. This must be called before other methods. All
@@ -88,7 +93,10 @@
         bool hasFlashUnitLocked(const String8& cameraId);
 
         sp<FlashControlBase> mFlashControl;
+
         CameraModule *mCameraModule;
+        sp<CameraProviderManager> mProviderManager;
+
         const camera_module_callbacks_t *mCallbacks;
         SortedVector<String8> mOpenedCameraIds;
 
@@ -100,12 +108,29 @@
 };
 
 /**
+ * Flash control for camera provider v2.4 and above.
+ */
+class ProviderFlashControl : public FlashControlBase {
+    public:
+        ProviderFlashControl(sp<CameraProviderManager> providerManager);
+        virtual ~ProviderFlashControl();
+
+        // FlashControlBase
+        status_t hasFlashUnit(const String8& cameraId, bool *hasFlash);
+        status_t setTorchMode(const String8& cameraId, bool enabled);
+
+    private:
+        sp<CameraProviderManager> mProviderManager;
+
+        Mutex mLock;
+};
+
+/**
  * Flash control for camera module v2.4 and above.
  */
 class ModuleFlashControl : public FlashControlBase {
     public:
-        ModuleFlashControl(CameraModule& cameraModule,
-                const camera_module_callbacks_t& callbacks);
+        ModuleFlashControl(CameraModule& cameraModule);
         virtual ~ModuleFlashControl();
 
         // FlashControlBase
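
From CameraService, which supplies the camera_module_callbacks_t, the provider-based construction of the flashlight object follows the same pattern as the module-based path; a condensed sketch, with the provider manager assumed to be initialized:

    // Inside CameraService, after CameraProviderManager::initialize() succeeds:
    mFlashlight = new CameraFlashlight(mCameraProviderManager, this);
    status_t res = mFlashlight->findFlashUnits();  // must run before other calls
    if (res != OK) {
        ALOGE("Failed to enumerate flash units: %s (%d)", strerror(-res), res);
    }
    // Later, per enumerated camera ID:
    if (mFlashlight->hasFlashUnit(id8)) {
        mTorchStatusMap.add(id8, TorchModeStatus::AVAILABLE_OFF);
    }
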
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 85faac6..f439590 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -18,6 +18,12 @@
 #define ATRACE_TAG ATRACE_TAG_CAMERA
 //#define LOG_NDEBUG 0
 
+#ifdef ENABLE_TREBLE
+  #define USE_HIDL true
+#else
+  #define USE_HIDL false
+#endif
+
 #include <algorithm>
 #include <climits>
 #include <stdio.h>
@@ -32,6 +38,7 @@
 #include <android/hardware/ICameraClient.h>
 
 #include <android-base/macros.h>
+#include <android-base/parseint.h>
 #include <binder/AppOpsManager.h>
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
@@ -54,8 +61,12 @@
 #include <private/android_filesystem_config.h>
 #include <system/camera_vendor_tags.h>
 #include <system/camera_metadata.h>
+
 #include <system/camera.h>
 
+#include <android/hidl/manager/1.0/IServiceManager.h>
+#include <hidl/ServiceManagement.h>
+
 #include "CameraService.h"
 #include "api1/CameraClient.h"
 #include "api1/Camera2Client.h"
@@ -69,7 +80,11 @@
 namespace android {
 
 using binder::Status;
-using namespace hardware;
+using hardware::ICamera;
+using hardware::ICameraClient;
+using hardware::ICameraServiceListener;
+using hardware::camera::common::V1_0::CameraDeviceStatus;
+using hardware::camera::common::V1_0::TorchModeStatus;
 
 // ----------------------------------------------------------------------------
 // Logging support -- this is for debugging only
@@ -103,9 +118,24 @@
         int new_status) {
     sp<CameraService> cs = const_cast<CameraService*>(
             static_cast<const CameraService*>(callbacks));
+    String8 id = String8::format("%d", camera_id);
 
-    cs->onDeviceStatusChanged(camera_id,
-            static_cast<camera_device_status_t>(new_status));
+    CameraDeviceStatus newStatus{CameraDeviceStatus::NOT_PRESENT};
+    switch (new_status) {
+        case CAMERA_DEVICE_STATUS_NOT_PRESENT:
+            newStatus = CameraDeviceStatus::NOT_PRESENT;
+            break;
+        case CAMERA_DEVICE_STATUS_PRESENT:
+            newStatus = CameraDeviceStatus::PRESENT;
+            break;
+        case CAMERA_DEVICE_STATUS_ENUMERATING:
+            newStatus = CameraDeviceStatus::ENUMERATING;
+            break;
+        default:
+            ALOGW("Unknown device status change to %d", new_status);
+            break;
+    }
+    cs->onDeviceStatusChanged(id, newStatus);
 }
 
 static void torch_mode_status_change(
@@ -119,16 +149,16 @@
     sp<CameraService> cs = const_cast<CameraService*>(
                                 static_cast<const CameraService*>(callbacks));
 
-    int32_t status;
+    TorchModeStatus status;
     switch (new_status) {
         case TORCH_MODE_STATUS_NOT_AVAILABLE:
-            status = ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE;
+            status = TorchModeStatus::NOT_AVAILABLE;
             break;
         case TORCH_MODE_STATUS_AVAILABLE_OFF:
-            status = ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF;
+            status = TorchModeStatus::AVAILABLE_OFF;
             break;
         case TORCH_MODE_STATUS_AVAILABLE_ON:
-            status = ICameraServiceListener::TORCH_STATUS_AVAILABLE_ON;
+            status = TorchModeStatus::AVAILABLE_ON;
             break;
         default:
             ALOGE("Unknown torch status %d", new_status);
@@ -143,16 +173,11 @@
 
 // ----------------------------------------------------------------------------
 
-// This is ugly and only safe if we never re-create the CameraService, but
-// should be ok for now.
-static CameraService *gCameraService;
-
 CameraService::CameraService() :
         mEventLog(DEFAULT_EVENT_LOG_LENGTH),
         mNumberOfCameras(0), mNumberOfNormalCameras(0),
-        mSoundRef(0), mModule(nullptr) {
+        mSoundRef(0), mInitialized(false), mModule(nullptr) {
     ALOGI("CameraService started (pid=%d)", getpid());
-    gCameraService = this;
 
     this->camera_device_status_change = android::camera_device_status_change;
     this->torch_mode_status_change = android::torch_mode_status_change;
@@ -171,13 +196,27 @@
     notifier.noteResetCamera();
     notifier.noteResetFlashlight();
 
+    status_t res = INVALID_OPERATION;
+    if (USE_HIDL) {
+        res = enumerateProviders();
+    } else {
+        res = loadLegacyHalModule();
+    }
+    if (res == OK) {
+        mInitialized = true;
+    }
+
+    CameraService::pingCameraServiceProxy();
+}
+
+status_t CameraService::loadLegacyHalModule() {
     camera_module_t *rawModule;
     int err = hw_get_module(CAMERA_HARDWARE_MODULE_ID,
             (const hw_module_t **)&rawModule);
     if (err < 0) {
         ALOGE("Could not load camera HAL module: %d (%s)", err, strerror(-err));
         logServiceError("Could not load camera HAL module", err);
-        return;
+        return INVALID_OPERATION;
     }
 
     mModule = new CameraModule(rawModule);
@@ -189,7 +228,7 @@
 
         delete mModule;
         mModule = nullptr;
-        return;
+        return INVALID_OPERATION;
     }
     ALOGI("Loaded \"%s\" camera module", mModule->getModuleName());
 
@@ -203,7 +242,7 @@
         setUpVendorTags();
     }
 
-    mFlashlight = new CameraFlashlight(*mModule, *this);
+    mFlashlight = new CameraFlashlight(mModule, this);
     status_t res = mFlashlight->findFlashUnits();
     if (res) {
         // impossible because we haven't open any camera devices.
@@ -231,7 +270,7 @@
             if (checkCameraCapabilities(i, info, &latestStrangeCameraId) != OK) {
                 delete mModule;
                 mModule = nullptr;
-                return;
+                return INVALID_OPERATION;
             }
         }
 
@@ -261,7 +300,7 @@
 
         if (mFlashlight->hasFlashUnit(cameraId)) {
             mTorchStatusMap.add(cameraId,
-                    ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF);
+                    TorchModeStatus::AVAILABLE_OFF);
         }
     }
 
@@ -269,14 +308,70 @@
         mModule->setCallbacks(this);
     }
 
-    CameraService::pingCameraServiceProxy();
+    return OK;
 }
 
+status_t CameraService::enumerateProviders() {
+    mCameraProviderManager = new CameraProviderManager();
+    status_t res;
+    res = mCameraProviderManager->initialize(this);
+    if (res != OK) {
+        ALOGE("%s: Unable to initialize camera provider manager: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    mNumberOfCameras = mCameraProviderManager->getCameraCount();
+    mNumberOfNormalCameras = mCameraProviderManager->getStandardCameraCount();
+
+    // Setup vendor tags before we call get_camera_info the first time
+    // because HAL might need to setup static vendor keys in get_camera_info
+    // TODO: maybe put this into CameraProviderManager::initialize()?
+    mCameraProviderManager->setUpVendorTags();
+
+    mFlashlight = new CameraFlashlight(mCameraProviderManager, this);
+    res = mFlashlight->findFlashUnits();
+    if (res != OK) {
+        ALOGE("Failed to enumerate flash units: %s (%d)", strerror(-res), res);
+    }
+
+    // TODO: Verify device versions are in support
+
+    for (auto& cameraId : mCameraProviderManager->getCameraDeviceIds()) {
+        hardware::camera::common::V1_0::CameraResourceCost cost;
+        res = mCameraProviderManager->getResourceCost(cameraId, &cost);
+        if (res != OK) {
+            ALOGE("Failed to query device resource cost: %s (%d)", strerror(-res), res);
+            continue;
+        }
+        std::set<String8> conflicting;
+        for (size_t i = 0; i < cost.conflictingDevices.size(); i++) {
+            conflicting.emplace(String8(cost.conflictingDevices[i].c_str()));
+        }
+        String8 id8 = String8(cameraId.c_str());
+
+        Mutex::Autolock lock(mCameraStatesLock);
+        mCameraStates.emplace(id8,
+            std::make_shared<CameraState>(id8, cost.resourceCost, conflicting));
+
+        if (mFlashlight->hasFlashUnit(id8)) {
+            mTorchStatusMap.add(id8,
+                TorchModeStatus::AVAILABLE_OFF);
+        }
+    }
+
+    return OK;
+}
+
+
 sp<ICameraServiceProxy> CameraService::getCameraServiceProxy() {
     sp<ICameraServiceProxy> proxyBinder = nullptr;
 #ifndef __BRILLO__
     sp<IServiceManager> sm = defaultServiceManager();
-    sp<IBinder> binder = sm->getService(String16("media.camera.proxy"));
+    // Use checkService because cameraserver normally starts before the
+    // system server and the proxy service. So the long timeout that getService
+    // has before giving up is inappropriate.
+    sp<IBinder> binder = sm->checkService(String16("media.camera.proxy"));
     if (binder != nullptr) {
         proxyBinder = interface_cast<ICameraServiceProxy>(binder);
     }
@@ -296,30 +391,30 @@
         mModule = nullptr;
     }
     VendorTagDescriptor::clearGlobalVendorTagDescriptor();
-    gCameraService = nullptr;
 }
 
-void CameraService::onDeviceStatusChanged(int  cameraId,
-        camera_device_status_t newStatus) {
-    ALOGI("%s: Status changed for cameraId=%d, newStatus=%d", __FUNCTION__,
-          cameraId, newStatus);
+void CameraService::onDeviceStatusChanged(const String8& id,
+        CameraDeviceStatus newHalStatus) {
+    ALOGI("%s: Status changed for cameraId=%s, newStatus=%d", __FUNCTION__,
+            id.string(), newHalStatus);
 
-    String8 id = String8::format("%d", cameraId);
+    StatusInternal newStatus = mapToInternal(newHalStatus);
+
     std::shared_ptr<CameraState> state = getCameraState(id);
 
     if (state == nullptr) {
-        ALOGE("%s: Bad camera ID %d", __FUNCTION__, cameraId);
+        ALOGE("%s: Bad camera ID %s", __FUNCTION__, id.string());
         return;
     }
 
-    int32_t oldStatus = state->getStatus();
+    StatusInternal oldStatus = state->getStatus();
 
-    if (oldStatus == static_cast<int32_t>(newStatus)) {
+    if (oldStatus == newStatus) {
         ALOGE("%s: State transition to the same status %#x not allowed", __FUNCTION__, newStatus);
         return;
     }
 
-    if (newStatus == CAMERA_DEVICE_STATUS_NOT_PRESENT) {
+    if (newStatus == StatusInternal::NOT_PRESENT) {
         logDeviceRemoved(id, String8::format("Device status changed from %d to %d", oldStatus,
                 newStatus));
         sp<BasicClient> clientToDisconnect;
@@ -329,7 +424,7 @@
 
             // Set the device status to NOT_PRESENT, clients will no longer be able to connect
             // to this device until the status changes
-            updateStatus(ICameraServiceListener::STATUS_NOT_PRESENT, id);
+            updateStatus(StatusInternal::NOT_PRESENT, id);
 
             // Remove cached shim parameters
             state->setShimParams(CameraParameters());
@@ -355,27 +450,27 @@
         }
 
     } else {
-        if (oldStatus == ICameraServiceListener::STATUS_NOT_PRESENT) {
+        if (oldStatus == StatusInternal::NOT_PRESENT) {
             logDeviceAdded(id, String8::format("Device status changed from %d to %d", oldStatus,
                     newStatus));
         }
-        updateStatus(static_cast<int32_t>(newStatus), id);
+        updateStatus(newStatus, id);
     }
 
 }
 
 void CameraService::onTorchStatusChanged(const String8& cameraId,
-        int32_t newStatus) {
+        TorchModeStatus newStatus) {
     Mutex::Autolock al(mTorchStatusMutex);
     onTorchStatusChangedLocked(cameraId, newStatus);
 }
 
 void CameraService::onTorchStatusChangedLocked(const String8& cameraId,
-        int32_t newStatus) {
+        TorchModeStatus newStatus) {
     ALOGI("%s: Torch status changed for cameraId=%s, newStatus=%d",
             __FUNCTION__, cameraId.string(), newStatus);
 
-    int32_t status;
+    TorchModeStatus status;
     status_t res = getTorchStatusLocked(cameraId, &status);
     if (res) {
         ALOGE("%s: cannot get torch status of camera %s: %s (%d)",
@@ -403,16 +498,16 @@
             BatteryNotifier& notifier(BatteryNotifier::getInstance());
             if (oldUid != newUid) {
                 // If the UID has changed, log the status and update current UID in mTorchUidMap
-                if (status == ICameraServiceListener::TORCH_STATUS_AVAILABLE_ON) {
+                if (status == TorchModeStatus::AVAILABLE_ON) {
                     notifier.noteFlashlightOff(cameraId, oldUid);
                 }
-                if (newStatus == ICameraServiceListener::TORCH_STATUS_AVAILABLE_ON) {
+                if (newStatus == TorchModeStatus::AVAILABLE_ON) {
                     notifier.noteFlashlightOn(cameraId, newUid);
                 }
                 iter->second.second = newUid;
             } else {
                 // If the UID has not changed, log the status
-                if (newStatus == ICameraServiceListener::TORCH_STATUS_AVAILABLE_ON) {
+                if (newStatus == TorchModeStatus::AVAILABLE_ON) {
                     notifier.noteFlashlightOn(cameraId, oldUid);
                 } else {
                     notifier.noteFlashlightOff(cameraId, oldUid);
@@ -424,7 +519,7 @@
     {
         Mutex::Autolock lock(mStatusListenerLock);
         for (auto& i : mListenerList) {
-            i->onTorchStatusChanged(newStatus, String16{cameraId});
+            i->onTorchStatusChanged(mapToInterface(newStatus), String16{cameraId});
         }
     }
 }
@@ -450,7 +545,7 @@
 Status CameraService::getCameraInfo(int cameraId,
         CameraInfo* cameraInfo) {
     ATRACE_CALL();
-    if (!mModule) {
+    if (!mInitialized) {
         return STATUS_ERROR(ERROR_DISCONNECTED,
                 "Camera subsystem is not available");
     }
@@ -460,170 +555,85 @@
                 "CameraId is not valid");
     }
 
-    struct camera_info info;
-    Status rc = filterGetInfoErrorCode(
-        mModule->getCameraInfo(cameraId, &info));
+    Status ret = Status::ok();
+    if (mModule != nullptr) {
+        struct camera_info info;
+        ret = filterGetInfoErrorCode(mModule->getCameraInfo(cameraId, &info));
 
-    if (rc.isOk()) {
-        cameraInfo->facing = info.facing;
-        cameraInfo->orientation = info.orientation;
-        // CameraInfo is for android.hardware.Camera which does not
-        // support external camera facing. The closest approximation would be
-        // front camera.
-        if (cameraInfo->facing == CAMERA_FACING_EXTERNAL) {
-            cameraInfo->facing = CAMERA_FACING_FRONT;
+        if (ret.isOk()) {
+            cameraInfo->facing = info.facing;
+            cameraInfo->orientation = info.orientation;
+            // CameraInfo is for android.hardware.Camera which does not
+            // support external camera facing. The closest approximation would be
+            // front camera.
+            if (cameraInfo->facing == CAMERA_FACING_EXTERNAL) {
+                cameraInfo->facing = hardware::CAMERA_FACING_FRONT;
+            }
+        }
+    } else {
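+        // No legacy HAL module loaded: query the camera provider manager for this device instead.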
+        status_t err = mCameraProviderManager->getCameraInfo(std::to_string(cameraId), cameraInfo);
+        if (err != OK) {
+            ret = STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+                    "Error retrieving camera info from device %d: %s (%d)", cameraId,
+                    strerror(-err), err);
         }
     }
-    return rc;
+    return ret;
 }
 
 int CameraService::cameraIdToInt(const String8& cameraId) {
-    errno = 0;
-    size_t pos = 0;
-    int ret = stoi(std::string{cameraId.string()}, &pos);
-    if (errno != 0 || pos != cameraId.size()) {
+    int id;
+    bool success = base::ParseInt(cameraId.string(), &id, 0);
+    if (!success) {
         return -1;
     }
-    return ret;
+    return id;
 }
 
-Status CameraService::generateShimMetadata(int cameraId, /*out*/CameraMetadata* cameraInfo) {
-    ATRACE_CALL();
-
-    Status ret = Status::ok();
-
-    struct CameraInfo info;
-    if (!(ret = getCameraInfo(cameraId, &info)).isOk()) {
-        return ret;
-    }
-
-    CameraMetadata shimInfo;
-    int32_t orientation = static_cast<int32_t>(info.orientation);
-    status_t rc;
-    if ((rc = shimInfo.update(ANDROID_SENSOR_ORIENTATION, &orientation, 1)) != OK) {
-        return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
-                "Error updating metadata: %d (%s)", rc, strerror(-rc));
-    }
-
-    uint8_t facing = (info.facing == CAMERA_FACING_FRONT) ?
-            ANDROID_LENS_FACING_FRONT : ANDROID_LENS_FACING_BACK;
-    if ((rc = shimInfo.update(ANDROID_LENS_FACING, &facing, 1)) != OK) {
-        return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
-                "Error updating metadata: %d (%s)", rc, strerror(-rc));
-    }
-
-    CameraParameters shimParams;
-    if (!(ret = getLegacyParametersLazy(cameraId, /*out*/&shimParams)).isOk()) {
-        // Error logged by callee
-        return ret;
-    }
-
-    Vector<Size> sizes;
-    Vector<Size> jpegSizes;
-    Vector<int32_t> formats;
-    {
-        shimParams.getSupportedPreviewSizes(/*out*/sizes);
-        shimParams.getSupportedPreviewFormats(/*out*/formats);
-        shimParams.getSupportedPictureSizes(/*out*/jpegSizes);
-    }
-
-    // Always include IMPLEMENTATION_DEFINED
-    formats.add(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED);
-
-    const size_t INTS_PER_CONFIG = 4;
-
-    // Build available stream configurations metadata
-    size_t streamConfigSize = (sizes.size() * formats.size() + jpegSizes.size()) * INTS_PER_CONFIG;
-
-    Vector<int32_t> streamConfigs;
-    streamConfigs.setCapacity(streamConfigSize);
-
-    for (size_t i = 0; i < formats.size(); ++i) {
-        for (size_t j = 0; j < sizes.size(); ++j) {
-            streamConfigs.add(formats[i]);
-            streamConfigs.add(sizes[j].width);
-            streamConfigs.add(sizes[j].height);
-            streamConfigs.add(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT);
-        }
-    }
-
-    for (size_t i = 0; i < jpegSizes.size(); ++i) {
-        streamConfigs.add(HAL_PIXEL_FORMAT_BLOB);
-        streamConfigs.add(jpegSizes[i].width);
-        streamConfigs.add(jpegSizes[i].height);
-        streamConfigs.add(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT);
-    }
-
-    if ((rc = shimInfo.update(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
-            streamConfigs.array(), streamConfigSize)) != OK) {
-        return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
-                "Error updating metadata: %d (%s)", rc, strerror(-rc));
-    }
-
-    int64_t fakeMinFrames[0];
-    // TODO: Fixme, don't fake min frame durations.
-    if ((rc = shimInfo.update(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
-            fakeMinFrames, 0)) != OK) {
-        return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
-                "Error updating metadata: %d (%s)", rc, strerror(-rc));
-    }
-
-    int64_t fakeStalls[0];
-    // TODO: Fixme, don't fake stall durations.
-    if ((rc = shimInfo.update(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
-            fakeStalls, 0)) != OK) {
-        return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
-                "Error updating metadata: %d (%s)", rc, strerror(-rc));
-    }
-
-    *cameraInfo = shimInfo;
-    return ret;
-}
-
-Status CameraService::getCameraCharacteristics(int cameraId,
-                                                CameraMetadata* cameraInfo) {
+Status CameraService::getCameraCharacteristics(const String16& cameraId,
+        CameraMetadata* cameraInfo) {
     ATRACE_CALL();
     if (!cameraInfo) {
         ALOGE("%s: cameraInfo is NULL", __FUNCTION__);
         return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, "cameraInfo is NULL");
     }
 
-    if (!mModule) {
-        ALOGE("%s: camera hardware module doesn't exist", __FUNCTION__);
+    if (!mInitialized) {
+        ALOGE("%s: Camera HAL couldn't be initialized", __FUNCTION__);
         return STATUS_ERROR(ERROR_DISCONNECTED,
                 "Camera subsystem is not available");;
     }
 
-    if (cameraId < 0 || cameraId >= mNumberOfCameras) {
-        ALOGE("%s: Invalid camera id: %d", __FUNCTION__, cameraId);
-        return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
-                "Invalid camera id: %d", cameraId);
-    }
+    Status ret{};
 
-    int facing;
-    Status ret;
-    if (mModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_0 ||
-            getDeviceVersion(cameraId, &facing) < CAMERA_DEVICE_API_VERSION_3_0) {
-        /**
-         * Backwards compatibility mode for old HALs:
-         * - Convert CameraInfo into static CameraMetadata properties.
-         * - Retrieve cached CameraParameters for this camera.  If none exist,
-         *   attempt to open CameraClient and retrieve the CameraParameters.
-         * - Convert cached CameraParameters into static CameraMetadata
-         *   properties.
-         */
-        ALOGI("%s: Switching to HAL1 shim implementation...", __FUNCTION__);
+    if (mModule != nullptr) {
+        int id = cameraIdToInt(String8(cameraId));
 
-        ret = generateShimMetadata(cameraId, cameraInfo);
-    } else {
-        /**
-         * Normal HAL 2.1+ codepath.
-         */
+        if (id < 0 || id >= mNumberOfCameras) {
+            ALOGE("%s: Invalid camera id: %d", __FUNCTION__, id);
+            return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+                    "Invalid camera id: %d", id);
+        }
+
+        int version = getDeviceVersion(String8(cameraId));
+        if (version < CAMERA_DEVICE_API_VERSION_3_0) {
+            return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT, "Can't get camera characteristics"
+                    " for devices with HAL version < 3.0, %d is version %x", id, version);
+        }
+
         struct camera_info info;
-        ret = filterGetInfoErrorCode(mModule->getCameraInfo(cameraId, &info));
+        ret = filterGetInfoErrorCode(mModule->getCameraInfo(id, &info));
         if (ret.isOk()) {
             *cameraInfo = info.static_camera_characteristics;
         }
+    } else {
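+        // No legacy module: read the static characteristics through the camera provider manager.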
+        status_t res = mCameraProviderManager->getCameraCharacteristics(
+                String16::std_string(cameraId), cameraInfo);
+        if (res != OK) {
+            return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION, "Unable to retrieve camera "
+                    "characteristics for device %s: %s (%d)", String8(cameraId).string(),
+                    strerror(-res), res);
+        }
     }
 
     return ret;
@@ -664,8 +674,8 @@
         /*out*/
         hardware::camera2::params::VendorTagDescriptor* desc) {
     ATRACE_CALL();
-    if (!mModule) {
-        ALOGE("%s: camera hardware module doesn't exist", __FUNCTION__);
+    if (!mInitialized) {
+        ALOGE("%s: Camera HAL couldn't be initialized", __FUNCTION__);
         return STATUS_ERROR(ERROR_DISCONNECTED, "Camera subsystem not available");
     }
     sp<VendorTagDescriptor> globalDescriptor = VendorTagDescriptor::getGlobalVendorTagDescriptor();
@@ -675,24 +685,37 @@
     return Status::ok();
 }
 
-int CameraService::getDeviceVersion(int cameraId, int* facing) {
+int CameraService::getDeviceVersion(const String8& cameraId, int* facing) {
     ATRACE_CALL();
     struct camera_info info;
-    if (mModule->getCameraInfo(cameraId, &info) != OK) {
-        return -1;
-    }
 
-    int deviceVersion;
-    if (mModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_0) {
-        deviceVersion = info.device_version;
+    int deviceVersion = 0;
+
+    if (mModule != nullptr) {
+        int id = cameraIdToInt(cameraId);
+        if (id < 0) return -1;
+
+        if (mModule->getCameraInfo(id, &info) != OK) {
+            return -1;
+        }
+
+        if (mModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_0) {
+            deviceVersion = info.device_version;
+        } else {
+            deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
+        }
+
+        if (facing) {
+            *facing = info.facing;
+        }
     } else {
-        deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
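+        // No legacy module: report the highest HAL device version the provider manager
+        // advertises for this camera ID.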
+        status_t res;
+        hardware::hidl_version maxVersion{0,0};
+        res = mCameraProviderManager->getHighestSupportedVersion(String8::std_string(cameraId),
+                &maxVersion);
+        if (res == NAME_NOT_FOUND) return -1;
+        deviceVersion = HARDWARE_DEVICE_API_VERSION(maxVersion.get_major(), maxVersion.get_minor());
     }
-
-    if (facing) {
-        *facing = info.facing;
-    }
-
     return deviceVersion;
 }
 
@@ -700,10 +723,10 @@
     switch(err) {
         case NO_ERROR:
             return Status::ok();
-        case -EINVAL:
+        case BAD_VALUE:
             return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT,
                     "CameraId is not valid for HAL module");
-        case -ENODEV:
+        case NO_INIT:
             return STATUS_ERROR(ERROR_DISCONNECTED,
                     "Camera device not available");
         default:
@@ -715,6 +738,8 @@
 
 bool CameraService::setUpVendorTags() {
     ATRACE_CALL();
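+    // Vendor tag ops are only queried from the legacy HAL module in this path.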
+    if (mModule == nullptr) return false;
+
     vendor_tag_ops_t vOps = vendor_tag_ops_t();
 
     // Check if vendor operations have been implemented
@@ -751,7 +776,7 @@
 }
 
 Status CameraService::makeClient(const sp<CameraService>& cameraService,
-        const sp<IInterface>& cameraCb, const String16& packageName, int cameraId,
+        const sp<IInterface>& cameraCb, const String16& packageName, const String8& cameraId,
         int facing, int clientPid, uid_t clientUid, int servicePid, bool legacyMode,
         int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
         /*out*/sp<BasicClient>* client) {
@@ -763,13 +788,13 @@
           case CAMERA_DEVICE_API_VERSION_1_0:
             if (effectiveApiLevel == API_1) {  // Camera1 API route
                 sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
-                *client = new CameraClient(cameraService, tmp, packageName, cameraId, facing,
-                        clientPid, clientUid, getpid(), legacyMode);
+                *client = new CameraClient(cameraService, tmp, packageName, cameraIdToInt(cameraId),
+                        facing, clientPid, clientUid, getpid(), legacyMode);
             } else { // Camera2 API route
                 ALOGW("Camera using old HAL version: %d", deviceVersion);
                 return STATUS_ERROR_FMT(ERROR_DEPRECATED_HAL,
-                        "Camera device \"%d\" HAL version %d does not support camera2 API",
-                        cameraId, deviceVersion);
+                        "Camera device \"%s\" HAL version %d does not support camera2 API",
+                        cameraId.string(), deviceVersion);
             }
             break;
           case CAMERA_DEVICE_API_VERSION_3_0:
@@ -779,8 +804,8 @@
           case CAMERA_DEVICE_API_VERSION_3_4:
             if (effectiveApiLevel == API_1) { // Camera1 API route
                 sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
-                *client = new Camera2Client(cameraService, tmp, packageName, cameraId, facing,
-                        clientPid, clientUid, servicePid, legacyMode);
+                *client = new Camera2Client(cameraService, tmp, packageName, cameraIdToInt(cameraId),
+                        facing, clientPid, clientUid, servicePid, legacyMode);
             } else { // Camera2 API route
                 sp<hardware::camera2::ICameraDeviceCallbacks> tmp =
                         static_cast<hardware::camera2::ICameraDeviceCallbacks*>(cameraCb.get());
@@ -792,8 +817,8 @@
             // Should not be reachable
             ALOGE("Unknown camera device HAL version: %d", deviceVersion);
             return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
-                    "Camera device \"%d\" has unknown HAL version %d",
-                    cameraId, deviceVersion);
+                    "Camera device \"%s\" has unknown HAL version %d",
+                    cameraId.string(), deviceVersion);
         }
     } else {
         // A particular HAL version is requested by caller. Create CameraClient
@@ -802,16 +827,16 @@
             halVersion == CAMERA_DEVICE_API_VERSION_1_0) {
             // Only support higher HAL version device opened as HAL1.0 device.
             sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
-            *client = new CameraClient(cameraService, tmp, packageName, cameraId, facing,
-                    clientPid, clientUid, servicePid, legacyMode);
+            *client = new CameraClient(cameraService, tmp, packageName, cameraIdToInt(cameraId),
+                    facing, clientPid, clientUid, servicePid, legacyMode);
         } else {
             // Other combinations (e.g. HAL3.x open as HAL2.x) are not supported yet.
             ALOGE("Invalid camera HAL version %x: HAL %x device can only be"
                     " opened as HAL %x device", halVersion, deviceVersion,
                     CAMERA_DEVICE_API_VERSION_1_0);
             return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
-                    "Camera device \"%d\" (HAL version %d) cannot be opened as HAL version %d",
-                    cameraId, deviceVersion, halVersion);
+                    "Camera device \"%s\" (HAL version %d) cannot be opened as HAL version %d",
+                    cameraId.string(), deviceVersion, halVersion);
         }
     }
     return Status::ok();
@@ -831,6 +856,66 @@
     return s;
 }
 
+int32_t CameraService::mapToInterface(TorchModeStatus status) {
+    int32_t serviceStatus = ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE;
+    switch (status) {
+        case TorchModeStatus::NOT_AVAILABLE:
+            serviceStatus = ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE;
+            break;
+        case TorchModeStatus::AVAILABLE_OFF:
+            serviceStatus = ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF;
+            break;
+        case TorchModeStatus::AVAILABLE_ON:
+            serviceStatus = ICameraServiceListener::TORCH_STATUS_AVAILABLE_ON;
+            break;
+        default:
+            ALOGW("Unknown new flash status: %d", static_cast<int>(status));
+    }
+    return serviceStatus;
+}
+
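+// Map a HAL CameraDeviceStatus onto the service-internal StatusInternal value.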
+CameraService::StatusInternal CameraService::mapToInternal(CameraDeviceStatus status) {
+    StatusInternal serviceStatus = StatusInternal::NOT_PRESENT;
+    switch (status) {
+        case CameraDeviceStatus::NOT_PRESENT:
+            serviceStatus = StatusInternal::NOT_PRESENT;
+            break;
+        case CameraDeviceStatus::PRESENT:
+            serviceStatus = StatusInternal::PRESENT;
+            break;
+        case CameraDeviceStatus::ENUMERATING:
+            serviceStatus = StatusInternal::ENUMERATING;
+            break;
+        default:
+            ALOGW("Unknown new HAL device status: %d", static_cast<int>(status));
+    }
+    return serviceStatus;
+}
+
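+// Map the service-internal status onto the ICameraServiceListener constant reported to clients.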
+int32_t CameraService::mapToInterface(StatusInternal status) {
+    int32_t serviceStatus = ICameraServiceListener::STATUS_NOT_PRESENT;
+    switch (status) {
+        case StatusInternal::NOT_PRESENT:
+            serviceStatus = ICameraServiceListener::STATUS_NOT_PRESENT;
+            break;
+        case StatusInternal::PRESENT:
+            serviceStatus = ICameraServiceListener::STATUS_PRESENT;
+            break;
+        case StatusInternal::ENUMERATING:
+            serviceStatus = ICameraServiceListener::STATUS_ENUMERATING;
+            break;
+        case StatusInternal::NOT_AVAILABLE:
+            serviceStatus = ICameraServiceListener::STATUS_NOT_AVAILABLE;
+            break;
+        case StatusInternal::UNKNOWN:
+            serviceStatus = ICameraServiceListener::STATUS_UNKNOWN;
+            break;
+        default:
+            ALOGW("Unknown new internal device status: %d", static_cast<int>(status));
+    }
+    return serviceStatus;
+}
+
 Status CameraService::initializeShimMetadata(int cameraId) {
     int uid = getCallingUid();
 
@@ -942,7 +1027,7 @@
 
     int callingPid = getCallingPid();
 
-    if (!mModule) {
+    if (!mInitialized) {
         ALOGE("CameraService::connect X (PID %d) rejected (camera HAL module not loaded)",
                 callingPid);
         return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
@@ -1042,12 +1127,12 @@
         return -ENODEV;
     }
 
-    int32_t currentStatus = cameraState->getStatus();
-    if (currentStatus == ICameraServiceListener::STATUS_NOT_PRESENT) {
+    StatusInternal currentStatus = cameraState->getStatus();
+    if (currentStatus == StatusInternal::NOT_PRESENT) {
         ALOGE("CameraService::connect X (PID %d) rejected (camera %s is not connected)",
                 callingPid, cameraId.string());
         return -ENODEV;
-    } else if (currentStatus == ICameraServiceListener::STATUS_ENUMERATING) {
+    } else if (currentStatus == StatusInternal::ENUMERATING) {
         ALOGE("CameraService::connect X (PID %d) rejected, (camera %s is initializing)",
                 callingPid, cameraId.string());
         return -EBUSY;
@@ -1308,22 +1393,24 @@
 
     ATRACE_CALL();
     String8 id = String8::format("%d", cameraId);
-    int apiVersion = mModule->getModuleApiVersion();
-    if (halVersion != CAMERA_HAL_API_VERSION_UNSPECIFIED &&
-            apiVersion < CAMERA_MODULE_API_VERSION_2_3) {
-        /*
-         * Either the HAL version is unspecified in which case this just creates
-         * a camera client selected by the latest device version, or
-         * it's a particular version in which case the HAL must supported
-         * the open_legacy call
-         */
-        String8 msg = String8::format("Camera HAL module version %x too old for connectLegacy!",
-                apiVersion);
-        ALOGE("%s: %s",
-                __FUNCTION__, msg.string());
-        logRejected(id, getCallingPid(), String8(clientPackageName),
-                msg);
-        return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, msg.string());
+    if (mModule != nullptr) {
+        int apiVersion = mModule->getModuleApiVersion();
+        if (halVersion != CAMERA_HAL_API_VERSION_UNSPECIFIED &&
+                apiVersion < CAMERA_MODULE_API_VERSION_2_3) {
+            /*
+             * Either the HAL version is unspecified in which case this just creates
+             * a camera client selected by the latest device version, or
+             * it's a particular version in which case the HAL must support
+             * the open_legacy call
+             */
+            String8 msg = String8::format("Camera HAL module version %x too old for connectLegacy!",
+                    apiVersion);
+            ALOGE("%s: %s",
+                    __FUNCTION__, msg.string());
+            logRejected(id, getCallingPid(), String8(clientPackageName),
+                    msg);
+            return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, msg.string());
+        }
     }
 
     Status ret = Status::ok();
@@ -1345,7 +1432,7 @@
 
 Status CameraService::connectDevice(
         const sp<hardware::camera2::ICameraDeviceCallbacks>& cameraCb,
-        int cameraId,
+        const String16& cameraId,
         const String16& clientPackageName,
         int clientUid,
         /*out*/
@@ -1353,7 +1440,7 @@
 
     ATRACE_CALL();
     Status ret = Status::ok();
-    String8 id = String8::format("%d", cameraId);
+    String8 id = String8(cameraId);
     sp<CameraDeviceClient> client = nullptr;
     ret = connectHelper<hardware::camera2::ICameraDeviceCallbacks,CameraDeviceClient>(cameraCb, id,
             CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName,
@@ -1371,8 +1458,175 @@
     return ret;
 }
 
+template<class CALLBACK, class CLIENT>
+Status CameraService::connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
+        int halVersion, const String16& clientPackageName, int clientUid, int clientPid,
+        apiLevel effectiveApiLevel, bool legacyMode, bool shimUpdateOnly,
+        /*out*/sp<CLIENT>& device) {
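+    // Common connect path for API1 and API2 clients: validate the caller, evict lower-priority
+    // clients if needed, build the right client type via makeClient(), then initialize it against
+    // either the legacy HAL module or the camera provider manager.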
+    binder::Status ret = binder::Status::ok();
+
+    String8 clientName8(clientPackageName);
+
+    int originalClientPid = 0;
+
+    ALOGI("CameraService::connect call (PID %d \"%s\", camera ID %s) for HAL version %s and "
+            "Camera API version %d", clientPid, clientName8.string(), cameraId.string(),
+            (halVersion == -1) ? "default" : std::to_string(halVersion).c_str(),
+            static_cast<int>(effectiveApiLevel));
+
+    sp<CLIENT> client = nullptr;
+    {
+        // Acquire mServiceLock and prevent other clients from connecting
+        std::unique_ptr<AutoConditionLock> lock =
+                AutoConditionLock::waitAndAcquire(mServiceLockWrapper, DEFAULT_CONNECT_TIMEOUT_NS);
+
+        if (lock == nullptr) {
+            ALOGE("CameraService::connect (PID %d) rejected (too many other clients connecting)."
+                    , clientPid);
+            return STATUS_ERROR_FMT(ERROR_MAX_CAMERAS_IN_USE,
+                    "Cannot open camera %s for \"%s\" (PID %d): Too many other clients connecting",
+                    cameraId.string(), clientName8.string(), clientPid);
+        }
+
+        // Enforce client permissions and do basic sanity checks
+        if(!(ret = validateConnectLocked(cameraId, clientName8,
+                /*inout*/clientUid, /*inout*/clientPid, /*out*/originalClientPid)).isOk()) {
+            return ret;
+        }
+
+        // Check the shim parameters after acquiring the lock; if they have already been updated
+        // and this is only a shim update, return immediately
+        if (shimUpdateOnly) {
+            auto cameraState = getCameraState(cameraId);
+            if (cameraState != nullptr) {
+                if (!cameraState->getShimParams().isEmpty()) return ret;
+            }
+        }
+
+        status_t err;
+
+        sp<BasicClient> clientTmp = nullptr;
+        std::shared_ptr<resource_policy::ClientDescriptor<String8, sp<BasicClient>>> partial;
+        if ((err = handleEvictionsLocked(cameraId, originalClientPid, effectiveApiLevel,
+                IInterface::asBinder(cameraCb), clientName8, /*out*/&clientTmp,
+                /*out*/&partial)) != NO_ERROR) {
+            switch (err) {
+                case -ENODEV:
+                    return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
+                            "No camera device with ID \"%s\" currently available",
+                            cameraId.string());
+                case -EBUSY:
+                    return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
+                            "Higher-priority client using camera, ID \"%s\" currently unavailable",
+                            cameraId.string());
+                default:
+                    return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+                            "Unexpected error %s (%d) opening camera \"%s\"",
+                            strerror(-err), err, cameraId.string());
+            }
+        }
+
+        if (clientTmp.get() != nullptr) {
+            // Handle special case for API1 MediaRecorder where the existing client is returned
+            device = static_cast<CLIENT*>(clientTmp.get());
+            return ret;
+        }
+
+        // give flashlight a chance to close devices if necessary.
+        mFlashlight->prepareDeviceOpen(cameraId);
+
+        // TODO: Update getDeviceVersion + HAL interface to use strings for Camera IDs
+        int id = cameraIdToInt(cameraId);
+        if (id == -1) {
+            ALOGE("%s: Invalid camera ID %s, cannot get device version from HAL.", __FUNCTION__,
+                    cameraId.string());
+            return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+                    "Bad camera ID \"%s\" passed to camera open", cameraId.string());
+        }
+
+        int facing = -1;
+        int deviceVersion = getDeviceVersion(cameraId, /*out*/&facing);
+        sp<BasicClient> tmp = nullptr;
+        if(!(ret = makeClient(this, cameraCb, clientPackageName, cameraId, facing, clientPid,
+                clientUid, getpid(), legacyMode, halVersion, deviceVersion, effectiveApiLevel,
+                /*out*/&tmp)).isOk()) {
+            return ret;
+        }
+        client = static_cast<CLIENT*>(tmp.get());
+
+        LOG_ALWAYS_FATAL_IF(client.get() == nullptr, "%s: CameraService in invalid state",
+                __FUNCTION__);
+
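+        // Initialize the new client against the legacy HAL module if one is loaded, otherwise
+        // against the HIDL camera provider manager.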
+        if (mModule != nullptr) {
+            err = client->initialize(mModule);
+        } else {
+            err = client->initialize(mCameraProviderManager);
+        }
+
+        if (err != OK) {
+            ALOGE("%s: Could not initialize client from HAL.", __FUNCTION__);
+            // Errors could be from the HAL module open call or from AppOpsManager
+            switch(err) {
+                case BAD_VALUE:
+                    return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+                            "Illegal argument to HAL module for camera \"%s\"", cameraId.string());
+                case -EBUSY:
+                    return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
+                            "Camera \"%s\" is already open", cameraId.string());
+                case -EUSERS:
+                    return STATUS_ERROR_FMT(ERROR_MAX_CAMERAS_IN_USE,
+                            "Too many cameras already open, cannot open camera \"%s\"",
+                            cameraId.string());
+                case PERMISSION_DENIED:
+                    return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
+                            "No permission to open camera \"%s\"", cameraId.string());
+                case -EACCES:
+                    return STATUS_ERROR_FMT(ERROR_DISABLED,
+                            "Camera \"%s\" disabled by policy", cameraId.string());
+                case -ENODEV:
+                default:
+                    return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+                            "Failed to initialize camera \"%s\": %s (%d)", cameraId.string(),
+                            strerror(-err), err);
+            }
+        }
+
+        // Update shim parameters for legacy clients
+        if (effectiveApiLevel == API_1) {
+            // Assume we have always received a Client subclass for API1
+            sp<Client> shimClient = reinterpret_cast<Client*>(client.get());
+            String8 rawParams = shimClient->getParameters();
+            CameraParameters params(rawParams);
+
+            auto cameraState = getCameraState(cameraId);
+            if (cameraState != nullptr) {
+                cameraState->setShimParams(params);
+            } else {
+                ALOGE("%s: Cannot update shim parameters for camera %s, no such device exists.",
+                        __FUNCTION__, cameraId.string());
+            }
+        }
+
+        if (shimUpdateOnly) {
+            // If only updating legacy shim parameters, immediately disconnect client
+            mServiceLock.unlock();
+            client->disconnect();
+            mServiceLock.lock();
+        } else {
+            // Otherwise, add client to active clients list
+            finishConnectLocked(client, partial);
+        }
+    } // lock is destroyed, allow further connect calls
+
+    // Important: release the mutex here so the client can call back into the service from its
+    // destructor (can be at the end of the call)
+    device = client;
+    return ret;
+}
+
 Status CameraService::setTorchMode(const String16& cameraId, bool enabled,
         const sp<IBinder>& clientBinder) {
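+    // Serialize torch-mode requests with other operations that hold mServiceLock.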
+    Mutex::Autolock lock(mServiceLock);
 
     ATRACE_CALL();
     if (enabled && clientBinder == nullptr) {
@@ -1392,17 +1646,17 @@
                 "Camera ID \"%s\" is a not valid camera ID", id.string());
     }
 
-    int32_t cameraStatus = state->getStatus();
-    if (cameraStatus != ICameraServiceListener::STATUS_PRESENT &&
-            cameraStatus != ICameraServiceListener::STATUS_NOT_AVAILABLE) {
-        ALOGE("%s: camera id is invalid %s", __FUNCTION__, id.string());
+    StatusInternal cameraStatus = state->getStatus();
+    if (cameraStatus != StatusInternal::PRESENT &&
+            cameraStatus != StatusInternal::NOT_AVAILABLE) {
+        ALOGE("%s: camera id is invalid %s, status %d", __FUNCTION__, id.string(), (int)cameraStatus);
         return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
                 "Camera ID \"%s\" is a not valid camera ID", id.string());
     }
 
     {
         Mutex::Autolock al(mTorchStatusMutex);
-        int32_t status;
+        TorchModeStatus status;
         status_t err = getTorchStatusLocked(id, &status);
         if (err != OK) {
             if (err == NAME_NOT_FOUND) {
@@ -1416,8 +1670,8 @@
                     strerror(-err), err);
         }
 
-        if (status == ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE) {
-            if (cameraStatus == ICameraServiceListener::STATUS_NOT_AVAILABLE) {
+        if (status == TorchModeStatus::NOT_AVAILABLE) {
+            if (cameraStatus == StatusInternal::NOT_AVAILABLE) {
                 ALOGE("%s: torch mode of camera %s is not available because "
                         "camera is in use", __FUNCTION__, id.string());
                 return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
@@ -1506,7 +1760,9 @@
     return Status::ok();
 }
 
-Status CameraService::addListener(const sp<ICameraServiceListener>& listener) {
+Status CameraService::addListener(const sp<ICameraServiceListener>& listener,
+        /*out*/
+        std::vector<hardware::CameraStatus> *cameraStatuses) {
     ATRACE_CALL();
 
     ALOGV("%s: Add listener %p", __FUNCTION__, listener.get());
@@ -1531,25 +1787,23 @@
         mListenerList.push_back(listener);
     }
 
-
-    /* Immediately signal current status to this listener only */
+    /* Collect current devices and status */
     {
         Mutex::Autolock lock(mCameraStatesLock);
         for (auto& i : mCameraStates) {
-            // TODO: Update binder to use String16 for camera IDs and remove;
-            int id = cameraIdToInt(i.first);
-            if (id == -1) continue;
-
-            listener->onStatusChanged(i.second->getStatus(), id);
+            cameraStatuses->emplace_back(i.first, mapToInterface(i.second->getStatus()));
         }
     }
 
-    /* Immediately signal current torch status to this listener only */
+    /*
+     * Immediately signal current torch status to this listener only
+     * This may be a subset of all the devices, so don't include it in the response directly
+     */
     {
         Mutex::Autolock al(mTorchStatusMutex);
         for (size_t i = 0; i < mTorchStatusMap.size(); i++ ) {
             String16 id = String16(mTorchStatusMap.keyAt(i).string());
-            listener->onTorchStatusChanged(mTorchStatusMap.valueAt(i), id);
+            listener->onTorchStatusChanged(mapToInterface(mTorchStatusMap.valueAt(i)), id);
         }
     }
 
@@ -1610,10 +1864,13 @@
     return ret;
 }
 
-Status CameraService::supportsCameraApi(int cameraId, int apiVersion, bool *isSupported) {
+Status CameraService::supportsCameraApi(const String16& cameraId, int apiVersion,
+        /*out*/ bool *isSupported) {
     ATRACE_CALL();
 
-    ALOGV("%s: for camera ID = %d", __FUNCTION__, cameraId);
+    const String8 id = String8(cameraId);
+
+    ALOGV("%s: for camera ID = %s", __FUNCTION__, id.string());
 
     switch (apiVersion) {
         case API_VERSION_1:
@@ -1626,37 +1883,38 @@
     }
 
     int facing = -1;
-    int deviceVersion = getDeviceVersion(cameraId, &facing);
+
+    int deviceVersion = getDeviceVersion(id, &facing);
 
     switch(deviceVersion) {
         case CAMERA_DEVICE_API_VERSION_1_0:
         case CAMERA_DEVICE_API_VERSION_3_0:
         case CAMERA_DEVICE_API_VERSION_3_1:
             if (apiVersion == API_VERSION_2) {
-                ALOGV("%s: Camera id %d uses HAL version %d <3.2, doesn't support api2 without shim",
-                        __FUNCTION__, cameraId, deviceVersion);
+                ALOGV("%s: Camera id %s uses HAL version %d <3.2, doesn't support api2 without shim",
+                        __FUNCTION__, id.string(), deviceVersion);
                 *isSupported = false;
             } else { // if (apiVersion == API_VERSION_1) {
-                ALOGV("%s: Camera id %d uses older HAL before 3.2, but api1 is always supported",
-                        __FUNCTION__, cameraId);
+                ALOGV("%s: Camera id %s uses older HAL before 3.2, but api1 is always supported",
+                        __FUNCTION__, id.string());
                 *isSupported = true;
             }
             break;
         case CAMERA_DEVICE_API_VERSION_3_2:
         case CAMERA_DEVICE_API_VERSION_3_3:
         case CAMERA_DEVICE_API_VERSION_3_4:
-            ALOGV("%s: Camera id %d uses HAL3.2 or newer, supports api1/api2 directly",
-                    __FUNCTION__, cameraId);
+            ALOGV("%s: Camera id %s uses HAL3.2 or newer, supports api1/api2 directly",
+                    __FUNCTION__, id.string());
             *isSupported = true;
             break;
         case -1: {
-            String8 msg = String8::format("Unknown camera ID %d", cameraId);
+            String8 msg = String8::format("Unknown camera ID %s", id.string());
             ALOGE("%s: %s", __FUNCTION__, msg.string());
             return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, msg.string());
         }
         default: {
-            String8 msg = String8::format("Unknown device version %d for device %d",
-                    deviceVersion, cameraId);
+            String8 msg = String8::format("Unknown device version %x for device %s",
+                    deviceVersion, id.string());
             ALOGE("%s: %s", __FUNCTION__, msg.string());
             return STATUS_ERROR(ERROR_INVALID_OPERATION, msg.string());
         }
@@ -1732,6 +1990,8 @@
  * Also check that the device HAL version is still in support
  */
 int CameraService::checkCameraCapabilities(int id, camera_info info, int *latestStrangeCameraId) {
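+    // These capability checks only apply when the legacy HAL module is loaded.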
+    if (mModule == nullptr) return NO_INIT;
+
     // device_version undefined in CAMERA_MODULE_API_VERSION_1_0,
     // All CAMERA_MODULE_API_VERSION_1_0 devices are backward-compatible
     if (mModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_0) {
@@ -2041,24 +2301,25 @@
 CameraService::Client::Client(const sp<CameraService>& cameraService,
         const sp<ICameraClient>& cameraClient,
         const String16& clientPackageName,
-        int cameraId, int cameraFacing,
+        const String8& cameraIdStr, int cameraFacing,
         int clientPid, uid_t clientUid,
         int servicePid) :
         CameraService::BasicClient(cameraService,
                 IInterface::asBinder(cameraClient),
                 clientPackageName,
-                cameraId, cameraFacing,
+                cameraIdStr, cameraFacing,
                 clientPid, clientUid,
-                servicePid)
+                servicePid),
+        mCameraId(CameraService::cameraIdToInt(cameraIdStr))
 {
     int callingPid = getCallingPid();
-    LOG1("Client::Client E (pid %d, id %d)", callingPid, cameraId);
+    LOG1("Client::Client E (pid %d, id %d)", callingPid, mCameraId);
 
     mRemoteCallback = cameraClient;
 
     cameraService->loadSound();
 
-    LOG1("Client::Client X (pid %d, id %d)", callingPid, cameraId);
+    LOG1("Client::Client X (pid %d, id %d)", callingPid, mCameraId);
 }
 
 // tear down the client
@@ -2066,26 +2327,28 @@
     ALOGV("~Client");
     mDestructionStarted = true;
 
-    mCameraService->releaseSound();
+    sCameraService->releaseSound();
     // unconditionally disconnect. function is idempotent
     Client::disconnect();
 }
 
+sp<CameraService> CameraService::BasicClient::BasicClient::sCameraService;
+
 CameraService::BasicClient::BasicClient(const sp<CameraService>& cameraService,
         const sp<IBinder>& remoteCallback,
         const String16& clientPackageName,
-        int cameraId, int cameraFacing,
+        const String8& cameraIdStr, int cameraFacing,
         int clientPid, uid_t clientUid,
         int servicePid):
-        mClientPackageName(clientPackageName), mDisconnected(false)
+        mCameraIdStr(cameraIdStr), mCameraFacing(cameraFacing),
+        mClientPackageName(clientPackageName), mClientPid(clientPid), mClientUid(clientUid),
+        mServicePid(servicePid),
+        mDisconnected(false),
+        mRemoteBinder(remoteCallback)
 {
-    mCameraService = cameraService;
-    mRemoteBinder = remoteCallback;
-    mCameraId = cameraId;
-    mCameraFacing = cameraFacing;
-    mClientPid = clientPid;
-    mClientUid = clientUid;
-    mServicePid = servicePid;
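+    // The first client constructed latches the owning CameraService into the shared static
+    // pointer; later clients reuse it.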
+    if (sCameraService == nullptr) {
+        sCameraService = cameraService;
+    }
     mOpsActive = false;
     mDestructionStarted = false;
 
@@ -2133,19 +2396,20 @@
     }
     mDisconnected = true;
 
-    mCameraService->removeByClient(this);
-    mCameraService->logDisconnected(String8::format("%d", mCameraId), mClientPid,
+    sCameraService->removeByClient(this);
+    sCameraService->logDisconnected(mCameraIdStr, mClientPid,
             String8(mClientPackageName));
 
     sp<IBinder> remote = getRemote();
     if (remote != nullptr) {
-        remote->unlinkToDeath(mCameraService);
+        remote->unlinkToDeath(sCameraService);
     }
 
     finishCameraOps();
     // Notify flashlight that a camera device is closed.
-    mCameraService->mFlashlight->deviceClosed(String8::format("%d", mCameraId));
-    ALOGI("%s: Disconnected client for camera %d for PID %d", __FUNCTION__, mCameraId, mClientPid);
+    sCameraService->mFlashlight->deviceClosed(mCameraIdStr);
+    ALOGI("%s: Disconnected client for camera %s for PID %d", __FUNCTION__, mCameraIdStr.string(),
+            mClientPid);
 
     // client shouldn't be able to call into us anymore
     mClientPid = 0;
@@ -2197,14 +2461,14 @@
             mClientUid, mClientPackageName);
 
     if (res == AppOpsManager::MODE_ERRORED) {
-        ALOGI("Camera %d: Access for \"%s\" has been revoked",
-                mCameraId, String8(mClientPackageName).string());
+        ALOGI("Camera %s: Access for \"%s\" has been revoked",
+                mCameraIdStr.string(), String8(mClientPackageName).string());
         return PERMISSION_DENIED;
     }
 
     if (res == AppOpsManager::MODE_IGNORED) {
-        ALOGI("Camera %d: Access for \"%s\" has been restricted",
-                mCameraId, String8(mClientPackageName).string());
+        ALOGI("Camera %s: Access for \"%s\" has been restricted",
+                mCameraIdStr.string(), String8(mClientPackageName).string());
         // Return the same error as for device policy manager rejection
         return -EACCES;
     }
@@ -2212,12 +2476,11 @@
     mOpsActive = true;
 
     // Transition device availability listeners from PRESENT -> NOT_AVAILABLE
-    mCameraService->updateStatus(ICameraServiceListener::STATUS_NOT_AVAILABLE,
-            String8::format("%d", mCameraId));
+    sCameraService->updateStatus(StatusInternal::NOT_AVAILABLE, mCameraIdStr);
 
     // Transition device state to OPEN
-    mCameraService->updateProxyDeviceState(ICameraServiceProxy::CAMERA_STATE_OPEN,
-            String8::format("%d", mCameraId));
+    sCameraService->updateProxyDeviceState(ICameraServiceProxy::CAMERA_STATE_OPEN,
+            mCameraIdStr);
 
     return OK;
 }
@@ -2232,16 +2495,16 @@
                 mClientPackageName);
         mOpsActive = false;
 
-        std::initializer_list<int32_t> rejected = {ICameraServiceListener::STATUS_NOT_PRESENT,
-                ICameraServiceListener::STATUS_ENUMERATING};
+        std::initializer_list<StatusInternal> rejected = {StatusInternal::PRESENT,
+                StatusInternal::ENUMERATING};
 
         // Transition to PRESENT if the camera is not in either of the rejected states
-        mCameraService->updateStatus(ICameraServiceListener::STATUS_PRESENT,
-                String8::format("%d", mCameraId), rejected);
+        sCameraService->updateStatus(StatusInternal::PRESENT,
+                mCameraIdStr, rejected);
 
         // Transition device state to CLOSED
-        mCameraService->updateProxyDeviceState(ICameraServiceProxy::CAMERA_STATE_CLOSED,
-                String8::format("%d", mCameraId));
+        sCameraService->updateProxyDeviceState(ICameraServiceProxy::CAMERA_STATE_CLOSED,
+                mCameraIdStr);
     }
     // Always stop watching, even if no camera op is active
     if (mOpsCallback != NULL) {
@@ -2273,7 +2536,7 @@
             "UNKNOWN");
 
     if (res != AppOpsManager::MODE_ALLOWED) {
-        ALOGI("Camera %d: Access for \"%s\" revoked", mCameraId,
+        ALOGI("Camera %s: Access for \"%s\" revoked", mCameraIdStr.string(),
                 myName.string());
         // Reset the client PID to allow server-initiated disconnect,
         // and to prevent further calls by client.
@@ -2286,17 +2549,6 @@
 
 // ----------------------------------------------------------------------------
 
-// Provide client strong pointer for callbacks.
-sp<CameraService::Client> CameraService::Client::getClientFromCookie(void* user) {
-    String8 cameraId = String8::format("%d", (int)(intptr_t) user);
-    auto clientDescriptor = gCameraService->mActiveClientManager.get(cameraId);
-    if (clientDescriptor != nullptr) {
-        return sp<Client>{
-                static_cast<Client*>(clientDescriptor->getValue().get())};
-    }
-    return sp<Client>{nullptr};
-}
-
 void CameraService::Client::notifyError(int32_t errorCode,
         const CaptureResultExtras& resultExtras) {
     (void) errorCode;
@@ -2336,11 +2588,11 @@
 
 CameraService::CameraState::CameraState(const String8& id, int cost,
         const std::set<String8>& conflicting) : mId(id),
-        mStatus(ICameraServiceListener::STATUS_PRESENT), mCost(cost), mConflicting(conflicting) {}
+        mStatus(StatusInternal::PRESENT), mCost(cost), mConflicting(conflicting) {}
 
 CameraService::CameraState::~CameraState() {}
 
-int32_t CameraService::CameraState::getStatus() const {
+CameraService::StatusInternal CameraService::CameraState::getStatus() const {
     Mutex::Autolock lock(mStatusLock);
     return mStatus;
 }
@@ -2502,8 +2754,8 @@
         }
 
         bool hasClient = false;
-        if (!mModule) {
-            result = String8::format("No camera module available!\n");
+        if (!mInitialized) {
+            result = String8::format("No camera HAL available!\n");
             write(fd, result.string(), result.size());
 
             // Dump event log for error information
@@ -2512,6 +2764,13 @@
             if (locked) mServiceLock.unlock();
             return NO_ERROR;
         }
+        if (mModule == nullptr) {
+            mCameraProviderManager->dump(fd, args);
+            // TODO - need way more dumping here
+
+            if (locked) mServiceLock.unlock();
+            return NO_ERROR;
+        }
 
         result = String8::format("Camera module HAL API version: 0x%x\n", mModule->getHalApiVersion());
         result.appendFormat("Camera module API version: 0x%x\n", mModule->getModuleApiVersion());
@@ -2549,7 +2808,6 @@
             result = String8::format("Camera %s information:\n", cameraId.string());
             camera_info info;
 
-            // TODO: Change getCameraInfo + HAL to use String cameraIds
             status_t rc = mModule->getCameraInfo(cameraIdToInt(cameraId), &info);
             if (rc != OK) {
                 result.appendFormat("  Error reading static information!\n");
@@ -2719,12 +2977,12 @@
             __FUNCTION__);
 }
 
-void CameraService::updateStatus(int32_t status, const String8& cameraId) {
+void CameraService::updateStatus(StatusInternal status, const String8& cameraId) {
     updateStatus(status, cameraId, {});
 }
 
-void CameraService::updateStatus(int32_t status, const String8& cameraId,
-        std::initializer_list<int32_t> rejectSourceStates) {
+void CameraService::updateStatus(StatusInternal status, const String8& cameraId,
+        std::initializer_list<StatusInternal> rejectSourceStates) {
     // Do not lock mServiceLock here or can get into a deadlock from
     // connect() -> disconnect -> updateStatus
 
@@ -2739,18 +2997,18 @@
     // Update the status for this camera state, then send the onStatusChangedCallbacks to each
     // of the listeners with both the mStatusStatus and mStatusListenerLock held
     state->updateStatus(status, cameraId, rejectSourceStates, [this]
-            (const String8& cameraId, int32_t status) {
+            (const String8& cameraId, StatusInternal status) {
 
-            if (status != ICameraServiceListener::STATUS_ENUMERATING) {
+            if (status != StatusInternal::ENUMERATING) {
                 // Update torch status if it has a flash unit.
                 Mutex::Autolock al(mTorchStatusMutex);
-                int32_t torchStatus;
+                TorchModeStatus torchStatus;
                 if (getTorchStatusLocked(cameraId, &torchStatus) !=
                         NAME_NOT_FOUND) {
-                    int32_t newTorchStatus =
-                            status == ICameraServiceListener::STATUS_PRESENT ?
-                            ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF :
-                            ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE;
+                    TorchModeStatus newTorchStatus =
+                            status == StatusInternal::PRESENT ?
+                            TorchModeStatus::AVAILABLE_OFF :
+                            TorchModeStatus::NOT_AVAILABLE;
                     if (torchStatus != newTorchStatus) {
                         onTorchStatusChangedLocked(cameraId, newTorchStatus);
                     }
@@ -2760,13 +3018,54 @@
             Mutex::Autolock lock(mStatusListenerLock);
 
             for (auto& listener : mListenerList) {
-                // TODO: Refactor status listeners to use strings for Camera IDs and remove this.
-                int id = cameraIdToInt(cameraId);
-                if (id != -1) listener->onStatusChanged(status, id);
+                listener->onStatusChanged(mapToInterface(status), String16(cameraId));
             }
         });
 }
 
+template<class Func>
+void CameraService::CameraState::updateStatus(StatusInternal status,
+        const String8& cameraId,
+        std::initializer_list<StatusInternal> rejectSourceStates,
+        Func onStatusUpdatedLocked) {
+    Mutex::Autolock lock(mStatusLock);
+    StatusInternal oldStatus = mStatus;
+    mStatus = status;
+
+    if (oldStatus == status) {
+        return;
+    }
+
+    ALOGV("%s: Status has changed for camera ID %s from %#x to %#x", __FUNCTION__,
+            cameraId.string(), static_cast<int>(oldStatus), static_cast<int>(status));
+
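+    // A device in NOT_PRESENT can only become PRESENT or ENUMERATING again; reject any
+    // other transition out of that state.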
+    if (oldStatus == StatusInternal::NOT_PRESENT &&
+            (status != StatusInternal::PRESENT &&
+             status != StatusInternal::ENUMERATING)) {
+
+        ALOGW("%s: From NOT_PRESENT can only transition into PRESENT or ENUMERATING",
+                __FUNCTION__);
+        mStatus = oldStatus;
+        return;
+    }
+
+    /**
+     * Sometimes we want to conditionally do a transition.
+     * For example if a client disconnects, we want to go to PRESENT
+     * only if we weren't already in NOT_PRESENT or ENUMERATING.
+     */
+    for (auto& rejectStatus : rejectSourceStates) {
+        if (oldStatus == rejectStatus) {
+            ALOGV("%s: Rejecting status transition for Camera ID %s, since the source "
+                    "state was in one of the bad states.", __FUNCTION__, cameraId.string());
+            mStatus = oldStatus;
+            return;
+        }
+    }
+
+    onStatusUpdatedLocked(cameraId, status);
+}
+
 void CameraService::updateProxyDeviceState(ICameraServiceProxy::CameraState newState,
         const String8& cameraId) {
     sp<ICameraServiceProxy> proxyBinder = getCameraServiceProxy();
@@ -2777,7 +3076,7 @@
 
 status_t CameraService::getTorchStatusLocked(
         const String8& cameraId,
-        int32_t *status) const {
+        TorchModeStatus *status) const {
     if (!status) {
         return BAD_VALUE;
     }
@@ -2792,14 +3091,12 @@
 }
 
 status_t CameraService::setTorchStatusLocked(const String8& cameraId,
-        int32_t status) {
+        TorchModeStatus status) {
     ssize_t index = mTorchStatusMap.indexOfKey(cameraId);
     if (index == NAME_NOT_FOUND) {
         return BAD_VALUE;
     }
-    int32_t& item =
-            mTorchStatusMap.editValueAt(index);
-    item = status;
+    mTorchStatusMap.editValueAt(index) = status;
 
     return OK;
 }
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index b35f35c..d463b59 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -29,6 +29,8 @@
 #include <camera/ICameraServiceProxy.h>
 #include <hardware/camera.h>
 
+#include <android/hardware/camera/common/1.0/types.h>
+
 #include <camera/VendorTagDescriptor.h>
 #include <camera/CaptureResult.h>
 #include <camera/CameraParameters.h>
@@ -36,6 +38,7 @@
 #include "CameraFlashlight.h"
 
 #include "common/CameraModule.h"
+#include "common/CameraProviderManager.h"
 #include "media/RingBuffer.h"
 #include "utils/AutoConditionLock.h"
 #include "utils/ClientManager.h"
@@ -55,11 +58,13 @@
 
 class CameraService :
     public BinderService<CameraService>,
-    public ::android::hardware::BnCameraService,
-    public IBinder::DeathRecipient,
-    public camera_module_callbacks_t
+    public virtual ::android::hardware::BnCameraService,
+    public virtual IBinder::DeathRecipient,
+    public camera_module_callbacks_t,
+    public virtual CameraProviderManager::StatusListener
 {
     friend class BinderService<CameraService>;
+    friend class CameraClient;
 public:
     class Client;
     class BasicClient;
@@ -94,11 +99,12 @@
     virtual             ~CameraService();
 
     /////////////////////////////////////////////////////////////////////
-    // HAL Callbacks
-    virtual void        onDeviceStatusChanged(int cameraId,
-                                              camera_device_status_t newStatus);
+    // HAL Callbacks - implements CameraProviderManager::StatusListener
+
+    virtual void        onDeviceStatusChanged(const String8 &cameraId,
+            hardware::camera::common::V1_0::CameraDeviceStatus newHalStatus) override;
     virtual void        onTorchStatusChanged(const String8& cameraId,
-                                             int32_t newStatus);
+            hardware::camera::common::V1_0::TorchModeStatus newStatus) override;
 
     /////////////////////////////////////////////////////////////////////
     // ICameraService
@@ -106,7 +112,7 @@
 
     virtual binder::Status     getCameraInfo(int cameraId,
             hardware::CameraInfo* cameraInfo);
-    virtual binder::Status     getCameraCharacteristics(int cameraId,
+    virtual binder::Status     getCameraCharacteristics(const String16& cameraId,
             CameraMetadata* cameraInfo);
     virtual binder::Status     getCameraVendorTagDescriptor(
             /*out*/
@@ -125,12 +131,14 @@
             sp<hardware::ICamera>* device);
 
     virtual binder::Status     connectDevice(
-            const sp<hardware::camera2::ICameraDeviceCallbacks>& cameraCb, int32_t cameraId,
+            const sp<hardware::camera2::ICameraDeviceCallbacks>& cameraCb, const String16& cameraId,
             const String16& clientPackageName, int32_t clientUid,
             /*out*/
             sp<hardware::camera2::ICameraDeviceUser>* device);
 
-    virtual binder::Status    addListener(const sp<hardware::ICameraServiceListener>& listener);
+    virtual binder::Status    addListener(const sp<hardware::ICameraServiceListener>& listener,
+            /*out*/
+            std::vector<hardware::CameraStatus>* cameraStatuses);
     virtual binder::Status    removeListener(
             const sp<hardware::ICameraServiceListener>& listener);
 
@@ -147,7 +155,7 @@
 
     // OK = supports api of that version, -EOPNOTSUPP = does not support
     virtual binder::Status    supportsCameraApi(
-            int32_t cameraId, int32_t apiVersion,
+            const String16& cameraId, int32_t apiVersion,
             /*out*/
             bool *isSupported);
 
@@ -181,7 +189,7 @@
 
     /////////////////////////////////////////////////////////////////////
     // CameraDeviceFactory functionality
-    int                 getDeviceVersion(int cameraId, int* facing = NULL);
+    int                 getDeviceVersion(const String8& cameraId, int* facing = NULL);
 
     /////////////////////////////////////////////////////////////////////
     // Shared utilities
@@ -193,6 +201,7 @@
     class BasicClient : public virtual RefBase {
     public:
         virtual status_t       initialize(CameraModule *module) = 0;
+        virtual status_t       initialize(sp<CameraProviderManager> manager) = 0;
         virtual binder::Status disconnect();
 
         // because we can't virtually inherit IInterface, which breaks
@@ -229,7 +238,7 @@
         BasicClient(const sp<CameraService>& cameraService,
                 const sp<IBinder>& remoteCallback,
                 const String16& clientPackageName,
-                int cameraId,
+                const String8& cameraIdStr,
                 int cameraFacing,
                 int clientPid,
                 uid_t clientUid,
@@ -244,13 +253,13 @@
         bool                            mDestructionStarted;
 
         // these are initialized in the constructor.
-        sp<CameraService>               mCameraService;     // immutable after constructor
-        int                             mCameraId;          // immutable after constructor
-        int                             mCameraFacing;      // immutable after constructor
-        String16                        mClientPackageName; // immutable after constructor
+        static sp<CameraService>        sCameraService;
+        const String8                   mCameraIdStr;
+        const int                       mCameraFacing;
+        String16                        mClientPackageName;
         pid_t                           mClientPid;
-        uid_t                           mClientUid;         // immutable after constructor
-        pid_t                           mServicePid;        // immutable after constructor
+        const uid_t                     mClientUid;
+        const pid_t                     mServicePid;
         bool                            mDisconnected;
 
         // - The app-side Binder interface to receive callbacks from us
@@ -316,7 +325,7 @@
         Client(const sp<CameraService>& cameraService,
                 const sp<hardware::ICameraClient>& cameraClient,
                 const String16& clientPackageName,
-                int cameraId,
+                const String8& cameraIdStr,
                 int cameraFacing,
                 int clientPid,
                 uid_t clientUid,
@@ -339,14 +348,12 @@
         // superclass this can be cast to.
         virtual bool canCastToApiClient(apiLevel level) const;
     protected:
-        // Convert client from cookie.
-        static sp<CameraService::Client> getClientFromCookie(void* user);
-
         // Initialized in constructor
 
         // - The app-side Binder interface to receive callbacks from us
         sp<hardware::ICameraClient>               mRemoteCallback;
 
+        int mCameraId;  // All API1 clients use integer camera IDs
     }; // class Client
 
     /**
@@ -409,6 +416,20 @@
 
 private:
 
+    typedef hardware::camera::common::V1_0::CameraDeviceStatus CameraDeviceStatus;
+
+    /**
+     * Typesafe version of device status, containing both the HAL-layer and the service interface-
+     * layer values.
+     */
+    enum class StatusInternal : int32_t {
+        NOT_PRESENT = static_cast<int32_t>(CameraDeviceStatus::NOT_PRESENT),
+        PRESENT = static_cast<int32_t>(CameraDeviceStatus::PRESENT),
+        ENUMERATING = static_cast<int32_t>(CameraDeviceStatus::ENUMERATING),
+        NOT_AVAILABLE = static_cast<int32_t>(hardware::ICameraServiceListener::STATUS_NOT_AVAILABLE),
+        UNKNOWN = static_cast<int32_t>(hardware::ICameraServiceListener::STATUS_UNKNOWN)
+    };
+
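For orientation, a minimal sketch of the kind of mapping the mapToInternal() helper declared later in this header performs, assuming a one-to-one switch over the HAL enum (the real body lives in CameraService.cpp and is not part of this hunk):

    CameraService::StatusInternal CameraService::mapToInternal(
            hardware::camera::common::V1_0::CameraDeviceStatus status) {
        using hardware::camera::common::V1_0::CameraDeviceStatus;
        switch (status) {
            // HAL-layer values translate directly; NOT_AVAILABLE is only reached
            // through service-side logic (e.g. a device being held by a client).
            case CameraDeviceStatus::NOT_PRESENT: return StatusInternal::NOT_PRESENT;
            case CameraDeviceStatus::PRESENT:     return StatusInternal::PRESENT;
            case CameraDeviceStatus::ENUMERATING: return StatusInternal::ENUMERATING;
            default:                              return StatusInternal::UNKNOWN;
        }
    }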
     /**
      * Container class for the state of each logical camera device, including: ID, status, and
      * dependencies on other devices.  The mapping of camera ID -> state saved in mCameraStates
@@ -420,6 +441,7 @@
      */
     class CameraState {
     public:
+
         /**
          * Make a new CameraState and set the ID, cost, and conflicting devices using the values
          * returned in the HAL's camera_info struct for each device.
@@ -432,7 +454,7 @@
          *
          * This method acquires mStatusLock.
          */
-        int32_t getStatus() const;
+        StatusInternal getStatus() const;
 
         /**
          * This function updates the status for this camera device, unless the given status
@@ -445,8 +467,9 @@
          * This method acquires mStatusLock.
          */
         template<class Func>
-        void updateStatus(int32_t status, const String8& cameraId,
-                std::initializer_list<int32_t> rejectSourceStates,
+        void updateStatus(StatusInternal status,
+                const String8& cameraId,
+                std::initializer_list<StatusInternal> rejectSourceStates,
                 Func onStatusUpdatedLocked);
 
         /**
@@ -477,7 +500,7 @@
 
     private:
         const String8 mId;
-        int32_t mStatus; // protected by mStatusLock
+        StatusInternal mStatus; // protected by mStatusLock
         const int mCost;
         std::set<String8> mConflicting;
         mutable Mutex mStatusLock;
@@ -487,6 +510,12 @@
     // Delay-load the Camera HAL module
     virtual void onFirstRef();
 
+    // Load the legacy HAL module
+    status_t loadLegacyHalModule();
+
+    // Enumerate all camera providers in the system
+    status_t enumerateProviders();
+
     // Check if we can connect, before we acquire the service lock.
     // The returned originalClientPid is the PID of the original process that wants to connect to
     // camera.
@@ -657,7 +686,11 @@
     sp<MediaPlayer>     mSoundPlayer[NUM_SOUNDS];
     int                 mSoundRef;  // reference count (release all MediaPlayer when 0)
 
-    CameraModule*     mModule;
+    // Flag indicating whether the camera subsystem is in a usable state
+    bool                mInitialized;
+
+    CameraModule*       mModule;
+    sp<CameraProviderManager> mCameraProviderManager;
 
     // Guarded by mStatusListenerMutex
     std::vector<sp<hardware::ICameraServiceListener>> mListenerList;
@@ -671,9 +704,12 @@
      * This method must be idempotent.
      * This method acquires mStatusLock and mStatusListenerLock.
      */
-    void updateStatus(int32_t status, const String8& cameraId,
-            std::initializer_list<int32_t> rejectedSourceStates);
-    void updateStatus(int32_t status, const String8& cameraId);
+    void updateStatus(StatusInternal status,
+            const String8& cameraId,
+            std::initializer_list<StatusInternal>
+                rejectedSourceStates);
+    void updateStatus(StatusInternal status,
+            const String8& cameraId);
 
     // flashlight control
     sp<CameraFlashlight> mFlashlight;
@@ -684,7 +720,8 @@
     // guard mTorchUidMap
     Mutex                mTorchUidMapMutex;
     // camera id -> torch status
-    KeyedVector<String8, int32_t> mTorchStatusMap;
+    KeyedVector<String8, hardware::camera::common::V1_0::TorchModeStatus>
+            mTorchStatusMap;
     // camera id -> torch client binder
     // only store the last client that turns on each camera's torch mode
     KeyedVector<String8, sp<IBinder>> mTorchClientMap;
@@ -697,15 +734,15 @@
     // handle torch mode status change and invoke callbacks. mTorchStatusMutex
     // should be locked.
     void onTorchStatusChangedLocked(const String8& cameraId,
-            int32_t newStatus);
+            hardware::camera::common::V1_0::TorchModeStatus newStatus);
 
     // get a camera's torch status. mTorchStatusMutex should be locked.
     status_t getTorchStatusLocked(const String8 &cameraId,
-            int32_t *status) const;
+             hardware::camera::common::V1_0::TorchModeStatus *status) const;
 
     // set a camera's torch status. mTorchStatusMutex should be locked.
     status_t setTorchStatusLocked(const String8 &cameraId,
-            int32_t status);
+            hardware::camera::common::V1_0::TorchModeStatus status);
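As a hedged sketch of what these locked accessors typically look like over the KeyedVector declared above (the real implementations are in CameraService.cpp, outside this hunk):

    status_t CameraService::getTorchStatusLocked(const String8& cameraId,
            hardware::camera::common::V1_0::TorchModeStatus* status) const {
        if (status == nullptr) {
            return BAD_VALUE;
        }
        ssize_t index = mTorchStatusMap.indexOfKey(cameraId);
        if (index == NAME_NOT_FOUND) {
            // Unknown camera ID
            return NAME_NOT_FOUND;
        }
        *status = mTorchStatusMap.valueAt(index);
        return OK;
    }

    status_t CameraService::setTorchStatusLocked(const String8& cameraId,
            hardware::camera::common::V1_0::TorchModeStatus status) {
        ssize_t index = mTorchStatusMap.indexOfKey(cameraId);
        if (index == NAME_NOT_FOUND) {
            // Only update entries for cameras we already know about
            return BAD_VALUE;
        }
        mTorchStatusMap.replaceValueFor(cameraId, status);
        return OK;
    }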
 
     // IBinder::DeathRecipient implementation
     virtual void        binderDied(const wp<IBinder> &who);
@@ -729,14 +766,6 @@
      */
     binder::Status      getLegacyParametersLazy(int cameraId, /*out*/CameraParameters* parameters);
 
-    /**
-     * Generate the CameraCharacteristics metadata required by the Camera2 API
-     * from the available HAL1 CameraParameters and CameraInfo.
-     *
-     * Sets Status to a service-specific error on failure
-     */
-    binder::Status      generateShimMetadata(int cameraId, /*out*/CameraMetadata* cameraInfo);
-
     static int getCallingPid();
 
     static int getCallingUid();
@@ -752,7 +781,7 @@
     static int getCameraPriorityFromProcState(int procState);
 
     static binder::Status makeClient(const sp<CameraService>& cameraService,
-            const sp<IInterface>& cameraCb, const String16& packageName, int cameraId,
+            const sp<IInterface>& cameraCb, const String16& packageName, const String8& cameraId,
             int facing, int clientPid, uid_t clientUid, int servicePid, bool legacyMode,
             int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
             /*out*/sp<BasicClient>* client);
@@ -760,227 +789,15 @@
     status_t checkCameraAccess(const String16& opPackageName);
 
     static String8 toString(std::set<userid_t> intSet);
+    static int32_t mapToInterface(hardware::camera::common::V1_0::TorchModeStatus status);
+    static StatusInternal mapToInternal(hardware::camera::common::V1_0::CameraDeviceStatus status);
+    static int32_t mapToInterface(StatusInternal status);
 
     static sp<ICameraServiceProxy> getCameraServiceProxy();
     static void pingCameraServiceProxy();
 
 };
 
-template<class Func>
-void CameraService::CameraState::updateStatus(int32_t status,
-        const String8& cameraId,
-        std::initializer_list<int32_t> rejectSourceStates,
-        Func onStatusUpdatedLocked) {
-    Mutex::Autolock lock(mStatusLock);
-    int32_t oldStatus = mStatus;
-    mStatus = status;
-
-    if (oldStatus == status) {
-        return;
-    }
-
-    ALOGV("%s: Status has changed for camera ID %s from %#x to %#x", __FUNCTION__,
-            cameraId.string(), oldStatus, status);
-
-    if (oldStatus == hardware::ICameraServiceListener::STATUS_NOT_PRESENT &&
-            (status != hardware::ICameraServiceListener::STATUS_PRESENT &&
-             status != hardware::ICameraServiceListener::STATUS_ENUMERATING)) {
-
-        ALOGW("%s: From NOT_PRESENT can only transition into PRESENT or ENUMERATING",
-                __FUNCTION__);
-        mStatus = oldStatus;
-        return;
-    }
-
-    /**
-     * Sometimes we want to conditionally do a transition.
-     * For example if a client disconnects, we want to go to PRESENT
-     * only if we weren't already in NOT_PRESENT or ENUMERATING.
-     */
-    for (auto& rejectStatus : rejectSourceStates) {
-        if (oldStatus == rejectStatus) {
-            ALOGV("%s: Rejecting status transition for Camera ID %s, since the source "
-                    "state was in one of the bad states.", __FUNCTION__, cameraId.string());
-            mStatus = oldStatus;
-            return;
-        }
-    }
-
-    onStatusUpdatedLocked(cameraId, status);
-}
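To make the conditional-transition behavior described in the comment above concrete, one plausible call site (illustrative only, adapted to the new StatusInternal-based signature rather than copied from the service code): on client disconnect the device should return to PRESENT, but only if it was not already unplugged or enumerating.

    // Illustrative only: restore availability after a client disconnects, unless the
    // device has meanwhile gone NOT_PRESENT or is still ENUMERATING.
    updateStatus(StatusInternal::PRESENT, cameraId,
            {StatusInternal::NOT_PRESENT, StatusInternal::ENUMERATING});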
-
-#define STATUS_ERROR(errorCode, errorString) \
-    binder::Status::fromServiceSpecificError(errorCode, \
-            String8::format("%s:%d: %s", __FUNCTION__, __LINE__, errorString))
-
-#define STATUS_ERROR_FMT(errorCode, errorString, ...) \
-    binder::Status::fromServiceSpecificError(errorCode, \
-            String8::format("%s:%d: " errorString, __FUNCTION__, __LINE__, __VA_ARGS__))
-
-
-template<class CALLBACK, class CLIENT>
-binder::Status CameraService::connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
-        int halVersion, const String16& clientPackageName, int clientUid, int clientPid,
-        apiLevel effectiveApiLevel, bool legacyMode, bool shimUpdateOnly,
-        /*out*/sp<CLIENT>& device) {
-    binder::Status ret = binder::Status::ok();
-
-    String8 clientName8(clientPackageName);
-
-    int originalClientPid = 0;
-
-    ALOGI("CameraService::connect call (PID %d \"%s\", camera ID %s) for HAL version %s and "
-            "Camera API version %d", clientPid, clientName8.string(), cameraId.string(),
-            (halVersion == -1) ? "default" : std::to_string(halVersion).c_str(),
-            static_cast<int>(effectiveApiLevel));
-
-    sp<CLIENT> client = nullptr;
-    {
-        // Acquire mServiceLock and prevent other clients from connecting
-        std::unique_ptr<AutoConditionLock> lock =
-                AutoConditionLock::waitAndAcquire(mServiceLockWrapper, DEFAULT_CONNECT_TIMEOUT_NS);
-
-        if (lock == nullptr) {
-            ALOGE("CameraService::connect (PID %d) rejected (too many other clients connecting)."
-                    , clientPid);
-            return STATUS_ERROR_FMT(ERROR_MAX_CAMERAS_IN_USE,
-                    "Cannot open camera %s for \"%s\" (PID %d): Too many other clients connecting",
-                    cameraId.string(), clientName8.string(), clientPid);
-        }
-
-        // Enforce client permissions and do basic sanity checks
-        if(!(ret = validateConnectLocked(cameraId, clientName8,
-                /*inout*/clientUid, /*inout*/clientPid, /*out*/originalClientPid)).isOk()) {
-            return ret;
-        }
-
-        // Check the shim parameters after acquiring lock, if they have already been updated and
-        // we were doing a shim update, return immediately
-        if (shimUpdateOnly) {
-            auto cameraState = getCameraState(cameraId);
-            if (cameraState != nullptr) {
-                if (!cameraState->getShimParams().isEmpty()) return ret;
-            }
-        }
-
-        status_t err;
-
-        sp<BasicClient> clientTmp = nullptr;
-        std::shared_ptr<resource_policy::ClientDescriptor<String8, sp<BasicClient>>> partial;
-        if ((err = handleEvictionsLocked(cameraId, originalClientPid, effectiveApiLevel,
-                IInterface::asBinder(cameraCb), clientName8, /*out*/&clientTmp,
-                /*out*/&partial)) != NO_ERROR) {
-            switch (err) {
-                case -ENODEV:
-                    return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
-                            "No camera device with ID \"%s\" currently available",
-                            cameraId.string());
-                case -EBUSY:
-                    return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
-                            "Higher-priority client using camera, ID \"%s\" currently unavailable",
-                            cameraId.string());
-                default:
-                    return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
-                            "Unexpected error %s (%d) opening camera \"%s\"",
-                            strerror(-err), err, cameraId.string());
-            }
-        }
-
-        if (clientTmp.get() != nullptr) {
-            // Handle special case for API1 MediaRecorder where the existing client is returned
-            device = static_cast<CLIENT*>(clientTmp.get());
-            return ret;
-        }
-
-        // give flashlight a chance to close devices if necessary.
-        mFlashlight->prepareDeviceOpen(cameraId);
-
-        // TODO: Update getDeviceVersion + HAL interface to use strings for Camera IDs
-        int id = cameraIdToInt(cameraId);
-        if (id == -1) {
-            ALOGE("%s: Invalid camera ID %s, cannot get device version from HAL.", __FUNCTION__,
-                    cameraId.string());
-            return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
-                    "Bad camera ID \"%s\" passed to camera open", cameraId.string());
-        }
-
-        int facing = -1;
-        int deviceVersion = getDeviceVersion(id, /*out*/&facing);
-        sp<BasicClient> tmp = nullptr;
-        if(!(ret = makeClient(this, cameraCb, clientPackageName, id, facing, clientPid,
-                clientUid, getpid(), legacyMode, halVersion, deviceVersion, effectiveApiLevel,
-                /*out*/&tmp)).isOk()) {
-            return ret;
-        }
-        client = static_cast<CLIENT*>(tmp.get());
-
-        LOG_ALWAYS_FATAL_IF(client.get() == nullptr, "%s: CameraService in invalid state",
-                __FUNCTION__);
-
-        if ((err = client->initialize(mModule)) != OK) {
-            ALOGE("%s: Could not initialize client from HAL module.", __FUNCTION__);
-            // Errors could be from the HAL module open call or from AppOpsManager
-            switch(err) {
-                case BAD_VALUE:
-                    return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
-                            "Illegal argument to HAL module for camera \"%s\"", cameraId.string());
-                case -EBUSY:
-                    return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
-                            "Camera \"%s\" is already open", cameraId.string());
-                case -EUSERS:
-                    return STATUS_ERROR_FMT(ERROR_MAX_CAMERAS_IN_USE,
-                            "Too many cameras already open, cannot open camera \"%s\"",
-                            cameraId.string());
-                case PERMISSION_DENIED:
-                    return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
-                            "No permission to open camera \"%s\"", cameraId.string());
-                case -EACCES:
-                    return STATUS_ERROR_FMT(ERROR_DISABLED,
-                            "Camera \"%s\" disabled by policy", cameraId.string());
-                case -ENODEV:
-                default:
-                    return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
-                            "Failed to initialize camera \"%s\": %s (%d)", cameraId.string(),
-                            strerror(-err), err);
-            }
-        }
-
-        // Update shim parameters for legacy clients
-        if (effectiveApiLevel == API_1) {
-            // Assume we have always received a Client subclass for API1
-            sp<Client> shimClient = reinterpret_cast<Client*>(client.get());
-            String8 rawParams = shimClient->getParameters();
-            CameraParameters params(rawParams);
-
-            auto cameraState = getCameraState(cameraId);
-            if (cameraState != nullptr) {
-                cameraState->setShimParams(params);
-            } else {
-                ALOGE("%s: Cannot update shim parameters for camera %s, no such device exists.",
-                        __FUNCTION__, cameraId.string());
-            }
-        }
-
-        if (shimUpdateOnly) {
-            // If only updating legacy shim parameters, immediately disconnect client
-            mServiceLock.unlock();
-            client->disconnect();
-            mServiceLock.lock();
-        } else {
-            // Otherwise, add client to active clients list
-            finishConnectLocked(client, partial);
-        }
-    } // lock is destroyed, allow further connect calls
-
-    // Important: release the mutex here so the client can call back into the service from its
-    // destructor (can be at the end of the call)
-    device = client;
-    return ret;
-}
-
-#undef STATUS_ERROR_FMT
-#undef STATUS_ERROR
-
 } // namespace android
 
 #endif
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index bcd62d6..3aec562 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -56,7 +56,8 @@
         int servicePid,
         bool legacyMode):
         Camera2ClientBase(cameraService, cameraClient, clientPackageName,
-                cameraId, cameraFacing, clientPid, clientUid, servicePid),
+                String8::format("%d", cameraId), cameraFacing,
+                clientPid, clientUid, servicePid),
         mParameters(cameraId, cameraFacing)
 {
     ATRACE_CALL();
@@ -67,13 +68,22 @@
     mLegacyMode = legacyMode;
 }
 
-status_t Camera2Client::initialize(CameraModule *module)
+status_t Camera2Client::initialize(CameraModule *module) {
+    return initializeImpl(module);
+}
+
+status_t Camera2Client::initialize(sp<CameraProviderManager> manager) {
+    return initializeImpl(manager);
+}
+
+template<typename TProviderPtr>
+status_t Camera2Client::initializeImpl(TProviderPtr providerPtr)
 {
     ATRACE_CALL();
     ALOGV("%s: Initializing client for camera %d", __FUNCTION__, mCameraId);
     status_t res;
 
-    res = Camera2ClientBase::initialize(module);
+    res = Camera2ClientBase::initialize(providerPtr);
     if (res != OK) {
         return res;
     }
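The pattern above (and repeated below for CameraClient and CameraDeviceClient) keeps a single shared body while exposing both the legacy CameraModule* and the new CameraProviderManager entry points. A self-contained sketch of the idiom, with stand-in types not taken from this patch:

    #include <memory>

    struct LegacyModule {};     // stand-in for CameraModule*
    struct ProviderManager {};  // stand-in for sp<CameraProviderManager>

    class ExampleClient {
    public:
        // Two public overloads, one shared implementation.
        int initialize(LegacyModule* module) { return initializeImpl(module); }
        int initialize(std::shared_ptr<ProviderManager> manager) { return initializeImpl(manager); }
    private:
        template <typename TProviderPtr>
        int initializeImpl(TProviderPtr providerPtr) {
            // Shared setup path; providerPtr is forwarded to lower layers that
            // themselves overload on the two provider types.
            (void) providerPtr;
            return 0; // OK
        }
    };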
@@ -993,7 +1003,7 @@
 }
 
 status_t Camera2Client::startRecordingL(Parameters &params, bool restart) {
-    status_t res;
+    status_t res = OK;
 
     ALOGV("%s: state == %d, restart = %d", __FUNCTION__, params.state, restart);
 
@@ -1034,7 +1044,7 @@
     }
 
     if (!restart) {
-        mCameraService->playSound(CameraService::SOUND_RECORDING_START);
+        sCameraService->playSound(CameraService::SOUND_RECORDING_START);
         mStreamingProcessor->updateRecordingRequest(params);
         if (res != OK) {
             ALOGE("%s: Camera %d: Unable to update recording request: %s (%d)",
@@ -1191,7 +1201,7 @@
             return;
     };
 
-    mCameraService->playSound(CameraService::SOUND_RECORDING_STOP);
+    sCameraService->playSound(CameraService::SOUND_RECORDING_STOP);
 
     // Remove recording stream because the video target may be abandoned soon.
     res = stopStream();
@@ -1621,7 +1631,7 @@
 }
 
 status_t Camera2Client::commandPlayRecordingSoundL() {
-    mCameraService->playSound(CameraService::SOUND_RECORDING_START);
+    sCameraService->playSound(CameraService::SOUND_RECORDING_START);
     return OK;
 }
 
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index e2129f5..87c91a0 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -98,7 +98,8 @@
 
     virtual ~Camera2Client();
 
-    status_t initialize(CameraModule *module);
+    virtual status_t initialize(CameraModule *module) override;
+    virtual status_t initialize(sp<CameraProviderManager> manager) override;
 
     virtual status_t dump(int fd, const Vector<String16>& args);
 
@@ -219,6 +220,9 @@
 
     // Video snapshot jpeg size overriding helper function
     status_t overrideVideoSnapshotSize(Parameters &params);
+
+    template<typename TProviderPtr>
+    status_t initializeImpl(TProviderPtr providerPtr);
 };
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index 266fb03..b83d425 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -41,7 +41,8 @@
         int clientPid, int clientUid,
         int servicePid, bool legacyMode):
         Client(cameraService, cameraClient, clientPackageName,
-                cameraId, cameraFacing, clientPid, clientUid, servicePid)
+                String8::format("%d", cameraId), cameraFacing, clientPid,
+                clientUid, servicePid)
 {
     int callingPid = getCallingPid();
     LOG1("CameraClient::CameraClient E (pid %d, id %d)", callingPid, cameraId);
@@ -61,6 +62,15 @@
 }
 
 status_t CameraClient::initialize(CameraModule *module) {
+    return initializeImpl<CameraModule*>(module);
+}
+
+status_t CameraClient::initialize(sp<CameraProviderManager> manager) {
+    return initializeImpl<sp<CameraProviderManager>>(manager);
+}
+
+template<typename TProviderPtr>
+status_t CameraClient::initializeImpl(TProviderPtr providerPtr) {
     int callingPid = getCallingPid();
     status_t res;
 
@@ -76,7 +86,7 @@
     snprintf(camera_device_name, sizeof(camera_device_name), "%d", mCameraId);
 
     mHardware = new CameraHardwareInterface(camera_device_name);
-    res = mHardware->initialize(module);
+    res = mHardware->initialize(providerPtr);
     if (res != OK) {
         ALOGE("%s: Camera %d: unable to initialize device: %s (%d)",
                 __FUNCTION__, mCameraId, strerror(-res), res);
@@ -252,7 +262,7 @@
     // Turn off all messages.
     disableMsgType(CAMERA_MSG_ALL_MSGS);
     mHardware->stopPreview();
-    mCameraService->updateProxyDeviceState(
+    sCameraService->updateProxyDeviceState(
         ICameraServiceProxy::CAMERA_STATE_IDLE,
         String8::format("%d", mCameraId));
     mHardware->cancelPicture();
@@ -414,7 +424,7 @@
     mHardware->setPreviewWindow(mPreviewWindow);
     result = mHardware->startPreview();
     if (result == NO_ERROR) {
-        mCameraService->updateProxyDeviceState(
+        sCameraService->updateProxyDeviceState(
             ICameraServiceProxy::CAMERA_STATE_ACTIVE,
             String8::format("%d", mCameraId));
     }
@@ -440,7 +450,7 @@
 
     // start recording mode
     enableMsgType(CAMERA_MSG_VIDEO_FRAME);
-    mCameraService->playSound(CameraService::SOUND_RECORDING_START);
+    sCameraService->playSound(CameraService::SOUND_RECORDING_START);
     result = mHardware->startRecording();
     if (result != NO_ERROR) {
         ALOGE("mHardware->startRecording() failed with status %d", result);
@@ -457,7 +467,7 @@
 
     disableMsgType(CAMERA_MSG_PREVIEW_FRAME);
     mHardware->stopPreview();
-    mCameraService->updateProxyDeviceState(
+    sCameraService->updateProxyDeviceState(
         ICameraServiceProxy::CAMERA_STATE_IDLE,
         String8::format("%d", mCameraId));
     mPreviewBuffer.clear();
@@ -471,7 +481,7 @@
 
     disableMsgType(CAMERA_MSG_VIDEO_FRAME);
     mHardware->stopRecording();
-    mCameraService->playSound(CameraService::SOUND_RECORDING_STOP);
+    sCameraService->playSound(CameraService::SOUND_RECORDING_STOP);
 
     mPreviewBuffer.clear();
 }
@@ -697,7 +707,7 @@
         }
         return OK;
     } else if (cmd == CAMERA_CMD_PLAY_RECORDING_SOUND) {
-        mCameraService->playSound(CameraService::SOUND_RECORDING_START);
+        sCameraService->playSound(CameraService::SOUND_RECORDING_START);
     } else if (cmd == CAMERA_CMD_SET_VIDEO_BUFFER_COUNT) {
         // Silently ignore this command
         return INVALID_OPERATION;
@@ -748,6 +758,16 @@
     return false;
 }
 
+sp<CameraClient> CameraClient::getClientFromCookie(void* user) {
+    String8 cameraId = String8::format("%d", (int)(intptr_t) user);
+    auto clientDescriptor = sCameraService->mActiveClientManager.get(cameraId);
+    if (clientDescriptor != nullptr) {
+        return sp<CameraClient>{
+                static_cast<CameraClient*>(clientDescriptor->getValue().get())};
+    }
+    return sp<CameraClient>{nullptr};
+}
+
 // Callback messages can be dispatched to internal handlers or pass to our
 // client's callback functions, depending on the message type.
 //
@@ -767,7 +787,7 @@
         int32_t ext2, void* user) {
     LOG2("notifyCallback(%d)", msgType);
 
-    sp<CameraClient> client = static_cast<CameraClient*>(getClientFromCookie(user).get());
+    sp<CameraClient> client = getClientFromCookie(user);
     if (client.get() == nullptr) return;
 
     if (!client->lockIfMessageWanted(msgType)) return;
@@ -787,7 +807,7 @@
         const sp<IMemory>& dataPtr, camera_frame_metadata_t *metadata, void* user) {
     LOG2("dataCallback(%d)", msgType);
 
-    sp<CameraClient> client = static_cast<CameraClient*>(getClientFromCookie(user).get());
+    sp<CameraClient> client = getClientFromCookie(user);
     if (client.get() == nullptr) return;
 
     if (!client->lockIfMessageWanted(msgType)) return;
@@ -820,7 +840,7 @@
         int32_t msgType, const sp<IMemory>& dataPtr, void* user) {
     LOG2("dataCallbackTimestamp(%d)", msgType);
 
-    sp<CameraClient> client = static_cast<CameraClient*>(getClientFromCookie(user).get());
+    sp<CameraClient> client = getClientFromCookie(user);
     if (client.get() == nullptr) return;
 
     if (!client->lockIfMessageWanted(msgType)) return;
@@ -837,7 +857,7 @@
 // snapshot taken callback
 void CameraClient::handleShutter(void) {
     if (mPlayShutterSound) {
-        mCameraService->playSound(CameraService::SOUND_SHUTTER);
+        sCameraService->playSound(CameraService::SOUND_SHUTTER);
     }
 
     sp<hardware::ICameraClient> c = mRemoteCallback;
@@ -850,7 +870,7 @@
 
     // Shutters only happen in response to takePicture, so mark device as
     // idle now, until preview is restarted
-    mCameraService->updateProxyDeviceState(
+    sCameraService->updateProxyDeviceState(
         ICameraServiceProxy::CAMERA_STATE_IDLE,
         String8::format("%d", mCameraId));
 
diff --git a/services/camera/libcameraservice/api1/CameraClient.h b/services/camera/libcameraservice/api1/CameraClient.h
index 4f46fc4..91f00e3 100644
--- a/services/camera/libcameraservice/api1/CameraClient.h
+++ b/services/camera/libcameraservice/api1/CameraClient.h
@@ -70,7 +70,8 @@
             bool legacyMode = false);
     ~CameraClient();
 
-    status_t initialize(CameraModule *module);
+    virtual status_t initialize(CameraModule *module) override;
+    virtual status_t initialize(sp<CameraProviderManager> manager) override;
 
     virtual status_t dump(int fd, const Vector<String16>& args);
 
@@ -78,6 +79,9 @@
 
 private:
 
+    template<typename TProviderPtr>
+    status_t initializeImpl(TProviderPtr providerPtr);
+
     // check whether the calling process matches mClientPid.
     status_t                checkPid() const;
     status_t                checkPidAndHardware() const;  // also check mHardware != 0
@@ -98,6 +102,8 @@
     // internal function used by sendCommand to enable/disable shutter sound.
     status_t                enableShutterSound(bool enable);
 
+    static sp<CameraClient>        getClientFromCookie(void* user);
+
     // these are static callback functions
     static void             notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2, void* user);
     static void             dataCallback(int32_t msgType, const sp<IMemory>& dataPtr,
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 9a7839b..6efe4e3 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -911,14 +911,14 @@
             CameraParameters::FALSE);
     }
 
-    char value[PROPERTY_VALUE_MAX];
-    property_get("camera.disable_zsl_mode", value, "0");
-    if (!strcmp(value,"1") || slowJpegMode) {
+    if (slowJpegMode || property_get_bool("camera.disable_zsl_mode", false)) {
         ALOGI("Camera %d: Disabling ZSL mode", cameraId);
         allowZslMode = false;
     } else {
         allowZslMode = true;
     }
+    // TODO (b/34131351): turn ZSL back on after fixing the issue
+    allowZslMode = false;
 
     ALOGI("%s: allowZslMode: %d slowJpegMode %d", __FUNCTION__, allowZslMode, slowJpegMode);
 
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index b4f8e21..a55c23b 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -46,7 +46,7 @@
         const sp<CameraService>& cameraService,
         const sp<hardware::camera2::ICameraDeviceCallbacks>& remoteCallback,
         const String16& clientPackageName,
-        int cameraId,
+        const String8& cameraId,
         int cameraFacing,
         int clientPid,
         uid_t clientUid,
@@ -67,7 +67,7 @@
 CameraDeviceClient::CameraDeviceClient(const sp<CameraService>& cameraService,
         const sp<hardware::camera2::ICameraDeviceCallbacks>& remoteCallback,
         const String16& clientPackageName,
-        int cameraId,
+        const String8& cameraId,
         int cameraFacing,
         int clientPid,
         uid_t clientUid,
@@ -79,22 +79,30 @@
     mRequestIdCounter(0) {
 
     ATRACE_CALL();
-    ALOGI("CameraDeviceClient %d: Opened", cameraId);
+    ALOGI("CameraDeviceClient %s: Opened", cameraId.string());
 }
 
-status_t CameraDeviceClient::initialize(CameraModule *module)
-{
+status_t CameraDeviceClient::initialize(CameraModule *module) {
+    return initializeImpl(module);
+}
+
+status_t CameraDeviceClient::initialize(sp<CameraProviderManager> manager) {
+    return initializeImpl(manager);
+}
+
+template<typename TProviderPtr>
+status_t CameraDeviceClient::initializeImpl(TProviderPtr providerPtr) {
     ATRACE_CALL();
     status_t res;
 
-    res = Camera2ClientBase::initialize(module);
+    res = Camera2ClientBase::initialize(providerPtr);
     if (res != OK) {
         return res;
     }
 
     String8 threadName;
     mFrameProcessor = new FrameProcessorBase(mDevice);
-    threadName = String8::format("CDU-%d-FrameProc", mCameraId);
+    threadName = String8::format("CDU-%s-FrameProc", mCameraIdStr.string());
     mFrameProcessor->run(threadName.string());
 
     mFrameProcessor->registerListener(FRAME_PROCESSOR_LISTENER_MIN_ID,
@@ -138,25 +146,27 @@
     }
 
     if (requests.empty()) {
-        ALOGE("%s: Camera %d: Sent null request. Rejecting request.",
-              __FUNCTION__, mCameraId);
+        ALOGE("%s: Camera %s: Sent null request. Rejecting request.",
+              __FUNCTION__, mCameraIdStr.string());
         return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Empty request list");
     }
 
     List<const CameraMetadata> metadataRequestList;
+    std::list<const SurfaceMap> surfaceMapList;
     submitInfo->mRequestId = mRequestIdCounter;
     uint32_t loopCounter = 0;
 
     for (auto&& request: requests) {
         if (request.mIsReprocess) {
             if (!mInputStream.configured) {
-                ALOGE("%s: Camera %d: no input stream is configured.", __FUNCTION__, mCameraId);
+                ALOGE("%s: Camera %s: no input stream is configured.", __FUNCTION__,
+                        mCameraIdStr.string());
                 return STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
-                        "No input configured for camera %d but request is for reprocessing",
-                        mCameraId);
+                        "No input configured for camera %s but request is for reprocessing",
+                        mCameraIdStr.string());
             } else if (streaming) {
-                ALOGE("%s: Camera %d: streaming reprocess requests not supported.", __FUNCTION__,
-                        mCameraId);
+                ALOGE("%s: Camera %s: streaming reprocess requests not supported.", __FUNCTION__,
+                        mCameraIdStr.string());
                 return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
                         "Repeating reprocess requests not supported");
             }
@@ -164,13 +174,13 @@
 
         CameraMetadata metadata(request.mMetadata);
         if (metadata.isEmpty()) {
-            ALOGE("%s: Camera %d: Sent empty metadata packet. Rejecting request.",
-                   __FUNCTION__, mCameraId);
+            ALOGE("%s: Camera %s: Sent empty metadata packet. Rejecting request.",
+                   __FUNCTION__, mCameraIdStr.string());
             return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
                     "Request settings are empty");
         } else if (request.mSurfaceList.isEmpty()) {
-            ALOGE("%s: Camera %d: Requests must have at least one surface target. "
-                    "Rejecting request.", __FUNCTION__, mCameraId);
+            ALOGE("%s: Camera %s: Requests must have at least one surface target. "
+                    "Rejecting request.", __FUNCTION__, mCameraIdStr.string());
             return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
                     "Request has no output targets");
         }
@@ -182,11 +192,11 @@
         }
 
         /**
-         * Write in the output stream IDs which we calculate from
-         * the capture request's list of surface targets
+         * Write in the output stream IDs and map from stream ID to surface ID
+         * which we calculate from the capture request's list of surface targets
          */
+        SurfaceMap surfaceMap;
         Vector<int32_t> outputStreamIds;
-        outputStreamIds.setCapacity(request.mSurfaceList.size());
         for (sp<Surface> surface : request.mSurfaceList) {
             if (surface == 0) continue;
 
@@ -195,17 +205,23 @@
 
             // Trying to submit request with surface that wasn't created
             if (idx == NAME_NOT_FOUND) {
-                ALOGE("%s: Camera %d: Tried to submit a request with a surface that"
+                ALOGE("%s: Camera %s: Tried to submit a request with a surface that"
                         " we have not called createStream on",
-                        __FUNCTION__, mCameraId);
+                        __FUNCTION__, mCameraIdStr.string());
                 return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
                         "Request targets Surface that is not part of current capture session");
             }
 
-            int streamId = mStreamMap.valueAt(idx);
-            outputStreamIds.push_back(streamId);
-            ALOGV("%s: Camera %d: Appending output stream %d to request",
-                    __FUNCTION__, mCameraId, streamId);
+            const StreamSurfaceId& streamSurfaceId = mStreamMap.valueAt(idx);
+            if (surfaceMap.find(streamSurfaceId.streamId()) == surfaceMap.end()) {
+                surfaceMap[streamSurfaceId.streamId()] = std::vector<size_t>();
+                outputStreamIds.push_back(streamSurfaceId.streamId());
+            }
+            surfaceMap[streamSurfaceId.streamId()].push_back(streamSurfaceId.surfaceId());
+
+            ALOGV("%s: Camera %s: Appending output stream %d surface %d to request",
+                    __FUNCTION__, mCameraIdStr.string(), streamSurfaceId.streamId(),
+                    streamSurfaceId.surfaceId());
         }
 
         metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS, &outputStreamIds[0],
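Judging from how it is indexed above, SurfaceMap behaves like a map from stream ID to the surface IDs attached to that stream (the actual typedef is defined outside this hunk). A small worked example of what the loop builds when surfaces share a stream:

    // Assumed shape (not the literal typedef): streamId -> surface indices within that stream.
    // With surfaces A and B attached to stream 0 and surface C to stream 1:
    //   surfaceMap      == { 0: {0, 1}, 1: {0} }
    //   outputStreamIds == [ 0, 1 ]   // each stream still appears exactly once in the request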
@@ -217,19 +233,22 @@
 
         metadata.update(ANDROID_REQUEST_ID, &(submitInfo->mRequestId), /*size*/1);
         loopCounter++; // loopCounter starts from 1
-        ALOGV("%s: Camera %d: Creating request with ID %d (%d of %zu)",
-              __FUNCTION__, mCameraId, submitInfo->mRequestId, loopCounter, requests.size());
+        ALOGV("%s: Camera %s: Creating request with ID %d (%d of %zu)",
+                __FUNCTION__, mCameraIdStr.string(), submitInfo->mRequestId,
+                loopCounter, requests.size());
 
         metadataRequestList.push_back(metadata);
+        surfaceMapList.push_back(surfaceMap);
     }
     mRequestIdCounter++;
 
     if (streaming) {
-        err = mDevice->setStreamingRequestList(metadataRequestList, &(submitInfo->mLastFrameNumber));
+        err = mDevice->setStreamingRequestList(metadataRequestList, surfaceMapList,
+                &(submitInfo->mLastFrameNumber));
         if (err != OK) {
             String8 msg = String8::format(
-                "Camera %d:  Got error %s (%d) after trying to set streaming request",
-                mCameraId, strerror(-err), err);
+                "Camera %s:  Got error %s (%d) after trying to set streaming request",
+                mCameraIdStr.string(), strerror(-err), err);
             ALOGE("%s: %s", __FUNCTION__, msg.string());
             res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION,
                     msg.string());
@@ -238,11 +257,12 @@
             mStreamingRequestId = submitInfo->mRequestId;
         }
     } else {
-        err = mDevice->captureList(metadataRequestList, &(submitInfo->mLastFrameNumber));
+        err = mDevice->captureList(metadataRequestList, surfaceMapList,
+                &(submitInfo->mLastFrameNumber));
         if (err != OK) {
             String8 msg = String8::format(
-                "Camera %d: Got error %s (%d) after trying to submit capture request",
-                mCameraId, strerror(-err), err);
+                "Camera %s: Got error %s (%d) after trying to submit capture request",
+                mCameraIdStr.string(), strerror(-err), err);
             ALOGE("%s: %s", __FUNCTION__, msg.string());
             res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION,
                     msg.string());
@@ -250,7 +270,7 @@
         ALOGV("%s: requestId = %d ", __FUNCTION__, submitInfo->mRequestId);
     }
 
-    ALOGV("%s: Camera %d: End of function", __FUNCTION__, mCameraId);
+    ALOGV("%s: Camera %s: End of function", __FUNCTION__, mCameraIdStr.string());
     return res;
 }
 
@@ -274,8 +294,8 @@
 
     Mutex::Autolock idLock(mStreamingRequestIdLock);
     if (mStreamingRequestId != requestId) {
-        String8 msg = String8::format("Camera %d: Canceling request ID %d doesn't match "
-                "current request ID %d", mCameraId, requestId, mStreamingRequestId);
+        String8 msg = String8::format("Camera %s: Canceling request ID %d doesn't match "
+                "current request ID %d", mCameraIdStr.string(), requestId, mStreamingRequestId);
         ALOGE("%s: %s", __FUNCTION__, msg.string());
         return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
     }
@@ -283,13 +303,13 @@
     err = mDevice->clearStreamingRequest(lastFrameNumber);
 
     if (err == OK) {
-        ALOGV("%s: Camera %d: Successfully cleared streaming request",
-              __FUNCTION__, mCameraId);
+        ALOGV("%s: Camera %s: Successfully cleared streaming request",
+                __FUNCTION__, mCameraIdStr.string());
         mStreamingRequestId = REQUEST_ID_NONE;
     } else {
         res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
-                "Camera %d: Error clearing streaming request: %s (%d)",
-                mCameraId, strerror(-err), err);
+                "Camera %s: Error clearing streaming request: %s (%d)",
+                mCameraIdStr.string(), strerror(-err), err);
     }
 
     return res;
@@ -302,8 +322,9 @@
 }
 
 binder::Status CameraDeviceClient::endConfigure(bool isConstrainedHighSpeed) {
-    ALOGV("%s: ending configure (%d input stream, %zu output streams)",
-            __FUNCTION__, mInputStream.configured ? 1 : 0, mStreamMap.size());
+    ALOGV("%s: ending configure (%d input stream, %zu output surfaces)",
+            __FUNCTION__, mInputStream.configured ? 1 : 0,
+            mStreamMap.size());
 
     binder::Status res;
     if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
@@ -328,8 +349,8 @@
         }
         if (!isConstrainedHighSpeedSupported) {
             String8 msg = String8::format(
-                "Camera %d: Try to create a constrained high speed configuration on a device"
-                " that doesn't support it.", mCameraId);
+                "Camera %s: Try to create a constrained high speed configuration on a device"
+                " that doesn't support it.", mCameraIdStr.string());
             ALOGE("%s: %s", __FUNCTION__, msg.string());
             return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
                     msg.string());
@@ -338,13 +359,13 @@
 
     status_t err = mDevice->configureStreams(isConstrainedHighSpeed);
     if (err == BAD_VALUE) {
-        String8 msg = String8::format("Camera %d: Unsupported set of inputs/outputs provided",
-                mCameraId);
+        String8 msg = String8::format("Camera %s: Unsupported set of inputs/outputs provided",
+                mCameraIdStr.string());
         ALOGE("%s: %s", __FUNCTION__, msg.string());
         res = STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
     } else if (err != OK) {
-        String8 msg = String8::format("Camera %d: Error configuring streams: %s (%d)",
-                mCameraId, strerror(-err), err);
+        String8 msg = String8::format("Camera %s: Error configuring streams: %s (%d)",
+                mCameraIdStr.string(), strerror(-err), err);
         ALOGE("%s: %s", __FUNCTION__, msg.string());
         res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
     }
@@ -366,7 +387,7 @@
     }
 
     bool isInput = false;
-    ssize_t index = NAME_NOT_FOUND;
+    std::vector<sp<IBinder>> surfaces;
     ssize_t dIndex = NAME_NOT_FOUND;
 
     if (mInputStream.configured && mInputStream.id == streamId) {
@@ -374,26 +395,24 @@
     } else {
         // Guard against trying to delete non-created streams
         for (size_t i = 0; i < mStreamMap.size(); ++i) {
-            if (streamId == mStreamMap.valueAt(i)) {
-                index = i;
+            if (streamId == mStreamMap.valueAt(i).streamId()) {
+                surfaces.push_back(mStreamMap.keyAt(i));
+            }
+        }
+
+        // See if this stream is one of the deferred streams.
+        for (size_t i = 0; i < mDeferredStreams.size(); ++i) {
+            if (streamId == mDeferredStreams[i]) {
+                dIndex = i;
                 break;
             }
         }
 
-        if (index == NAME_NOT_FOUND) {
-            // See if this stream is one of the deferred streams.
-            for (size_t i = 0; i < mDeferredStreams.size(); ++i) {
-                if (streamId == mDeferredStreams[i]) {
-                    dIndex = i;
-                    break;
-                }
-            }
-            if (dIndex == NAME_NOT_FOUND) {
-                String8 msg = String8::format("Camera %d: Invalid stream ID (%d) specified, no such"
-                        " stream created yet", mCameraId, streamId);
-                ALOGW("%s: %s", __FUNCTION__, msg.string());
-                return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
-            }
+        if (surfaces.empty() && dIndex == NAME_NOT_FOUND) {
+            String8 msg = String8::format("Camera %s: Invalid stream ID (%d) specified, no such"
+                    " stream created yet", mCameraIdStr.string(), streamId);
+            ALOGW("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
         }
     }
 
@@ -401,17 +420,21 @@
     status_t err = mDevice->deleteStream(streamId);
 
     if (err != OK) {
-        String8 msg = String8::format("Camera %d: Unexpected error %s (%d) when deleting stream %d",
-                mCameraId, strerror(-err), err, streamId);
+        String8 msg = String8::format("Camera %s: Unexpected error %s (%d) when deleting stream %d",
+                mCameraIdStr.string(), strerror(-err), err, streamId);
         ALOGE("%s: %s", __FUNCTION__, msg.string());
         res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
     } else {
         if (isInput) {
             mInputStream.configured = false;
-        } else if (index != NAME_NOT_FOUND) {
-            mStreamMap.removeItemsAt(index);
         } else {
-            mDeferredStreams.removeItemsAt(dIndex);
+            for (auto& surface : surfaces) {
+                mStreamMap.removeItem(surface);
+            }
+
+            if (dIndex != NAME_NOT_FOUND) {
+                mDeferredStreams.removeItemsAt(dIndex);
+            }
         }
     }
 
@@ -429,14 +452,39 @@
 
     Mutex::Autolock icl(mBinderSerializationLock);
 
-    sp<IGraphicBufferProducer> bufferProducer = outputConfiguration.getGraphicBufferProducer();
-    bool deferredConsumer = bufferProducer == NULL;
+    const std::vector<sp<IGraphicBufferProducer>>& bufferProducers =
+            outputConfiguration.getGraphicBufferProducers();
+    size_t numBufferProducers = bufferProducers.size();
+
+    if (numBufferProducers > MAX_SURFACES_PER_STREAM) {
+        ALOGE("%s: GraphicBufferProducer count %zu for stream exceeds limit of %d",
+              __FUNCTION__, bufferProducers.size(), MAX_SURFACES_PER_STREAM);
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Surface count is too high");
+    }
+    if (numBufferProducers == 0) {
+        ALOGE("%s: GraphicBufferProducer count 0 is not valid", __FUNCTION__);
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Malformed surface");
+    }
+    size_t deferredConsumerCnt = 0;
+    for (auto bufferProducer : bufferProducers) {
+        if (bufferProducer == nullptr) {
+            deferredConsumerCnt++;
+        }
+    }
+    if (deferredConsumerCnt > MAX_DEFERRED_SURFACES) {
+        ALOGE("%s: %zu deferred consumer is not supported", __FUNCTION__, deferredConsumerCnt);
+        return STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
+                "More than %d deferred consumer", MAX_DEFERRED_SURFACES);
+    }
+    bool deferredConsumer = deferredConsumerCnt > 0;
+    bool deferredConsumerOnly = deferredConsumer && numBufferProducers == 1;
     int surfaceType = outputConfiguration.getSurfaceType();
     bool validSurfaceType = ((surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_VIEW) ||
             (surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_TEXTURE));
+
     if (deferredConsumer && !validSurfaceType) {
         ALOGE("%s: Target surface is invalid: bufferProducer = %p, surfaceType = %d.",
-                __FUNCTION__, bufferProducer.get(), surfaceType);
+                __FUNCTION__, bufferProducers[0].get(), surfaceType);
         return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Target Surface is invalid");
     }
 
@@ -444,116 +492,182 @@
         return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
     }
 
+    std::vector<sp<Surface>> surfaces;
+    std::vector<sp<IBinder>> binders;
+    int streamWidth, streamHeight, streamFormat;
     int width, height, format;
+    int32_t streamConsumerUsage;
     int32_t consumerUsage;
-    android_dataspace dataSpace;
+    android_dataspace dataSpace, streamDataSpace;
     status_t err;
 
     // Create stream for deferred surface case.
-    if (deferredConsumer) {
+    if (deferredConsumerOnly) {
         return createDeferredSurfaceStreamLocked(outputConfiguration, newStreamId);
     }
 
-    // Don't create multiple streams for the same target surface
-    {
-        ssize_t index = mStreamMap.indexOfKey(IInterface::asBinder(bufferProducer));
-        if (index != NAME_NOT_FOUND) {
-            String8 msg = String8::format("Camera %d: Surface already has a stream created for it "
-                    "(ID %zd)", mCameraId, index);
-            ALOGW("%s: %s", __FUNCTION__, msg.string());
-            return STATUS_ERROR(CameraService::ERROR_ALREADY_EXISTS, msg.string());
+    bool isFirstSurface = true;
+    streamWidth = -1;
+    streamHeight = -1;
+    streamFormat = -1;
+    streamDataSpace = HAL_DATASPACE_UNKNOWN;
+    streamConsumerUsage = 0;
+
+    for (auto& bufferProducer : bufferProducers) {
+        if (bufferProducer == nullptr) {
+            continue;
         }
-    }
 
-    // HACK b/10949105
-    // Query consumer usage bits to set async operation mode for
-    // GLConsumer using controlledByApp parameter.
-    bool useAsync = false;
-    if ((err = bufferProducer->query(NATIVE_WINDOW_CONSUMER_USAGE_BITS,
-            &consumerUsage)) != OK) {
-        String8 msg = String8::format("Camera %d: Failed to query Surface consumer usage: %s (%d)",
-                mCameraId, strerror(-err), err);
-        ALOGE("%s: %s", __FUNCTION__, msg.string());
-        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
-    }
-    if (consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) {
-        ALOGW("%s: Camera %d with consumer usage flag: 0x%x: Forcing asynchronous mode for stream",
-                __FUNCTION__, mCameraId, consumerUsage);
-        useAsync = true;
-    }
+        // Don't create multiple streams for the same target surface
+        {
+            ssize_t index = mStreamMap.indexOfKey(IInterface::asBinder(bufferProducer));
+            if (index != NAME_NOT_FOUND) {
+                String8 msg = String8::format("Camera %s: Surface already has a stream created for it "
+                        "(ID %zd)", mCameraIdStr.string(), index);
+                ALOGW("%s: %s", __FUNCTION__, msg.string());
+                return STATUS_ERROR(CameraService::ERROR_ALREADY_EXISTS, msg.string());
+            }
+        }
 
-    int32_t disallowedFlags = GraphicBuffer::USAGE_HW_VIDEO_ENCODER |
-                              GRALLOC_USAGE_RENDERSCRIPT;
-    int32_t allowedFlags = GraphicBuffer::USAGE_SW_READ_MASK |
-                           GraphicBuffer::USAGE_HW_TEXTURE |
-                           GraphicBuffer::USAGE_HW_COMPOSER;
-    bool flexibleConsumer = (consumerUsage & disallowedFlags) == 0 &&
-            (consumerUsage & allowedFlags) != 0;
+        // HACK b/10949105
+        // Query consumer usage bits to set async operation mode for
+        // GLConsumer using controlledByApp parameter.
+        bool useAsync = false;
+        if ((err = bufferProducer->query(NATIVE_WINDOW_CONSUMER_USAGE_BITS,
+                &consumerUsage)) != OK) {
+            String8 msg = String8::format("Camera %s: Failed to query Surface consumer usage: %s (%d)",
+                    mCameraIdStr.string(), strerror(-err), err);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+        }
+        if (consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) {
+            ALOGW("%s: Camera %s with consumer usage flag: 0x%x: Forcing asynchronous mode for stream",
+                    __FUNCTION__, mCameraIdStr.string(), consumerUsage);
+            useAsync = true;
+        }
 
-    sp<IBinder> binder = IInterface::asBinder(bufferProducer);
-    sp<Surface> surface = new Surface(bufferProducer, useAsync);
-    ANativeWindow *anw = surface.get();
+        int32_t disallowedFlags = GraphicBuffer::USAGE_HW_VIDEO_ENCODER |
+                                  GRALLOC_USAGE_RENDERSCRIPT;
+        int32_t allowedFlags = GraphicBuffer::USAGE_SW_READ_MASK |
+                               GraphicBuffer::USAGE_HW_TEXTURE |
+                               GraphicBuffer::USAGE_HW_COMPOSER;
+        bool flexibleConsumer = (consumerUsage & disallowedFlags) == 0 &&
+                (consumerUsage & allowedFlags) != 0;
 
-    if ((err = anw->query(anw, NATIVE_WINDOW_WIDTH, &width)) != OK) {
-        String8 msg = String8::format("Camera %d: Failed to query Surface width: %s (%d)",
-                mCameraId, strerror(-err), err);
-        ALOGE("%s: %s", __FUNCTION__, msg.string());
-        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
-    }
-    if ((err = anw->query(anw, NATIVE_WINDOW_HEIGHT, &height)) != OK) {
-        String8 msg = String8::format("Camera %d: Failed to query Surface height: %s (%d)",
-                mCameraId, strerror(-err), err);
-        ALOGE("%s: %s", __FUNCTION__, msg.string());
-        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
-    }
-    if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
-        String8 msg = String8::format("Camera %d: Failed to query Surface format: %s (%d)",
-                mCameraId, strerror(-err), err);
-        ALOGE("%s: %s", __FUNCTION__, msg.string());
-        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
-    }
-    if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE,
-                            reinterpret_cast<int*>(&dataSpace))) != OK) {
-        String8 msg = String8::format("Camera %d: Failed to query Surface dataspace: %s (%d)",
-                mCameraId, strerror(-err), err);
-        ALOGE("%s: %s", __FUNCTION__, msg.string());
-        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
-    }
+        sp<IBinder> binder = IInterface::asBinder(bufferProducer);
+        sp<Surface> surface = new Surface(bufferProducer, useAsync);
+        ANativeWindow *anw = surface.get();
 
-    // FIXME: remove this override since the default format should be
-    //       IMPLEMENTATION_DEFINED. b/9487482
-    if (format >= HAL_PIXEL_FORMAT_RGBA_8888 &&
-        format <= HAL_PIXEL_FORMAT_BGRA_8888) {
-        ALOGW("%s: Camera %d: Overriding format %#x to IMPLEMENTATION_DEFINED",
-              __FUNCTION__, mCameraId, format);
-        format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
-    }
+        if ((err = anw->query(anw, NATIVE_WINDOW_WIDTH, &width)) != OK) {
+            String8 msg = String8::format("Camera %s: Failed to query Surface width: %s (%d)",
+                     mCameraIdStr.string(), strerror(-err), err);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+        }
+        if ((err = anw->query(anw, NATIVE_WINDOW_HEIGHT, &height)) != OK) {
+            String8 msg = String8::format("Camera %s: Failed to query Surface height: %s (%d)",
+                    mCameraIdStr.string(), strerror(-err), err);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+        }
+        if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
+            String8 msg = String8::format("Camera %s: Failed to query Surface format: %s (%d)",
+                    mCameraIdStr.string(), strerror(-err), err);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+        }
+        if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE,
+                                reinterpret_cast<int*>(&dataSpace))) != OK) {
+            String8 msg = String8::format("Camera %s: Failed to query Surface dataspace: %s (%d)",
+                    mCameraIdStr.string(), strerror(-err), err);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+        }
 
-    // Round dimensions to the nearest dimensions available for this format
-    if (flexibleConsumer && isPublicFormat(format) &&
-            !CameraDeviceClient::roundBufferDimensionNearest(width, height,
-            format, dataSpace, mDevice->info(), /*out*/&width, /*out*/&height)) {
-        String8 msg = String8::format("Camera %d: No supported stream configurations with "
-                "format %#x defined, failed to create output stream", mCameraId, format);
-        ALOGE("%s: %s", __FUNCTION__, msg.string());
-        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+        // FIXME: remove this override since the default format should be
+        //       IMPLEMENTATION_DEFINED. b/9487482
+        if (format >= HAL_PIXEL_FORMAT_RGBA_8888 &&
+            format <= HAL_PIXEL_FORMAT_BGRA_8888) {
+            ALOGW("%s: Camera %s: Overriding format %#x to IMPLEMENTATION_DEFINED",
+                  __FUNCTION__, mCameraIdStr.string(), format);
+            format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+        }
+        // Round dimensions to the nearest dimensions available for this format
+        if (flexibleConsumer && isPublicFormat(format) &&
+                !CameraDeviceClient::roundBufferDimensionNearest(width, height,
+                format, dataSpace, mDevice->info(), /*out*/&width, /*out*/&height)) {
+            String8 msg = String8::format("Camera %s: No supported stream configurations with "
+                    "format %#x defined, failed to create output stream",
+                    mCameraIdStr.string(), format);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+        }
+        if (isFirstSurface) {
+            streamWidth = width;
+            streamHeight = height;
+            streamFormat = format;
+            streamDataSpace = dataSpace;
+            streamConsumerUsage = consumerUsage;
+            isFirstSurface = false;
+        }
+        if (width != streamWidth) {
+            String8 msg = String8::format("Camera %s:Surface width doesn't match: %d vs %d",
+                     mCameraIdStr.string(), width, streamWidth);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+        }
+        if (height != streamHeight) {
+            String8 msg = String8::format("Camera %s:Surface height doesn't match: %d vs %d",
+                     mCameraIdStr.string(), height, streamHeight);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+        }
+        if (format != streamFormat) {
+            String8 msg = String8::format("Camera %s:Surface format doesn't match: %d vs %d",
+                     mCameraIdStr.string(), format, streamFormat);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+        }
+        if (dataSpace != streamDataSpace) {
+            String8 msg = String8::format("Camera %s:Surface dataSpace doesn't match: %d vs %d",
+                     mCameraIdStr.string(), dataSpace, streamDataSpace);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+        }
+        // At the native side, there isn't a way to check whether two surfaces come from the same
+        // surface class type. Use the usage flag to approximate the comparison.
+        // TODO: Support surfaces of different surface class types.
+        if (consumerUsage != streamConsumerUsage) {
+            String8 msg = String8::format(
+                    "Camera %s:Surface usage flag doesn't match 0x%x vs 0x%x",
+                    mCameraIdStr.string(), consumerUsage, streamConsumerUsage);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+        }
+
+        binders.push_back(binder);
+        surfaces.push_back(surface);
     }
 
     int streamId = camera3::CAMERA3_STREAM_ID_INVALID;
-    err = mDevice->createStream(surface, width, height, format, dataSpace,
+    err = mDevice->createStream(surfaces, deferredConsumer, width, height, format, dataSpace,
             static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
             &streamId, outputConfiguration.getSurfaceSetID());
 
     if (err != OK) {
         res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
-                "Camera %d: Error creating output stream (%d x %d, fmt %x, dataSpace %x): %s (%d)",
-                mCameraId, width, height, format, dataSpace, strerror(-err), err);
+                "Camera %s: Error creating output stream (%d x %d, fmt %x, dataSpace %x): %s (%d)",
+                mCameraIdStr.string(), width, height, format, dataSpace, strerror(-err), err);
     } else {
-        mStreamMap.add(binder, streamId);
-
-        ALOGV("%s: Camera %d: Successfully created a new stream ID %d for output surface"
-                " (%d x %d) with format 0x%x.",
-              __FUNCTION__, mCameraId, streamId, width, height, format);
+        int i = 0;
+        for (auto& binder : binders) {
+            ALOGV("%s: mStreamMap add binder %p streamId %d, surfaceId %d",
+                    __FUNCTION__, binder.get(), streamId, i);
+            mStreamMap.add(binder, StreamSurfaceId(streamId, i++));
+        }
+        ALOGV("%s: Camera %s: Successfully created a new stream ID %d for output surface"
+                    " (%d x %d) with format 0x%x.",
+                  __FUNCTION__, mCameraIdStr.string(), streamId, width, height, format);
 
         // Set transform flags to ensure preview to be rotated correctly.
         res = setStreamTransformLocked(streamId);
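
The loop above requires every Surface attached to a shared stream to report the same width, height, format, dataSpace, and usage before any binder is recorded for the stream. A minimal standalone sketch of that consistency check, assuming plain std types in place of the Android Surface/ANativeWindow machinery (all names below are illustrative):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Hypothetical stand-in for the properties queried from each ANativeWindow.
    struct SurfaceProps {
        int32_t width, height, format, dataSpace;
        uint32_t usage;
    };

    // True when every surface matches the first one, mirroring the per-field
    // checks performed before the shared output stream is created.
    bool surfacesAreConsistent(const std::vector<SurfaceProps>& surfaces) {
        if (surfaces.empty()) return false;
        const SurfaceProps& ref = surfaces[0];
        for (size_t i = 1; i < surfaces.size(); ++i) {
            const SurfaceProps& s = surfaces[i];
            if (s.width != ref.width || s.height != ref.height ||
                s.format != ref.format || s.dataSpace != ref.dataSpace ||
                s.usage != ref.usage) {
                std::fprintf(stderr, "surface %zu does not match surface 0\n", i);
                return false;
            }
        }
        return true;
    }
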
@@ -590,23 +704,25 @@
         consumerUsage |= GraphicBuffer::USAGE_HW_COMPOSER;
     }
     int streamId = camera3::CAMERA3_STREAM_ID_INVALID;
-    err = mDevice->createStream(/*surface*/nullptr, width, height, format, dataSpace,
+    std::vector<sp<Surface>> noSurface;
+    err = mDevice->createStream(noSurface, /*hasDeferredConsumer*/true, width,
+            height, format, dataSpace,
             static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
             &streamId, outputConfiguration.getSurfaceSetID(), consumerUsage);
 
     if (err != OK) {
         res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
-                "Camera %d: Error creating output stream (%d x %d, fmt %x, dataSpace %x): %s (%d)",
-                mCameraId, width, height, format, dataSpace, strerror(-err), err);
+                "Camera %s: Error creating output stream (%d x %d, fmt %x, dataSpace %x): %s (%d)",
+                mCameraIdStr.string(), width, height, format, dataSpace, strerror(-err), err);
     } else {
         // Can not add streamId to mStreamMap here, as the surface is deferred. Add it to
         // a separate list to track. Once the deferred surface is set, this id will be
         // relocated to mStreamMap.
         mDeferredStreams.push_back(streamId);
 
-        ALOGV("%s: Camera %d: Successfully created a new stream ID %d for a deferred surface"
+        ALOGV("%s: Camera %s: Successfully created a new stream ID %d for a deferred surface"
                 " (%d x %d) stream with format 0x%x.",
-              __FUNCTION__, mCameraId, streamId, width, height, format);
+              __FUNCTION__, mCameraIdStr.string(), streamId, width, height, format);
 
         // Set transform flags to ensure preview to be rotated correctly.
         res = setStreamTransformLocked(streamId);
@@ -661,8 +777,8 @@
     }
 
     if (mInputStream.configured) {
-        String8 msg = String8::format("Camera %d: Already has an input stream "
-                "configured (ID %zd)", mCameraId, mInputStream.id);
+        String8 msg = String8::format("Camera %s: Already has an input stream "
+                "configured (ID %zd)", mCameraIdStr.string(), mInputStream.id);
         ALOGE("%s: %s", __FUNCTION__, msg.string() );
         return STATUS_ERROR(CameraService::ERROR_ALREADY_EXISTS, msg.string());
     }
@@ -676,13 +792,13 @@
         mInputStream.format = format;
         mInputStream.id = streamId;
 
-        ALOGV("%s: Camera %d: Successfully created a new input stream ID %d",
-                __FUNCTION__, mCameraId, streamId);
+        ALOGV("%s: Camera %s: Successfully created a new input stream ID %d",
+                __FUNCTION__, mCameraIdStr.string(), streamId);
 
         *newStreamId = streamId;
     } else {
         res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
-                "Camera %d: Error creating new input stream: %s (%d)", mCameraId,
+                "Camera %s: Error creating new input stream: %s (%d)", mCameraIdStr.string(),
                 strerror(-err), err);
     }
 
@@ -706,8 +822,8 @@
     status_t err = mDevice->getInputBufferProducer(&producer);
     if (err != OK) {
         res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
-                "Camera %d: Error getting input Surface: %s (%d)",
-                mCameraId, strerror(-err), err);
+                "Camera %s: Error getting input Surface: %s (%d)",
+                mCameraIdStr.string(), strerror(-err), err);
     } else {
         inputSurface->name = String16("CameraInput");
         inputSurface->graphicBufferProducer = producer;
@@ -828,13 +944,13 @@
         request->swap(metadata);
     } else if (err == BAD_VALUE) {
         res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
-                "Camera %d: Template ID %d is invalid or not supported: %s (%d)",
-                mCameraId, templateId, strerror(-err), err);
+                "Camera %s: Template ID %d is invalid or not supported: %s (%d)",
+                mCameraIdStr.string(), templateId, strerror(-err), err);
 
     } else {
         res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
-                "Camera %d: Error creating default request for template %d: %s (%d)",
-                mCameraId, templateId, strerror(-err), err);
+                "Camera %s: Error creating default request for template %d: %s (%d)",
+                mCameraIdStr.string(), templateId, strerror(-err), err);
     }
     return res;
 }
@@ -882,16 +998,16 @@
     Mutex::Autolock idLock(mStreamingRequestIdLock);
     if (mStreamingRequestId != REQUEST_ID_NONE) {
         String8 msg = String8::format(
-            "Camera %d: Try to waitUntilIdle when there are active streaming requests",
-            mCameraId);
+            "Camera %s: Try to waitUntilIdle when there are active streaming requests",
+            mCameraIdStr.string());
         ALOGE("%s: %s", __FUNCTION__, msg.string());
         return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
     }
     status_t err = mDevice->waitUntilDrained();
     if (err != OK) {
         res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
-                "Camera %d: Error waiting to drain: %s (%d)",
-                mCameraId, strerror(-err), err);
+                "Camera %s: Error waiting to drain: %s (%d)",
+                mCameraIdStr.string(), strerror(-err), err);
     }
     ALOGV("%s Done", __FUNCTION__);
     return res;
@@ -917,7 +1033,7 @@
     status_t err = mDevice->flush(lastFrameNumber);
     if (err != OK) {
         res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
-                "Camera %d: Error flushing device: %s (%d)", mCameraId, strerror(-err), err);
+                "Camera %s: Error flushing device: %s (%d)", mCameraIdStr.string(), strerror(-err), err);
     }
     return res;
 }
@@ -934,15 +1050,15 @@
     // Guard against trying to prepare non-created streams
     ssize_t index = NAME_NOT_FOUND;
     for (size_t i = 0; i < mStreamMap.size(); ++i) {
-        if (streamId == mStreamMap.valueAt(i)) {
+        if (streamId == mStreamMap.valueAt(i).streamId()) {
             index = i;
             break;
         }
     }
 
     if (index == NAME_NOT_FOUND) {
-        String8 msg = String8::format("Camera %d: Invalid stream ID (%d) specified, no stream "
-              "with that ID exists", mCameraId, streamId);
+        String8 msg = String8::format("Camera %s: Invalid stream ID (%d) specified, no stream "
+              "with that ID exists", mCameraIdStr.string(), streamId);
         ALOGW("%s: %s", __FUNCTION__, msg.string());
         return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
     }
@@ -952,11 +1068,11 @@
     status_t err = mDevice->prepare(streamId);
     if (err == BAD_VALUE) {
         res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
-                "Camera %d: Stream %d has already been used, and cannot be prepared",
-                mCameraId, streamId);
+                "Camera %s: Stream %d has already been used, and cannot be prepared",
+                mCameraIdStr.string(), streamId);
     } else if (err != OK) {
         res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
-                "Camera %d: Error preparing stream %d: %s (%d)", mCameraId, streamId,
+                "Camera %s: Error preparing stream %d: %s (%d)", mCameraIdStr.string(), streamId,
                 strerror(-err), err);
     }
     return res;
@@ -974,22 +1090,22 @@
     // Guard against trying to prepare non-created streams
     ssize_t index = NAME_NOT_FOUND;
     for (size_t i = 0; i < mStreamMap.size(); ++i) {
-        if (streamId == mStreamMap.valueAt(i)) {
+        if (streamId == mStreamMap.valueAt(i).streamId()) {
             index = i;
             break;
         }
     }
 
     if (index == NAME_NOT_FOUND) {
-        String8 msg = String8::format("Camera %d: Invalid stream ID (%d) specified, no stream "
-              "with that ID exists", mCameraId, streamId);
+        String8 msg = String8::format("Camera %s: Invalid stream ID (%d) specified, no stream "
+              "with that ID exists", mCameraIdStr.string(), streamId);
         ALOGW("%s: %s", __FUNCTION__, msg.string());
         return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
     }
 
     if (maxCount <= 0) {
-        String8 msg = String8::format("Camera %d: maxCount (%d) must be greater than 0",
-                mCameraId, maxCount);
+        String8 msg = String8::format("Camera %s: maxCount (%d) must be greater than 0",
+                mCameraIdStr.string(), maxCount);
         ALOGE("%s: %s", __FUNCTION__, msg.string());
         return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
     }
@@ -999,11 +1115,11 @@
     status_t err = mDevice->prepare(maxCount, streamId);
     if (err == BAD_VALUE) {
         res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
-                "Camera %d: Stream %d has already been used, and cannot be prepared",
-                mCameraId, streamId);
+                "Camera %s: Stream %d has already been used, and cannot be prepared",
+                mCameraIdStr.string(), streamId);
     } else if (err != OK) {
         res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
-                "Camera %d: Error preparing stream %d: %s (%d)", mCameraId, streamId,
+                "Camera %s: Error preparing stream %d: %s (%d)", mCameraIdStr.string(), streamId,
                 strerror(-err), err);
     }
 
@@ -1022,15 +1138,15 @@
     // Guard against trying to prepare non-created streams
     ssize_t index = NAME_NOT_FOUND;
     for (size_t i = 0; i < mStreamMap.size(); ++i) {
-        if (streamId == mStreamMap.valueAt(i)) {
+        if (streamId == mStreamMap.valueAt(i).streamId()) {
             index = i;
             break;
         }
     }
 
     if (index == NAME_NOT_FOUND) {
-        String8 msg = String8::format("Camera %d: Invalid stream ID (%d) specified, no stream "
-              "with that ID exists", mCameraId, streamId);
+        String8 msg = String8::format("Camera %s: Invalid stream ID (%d) specified, no stream "
+              "with that ID exists", mCameraIdStr.string(), streamId);
         ALOGW("%s: %s", __FUNCTION__, msg.string());
         return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
     }
@@ -1040,11 +1156,11 @@
     status_t err = mDevice->tearDown(streamId);
     if (err == BAD_VALUE) {
         res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
-                "Camera %d: Stream %d is still in use, cannot be torn down",
-                mCameraId, streamId);
+                "Camera %s: Stream %d is still in use, cannot be torn down",
+                mCameraIdStr.string(), streamId);
     } else if (err != OK) {
         res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
-                "Camera %d: Error tearing down stream %d: %s (%d)", mCameraId, streamId,
+                "Camera %s: Error tearing down stream %d: %s (%d)", mCameraIdStr.string(), streamId,
                 strerror(-err), err);
     }
 
@@ -1060,26 +1176,42 @@
 
     Mutex::Autolock icl(mBinderSerializationLock);
 
-    sp<IGraphicBufferProducer> bufferProducer = outputConfiguration.getGraphicBufferProducer();
+    const std::vector<sp<IGraphicBufferProducer> >& bufferProducers =
+            outputConfiguration.getGraphicBufferProducers();
 
     // Client code should guarantee that the surface is from SurfaceView or SurfaceTexture.
-    if (bufferProducer == NULL) {
-        ALOGE("%s: bufferProducer must not be null", __FUNCTION__);
+    // That surface is also saved as the last entry of the graphicBufferProducer list.
+    if (bufferProducers.size() == 0) {
+        ALOGE("%s: bufferProducers must not be empty", __FUNCTION__);
         return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Target Surface is invalid");
     }
-    // Check if this stram id is one of the deferred streams
-    ssize_t index = NAME_NOT_FOUND;
-    for (size_t i = 0; i < mDeferredStreams.size(); i++) {
-        if (streamId == mDeferredStreams[i]) {
-            index = i;
-            break;
-        }
+
+    // Right now, only the first surface in the OutputConfiguration is allowed to be
+    // deferred, and all other surfaces are checked to be the same (not null) on
+    // the Java side.
+    sp<IGraphicBufferProducer> bufferProducer = bufferProducers[0];
+    if (bufferProducer == nullptr) {
+        ALOGE("%s: bufferProducer must not be null", __FUNCTION__);
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+                "Target Surface is invalid");
     }
-    if (index == NAME_NOT_FOUND) {
-        String8 msg = String8::format("Camera %d: deferred surface is set to a unknown stream"
-                "(ID %d)", mCameraId, streamId);
-        ALOGW("%s: %s", __FUNCTION__, msg.string());
-        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+
+    // Check if this stream id is one of the deferred-only streams
+    ssize_t index = NAME_NOT_FOUND;
+    if (bufferProducers.size() == 1) {
+        for (size_t i = 0; i < mDeferredStreams.size(); i++) {
+            if (streamId == mDeferredStreams[i]) {
+                index = i;
+                break;
+            }
+        }
+
+        if (index == NAME_NOT_FOUND) {
+            String8 msg = String8::format("Camera %s: deferred surface is set to a unknown stream"
+                    "(ID %d)", mCameraIdStr.string(), streamId);
+            ALOGW("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+        }
     }
 
     if (!mDevice.get()) {
@@ -1090,8 +1222,8 @@
     {
         ssize_t index = mStreamMap.indexOfKey(IInterface::asBinder(bufferProducer));
         if (index != NAME_NOT_FOUND) {
-            String8 msg = String8::format("Camera %d: Surface already has a stream created "
-                    " for it (ID %zd)", mCameraId, index);
+            String8 msg = String8::format("Camera %s: Surface already has a stream created "
+                    " for it (ID %zd)", mCameraIdStr.string(), index);
             ALOGW("%s: %s", __FUNCTION__, msg.string());
             return STATUS_ERROR(CameraService::ERROR_ALREADY_EXISTS, msg.string());
         }
@@ -1106,16 +1238,20 @@
     err = mDevice->setConsumerSurface(streamId, consumerSurface);
     if (err == OK) {
         sp<IBinder> binder = IInterface::asBinder(bufferProducer);
-        mStreamMap.add(binder, streamId);
-        mDeferredStreams.removeItemsAt(index);
+        ALOGV("%s: mStreamMap add binder %p streamId %d, surfaceId %zu", __FUNCTION__,
+                binder.get(), streamId, bufferProducers.size()-1);
+        mStreamMap.add(binder, StreamSurfaceId(streamId, bufferProducers.size()-1));
+        if (index != NAME_NOT_FOUND) {
+            mDeferredStreams.removeItemsAt(index);
+        }
     } else if (err == NO_INIT) {
         res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
-                "Camera %d: Deferred surface is invalid: %s (%d)",
-                mCameraId, strerror(-err), err);
+                "Camera %s: Deferred surface is invalid: %s (%d)",
+                mCameraIdStr.string(), strerror(-err), err);
     } else {
         res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
-                "Camera %d: Error setting output stream deferred surface: %s (%d)",
-                mCameraId, strerror(-err), err);
+                "Camera %s: Error setting output stream deferred surface: %s (%d)",
+                mCameraIdStr.string(), strerror(-err), err);
     }
 
     return res;
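
For reference, a small sketch of the bookkeeping above: once a real consumer is supplied, the binder is mapped to its (streamId, surfaceId) pair and the stream leaves the deferred list. The sketch assumes std containers in place of KeyedVector/Vector and a string key in place of the IBinder (illustrative names only):

    #include <algorithm>
    #include <cstdint>
    #include <map>
    #include <string>
    #include <vector>

    struct StreamSurfaceId { int32_t streamId; int32_t surfaceId; };

    // Record the surface against its stream and drop the stream from the
    // deferred list, if it was waiting for a consumer.
    void attachDeferredSurface(std::map<std::string, StreamSurfaceId>& streamMap,
                               std::vector<int32_t>& deferredStreams,
                               const std::string& binderKey,
                               int32_t streamId, int32_t surfaceId) {
        streamMap[binderKey] = {streamId, surfaceId};
        auto it = std::find(deferredStreams.begin(), deferredStreams.end(), streamId);
        if (it != deferredStreams.end()) {
            deferredStreams.erase(it);
        }
    }
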
@@ -1127,8 +1263,8 @@
 
 status_t CameraDeviceClient::dumpClient(int fd, const Vector<String16>& args) {
     String8 result;
-    result.appendFormat("CameraDeviceClient[%d] (%p) dump:\n",
-            mCameraId,
+    result.appendFormat("CameraDeviceClient[%s] (%p) dump:\n",
+            mCameraIdStr.string(),
             (getRemoteCallback() != NULL ?
                     IInterface::asBinder(getRemoteCallback()).get() : NULL) );
     result.appendFormat("  Current client UID %u\n", mClientUid);
@@ -1142,9 +1278,11 @@
         result.append("    No input stream configured.\n");
     }
     if (!mStreamMap.isEmpty()) {
-        result.append("    Current output stream IDs:\n");
+        result.append("    Current output stream/surface IDs:\n");
         for (size_t i = 0; i < mStreamMap.size(); i++) {
-            result.appendFormat("      Stream %d\n", mStreamMap.valueAt(i));
+            result.appendFormat("      Stream %d Surface %d\n",
+                                mStreamMap.valueAt(i).streamId(),
+                                mStreamMap.valueAt(i).surfaceId());
         }
     } else if (!mDeferredStreams.isEmpty()) {
         result.append("    Current deferred surface output stream IDs:\n");
@@ -1210,18 +1348,26 @@
     }
 }
 
+void CameraDeviceClient::notifyRequestQueueEmpty() {
+    // Thread safe. Don't bother locking.
+    sp<hardware::camera2::ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
+    if (remoteCb != 0) {
+        remoteCb->onRequestQueueEmpty();
+    }
+}
+
 void CameraDeviceClient::detachDevice() {
     if (mDevice == 0) return;
 
-    ALOGV("Camera %d: Stopping processors", mCameraId);
+    ALOGV("Camera %s: Stopping processors", mCameraIdStr.string());
 
     mFrameProcessor->removeListener(FRAME_PROCESSOR_LISTENER_MIN_ID,
                                     FRAME_PROCESSOR_LISTENER_MAX_ID,
                                     /*listener*/this);
     mFrameProcessor->requestExit();
-    ALOGV("Camera %d: Waiting for threads", mCameraId);
+    ALOGV("Camera %s: Waiting for threads", mCameraIdStr.string());
     mFrameProcessor->join();
-    ALOGV("Camera %d: Disconnecting device", mCameraId);
+    ALOGV("Camera %s: Disconnecting device", mCameraIdStr.string());
 
     // WORKAROUND: HAL refuses to disconnect while there's streams in flight
     {
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index de283ea..047ccf2 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -42,7 +42,7 @@
     CameraDeviceClientBase(const sp<CameraService>& cameraService,
             const sp<hardware::camera2::ICameraDeviceCallbacks>& remoteCallback,
             const String16& clientPackageName,
-            int cameraId,
+            const String8& cameraId,
             int cameraFacing,
             int clientPid,
             uid_t clientUid,
@@ -142,14 +142,15 @@
     CameraDeviceClient(const sp<CameraService>& cameraService,
             const sp<hardware::camera2::ICameraDeviceCallbacks>& remoteCallback,
             const String16& clientPackageName,
-            int cameraId,
+            const String8& cameraId,
             int cameraFacing,
             int clientPid,
             uid_t clientUid,
             int servicePid);
     virtual ~CameraDeviceClient();
 
-    virtual status_t      initialize(CameraModule *module);
+    virtual status_t      initialize(CameraModule *module) override;
+    virtual status_t      initialize(sp<CameraProviderManager> manager) override;
 
     virtual status_t      dump(int fd, const Vector<String16>& args);
 
@@ -164,6 +165,7 @@
                              const CaptureResultExtras& resultExtras);
     virtual void notifyShutter(const CaptureResultExtras& resultExtras, nsecs_t timestamp);
     virtual void notifyPrepared(int streamId);
+    virtual void notifyRequestQueueEmpty();
     virtual void notifyRepeatingRequestError(long lastFrameNumber);
 
     /**
@@ -178,6 +180,34 @@
     status_t              getRotationTransformLocked(/*out*/int32_t* transform);
 
 private:
+    // StreamSurfaceId encapsulates streamId + surfaceId for a particular surface.
+    // streamId specifies the ID of the stream the surface belongs to, and
+    // surfaceId specifies the index of the surface within that stream (one
+    // stream could contain multiple surfaces).
+    class StreamSurfaceId final {
+    public:
+        StreamSurfaceId() {
+            mStreamId = -1;
+            mSurfaceId = -1;
+        }
+        StreamSurfaceId(int32_t streamId, int32_t surfaceId) {
+            mStreamId = streamId;
+            mSurfaceId = surfaceId;
+        }
+        int32_t streamId() const {
+            return mStreamId;
+        }
+        int32_t surfaceId() const {
+            return mSurfaceId;
+        }
+
+    private:
+        int32_t mStreamId;
+        int32_t mSurfaceId;
+
+    }; // class StreamSurfaceId
+
+private:
     /** ICameraDeviceUser interface-related private members */
 
     /** Preview callback related members */
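
A minimal sketch of how the new binder-to-StreamSurfaceId mapping is consulted, with std::map and a string key standing in for KeyedVector and IBinder (names are illustrative):

    #include <cstdint>
    #include <map>
    #include <string>

    struct StreamSurfaceId { int32_t streamId = -1; int32_t surfaceId = -1; };

    // Mirrors the scans in prepare()/tearDown() above that look for any map
    // entry whose streamId matches the requested stream.
    bool streamExists(const std::map<std::string, StreamSurfaceId>& streamMap,
                      int32_t streamId) {
        for (const auto& entry : streamMap) {
            if (entry.second.streamId == streamId) return true;
        }
        return false;
    }
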
@@ -185,6 +215,9 @@
     static const int32_t FRAME_PROCESSOR_LISTENER_MIN_ID = 0;
     static const int32_t FRAME_PROCESSOR_LISTENER_MAX_ID = 0x7fffffffL;
 
+    template<typename TProviderPtr>
+    status_t      initializeImpl(TProviderPtr providerPtr);
+
     /** Utility members */
     binder::Status checkPidStatus(const char* checkLocation);
     bool enforceRequestPermissions(CameraMetadata& metadata);
@@ -211,8 +244,8 @@
     //check if format is not custom format
     static bool isPublicFormat(int32_t format);
 
-    // IGraphicsBufferProducer binder -> Stream ID for output streams
-    KeyedVector<sp<IBinder>, int> mStreamMap;
+    // IGraphicBufferProducer binder -> Stream ID + Surface ID for output streams
+    KeyedVector<sp<IBinder>, StreamSurfaceId> mStreamMap;
 
     struct InputStreamConfiguration {
         bool configured;
@@ -233,6 +266,9 @@
     // as there are no surfaces available and can not be put into mStreamMap. Once the deferred
     // Surface is configured, the stream id will be moved to mStreamMap.
     Vector<int32_t> mDeferredStreams;
+
+    static const int32_t MAX_SURFACES_PER_STREAM = 2;
+    static const int32_t MAX_DEFERRED_SURFACES = 1;
 };
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index ccd1e4d..93a584b 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -47,7 +47,7 @@
         const sp<CameraService>& cameraService,
         const sp<TCamCallbacks>& remoteCallback,
         const String16& clientPackageName,
-        int cameraId,
+        const String8& cameraId,
         int cameraFacing,
         int clientPid,
         uid_t clientUid,
@@ -55,10 +55,10 @@
         TClientBase(cameraService, remoteCallback, clientPackageName,
                 cameraId, cameraFacing, clientPid, clientUid, servicePid),
         mSharedCameraCallbacks(remoteCallback),
-        mDeviceVersion(cameraService->getDeviceVersion(cameraId)),
+        mDeviceVersion(cameraService->getDeviceVersion(TClientBase::mCameraIdStr)),
         mDeviceActive(false)
 {
-    ALOGI("Camera %d: Opened. Client: %s (PID %d, UID %d)", cameraId,
+    ALOGI("Camera %s: Opened. Client: %s (PID %d, UID %d)", cameraId.string(),
             String8(clientPackageName).string(), clientPid, clientUid);
 
     mInitialClientPid = clientPid;
@@ -80,9 +80,20 @@
 
 template <typename TClientBase>
 status_t Camera2ClientBase<TClientBase>::initialize(CameraModule *module) {
+    return initializeImpl(module);
+}
+
+template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::initialize(sp<CameraProviderManager> manager) {
+    return initializeImpl(manager);
+}
+
+template <typename TClientBase>
+template <typename TProviderPtr>
+status_t Camera2ClientBase<TClientBase>::initializeImpl(TProviderPtr providerPtr) {
     ATRACE_CALL();
-    ALOGV("%s: Initializing client for camera %d", __FUNCTION__,
-          TClientBase::mCameraId);
+    ALOGV("%s: Initializing client for camera %s", __FUNCTION__,
+          TClientBase::mCameraIdStr.string());
     status_t res;
 
     // Verify ops permissions
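
The two initialize() overloads above forward to a single private template, so the legacy CameraModule path and the new CameraProviderManager path share one body. A standalone sketch of that dispatch pattern, assuming illustrative types rather than the real camera classes:

    #include <cstdio>
    #include <memory>

    struct LegacyModule {};
    struct ProviderManager {};

    class Client {
    public:
        int initialize(LegacyModule* mod)                    { return initializeImpl(mod); }
        int initialize(std::shared_ptr<ProviderManager> mgr) { return initializeImpl(mgr); }
    private:
        // One templated body serves both entry points, so the shared setup
        // logic is written only once.
        template <typename TProviderPtr>
        int initializeImpl(TProviderPtr /*provider*/) {
            std::puts("common initialization path");
            return 0;
        }
    };
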
@@ -92,15 +103,15 @@
     }
 
     if (mDevice == NULL) {
-        ALOGE("%s: Camera %d: No device connected",
-                __FUNCTION__, TClientBase::mCameraId);
+        ALOGE("%s: Camera %s: No device connected",
+                __FUNCTION__, TClientBase::mCameraIdStr.string());
         return NO_INIT;
     }
 
-    res = mDevice->initialize(module);
+    res = mDevice->initialize(providerPtr);
     if (res != OK) {
-        ALOGE("%s: Camera %d: unable to initialize device: %s (%d)",
-                __FUNCTION__, TClientBase::mCameraId, strerror(-res), res);
+        ALOGE("%s: Camera %s: unable to initialize device: %s (%d)",
+                __FUNCTION__, TClientBase::mCameraIdStr.string(), strerror(-res), res);
         return res;
     }
 
@@ -118,8 +129,8 @@
 
     disconnect();
 
-    ALOGI("Closed Camera %d. Client was: %s (PID %d, UID %u)",
-            TClientBase::mCameraId,
+    ALOGI("Closed Camera %s. Client was: %s (PID %d, UID %u)",
+            TClientBase::mCameraIdStr.string(),
             String8(TClientBase::mClientPackageName).string(),
             mInitialClientPid, TClientBase::mClientUid);
 }
@@ -128,8 +139,8 @@
 status_t Camera2ClientBase<TClientBase>::dumpClient(int fd,
                                               const Vector<String16>& args) {
     String8 result;
-    result.appendFormat("Camera2ClientBase[%d] (%p) PID: %d, dump:\n",
-            TClientBase::mCameraId,
+    result.appendFormat("Camera2ClientBase[%s] (%p) PID: %d, dump:\n",
+            TClientBase::mCameraIdStr.string(),
             (TClientBase::getRemoteCallback() != NULL ?
                     IInterface::asBinder(TClientBase::getRemoteCallback()).get() : NULL),
             TClientBase::mClientPid);
@@ -180,13 +191,13 @@
     if (callingPid != TClientBase::mClientPid &&
         callingPid != TClientBase::mServicePid) return res;
 
-    ALOGV("Camera %d: Shutting down", TClientBase::mCameraId);
+    ALOGV("Camera %s: Shutting down", TClientBase::mCameraIdStr.string());
 
     detachDevice();
 
     CameraService::BasicClient::disconnect();
 
-    ALOGV("Camera %d: Shut down complete complete", TClientBase::mCameraId);
+    ALOGV("Camera %s: Shut down complete complete", TClientBase::mCameraIdStr.string());
 
     return res;
 }
@@ -198,7 +209,7 @@
 
     mDevice.clear();
 
-    ALOGV("Camera %d: Detach complete", TClientBase::mCameraId);
+    ALOGV("Camera %s: Detach complete", TClientBase::mCameraIdStr.string());
 }
 
 template <typename TClientBase>
@@ -211,10 +222,10 @@
     if (TClientBase::mClientPid != 0 &&
         getCallingPid() != TClientBase::mClientPid) {
 
-        ALOGE("%s: Camera %d: Connection attempt from pid %d; "
+        ALOGE("%s: Camera %s: Connection attempt from pid %d; "
                 "current locked to pid %d",
                 __FUNCTION__,
-                TClientBase::mCameraId,
+                TClientBase::mCameraIdStr.string(),
                 getCallingPid(),
                 TClientBase::mClientPid);
         return BAD_VALUE;
@@ -242,8 +253,7 @@
 void Camera2ClientBase<TClientBase>::notifyIdle() {
     if (mDeviceActive) {
         getCameraService()->updateProxyDeviceState(
-            ICameraServiceProxy::CAMERA_STATE_IDLE,
-            String8::format("%d", TClientBase::mCameraId));
+            ICameraServiceProxy::CAMERA_STATE_IDLE, TClientBase::mCameraIdStr);
     }
     mDeviceActive = false;
 
@@ -258,8 +268,7 @@
 
     if (!mDeviceActive) {
         getCameraService()->updateProxyDeviceState(
-            ICameraServiceProxy::CAMERA_STATE_ACTIVE,
-            String8::format("%d", TClientBase::mCameraId));
+            ICameraServiceProxy::CAMERA_STATE_ACTIVE, TClientBase::mCameraIdStr);
     }
     mDeviceActive = true;
 
@@ -307,6 +316,12 @@
 }
 
 template <typename TClientBase>
+void Camera2ClientBase<TClientBase>::notifyRequestQueueEmpty() {
+
+    ALOGV("%s: Request queue now empty", __FUNCTION__);
+}
+
+template <typename TClientBase>
 void Camera2ClientBase<TClientBase>::notifyRepeatingRequestError(long lastFrameNumber) {
     (void)lastFrameNumber;
 
@@ -316,7 +331,7 @@
 
 template <typename TClientBase>
 int Camera2ClientBase<TClientBase>::getCameraId() const {
-    return TClientBase::mCameraId;
+    return std::stoi(TClientBase::mCameraIdStr.string());
 }
 
 template <typename TClientBase>
@@ -331,7 +346,7 @@
 
 template <typename TClientBase>
 const sp<CameraService>& Camera2ClientBase<TClientBase>::getCameraService() {
-    return TClientBase::mCameraService;
+    return TClientBase::sCameraService;
 }
 
 template <typename TClientBase>
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index dbbf638..a4c08ef 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -49,7 +49,7 @@
     Camera2ClientBase(const sp<CameraService>& cameraService,
                       const sp<TCamCallbacks>& remoteCallback,
                       const String16& clientPackageName,
-                      int cameraId,
+                      const String8& cameraId,
                       int cameraFacing,
                       int clientPid,
                       uid_t clientUid,
@@ -57,6 +57,7 @@
     virtual ~Camera2ClientBase();
 
     virtual status_t      initialize(CameraModule *module);
+    virtual status_t      initialize(sp<CameraProviderManager> manager);
     virtual status_t      dumpClient(int fd, const Vector<String16>& args);
 
     /**
@@ -73,6 +74,7 @@
     virtual void          notifyAutoWhitebalance(uint8_t newState,
                                                  int triggerId);
     virtual void          notifyPrepared(int streamId);
+    virtual void          notifyRequestQueueEmpty();
     virtual void          notifyRepeatingRequestError(long lastFrameNumber);
 
     int                   getCameraId() const;
@@ -139,6 +141,10 @@
     virtual void          detachDevice();
 
     bool                  mDeviceActive;
+
+private:
+    template<typename TProviderPtr>
+    status_t              initializeImpl(TProviderPtr providerPtr);
 };
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 984d84b..a873402 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -17,6 +17,8 @@
 #ifndef ANDROID_SERVERS_CAMERA_CAMERADEVICEBASE_H
 #define ANDROID_SERVERS_CAMERA_CAMERADEVICEBASE_H
 
+#include <list>
+
 #include <utils/RefBase.h>
 #include <utils/String8.h>
 #include <utils/String16.h>
@@ -35,6 +37,11 @@
 
 namespace android {
 
+class CameraProviderManager;
+
+// Mapping of output stream index to surface ids
+typedef std::unordered_map<int, std::vector<size_t> > SurfaceMap;
+
 /**
  * Base interface for version >= 2 camera device classes, which interface to
  * camera HAL device versions >= 2.
@@ -46,9 +53,10 @@
     /**
      * The device's camera ID
      */
-    virtual int      getId() const = 0;
+    virtual const String8& getId() const = 0;
 
     virtual status_t initialize(CameraModule *module) = 0;
+    virtual status_t initialize(sp<CameraProviderManager> manager) = 0;
     virtual status_t disconnect() = 0;
 
     virtual status_t dump(int fd, const Vector<String16> &args) = 0;
@@ -70,6 +78,7 @@
      * Output lastFrameNumber is the expected last frame number of the list of requests.
      */
     virtual status_t captureList(const List<const CameraMetadata> &requests,
+                                 const std::list<const SurfaceMap> &surfaceMaps,
                                  int64_t *lastFrameNumber = NULL) = 0;
 
     /**
@@ -85,6 +94,7 @@
      * Output lastFrameNumber is the last frame number of the previous streaming request.
      */
     virtual status_t setStreamingRequestList(const List<const CameraMetadata> &requests,
+                                             const std::list<const SurfaceMap> &surfaceMaps,
                                              int64_t *lastFrameNumber = NULL) = 0;
 
     /**
@@ -114,6 +124,19 @@
             uint32_t consumerUsage = 0) = 0;
 
     /**
+     * Create an output stream of the requested size, format, rotation and
+     * dataspace with a number of consumers.
+     *
+     * For HAL_PIXEL_FORMAT_BLOB formats, the width and height should be the
+     * logical dimensions of the buffer, not the number of bytes.
+     */
+    virtual status_t createStream(const std::vector<sp<Surface>>& consumers,
+            bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+            int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
+            uint32_t consumerUsage = 0) = 0;
+
+    /**
      * Create an input stream of width, height, and format.
      *
      * Return value is the stream ID if non-negative and an error if negative.
@@ -204,6 +227,7 @@
         virtual void notifyShutter(const CaptureResultExtras &resultExtras,
                 nsecs_t timestamp) = 0;
         virtual void notifyPrepared(int streamId) = 0;
+        virtual void notifyRequestQueueEmpty() = 0;
 
         // Required only for API1
         virtual void notifyAutoFocus(uint8_t newState, int triggerId) = 0;
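
A short sketch of how the SurfaceMap added above pairs each output stream in a request with the surface indices it targets, using plain std types and illustrative stream numbers:

    #include <cstddef>
    #include <unordered_map>
    #include <vector>

    // Same shape as the SurfaceMap typedef above: output stream -> surface indices.
    typedef std::unordered_map<int, std::vector<size_t>> SurfaceMap;

    SurfaceMap buildExampleSurfaceMap() {
        SurfaceMap map;
        map[0] = {0};     // stream 0 draws into its only surface
        map[1] = {0, 1};  // stream 1 is shared by two surfaces
        return map;
    }
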
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
new file mode 100644
index 0000000..f691dc1
--- /dev/null
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -0,0 +1,1119 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "CameraProviderManager"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include "CameraProviderManager.h"
+
+#include <chrono>
+#include <android/hidl/manager/1.0/IServiceManager.h>
+#include <hidl/ServiceManagement.h>
+
+namespace android {
+
+using namespace ::android::hardware::camera;
+using namespace ::android::hardware::camera::common::V1_0;
+
+namespace {
+// Hardcoded name for the passthrough HAL implementation, since it can't be discovered via the
+// service manager
+const std::string kLegacyProviderName("legacy/0");
+
+// Slash-separated list of provider types to consider for use via the old camera API
+const std::string kStandardProviderTypes("internal/legacy");
+
+} // anonymous namespace
+
+CameraProviderManager::HardwareServiceInteractionProxy
+CameraProviderManager::sHardwareServiceInteractionProxy{};
+
+CameraProviderManager::~CameraProviderManager() {
+}
+
+status_t CameraProviderManager::initialize(wp<CameraProviderManager::StatusListener> listener,
+        ServiceInteractionProxy* proxy) {
+    int numProviders = 0;
+    {
+        std::lock_guard<std::mutex> lock(mInterfaceMutex);
+        if (proxy == nullptr) {
+            ALOGE("%s: No valid service interaction proxy provided", __FUNCTION__);
+            return BAD_VALUE;
+        }
+        mListener = listener;
+        mServiceProxy = proxy;
+
+        // Registering will trigger notifications for all already-known providers
+        bool success = mServiceProxy->registerForNotifications(
+            /* instance name, empty means no filter */ "",
+            this);
+        if (!success) {
+            ALOGE("%s: Unable to register with hardware service manager for notifications "
+                    "about camera providers", __FUNCTION__);
+            return INVALID_OPERATION;
+        }
+        numProviders = mProviders.size();
+    }
+
+    if (numProviders == 0) {
+        // Remote provider might not have been initialized yet
+        // Wait for a bit and see if we get one registered
+        std::mutex mtx;
+        std::unique_lock<std::mutex> lock(mtx);
+        mProviderRegistered.wait_for(lock, std::chrono::seconds(15));
+        if (mProviders.size() == 0) {
+            ALOGI("%s: Unable to get one registered provider within timeout!",
+                    __FUNCTION__);
+            std::lock_guard<std::mutex> lock(mInterfaceMutex);
+            // See if there's a passthrough HAL, but let's not complain if there's not
+            addProvider(kLegacyProviderName, /*expected*/ false);
+        }
+    }
+
+    return OK;
+}
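
The initialization above waits up to 15 seconds for a remote provider to register before falling back to the passthrough HAL. A standalone sketch of a timed condition-variable wait, written here with a predicate and a shared mutex (illustrative globals, not the actual members):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>
    #include <vector>

    std::mutex gMutex;
    std::condition_variable gRegistered;
    std::vector<int> gProviders;  // stands in for the provider list

    // Blocks for at most 15 seconds; returns true once a provider exists.
    bool waitForFirstProvider() {
        std::unique_lock<std::mutex> lock(gMutex);
        return gRegistered.wait_for(lock, std::chrono::seconds(15),
                                    [] { return !gProviders.empty(); });
    }
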
+
+int CameraProviderManager::getCameraCount() const {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+    int count = 0;
+    for (auto& provider : mProviders) {
+        count += provider->mDevices.size();
+    }
+    return count;
+}
+
+int CameraProviderManager::getStandardCameraCount() const {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+    int count = 0;
+    for (auto& provider : mProviders) {
+        if (kStandardProviderTypes.find(provider->getType()) != std::string::npos) {
+            count += provider->mDevices.size();
+        }
+    }
+    return count;
+}
+
+std::vector<std::string> CameraProviderManager::getCameraDeviceIds() const {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+    std::vector<std::string> deviceIds;
+    for (auto& provider : mProviders) {
+        for (auto& deviceInfo : provider->mDevices) {
+            deviceIds.push_back(deviceInfo->mId);
+        }
+    }
+    return deviceIds;
+}
+
+bool CameraProviderManager::isValidDevice(const std::string &id, uint16_t majorVersion) const {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+    return isValidDeviceLocked(id, majorVersion);
+}
+
+bool CameraProviderManager::isValidDeviceLocked(const std::string &id, uint16_t majorVersion) const {
+    for (auto& provider : mProviders) {
+        for (auto& deviceInfo : provider->mDevices) {
+            if (deviceInfo->mId == id && deviceInfo->mVersion.get_major() == majorVersion) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+bool CameraProviderManager::hasFlashUnit(const std::string &id) const {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    auto deviceInfo = findDeviceInfoLocked(id);
+    if (deviceInfo == nullptr) return false;
+
+    return deviceInfo->hasFlashUnit();
+}
+
+status_t CameraProviderManager::getResourceCost(const std::string &id,
+        CameraResourceCost* cost) const {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    auto deviceInfo = findDeviceInfoLocked(id);
+    if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+    *cost = deviceInfo->mResourceCost;
+    return OK;
+}
+
+status_t CameraProviderManager::getCameraInfo(const std::string &id,
+        hardware::CameraInfo* info) const {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    auto deviceInfo = findDeviceInfoLocked(id);
+    if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+    return deviceInfo->getCameraInfo(info);
+}
+
+status_t CameraProviderManager::getCameraCharacteristics(const std::string &id,
+        CameraMetadata* characteristics) const {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    auto deviceInfo = findDeviceInfoLocked(id, /*minVersion*/ {3,0}, /*maxVersion*/ {4,0});
+    if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+    return deviceInfo->getCameraCharacteristics(characteristics);
+}
+
+status_t CameraProviderManager::getHighestSupportedVersion(const std::string &id,
+        hardware::hidl_version *v) {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    hardware::hidl_version maxVersion{0,0};
+    bool found = false;
+    for (auto& provider : mProviders) {
+        for (auto& deviceInfo : provider->mDevices) {
+            if (deviceInfo->mId == id) {
+                if (deviceInfo->mVersion > maxVersion) {
+                    maxVersion = deviceInfo->mVersion;
+                    found = true;
+                }
+            }
+        }
+    }
+    if (!found) {
+        return NAME_NOT_FOUND;
+    }
+    *v = maxVersion;
+    return OK;
+}
+
+status_t CameraProviderManager::setTorchMode(const std::string &id, bool enabled) {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    auto deviceInfo = findDeviceInfoLocked(id);
+    if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+    return deviceInfo->setTorchMode(enabled);
+}
+
+status_t CameraProviderManager::setUpVendorTags() {
+    // TODO (b/34275821): support aggregating vendor tags for more than one provider
+    for (auto& provider : mProviders) {
+        hardware::hidl_vec<VendorTagSection> vts;
+        Status status;
+        provider->mInterface->getVendorTags(
+            [&](auto s, const auto& vendorTagSecs) {
+                status = s;
+                if (s == Status::OK) {
+                    vts = vendorTagSecs;
+                }
+        });
+
+        if (status != Status::OK) {
+            return mapToStatusT(status);
+        }
+
+        VendorTagDescriptor::clearGlobalVendorTagDescriptor();
+
+        // Read all vendor tag definitions into a descriptor
+        sp<VendorTagDescriptor> desc;
+        status_t res;
+        if ((res = HidlVendorTagDescriptor::createDescriptorFromHidl(vts, /*out*/desc))
+                != OK) {
+            ALOGE("%s: Could not generate descriptor from vendor tag operations,"
+                  "received error %s (%d). Camera clients will not be able to use"
+                  "vendor tags", __FUNCTION__, strerror(res), res);
+            return res;
+        }
+
+        // Set the global descriptor to use with camera metadata
+        VendorTagDescriptor::setAsGlobalVendorTagDescriptor(desc);
+    }
+    return OK;
+}
+
+status_t CameraProviderManager::openSession(const std::string &id,
+        const sp<hardware::camera::device::V3_2::ICameraDeviceCallback>& callback,
+        /*out*/
+        sp<hardware::camera::device::V3_2::ICameraDeviceSession> *session) {
+
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    auto deviceInfo = findDeviceInfoLocked(id,
+            /*minVersion*/ {3,0}, /*maxVersion*/ {4,0});
+    if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+    auto *deviceInfo3 = static_cast<ProviderInfo::DeviceInfo3*>(deviceInfo);
+
+    Status status;
+    deviceInfo3->mInterface->open(callback, [&status, &session]
+            (Status s, const sp<device::V3_2::ICameraDeviceSession>& cameraSession) {
+                status = s;
+                if (status == Status::OK) {
+                    *session = cameraSession;
+                }
+            });
+    return mapToStatusT(status);
+}
+
+status_t CameraProviderManager::openSession(const std::string &id,
+        const sp<hardware::camera::device::V1_0::ICameraDeviceCallback>& callback,
+        /*out*/
+        sp<hardware::camera::device::V1_0::ICameraDevice> *session) {
+
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    auto deviceInfo = findDeviceInfoLocked(id,
+            /*minVersion*/ {1,0}, /*maxVersion*/ {2,0});
+    if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+    auto *deviceInfo1 = static_cast<ProviderInfo::DeviceInfo1*>(deviceInfo);
+
+    Status status = deviceInfo1->mInterface->open(callback);
+    if (status == Status::OK) {
+        *session = deviceInfo1->mInterface;
+    }
+    return mapToStatusT(status);
+}
+
+
+hardware::Return<void> CameraProviderManager::onRegistration(
+        const hardware::hidl_string& /*fqName*/,
+        const hardware::hidl_string& name,
+        bool /*preexisting*/) {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    addProvider(name);
+    mProviderRegistered.notify_one();
+    return hardware::Return<void>();
+}
+
+status_t CameraProviderManager::dump(int fd, const Vector<String16>& args) {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    dprintf(fd, "Available camera providers and devices:\n");
+    for (auto& provider : mProviders) {
+        provider->dump(fd, args);
+    }
+    return OK;
+}
+
+CameraProviderManager::ProviderInfo::DeviceInfo* CameraProviderManager::findDeviceInfoLocked(
+        const std::string& id,
+        hardware::hidl_version minVersion, hardware::hidl_version maxVersion) const {
+    for (auto& provider : mProviders) {
+        for (auto& deviceInfo : provider->mDevices) {
+            if (deviceInfo->mId == id &&
+                    minVersion <= deviceInfo->mVersion && maxVersion >= deviceInfo->mVersion) {
+                return deviceInfo.get();
+            }
+        }
+    }
+    return nullptr;
+}
+
+
+status_t CameraProviderManager::addProvider(const std::string& newProvider, bool expected) {
+    for (const auto& providerInfo : mProviders) {
+        if (providerInfo->mProviderName == newProvider) {
+            ALOGW("%s: Camera provider HAL with name '%s' already registered", __FUNCTION__,
+                    newProvider.c_str());
+            return ALREADY_EXISTS;
+        }
+    }
+    sp<provider::V2_4::ICameraProvider> interface =
+            mServiceProxy->getService(newProvider);
+
+    if (interface == nullptr) {
+        if (expected) {
+            ALOGW("%s: Camera provider HAL '%s' is not actually available", __FUNCTION__,
+                    newProvider.c_str());
+            return BAD_VALUE;
+        } else {
+            // Not guaranteed to be found, so not an error if it wasn't
+            return OK;
+        }
+    }
+
+    sp<ProviderInfo> providerInfo =
+            new ProviderInfo(newProvider, interface, this);
+    status_t res = providerInfo->initialize();
+    if (res != OK) {
+        return res;
+    }
+
+    mProviders.push_back(providerInfo);
+
+    return OK;
+}
+
+status_t CameraProviderManager::removeProvider(const std::string& provider) {
+    for (auto it = mProviders.begin(); it != mProviders.end(); it++) {
+        if ((*it)->mProviderName == provider) {
+            mProviders.erase(it);
+            return OK;
+        }
+    }
+    ALOGW("%s: Camera provider HAL with name '%s' is not registered", __FUNCTION__,
+            provider.c_str());
+    return NAME_NOT_FOUND;
+}
+
+/**** Methods for ProviderInfo ****/
+
+
+CameraProviderManager::ProviderInfo::ProviderInfo(
+        const std::string &providerName,
+        sp<provider::V2_4::ICameraProvider>& interface,
+        CameraProviderManager *manager) :
+        mProviderName(providerName),
+        mInterface(interface),
+        mManager(manager) {
+    (void) mManager;
+}
+
+status_t CameraProviderManager::ProviderInfo::initialize() {
+    status_t res = parseProviderName(mProviderName, &mType, &mId);
+    if (res != OK) {
+        ALOGE("%s: Invalid provider name, ignoring", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    ALOGI("Connecting to new camera provider: %s, isRemote? %d",
+            mProviderName.c_str(), mInterface->isRemote());
+    Status status = mInterface->setCallback(this);
+    if (status != Status::OK) {
+        ALOGE("%s: Unable to register callbacks with camera provider '%s'",
+                __FUNCTION__, mProviderName.c_str());
+        return mapToStatusT(status);
+    }
+    // TODO: Register for hw binder death notifications as well
+
+    // Get initial list of camera devices, if any
+    std::vector<std::string> devices;
+    mInterface->getCameraIdList([&status, &devices](
+            Status idStatus,
+            const hardware::hidl_vec<hardware::hidl_string>& cameraDeviceNames) {
+        status = idStatus;
+        if (status == Status::OK) {
+            for (size_t i = 0; i < cameraDeviceNames.size(); i++) {
+                devices.push_back(cameraDeviceNames[i]);
+            }
+        } });
+
+    if (status != Status::OK) {
+        ALOGE("%s: Unable to query for camera devices from provider '%s'",
+                __FUNCTION__, mProviderName.c_str());
+        return mapToStatusT(status);
+    }
+
+    for (auto& device : devices) {
+        status_t res = addDevice(device);
+        if (res != OK) {
+            ALOGE("%s: Unable to enumerate camera device '%s': %s (%d)",
+                    __FUNCTION__, device.c_str(), strerror(-res), res);
+        }
+    }
+
+    ALOGI("Camera provider %s ready with %zu camera devices",
+            mProviderName.c_str(), mDevices.size());
+
+    return OK;
+}
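
The getCameraIdList call above follows the HIDL synchronous-callback convention: the lambda is invoked before the call returns, so captured locals can be filled in place. A minimal sketch of that pattern with a stand-in function (names are illustrative, not the real HIDL API):

    #include <functional>
    #include <string>
    #include <vector>

    enum class Status { OK, INTERNAL_ERROR };

    // Stand-in for a HIDL method that reports its results via a callback.
    void getCameraIdListStub(
            const std::function<void(Status, const std::vector<std::string>&)>& cb) {
        cb(Status::OK, {"device@3.2/internal/0", "device@3.2/internal/1"});
    }

    std::vector<std::string> collectIds() {
        Status status = Status::INTERNAL_ERROR;
        std::vector<std::string> devices;
        getCameraIdListStub([&status, &devices](Status s,
                                                const std::vector<std::string>& names) {
            status = s;
            if (s == Status::OK) devices = names;
        });
        return (status == Status::OK) ? devices : std::vector<std::string>{};
    }
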
+
+const std::string& CameraProviderManager::ProviderInfo::getType() const {
+    return mType;
+}
+
+status_t CameraProviderManager::ProviderInfo::addDevice(const std::string& name,
+        CameraDeviceStatus initialStatus, /*out*/ std::string* parsedId) {
+
+    ALOGI("Enumerating new camera device: %s", name.c_str());
+
+    uint16_t major, minor;
+    std::string type, id;
+
+    status_t res = parseDeviceName(name, &major, &minor, &type, &id);
+    if (res != OK) {
+        return res;
+    }
+    if (type != mType) {
+        ALOGE("%s: Device type %s does not match provider type %s", __FUNCTION__,
+                type.c_str(), mType.c_str());
+        return BAD_VALUE;
+    }
+    if (mManager->isValidDeviceLocked(id, major)) {
+        ALOGE("%s: Device %s: ID %s is already in use for device major version %d", __FUNCTION__,
+                name.c_str(), id.c_str(), major);
+        return BAD_VALUE;
+    }
+
+    std::unique_ptr<DeviceInfo> deviceInfo;
+    switch (major) {
+        case 1:
+            deviceInfo = initializeDeviceInfo<DeviceInfo1>(name, id, minor);
+            break;
+        case 3:
+            deviceInfo = initializeDeviceInfo<DeviceInfo3>(name, id, minor);
+            break;
+        default:
+            ALOGE("%s: Device %s: Unknown HIDL device HAL major version %d:", __FUNCTION__,
+                    name.c_str(), major);
+            return BAD_VALUE;
+    }
+    if (deviceInfo == nullptr) return BAD_VALUE;
+    deviceInfo->mStatus = initialStatus;
+
+    mDevices.push_back(std::move(deviceInfo));
+
+    if (parsedId != nullptr) {
+        *parsedId = id;
+    }
+    return OK;
+}
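
A compact sketch of the major-version dispatch in addDevice above: a factory switch returns a common base pointer per supported HAL generation (the DeviceInfo types here are illustrative stand-ins):

    #include <cstdint>
    #include <memory>

    struct DeviceInfo  { virtual ~DeviceInfo() = default; };
    struct DeviceInfo1 : DeviceInfo {};  // HIDL device HAL major version 1
    struct DeviceInfo3 : DeviceInfo {};  // HIDL device HAL major version 3

    std::unique_ptr<DeviceInfo> makeDeviceInfo(uint16_t major) {
        switch (major) {
            case 1:  return std::make_unique<DeviceInfo1>();
            case 3:  return std::make_unique<DeviceInfo3>();
            default: return nullptr;  // unknown HAL major version
        }
    }
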
+
+status_t CameraProviderManager::ProviderInfo::dump(int fd, const Vector<String16>&) const {
+    dprintf(fd, "    %s: %zu devices:\n", mProviderName.c_str(), mDevices.size());
+
+    for (auto& device : mDevices) {
+        dprintf(fd, "        %s: Resource cost: %d\n", device->mName.c_str(),
+                device->mResourceCost.resourceCost);
+        if (device->mResourceCost.conflictingDevices.size() > 0) {
+            dprintf(fd, "            Conflicting devices:\n");
+            for (size_t i = 0; i < device->mResourceCost.conflictingDevices.size(); i++) {
+                dprintf(fd, "              %s\n",
+                        device->mResourceCost.conflictingDevices[i].c_str());
+            }
+        }
+    }
+    return OK;
+}
+
+hardware::Return<void> CameraProviderManager::ProviderInfo::cameraDeviceStatusChange(
+        const hardware::hidl_string& cameraDeviceName,
+        CameraDeviceStatus newStatus) {
+    sp<StatusListener> listener;
+    std::string id;
+    {
+        std::lock_guard<std::mutex> lock(mManager->mStatusListenerMutex);
+        bool known = false;
+        for (auto& deviceInfo : mDevices) {
+            if (deviceInfo->mName == cameraDeviceName) {
+                ALOGI("Camera device %s status is now %s, was %s", cameraDeviceName.c_str(),
+                        deviceStatusToString(newStatus), deviceStatusToString(deviceInfo->mStatus));
+                deviceInfo->mStatus = newStatus;
+                // TODO: Handle device removal (NOT_PRESENT)
+                id = deviceInfo->mId;
+                known = true;
+                break;
+            }
+        }
+        // Previously unseen device; status must not be NOT_PRESENT
+        if (!known) {
+            if (newStatus == CameraDeviceStatus::NOT_PRESENT) {
+                ALOGW("Camera provider %s says an unknown camera device %s is not present. Curious.",
+                    mProviderName.c_str(), cameraDeviceName.c_str());
+                return hardware::Void();
+            }
+            addDevice(cameraDeviceName, newStatus, &id);
+        }
+        listener = mManager->mListener.promote();
+    }
+    // Call without lock held to allow reentrancy into provider manager
+    if (listener != nullptr) {
+        listener->onDeviceStatusChanged(String8(id.c_str()), newStatus);
+    }
+    return hardware::Void();
+}
+
+hardware::Return<void> CameraProviderManager::ProviderInfo::torchModeStatusChange(
+        const hardware::hidl_string& cameraDeviceName,
+        TorchModeStatus newStatus) {
+    sp<StatusListener> listener;
+    std::string id;
+    {
+        std::lock_guard<std::mutex> lock(mManager->mStatusListenerMutex);
+        bool known = false;
+        for (auto& deviceInfo : mDevices) {
+            if (deviceInfo->mName == cameraDeviceName) {
+                ALOGI("Camera device %s torch status is now %s", cameraDeviceName.c_str(),
+                        torchStatusToString(newStatus));
+                id = deviceInfo->mId;
+                known = true;
+                break;
+            }
+        }
+        if (!known) {
+            ALOGW("Camera provider %s says an unknown camera %s now has torch status %d. Curious.",
+                    mProviderName.c_str(), cameraDeviceName.c_str(), newStatus);
+            return hardware::Void();
+        }
+        listener = mManager->mListener.promote();
+    }
+    // Call without lock held to allow reentrancy into provider manager
+    if (listener != nullptr) {
+        listener->onTorchStatusChanged(String8(id.c_str()), newStatus);
+    }
+    return hardware::Void();
+}
+
+
+template<class DeviceInfoT>
+std::unique_ptr<CameraProviderManager::ProviderInfo::DeviceInfo>
+    CameraProviderManager::ProviderInfo::initializeDeviceInfo(
+        const std::string &name,
+        const std::string &id, uint16_t minorVersion) const {
+    Status status;
+
+    auto cameraInterface =
+            getDeviceInterface<typename DeviceInfoT::InterfaceT>(name);
+    if (cameraInterface == nullptr) return nullptr;
+
+    CameraResourceCost resourceCost;
+    cameraInterface->getResourceCost([&status, &resourceCost](
+        Status s, CameraResourceCost cost) {
+                status = s;
+                resourceCost = cost;
+            });
+    if (status != Status::OK) {
+        ALOGE("%s: Unable to obtain resource costs for camera device %s: %s", __FUNCTION__,
+                name.c_str(), statusToString(status));
+        return nullptr;
+    }
+    return std::unique_ptr<DeviceInfo>(
+        new DeviceInfoT(name, id, minorVersion, resourceCost, cameraInterface));
+}
+
+template<class InterfaceT>
+sp<InterfaceT>
+CameraProviderManager::ProviderInfo::getDeviceInterface(const std::string &name) const {
+    ALOGE("%s: Device %s: Unknown HIDL device HAL major version %d:", __FUNCTION__,
+            name.c_str(), InterfaceT::version.get_major());
+    return nullptr;
+}
+
+template<>
+sp<device::V1_0::ICameraDevice>
+CameraProviderManager::ProviderInfo::getDeviceInterface
+        <device::V1_0::ICameraDevice>(const std::string &name) const {
+    Status status;
+    sp<device::V1_0::ICameraDevice> cameraInterface;
+    mInterface->getCameraDeviceInterface_V1_x(name, [&status, &cameraInterface](
+        Status s, sp<device::V1_0::ICameraDevice> interface) {
+                status = s;
+                cameraInterface = interface;
+            });
+    if (status != Status::OK) {
+        ALOGE("%s: Unable to obtain interface for camera device %s: %s", __FUNCTION__,
+                name.c_str(), statusToString(status));
+        return nullptr;
+    }
+    return cameraInterface;
+}
+
+template<>
+sp<device::V3_2::ICameraDevice>
+CameraProviderManager::ProviderInfo::getDeviceInterface
+        <device::V3_2::ICameraDevice>(const std::string &name) const {
+    Status status;
+    sp<device::V3_2::ICameraDevice> cameraInterface;
+    mInterface->getCameraDeviceInterface_V3_x(name, [&status, &cameraInterface](
+        Status s, sp<device::V3_2::ICameraDevice> interface) {
+                status = s;
+                cameraInterface = interface;
+            });
+    if (status != Status::OK) {
+        ALOGE("%s: Unable to obtain interface for camera device %s: %s", __FUNCTION__,
+                name.c_str(), statusToString(status));
+        return nullptr;
+    }
+    return cameraInterface;
+}
+
+CameraProviderManager::ProviderInfo::DeviceInfo::~DeviceInfo() {}
+
+template<class InterfaceT>
+status_t CameraProviderManager::ProviderInfo::DeviceInfo::setTorchMode(InterfaceT& interface,
+        bool enabled) {
+    Status s = interface->setTorchMode(enabled ? TorchMode::ON : TorchMode::OFF);
+    return mapToStatusT(s);
+}
+
+CameraProviderManager::ProviderInfo::DeviceInfo1::DeviceInfo1(const std::string& name,
+        const std::string &id,
+        uint16_t minorVersion,
+        const CameraResourceCost& resourceCost,
+        sp<InterfaceT> interface) :
+        DeviceInfo(name, id, hardware::hidl_version{1, minorVersion}, resourceCost),
+        mInterface(interface) {
+    // Get default parameters and initialize flash unit availability
+    // Requires powering on the camera device
+    Status status = mInterface->open(nullptr);
+    if (status != Status::OK) {
+        ALOGE("%s: Unable to open camera device %s to check for a flash unit: %s (%d)", __FUNCTION__,
+                mId.c_str(), CameraProviderManager::statusToString(status), status);
+        return;
+    }
+    mInterface->getParameters([this](const hardware::hidl_string& parms) {
+                mDefaultParameters.unflatten(String8(parms.c_str()));
+            });
+
+    const char *flashMode =
+            mDefaultParameters.get(CameraParameters::KEY_SUPPORTED_FLASH_MODES);
+    if (flashMode && strstr(flashMode, CameraParameters::FLASH_MODE_TORCH)) {
+        mHasFlashUnit = true;
+    } else {
+        mHasFlashUnit = false;
+    }
+
+    mInterface->close();
+}
+
+CameraProviderManager::ProviderInfo::DeviceInfo1::~DeviceInfo1() {}
+
+status_t CameraProviderManager::ProviderInfo::DeviceInfo1::setTorchMode(bool enabled) {
+    return DeviceInfo::setTorchMode(mInterface, enabled);
+}
+
+status_t CameraProviderManager::ProviderInfo::DeviceInfo1::getCameraInfo(
+        hardware::CameraInfo *info) const {
+    if (info == nullptr) return BAD_VALUE;
+
+    Status status;
+    device::V1_0::CameraInfo cInfo;
+    mInterface->getCameraInfo([&status, &cInfo](Status s, device::V1_0::CameraInfo camInfo) {
+                status = s;
+                cInfo = camInfo;
+            });
+    if (status != Status::OK) {
+        return mapToStatusT(status);
+    }
+
+    switch(cInfo.facing) {
+        case device::V1_0::CameraFacing::BACK:
+            info->facing = hardware::CAMERA_FACING_BACK;
+            break;
+        case device::V1_0::CameraFacing::EXTERNAL:
+            // Map external to front for legacy API
+        case device::V1_0::CameraFacing::FRONT:
+            info->facing = hardware::CAMERA_FACING_FRONT;
+            break;
+        default:
+            ALOGW("%s: Unknown camera facing: %d", __FUNCTION__, cInfo.facing);
+            info->facing = hardware::CAMERA_FACING_BACK;
+    }
+    info->orientation = cInfo.orientation;
+
+    return OK;
+}
+
+CameraProviderManager::ProviderInfo::DeviceInfo3::DeviceInfo3(const std::string& name,
+        const std::string &id,
+        uint16_t minorVersion,
+        const CameraResourceCost& resourceCost,
+        sp<InterfaceT> interface) :
+        DeviceInfo(name, id, hardware::hidl_version{3, minorVersion}, resourceCost),
+        mInterface(interface) {
+    // Get camera characteristics and initialize flash unit availability
+    Status status;
+    mInterface->getCameraCharacteristics([&status, this](Status s,
+                    device::V3_2::CameraMetadata metadata) {
+                status = s;
+                if (s == Status::OK) {
+                    camera_metadata_t *buffer =
+                            reinterpret_cast<camera_metadata_t*>(metadata.data());
+                    mCameraCharacteristics = buffer;
+                }
+            });
+    if (status != Status::OK) {
+        ALOGE("%s: Unable to get camera characteristics for device %s: %s (%d)",
+                __FUNCTION__, mId.c_str(), CameraProviderManager::statusToString(status), status);
+        return;
+    }
+    camera_metadata_entry flashAvailable =
+            mCameraCharacteristics.find(ANDROID_FLASH_INFO_AVAILABLE);
+    if (flashAvailable.count == 1 &&
+            flashAvailable.data.u8[0] == ANDROID_FLASH_INFO_AVAILABLE_TRUE) {
+        mHasFlashUnit = true;
+    } else {
+        mHasFlashUnit = false;
+    }
+}
+
+CameraProviderManager::ProviderInfo::DeviceInfo3::~DeviceInfo3() {}
+
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::setTorchMode(bool enabled) {
+    return DeviceInfo::setTorchMode(mInterface, enabled);
+}
+
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::getCameraInfo(
+        hardware::CameraInfo *info) const {
+    if (info == nullptr) return BAD_VALUE;
+
+    camera_metadata_ro_entry facing =
+            mCameraCharacteristics.find(ANDROID_LENS_FACING);
+    if (facing.count == 1) {
+        switch (facing.data.u8[0]) {
+            case ANDROID_LENS_FACING_BACK:
+                info->facing = hardware::CAMERA_FACING_BACK;
+                break;
+            case ANDROID_LENS_FACING_EXTERNAL:
+                // Map external to front for legacy API
+            case ANDROID_LENS_FACING_FRONT:
+                info->facing = hardware::CAMERA_FACING_FRONT;
+                break;
+        }
+    } else {
+        ALOGE("%s: Unable to find android.lens.facing static metadata", __FUNCTION__);
+        return NAME_NOT_FOUND;
+    }
+
+    camera_metadata_ro_entry orientation =
+            mCameraCharacteristics.find(ANDROID_SENSOR_ORIENTATION);
+    if (orientation.count == 1) {
+        info->orientation = orientation.data.i32[0];
+    } else {
+        ALOGE("%s: Unable to find android.sensor.orientation static metadata", __FUNCTION__);
+        return NAME_NOT_FOUND;
+    }
+
+    return OK;
+}
+
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::getCameraCharacteristics(
+        CameraMetadata *characteristics) const {
+    if (characteristics == nullptr) return BAD_VALUE;
+
+    *characteristics = mCameraCharacteristics;
+    return OK;
+}
+
+status_t CameraProviderManager::ProviderInfo::parseProviderName(const std::string& name,
+        std::string *type, uint32_t *id) {
+    // Format must be "<type>/<id>"
+#define ERROR_MSG_PREFIX "%s: Invalid provider name '%s'. "       \
+    "Should match '<type>/<id>' - "
+
+    if (!type || !id) return INVALID_OPERATION;
+
+    std::string::size_type slashIdx = name.find('/');
+    if (slashIdx == std::string::npos || slashIdx == name.size() - 1) {
+        ALOGE(ERROR_MSG_PREFIX
+                "does not have / separator between type and id",
+                __FUNCTION__, name.c_str());
+        return BAD_VALUE;
+    }
+
+    std::string typeVal = name.substr(0, slashIdx);
+
+    char *endPtr;
+    errno = 0;
+    long idVal = strtol(name.c_str() + slashIdx + 1, &endPtr, 10);
+    if (errno != 0) {
+        ALOGE(ERROR_MSG_PREFIX
+                "cannot parse provider id as an integer: %s (%d)",
+                __FUNCTION__, name.c_str(), strerror(errno), errno);
+        return BAD_VALUE;
+    }
+    if (endPtr != name.c_str() + name.size()) {
+        ALOGE(ERROR_MSG_PREFIX
+                "provider id has unexpected length",
+                __FUNCTION__, name.c_str());
+        return BAD_VALUE;
+    }
+    if (idVal < 0) {
+        ALOGE(ERROR_MSG_PREFIX
+                "id is negative: %ld",
+                __FUNCTION__, name.c_str(), idVal);
+        return BAD_VALUE;
+    }
+
+#undef ERROR_MSG_PREFIX
+
+    *type = typeVal;
+    *id = static_cast<uint32_t>(idVal);
+
+    return OK;
+}
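+
+// For illustration: a provider instance name following the '<type>/<id>' convention
+// parses into its two components as sketched below (the name "legacy/0" is a
+// hypothetical example):
+//
+//     std::string type;
+//     uint32_t id;
+//     if (parseProviderName("legacy/0", &type, &id) == OK) {
+//         // type == "legacy", id == 0
+//     }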
+
+status_t CameraProviderManager::ProviderInfo::parseDeviceName(const std::string& name,
+        uint16_t *major, uint16_t *minor, std::string *type, std::string *id) {
+
+    // Format must be "device@<major>.<minor>/<type>/<id>"
+
+#define ERROR_MSG_PREFIX "%s: Invalid device name '%s'. " \
+    "Should match 'device@<major>.<minor>/<type>/<id>' - "
+
+    if (!major || !minor || !type || !id) return INVALID_OPERATION;
+
+    // Verify starting prefix
+    const char expectedPrefix[] = "device@";
+
+    if (name.find(expectedPrefix) != 0) {
+        ALOGE(ERROR_MSG_PREFIX
+                "does not start with '%s'",
+                __FUNCTION__, name.c_str(), expectedPrefix);
+        return BAD_VALUE;
+    }
+
+    // Extract major/minor versions
+    constexpr std::string::size_type atIdx = sizeof(expectedPrefix) - 2;
+    std::string::size_type dotIdx = name.find('.', atIdx);
+    if (dotIdx == std::string::npos) {
+        ALOGE(ERROR_MSG_PREFIX
+                "does not have @<major>. version section",
+                __FUNCTION__, name.c_str());
+        return BAD_VALUE;
+    }
+    std::string::size_type typeSlashIdx = name.find('/', dotIdx);
+    if (typeSlashIdx == std::string::npos) {
+        ALOGE(ERROR_MSG_PREFIX
+                "does not have .<minor>/ version section",
+                __FUNCTION__, name.c_str());
+        return BAD_VALUE;
+    }
+
+    char *endPtr;
+    errno = 0;
+    long majorVal = strtol(name.c_str() + atIdx + 1, &endPtr, 10);
+    if (errno != 0) {
+        ALOGE(ERROR_MSG_PREFIX
+                "cannot parse major version: %s (%d)",
+                __FUNCTION__, name.c_str(), strerror(errno), errno);
+        return BAD_VALUE;
+    }
+    if (endPtr != name.c_str() + dotIdx) {
+        ALOGE(ERROR_MSG_PREFIX
+                "major version has unexpected length",
+                __FUNCTION__, name.c_str());
+        return BAD_VALUE;
+    }
+    long minorVal = strtol(name.c_str() + dotIdx + 1, &endPtr, 10);
+    if (errno != 0) {
+        ALOGE(ERROR_MSG_PREFIX
+                "cannot parse minor version: %s (%d)",
+                __FUNCTION__, name.c_str(), strerror(errno), errno);
+        return BAD_VALUE;
+    }
+    if (endPtr != name.c_str() + typeSlashIdx) {
+        ALOGE(ERROR_MSG_PREFIX
+                "minor version has unexpected length",
+                __FUNCTION__, name.c_str());
+        return BAD_VALUE;
+    }
+    if (majorVal < 0 || majorVal > UINT16_MAX || minorVal < 0 || minorVal > UINT16_MAX) {
+        ALOGE(ERROR_MSG_PREFIX
+                "major/minor version is out of range of uint16_t: %ld.%ld",
+                __FUNCTION__, name.c_str(), majorVal, minorVal);
+        return BAD_VALUE;
+    }
+
+    // Extract type and id
+
+    std::string::size_type instanceSlashIdx = name.find('/', typeSlashIdx + 1);
+    if (instanceSlashIdx == std::string::npos) {
+        ALOGE(ERROR_MSG_PREFIX
+                "does not have /<type>/ component",
+                __FUNCTION__, name.c_str());
+        return BAD_VALUE;
+    }
+    std::string typeVal = name.substr(typeSlashIdx + 1, instanceSlashIdx - typeSlashIdx - 1);
+
+    if (instanceSlashIdx == name.size() - 1) {
+        ALOGE(ERROR_MSG_PREFIX
+                "does not have an /<id> component",
+                __FUNCTION__, name.c_str());
+        return BAD_VALUE;
+    }
+    std::string idVal = name.substr(instanceSlashIdx + 1);
+
+#undef ERROR_MSG_PREFIX
+
+    *major = static_cast<uint16_t>(majorVal);
+    *minor = static_cast<uint16_t>(minorVal);
+    *type = typeVal;
+    *id = idVal;
+
+    return OK;
+}
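+
+// For illustration: a device instance name following the
+// 'device@<major>.<minor>/<type>/<id>' convention parses as sketched below
+// (the name "device@3.2/legacy/0" is a hypothetical example):
+//
+//     uint16_t major, minor;
+//     std::string type, id;
+//     if (parseDeviceName("device@3.2/legacy/0", &major, &minor, &type, &id) == OK) {
+//         // major == 3, minor == 2, type == "legacy", id == "0"
+//     }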
+
+CameraProviderManager::ProviderInfo::~ProviderInfo() {
+    // Destruction of ProviderInfo is only supposed to happen when the respective
+    // CameraProvider interface dies, so do not unregister callbacks.
+
+}
+
+status_t CameraProviderManager::mapToStatusT(const Status& s) {
+    switch(s) {
+        case Status::OK:
+            return OK;
+        case Status::ILLEGAL_ARGUMENT:
+            return BAD_VALUE;
+        case Status::CAMERA_IN_USE:
+            return -EBUSY;
+        case Status::MAX_CAMERAS_IN_USE:
+            return -EUSERS;
+        case Status::METHOD_NOT_SUPPORTED:
+            return UNKNOWN_TRANSACTION;
+        case Status::OPERATION_NOT_SUPPORTED:
+            return INVALID_OPERATION;
+        case Status::CAMERA_DISCONNECTED:
+            return DEAD_OBJECT;
+        case Status::INTERNAL_ERROR:
+            return INVALID_OPERATION;
+    }
+    ALOGW("Unexpected HAL status code %d", s);
+    return INVALID_OPERATION;
+}
+
+const char* CameraProviderManager::statusToString(const Status& s) {
+    switch(s) {
+        case Status::OK:
+            return "OK";
+        case Status::ILLEGAL_ARGUMENT:
+            return "ILLEGAL_ARGUMENT";
+        case Status::CAMERA_IN_USE:
+            return "CAMERA_IN_USE";
+        case Status::MAX_CAMERAS_IN_USE:
+            return "MAX_CAMERAS_IN_USE";
+        case Status::METHOD_NOT_SUPPORTED:
+            return "METHOD_NOT_SUPPORTED";
+        case Status::OPERATION_NOT_SUPPORTED:
+            return "OPERATION_NOT_SUPPORTED";
+        case Status::CAMERA_DISCONNECTED:
+            return "CAMERA_DISCONNECTED";
+        case Status::INTERNAL_ERROR:
+            return "INTERNAL_ERROR";
+    }
+    ALOGW("Unexpected HAL status code %d", s);
+    return "UNKNOWN_ERROR";
+}
+
+const char* CameraProviderManager::deviceStatusToString(const CameraDeviceStatus& s) {
+    switch(s) {
+        case CameraDeviceStatus::NOT_PRESENT:
+            return "NOT_PRESENT";
+        case CameraDeviceStatus::PRESENT:
+            return "PRESENT";
+        case CameraDeviceStatus::ENUMERATING:
+            return "ENUMERATING";
+    }
+    ALOGW("Unexpected HAL device status code %d", s);
+    return "UNKNOWN_STATUS";
+}
+
+const char* CameraProviderManager::torchStatusToString(const TorchModeStatus& s) {
+    switch(s) {
+        case TorchModeStatus::NOT_AVAILABLE:
+            return "NOT_AVAILABLE";
+        case TorchModeStatus::AVAILABLE_OFF:
+            return "AVAILABLE_OFF";
+        case TorchModeStatus::AVAILABLE_ON:
+            return "AVAILABLE_ON";
+    }
+    ALOGW("Unexpected HAL torch mode status code %d", s);
+    return "UNKNOWN_STATUS";
+}
+
+
+status_t HidlVendorTagDescriptor::createDescriptorFromHidl(
+        const hardware::hidl_vec<hardware::camera::common::V1_0::VendorTagSection>& vts,
+        /*out*/
+        sp<VendorTagDescriptor>& descriptor) {
+
+    int tagCount = 0;
+
+    for (size_t s = 0; s < vts.size(); s++) {
+        tagCount += vts[s].tags.size();
+    }
+
+    if (tagCount < 0 || tagCount > INT32_MAX) {
+        ALOGE("%s: tag count %d from vendor tag sections is invalid.", __FUNCTION__, tagCount);
+        return BAD_VALUE;
+    }
+
+    Vector<uint32_t> tagArray;
+    LOG_ALWAYS_FATAL_IF(tagArray.resize(tagCount) != tagCount,
+            "%s: too many (%u) vendor tags defined.", __FUNCTION__, tagCount);
+
+
+    sp<HidlVendorTagDescriptor> desc = new HidlVendorTagDescriptor();
+    desc->mTagCount = tagCount;
+
+    SortedVector<String8> sections;
+    KeyedVector<uint32_t, String8> tagToSectionMap;
+
+    int idx = 0;
+    for (size_t s = 0; s < vts.size(); s++) {
+        const hardware::camera::common::V1_0::VendorTagSection& section = vts[s];
+        const char *sectionName = section.sectionName.c_str();
+        if (sectionName == NULL) {
+            ALOGE("%s: no section name defined for vendor tag section %zu.", __FUNCTION__, s);
+            return BAD_VALUE;
+        }
+        String8 sectionString(sectionName);
+        sections.add(sectionString);
+
+        for (size_t j = 0; j < section.tags.size(); j++) {
+            uint32_t tag = section.tags[j].tagId;
+            if (tag < CAMERA_METADATA_VENDOR_TAG_BOUNDARY) {
+                ALOGE("%s: vendor tag %d not in vendor tag section.", __FUNCTION__, tag);
+                return BAD_VALUE;
+            }
+
+            tagArray.editItemAt(idx++) = section.tags[j].tagId;
+
+            const char *tagName = section.tags[j].tagName.c_str();
+            if (tagName == NULL) {
+                ALOGE("%s: no tag name defined for vendor tag %d.", __FUNCTION__, tag);
+                return BAD_VALUE;
+            }
+            desc->mTagToNameMap.add(tag, String8(tagName));
+            tagToSectionMap.add(tag, sectionString);
+
+            int tagType = (int) section.tags[j].tagType;
+            if (tagType < 0 || tagType >= NUM_TYPES) {
+                ALOGE("%s: tag type %d from vendor ops does not exist.", __FUNCTION__, tagType);
+                return BAD_VALUE;
+            }
+            desc->mTagToTypeMap.add(tag, tagType);
+        }
+    }
+
+    desc->mSections = sections;
+
+    for (size_t i = 0; i < tagArray.size(); ++i) {
+        uint32_t tag = tagArray[i];
+        String8 sectionString = tagToSectionMap.valueFor(tag);
+
+        // Set up tag to section index map
+        ssize_t index = sections.indexOf(sectionString);
+        LOG_ALWAYS_FATAL_IF(index < 0, "index %zd must be non-negative", index);
+        desc->mTagToSectionMap.add(tag, static_cast<uint32_t>(index));
+
+        // Set up reverse mapping
+        ssize_t reverseIndex = -1;
+        if ((reverseIndex = desc->mReverseMapping.indexOfKey(sectionString)) < 0) {
+            KeyedVector<String8, uint32_t>* nameMapper = new KeyedVector<String8, uint32_t>();
+            reverseIndex = desc->mReverseMapping.add(sectionString, nameMapper);
+        }
+        desc->mReverseMapping[reverseIndex]->add(desc->mTagToNameMap.valueFor(tag), tag);
+    }
+
+    descriptor = desc;
+    return OK;
+}
+
+
+} // namespace android
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
new file mode 100644
index 0000000..f21e07d
--- /dev/null
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -0,0 +1,379 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERAPROVIDER_H
+#define ANDROID_SERVERS_CAMERA_CAMERAPROVIDER_H
+
+#include <vector>
+#include <string>
+#include <mutex>
+#include <condition_variable>
+
+#include <camera/CameraParameters2.h>
+#include <camera/CameraMetadata.h>
+#include <camera/CameraBase.h>
+#include <utils/Errors.h>
+#include <android/hardware/camera/common/1.0/types.h>
+#include <android/hardware/camera/provider/2.4/ICameraProvider.h>
+//#include <android/hardware/camera/provider/2.4/ICameraProviderCallbacks.h>
+#include <android/hidl/manager/1.0/IServiceNotification.h>
+#include <camera/VendorTagDescriptor.h>
+
+namespace android {
+
+/**
+ * The vendor tag descriptor class that takes HIDL vendor tag information as
+ * input. Not part of the VendorTagDescriptor class because that class is used
+ * in AIDL-generated sources, which don't have access to HIDL headers.
+ */
+class HidlVendorTagDescriptor : public VendorTagDescriptor {
+public:
+    /**
+     * Create a VendorTagDescriptor object from the HIDL VendorTagSection
+     * vector.
+     *
+     * Returns OK on success, or a negative error code.
+     */
+    static status_t createDescriptorFromHidl(
+            const hardware::hidl_vec<hardware::camera::common::V1_0::VendorTagSection>& vts,
+            /*out*/
+            sp<VendorTagDescriptor>& descriptor);
+};
+
+/**
+ * A manager for all camera providers available on an Android device.
+ *
+ * Responsible for enumerating providers and the individual camera devices
+ * they export, both at startup and as providers and devices are added/removed.
+ *
+ * Provides methods for requesting information about individual devices and for
+ * opening them for active use.
+ *
+ */
+class CameraProviderManager : virtual public hidl::manager::V1_0::IServiceNotification {
+public:
+
+    ~CameraProviderManager();
+
+    // Tiny proxy for the static methods in a HIDL interface that communicate with the hardware
+    // service manager, to be replaceable in unit tests with a fake.
+    struct ServiceInteractionProxy {
+        virtual bool registerForNotifications(
+                const std::string &serviceName,
+                const sp<hidl::manager::V1_0::IServiceNotification>
+                &notification) = 0;
+        virtual sp<hardware::camera::provider::V2_4::ICameraProvider> getService(
+                const std::string &serviceName) = 0;
+        virtual ~ServiceInteractionProxy() {}
+    };
+
+    // Standard use case - call into the normal generated static methods which invoke
+    // the real hardware service manager
+    struct HardwareServiceInteractionProxy : public ServiceInteractionProxy {
+        virtual bool registerForNotifications(
+                const std::string &serviceName,
+                const sp<hidl::manager::V1_0::IServiceNotification>
+                &notification) override {
+            return hardware::camera::provider::V2_4::ICameraProvider::registerForNotifications(
+                    serviceName, notification);
+        }
+        virtual sp<hardware::camera::provider::V2_4::ICameraProvider> getService(
+                const std::string &serviceName) override {
+            return hardware::camera::provider::V2_4::ICameraProvider::getService(serviceName);
+        }
+    };
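+
+    // For illustration: a minimal fake proxy that a unit test could pass to initialize()
+    // instead of the hardware-backed proxy above; the mTestProvider member is a
+    // hypothetical test fixture, not part of this interface.
+    //
+    //     struct FakeServiceInteractionProxy : public ServiceInteractionProxy {
+    //         sp<hardware::camera::provider::V2_4::ICameraProvider> mTestProvider;
+    //         virtual bool registerForNotifications(const std::string&,
+    //                 const sp<hidl::manager::V1_0::IServiceNotification>&) override {
+    //             return true; // pretend registration always succeeds
+    //         }
+    //         virtual sp<hardware::camera::provider::V2_4::ICameraProvider> getService(
+    //                 const std::string&) override {
+    //             return mTestProvider; // hand back the canned test provider
+    //         }
+    //     };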
+
+    /**
+     * Listener interface for device/torch status changes
+     */
+    struct StatusListener : virtual public RefBase {
+        ~StatusListener() {}
+
+        virtual void onDeviceStatusChanged(const String8 &cameraId,
+                hardware::camera::common::V1_0::CameraDeviceStatus newStatus) = 0;
+        virtual void onTorchStatusChanged(const String8 &cameraId,
+                hardware::camera::common::V1_0::TorchModeStatus newStatus) = 0;
+    };
+
+    /**
+     * Initialize the manager and give it a status listener; optionally accepts a service
+     * interaction proxy.
+     *
+     * The default proxy communicates via the hardware service manager; alternate proxies can be
+     * used for testing. The lifetime of the proxy must exceed the lifetime of the manager.
+     */
+    status_t initialize(wp<StatusListener> listener,
+            ServiceInteractionProxy *proxy = &sHardwareServiceInteractionProxy);
+
+    /**
+     * Retrieve the total number of available cameras. This value may change dynamically as cameras
+     * are added or removed.
+     */
+    int getCameraCount() const;
+
+    /**
+     * Retrieve the number of 'standard' cameras; these are internal and
+     * backwards-compatible. This is the set of cameras that will be
+     * accessible via the old camera API, with IDs in the range
+     * [0, getStandardCameraCount()-1]. This value is not expected to change dynamically.
+     */
+    int getStandardCameraCount() const;
+
+    std::vector<std::string> getCameraDeviceIds() const;
+
+    /**
+     * Return true if a device with a given ID and major version exists
+     */
+    bool isValidDevice(const std::string &id, uint16_t majorVersion) const;
+
+    /**
+     * Return true if a device with a given ID has a flash unit. Returns false
+     * for devices that are unknown.
+     */
+    bool hasFlashUnit(const std::string &id) const;
+
+    /**
+     * Return the resource cost of this camera device
+     */
+    status_t getResourceCost(const std::string &id,
+            hardware::camera::common::V1_0::CameraResourceCost* cost) const;
+
+    /**
+     * Return the old camera API camera info
+     */
+    status_t getCameraInfo(const std::string &id,
+            hardware::CameraInfo* info) const;
+
+    /**
+     * Return API2 camera characteristics - returns NAME_NOT_FOUND if a device ID does
+     * not have a v3 or newer HAL version.
+     */
+    status_t getCameraCharacteristics(const std::string &id,
+            CameraMetadata* characteristics) const;
+
+    /**
+     * Return the highest supported device interface version for this ID
+     */
+    status_t getHighestSupportedVersion(const std::string &id,
+            hardware::hidl_version *v);
+
+    /**
+     * Turn on or off the flashlight on a given camera device.
+     * May fail if the device is in active use, or if the device doesn't exist, etc.
+     */
+    status_t setTorchMode(const std::string &id, bool enabled);
+
+    /**
+     * Setup vendor tags for all registered providers
+     */
+    status_t setUpVendorTags();
+
+    /**
+     * Open an active session to a camera device.
+     *
+     * This fully powers on the camera device hardware, and returns a handle to a
+     * session to be used for hardware configuration and operation.
+     */
+    status_t openSession(const std::string &id,
+            const sp<hardware::camera::device::V3_2::ICameraDeviceCallback>& callback,
+            /*out*/
+            sp<hardware::camera::device::V3_2::ICameraDeviceSession> *session);
+
+    status_t openSession(const std::string &id,
+            const sp<hardware::camera::device::V1_0::ICameraDeviceCallback>& callback,
+            /*out*/
+            sp<hardware::camera::device::V1_0::ICameraDevice> *session);
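+
+    // For illustration: a caller-side sketch of opening a HAL3 session, where `manager`,
+    // `cameraId`, and `myCallbacks` are hypothetical variables owned by the caller:
+    //
+    //     sp<hardware::camera::device::V3_2::ICameraDeviceSession> session;
+    //     status_t res = manager->openSession(cameraId, myCallbacks, /*out*/ &session);
+    //     if (res != OK) {
+    //         ALOGE("Unable to open camera session: %s (%d)", strerror(-res), res);
+    //     }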
+
+    /**
+     * IServiceNotification::onRegistration
+     * Invoked by the hardware service manager when a new camera provider is registered
+     */
+    virtual hardware::Return<void> onRegistration(const hardware::hidl_string& fqName,
+            const hardware::hidl_string& name,
+            bool preexisting) override;
+
+    /**
+     * Dump out information about available providers and devices
+     */
+    status_t dump(int fd, const Vector<String16>& args);
+
+    /**
+     * Conversion methods from HAL Status to status_t and to readable strings
+     */
+    static status_t mapToStatusT(const hardware::camera::common::V1_0::Status& s);
+    static const char* statusToString(const hardware::camera::common::V1_0::Status& s);
+
+private:
+    // All private members, unless otherwise noted, expect mInterfaceMutex to be locked before use
+    mutable std::mutex mInterfaceMutex;
+
+    std::condition_variable mProviderRegistered;
+
+    // the status listener update callbacks will lock mStatusListenerMutex
+    mutable std::mutex mStatusListenerMutex;
+    wp<StatusListener> mListener;
+    ServiceInteractionProxy* mServiceProxy;
+
+    static HardwareServiceInteractionProxy sHardwareServiceInteractionProxy;
+
+    struct ProviderInfo : virtual public hardware::camera::provider::V2_4::ICameraProviderCallback {
+        const std::string mProviderName;
+        const sp<hardware::camera::provider::V2_4::ICameraProvider> mInterface;
+
+        ProviderInfo(const std::string &providerName,
+                sp<hardware::camera::provider::V2_4::ICameraProvider>& interface,
+                CameraProviderManager *manager);
+        ~ProviderInfo();
+
+        status_t initialize();
+
+        const std::string& getType() const;
+
+        status_t addDevice(const std::string& name,
+                hardware::camera::common::V1_0::CameraDeviceStatus initialStatus =
+                hardware::camera::common::V1_0::CameraDeviceStatus::PRESENT,
+                /*out*/ std::string *parsedId = nullptr);
+
+        status_t dump(int fd, const Vector<String16>& args) const;
+
+        // ICameraProviderCallbacks interface - these lock the parent's mStatusListenerMutex
+        virtual hardware::Return<void> cameraDeviceStatusChange(
+                const hardware::hidl_string& cameraDeviceName,
+                hardware::camera::common::V1_0::CameraDeviceStatus newStatus) override;
+        virtual hardware::Return<void> torchModeStatusChange(
+                const hardware::hidl_string& cameraDeviceName,
+                hardware::camera::common::V1_0::TorchModeStatus newStatus) override;
+
+        // Basic device information, common to all camera devices
+        struct DeviceInfo {
+            const std::string mName;  // Full instance name
+            const std::string mId;    // ID section of full name
+            const hardware::hidl_version mVersion;
+
+            const hardware::camera::common::V1_0::CameraResourceCost mResourceCost;
+
+            hardware::camera::common::V1_0::CameraDeviceStatus mStatus;
+
+            bool hasFlashUnit() const { return mHasFlashUnit; }
+            virtual status_t setTorchMode(bool enabled) = 0;
+            virtual status_t getCameraInfo(hardware::CameraInfo *info) const = 0;
+            virtual status_t getCameraCharacteristics(CameraMetadata *characteristics) const {
+                (void) characteristics;
+                return INVALID_OPERATION;
+            }
+
+            DeviceInfo(const std::string& name, const std::string &id,
+                    const hardware::hidl_version& version,
+                    const hardware::camera::common::V1_0::CameraResourceCost& resourceCost) :
+                    mName(name), mId(id), mVersion(version), mResourceCost(resourceCost),
+                    mStatus(hardware::camera::common::V1_0::CameraDeviceStatus::PRESENT),
+                    mHasFlashUnit(false) {}
+            virtual ~DeviceInfo();
+        protected:
+            bool mHasFlashUnit;
+
+            template<class InterfaceT>
+            static status_t setTorchMode(InterfaceT& interface, bool enabled);
+        };
+        std::vector<std::unique_ptr<DeviceInfo>> mDevices;
+
+        // HALv1-specific camera fields, including the actual device interface
+        struct DeviceInfo1 : public DeviceInfo {
+            typedef hardware::camera::device::V1_0::ICameraDevice InterfaceT;
+            const sp<InterfaceT> mInterface;
+
+            virtual status_t setTorchMode(bool enabled) override;
+            virtual status_t getCameraInfo(hardware::CameraInfo *info) const override;
+
+            DeviceInfo1(const std::string& name, const std::string &id,
+                    uint16_t minorVersion,
+                    const hardware::camera::common::V1_0::CameraResourceCost& resourceCost,
+                    sp<InterfaceT> interface);
+            virtual ~DeviceInfo1();
+        private:
+            CameraParameters2 mDefaultParameters;
+        };
+
+        // HALv3-specific camera fields, including the actual device interface
+        struct DeviceInfo3 : public DeviceInfo {
+            typedef hardware::camera::device::V3_2::ICameraDevice InterfaceT;
+            const sp<InterfaceT> mInterface;
+
+            virtual status_t setTorchMode(bool enabled) override;
+            virtual status_t getCameraInfo(hardware::CameraInfo *info) const override;
+            virtual status_t getCameraCharacteristics(
+                    CameraMetadata *characteristics) const override;
+
+            DeviceInfo3(const std::string& name, const std::string &id,
+                    uint16_t minorVersion,
+                    const hardware::camera::common::V1_0::CameraResourceCost& resourceCost,
+                    sp<InterfaceT> interface);
+            virtual ~DeviceInfo3();
+        private:
+            CameraMetadata mCameraCharacteristics;
+        };
+
+    private:
+        std::string mType;
+        uint32_t mId;
+
+        CameraProviderManager *mManager;
+
+        // Templated method to instantiate the right kind of DeviceInfo and call the
+        // right CameraProvider getCameraDeviceInterface_* method.
+        template<class DeviceInfoT>
+        std::unique_ptr<DeviceInfo> initializeDeviceInfo(const std::string &name,
+                const std::string &id, uint16_t minorVersion) const;
+
+        // Helper for initializeDeviceInfo to use the right CameraProvider get method.
+        template<class InterfaceT>
+        sp<InterfaceT> getDeviceInterface(const std::string &name) const;
+
+        // Parse provider instance name for type and id
+        static status_t parseProviderName(const std::string& name,
+                std::string *type, uint32_t *id);
+
+        // Parse device instance name for device version, type, and id.
+        static status_t parseDeviceName(const std::string& name,
+                uint16_t *major, uint16_t *minor, std::string *type, std::string *id);
+    };
+
+    // Utility to find a DeviceInfo by ID; the returned pointer is only valid while mInterfaceMutex
+    // is held and the calling code doesn't mutate the list of providers or their lists of devices.
+    // Finds the first device with the given ID that falls within the requested version range
+    //   minVersion <= deviceVersion < maxVersion
+    // No guarantees are made about the order of traversal.
+    ProviderInfo::DeviceInfo* findDeviceInfoLocked(const std::string& id,
+            hardware::hidl_version minVersion = hardware::hidl_version{0,0},
+            hardware::hidl_version maxVersion = hardware::hidl_version{1000,0}) const;
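+
+    // For illustration: restricting the lookup to devices with a 3.x HAL only
+    // (the id "0" is a hypothetical example):
+    //
+    //     auto* info = findDeviceInfoLocked("0",
+    //             /*minVersion*/ hardware::hidl_version{3,0},
+    //             /*maxVersion*/ hardware::hidl_version{4,0});
+    //     if (info != nullptr) { /* device "0" exposes a 3.x HAL */ }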
+
+    status_t addProvider(const std::string& newProvider, bool expected = true);
+    status_t removeProvider(const std::string& provider);
+
+    bool isValidDeviceLocked(const std::string &id, uint16_t majorVersion) const;
+
+    std::vector<sp<ProviderInfo>> mProviders;
+
+    static const char* deviceStatusToString(
+        const hardware::camera::common::V1_0::CameraDeviceStatus&);
+    static const char* torchStatusToString(
+        const hardware::camera::common::V1_0::TorchModeStatus&);
+
+};
+
+} // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.cpp b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
index 2ef3057..9e78f88 100644
--- a/services/camera/libcameraservice/common/FrameProcessorBase.cpp
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
@@ -123,7 +123,7 @@
     ATRACE_CALL();
     CaptureResult result;
 
-    ALOGV("%s: Camera %d: Process new frames", __FUNCTION__, device->getId());
+    ALOGV("%s: Camera %s: Process new frames", __FUNCTION__, device->getId().string());
 
     while ( (res = device->getNextResult(&result)) == OK) {
 
@@ -133,8 +133,8 @@
 
         entry = result.mMetadata.find(ANDROID_REQUEST_FRAME_COUNT);
         if (entry.count == 0) {
-            ALOGE("%s: Camera %d: Error reading frame number",
-                    __FUNCTION__, device->getId());
+            ALOGE("%s: Camera %s: Error reading frame number",
+                    __FUNCTION__, device->getId().string());
             break;
         }
         ATRACE_INT("cam2_frame", entry.data.i32[0]);
@@ -149,8 +149,8 @@
         }
     }
     if (res != NOT_ENOUGH_DATA) {
-        ALOGE("%s: Camera %d: Error getting next frame: %s (%d)",
-                __FUNCTION__, device->getId(), strerror(-res), res);
+        ALOGE("%s: Camera %s: Error getting next frame: %s (%d)",
+                __FUNCTION__, device->getId().string(), strerror(-res), res);
         return;
     }
 
@@ -159,8 +159,8 @@
 
 bool FrameProcessorBase::processSingleFrame(CaptureResult &result,
                                             const sp<CameraDeviceBase> &device) {
-    ALOGV("%s: Camera %d: Process single frame (is empty? %d)",
-          __FUNCTION__, device->getId(), result.mMetadata.isEmpty());
+    ALOGV("%s: Camera %s: Process single frame (is empty? %d)",
+            __FUNCTION__, device->getId().string(), result.mMetadata.isEmpty());
     return processListeners(result, device) == OK;
 }
 
@@ -178,8 +178,8 @@
         entry = result.mMetadata.find(ANDROID_QUIRKS_PARTIAL_RESULT);
         if (entry.count != 0 &&
                 entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
-            ALOGV("%s: Camera %d: This is a partial result",
-                    __FUNCTION__, device->getId());
+            ALOGV("%s: Camera %s: This is a partial result",
+                    __FUNCTION__, device->getId().string());
             isPartialResult = true;
         }
     }
@@ -190,7 +190,7 @@
     // include CaptureResultExtras.
     entry = result.mMetadata.find(ANDROID_REQUEST_ID);
     if (entry.count == 0) {
-        ALOGE("%s: Camera %d: Error reading frame id", __FUNCTION__, device->getId());
+        ALOGE("%s: Camera %s: Error reading frame id", __FUNCTION__, device->getId().string());
         return BAD_VALUE;
     }
     int32_t requestId = entry.data.i32[0];
@@ -215,8 +215,8 @@
             item++;
         }
     }
-    ALOGV("%s: Camera %d: Got %zu range listeners out of %zu", __FUNCTION__,
-          device->getId(), listeners.size(), mRangeListeners.size());
+    ALOGV("%s: Camera %s: Got %zu range listeners out of %zu", __FUNCTION__,
+          device->getId().string(), listeners.size(), mRangeListeners.size());
 
     List<sp<FilteredListener> >::iterator item = listeners.begin();
     for (; item != listeners.end(); item++) {
diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.h b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
index 952bae1..c8210b7 100644
--- a/services/camera/libcameraservice/device1/CameraHardwareInterface.h
+++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
@@ -27,6 +27,9 @@
 #include <system/window.h>
 #include <hardware/camera.h>
 
+#include <common/CameraModule.h>
+#include <common/CameraProviderManager.h>
+
 namespace android {
 
 typedef void (*notify_callback)(int32_t msgType,
@@ -124,6 +127,12 @@
         return rc;
     }
 
+    status_t initialize(sp<CameraProviderManager> manager) {
+        (void) manager;
+        ALOGE("%s: Not supported yet", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
     /** Set the ANativeWindow to which preview frames are sent */
     status_t setPreviewWindow(const sp<ANativeWindow>& buf)
     {
diff --git a/services/camera/libcameraservice/device3/Camera3BufferManager.cpp b/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
index 1f01144..5a5d7b7 100644
--- a/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
+++ b/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
@@ -221,7 +221,8 @@
             status_t res = OK;
             buffer.fenceFd = -1;
             buffer.graphicBuffer = mAllocator->createGraphicBuffer(
-                    info.width, info.height, info.format, info.combinedUsage, &res);
+                    info.width, info.height, info.format, 1 /* layerCount */,
+                    info.combinedUsage, &res);
             ALOGV("%s: allocating a new graphic buffer (%dx%d, format 0x%x) %p with handle %p",
                     __FUNCTION__, info.width, info.height, info.format,
                     buffer.graphicBuffer.get(), buffer.graphicBuffer->handle);
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 495de44..1675584 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -26,7 +26,7 @@
 #endif
 
 // Convenience macro for transient errors
-#define CLOGE(fmt, ...) ALOGE("Camera %d: %s: " fmt, mId, __FUNCTION__, \
+#define CLOGE(fmt, ...) ALOGE("Camera %s: %s: " fmt, mId.string(), __FUNCTION__, \
             ##__VA_ARGS__)
 
 // Convenience macros for transitioning to the error state
@@ -53,16 +53,18 @@
 #include "device3/Camera3InputStream.h"
 #include "device3/Camera3ZslStream.h"
 #include "device3/Camera3DummyStream.h"
+#include "device3/Camera3SharedOutputStream.h"
 #include "CameraService.h"
 
 using namespace android::camera3;
+using namespace android::hardware::camera;
+using namespace android::hardware::camera::device::V3_2;
 
 namespace android {
 
-Camera3Device::Camera3Device(int id):
+Camera3Device::Camera3Device(const String8 &id):
         mId(id),
         mIsConstrainedHighSpeedConfiguration(false),
-        mHal3Device(NULL),
         mStatus(STATUS_UNINITIALIZED),
         mStatusWaiters(0),
         mUsePartialResult(false),
@@ -77,17 +79,17 @@
     ATRACE_CALL();
     camera3_callback_ops::notify = &sNotify;
     camera3_callback_ops::process_capture_result = &sProcessCaptureResult;
-    ALOGV("%s: Created device for camera %d", __FUNCTION__, id);
+    ALOGV("%s: Created device for camera %s", __FUNCTION__, mId.string());
 }
 
 Camera3Device::~Camera3Device()
 {
     ATRACE_CALL();
-    ALOGV("%s: Tearing down for camera id %d", __FUNCTION__, mId);
+    ALOGV("%s: Tearing down for camera id %s", __FUNCTION__, mId.string());
     disconnect();
 }
 
-int Camera3Device::getId() const {
+const String8& Camera3Device::getId() const {
     return mId;
 }
 
@@ -101,7 +103,7 @@
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
 
-    ALOGV("%s: Initializing device for camera %d", __FUNCTION__, mId);
+    ALOGV("%s: Initializing device for camera %s", __FUNCTION__, mId.string());
     if (mStatus != STATUS_UNINITIALIZED) {
         CLOGE("Already initialized!");
         return INVALID_OPERATION;
@@ -110,12 +112,11 @@
     /** Open HAL device */
 
     status_t res;
-    String8 deviceName = String8::format("%d", mId);
 
     camera3_device_t *device;
 
-    ATRACE_BEGIN("camera3->open");
-    res = module->open(deviceName.string(),
+    ATRACE_BEGIN("CameraHal::open");
+    res = module->open(mId.string(),
             reinterpret_cast<hw_device_t**>(&device));
     ATRACE_END();
 
@@ -125,17 +126,17 @@
     }
 
     /** Cross-check device version */
-    if (device->common.version < CAMERA_DEVICE_API_VERSION_3_0) {
+    if (device->common.version < CAMERA_DEVICE_API_VERSION_3_2) {
         SET_ERR_L("Could not open camera: "
                 "Camera device should be at least %x, reports %x instead",
-                CAMERA_DEVICE_API_VERSION_3_0,
+                CAMERA_DEVICE_API_VERSION_3_2,
                 device->common.version);
         device->common.close(&device->common);
         return BAD_VALUE;
     }
 
     camera_info info;
-    res = module->getCameraInfo(mId, &info);
+    res = module->getCameraInfo(atoi(mId), &info);
     if (res != OK) return res;
 
     if (info.device_version != device->common.version) {
@@ -148,7 +149,7 @@
 
     /** Initialize device with callback functions */
 
-    ATRACE_BEGIN("camera3->initialize");
+    ATRACE_BEGIN("CameraHal::initialize");
     res = device->ops->initialize(device, this);
     ATRACE_END();
 
@@ -156,16 +157,64 @@
         SET_ERR_L("Unable to initialize HAL device: %s (%d)",
                 strerror(-res), res);
         device->common.close(&device->common);
-        return BAD_VALUE;
+        return res;
     }
 
+    /** Everything is good to go */
+
+    mDeviceVersion = device->common.version;
+    mDeviceInfo = info.static_camera_characteristics;
+    mInterface = std::make_unique<HalInterface>(device);
+
+    return initializeCommonLocked();
+}
+
+status_t Camera3Device::initialize(sp<CameraProviderManager> manager) {
+    ATRACE_CALL();
+    Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock l(mLock);
+
+    ALOGV("%s: Initializing HIDL device for camera %s", __FUNCTION__, mId.string());
+    if (mStatus != STATUS_UNINITIALIZED) {
+        CLOGE("Already initialized!");
+        return INVALID_OPERATION;
+    }
+    if (manager == nullptr) return INVALID_OPERATION;
+
+    sp<ICameraDeviceSession> session;
+    ATRACE_BEGIN("CameraHal::openSession");
+    status_t res = manager->openSession(String8::std_string(mId), this,
+            /*out*/ &session);
+    ATRACE_END();
+    if (res != OK) {
+        SET_ERR_L("Could not open camera session: %s (%d)", strerror(-res), res);
+        return res;
+    }
+
+    res = manager->getCameraCharacteristics(String8::std_string(mId), &mDeviceInfo);
+    if (res != OK) {
+        SET_ERR_L("Could not retrive camera characteristics: %s (%d)", strerror(-res), res);
+        session->close();
+        return res;
+    }
+
+    // TODO: camera service will absorb 3_2/3_3/3_4 differences in the future
+    //       for now use 3_4 to keep legacy devices working
+    mDeviceVersion = CAMERA_DEVICE_API_VERSION_3_4;
+    mInterface = std::make_unique<HalInterface>(session);
+
+    return initializeCommonLocked();
+}
+
+status_t Camera3Device::initializeCommonLocked() {
+
     /** Start up status tracker thread */
     mStatusTracker = new StatusTracker(this);
-    res = mStatusTracker->run(String8::format("C3Dev-%d-Status", mId).string());
+    status_t res = mStatusTracker->run(String8::format("C3Dev-%s-Status", mId.string()).string());
     if (res != OK) {
         SET_ERR_L("Unable to start status tracking thread: %s (%d)",
                 strerror(-res), res);
-        device->common.close(&device->common);
+        mInterface->close();
         mStatusTracker.clear();
         return res;
     }
@@ -177,38 +226,31 @@
     mBufferManager = new Camera3BufferManager();
 
     bool aeLockAvailable = false;
-    camera_metadata_ro_entry aeLockAvailableEntry;
-    res = find_camera_metadata_ro_entry(info.static_camera_characteristics,
-            ANDROID_CONTROL_AE_LOCK_AVAILABLE, &aeLockAvailableEntry);
-    if (res == OK && aeLockAvailableEntry.count > 0) {
+    camera_metadata_entry aeLockAvailableEntry = mDeviceInfo.find(
+            ANDROID_CONTROL_AE_LOCK_AVAILABLE);
+    if (aeLockAvailableEntry.count > 0) {
         aeLockAvailable = (aeLockAvailableEntry.data.u8[0] ==
                 ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE);
     }
 
     /** Start up request queue thread */
-    mRequestThread = new RequestThread(this, mStatusTracker, device, aeLockAvailable);
-    res = mRequestThread->run(String8::format("C3Dev-%d-ReqQueue", mId).string());
+    mRequestThread = new RequestThread(this, mStatusTracker, mInterface.get(), mDeviceVersion,
+            aeLockAvailable);
+    res = mRequestThread->run(String8::format("C3Dev-%s-ReqQueue", mId.string()).string());
     if (res != OK) {
         SET_ERR_L("Unable to start request queue thread: %s (%d)",
                 strerror(-res), res);
-        device->common.close(&device->common);
+        mInterface->close();
         mRequestThread.clear();
         return res;
     }
 
     mPreparerThread = new PreparerThread();
 
-    /** Everything is good to go */
-
-    mDeviceVersion = device->common.version;
-    mDeviceInfo = info.static_camera_characteristics;
-    mHal3Device = device;
-
     // Determine whether we need to derive sensitivity boost values for older devices.
     // If post-RAW sensitivity boost range is listed, so should post-raw sensitivity control
     // be listed (as the default value 100)
-    if (mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_4 &&
-            mDeviceInfo.exists(ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE)) {
+    if (mDeviceInfo.exists(ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE)) {
         mDerivePostRawSensKey = true;
     }
 
@@ -312,7 +354,7 @@
         mStatusTracker->join();
     }
 
-    camera3_device_t *hal3Device;
+    HalInterface* interface;
     {
         Mutex::Autolock l(mLock);
 
@@ -320,20 +362,16 @@
         mStatusTracker.clear();
         mBufferManager.clear();
 
-        hal3Device = mHal3Device;
+        interface = mInterface.get();
     }
 
     // Call close without internal mutex held, as the HAL close may need to
     // wait on assorted callbacks,etc, to complete before it can return.
-    if (hal3Device != NULL) {
-        ATRACE_BEGIN("camera3->close");
-        hal3Device->common.close(&hal3Device->common);
-        ATRACE_END();
-    }
+    interface->close();
 
     {
         Mutex::Autolock l(mLock);
-        mHal3Device = NULL;
+        mInterface->clear();
         internalUpdateStatusLocked(STATUS_UNINITIALIZED);
     }
 
@@ -447,12 +485,80 @@
     }
 }
 
+hardware::graphics::common::V1_0::PixelFormat Camera3Device::mapToPixelFormat(
+        int frameworkFormat) {
+    return (hardware::graphics::common::V1_0::PixelFormat) frameworkFormat;
+}
+
+DataspaceFlags Camera3Device::mapToHidlDataspace(
+        android_dataspace dataSpace) {
+    return dataSpace;
+}
+
+ConsumerUsageFlags Camera3Device::mapToConsumerUsage(
+        uint32_t usage) {
+    return usage;
+}
+
+StreamRotation Camera3Device::mapToStreamRotation(camera3_stream_rotation_t rotation) {
+    switch (rotation) {
+        case CAMERA3_STREAM_ROTATION_0:
+            return StreamRotation::ROTATION_0;
+        case CAMERA3_STREAM_ROTATION_90:
+            return StreamRotation::ROTATION_90;
+        case CAMERA3_STREAM_ROTATION_180:
+            return StreamRotation::ROTATION_180;
+        case CAMERA3_STREAM_ROTATION_270:
+            return StreamRotation::ROTATION_270;
+    }
+    ALOGE("%s: Unknown stream rotation %d", __FUNCTION__, rotation);
+    return StreamRotation::ROTATION_0;
+}
+
+StreamConfigurationMode Camera3Device::mapToStreamConfigurationMode(
+        camera3_stream_configuration_mode_t operationMode) {
+    switch(operationMode) {
+        case CAMERA3_STREAM_CONFIGURATION_NORMAL_MODE:
+            return StreamConfigurationMode::NORMAL_MODE;
+        case CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE:
+            return StreamConfigurationMode::CONSTRAINED_HIGH_SPEED_MODE;
+        case CAMERA3_VENDOR_STREAM_CONFIGURATION_MODE_START:
+            // Needs to be mapped by vendor extensions
+            break;
+    }
+    ALOGE("%s: Unknown stream configuration mode %d", __FUNCTION__, operationMode);
+    return StreamConfigurationMode::NORMAL_MODE;
+}
+
+camera3_buffer_status_t Camera3Device::mapHidlBufferStatus(BufferStatus status) {
+    switch (status) {
+        case BufferStatus::OK: return CAMERA3_BUFFER_STATUS_OK;
+        case BufferStatus::ERROR: return CAMERA3_BUFFER_STATUS_ERROR;
+    }
+    return CAMERA3_BUFFER_STATUS_ERROR;
+}
+
+int Camera3Device::mapToFrameworkFormat(
+        hardware::graphics::common::V1_0::PixelFormat pixelFormat) {
+    return static_cast<uint32_t>(pixelFormat);
+}
+
+uint32_t Camera3Device::mapConsumerToFrameworkUsage(
+        ConsumerUsageFlags usage) {
+    return usage;
+}
+
+uint32_t Camera3Device::mapProducerToFrameworkUsage(
+        ProducerUsageFlags usage) {
+    return usage;
+}
+
 ssize_t Camera3Device::getJpegBufferSize(uint32_t width, uint32_t height) const {
     // Get max jpeg size (area-wise).
     Size maxJpegResolution = getMaxJpegResolution();
     if (maxJpegResolution.width == 0) {
-        ALOGE("%s: Camera %d: Can't find valid available jpeg sizes in static metadata!",
-                __FUNCTION__, mId);
+        ALOGE("%s: Camera %s: Can't find valid available jpeg sizes in static metadata!",
+                __FUNCTION__, mId.string());
         return BAD_VALUE;
     }
 
@@ -460,7 +566,8 @@
     ssize_t maxJpegBufferSize = 0;
     camera_metadata_ro_entry jpegBufMaxSize = mDeviceInfo.find(ANDROID_JPEG_MAX_SIZE);
     if (jpegBufMaxSize.count == 0) {
-        ALOGE("%s: Camera %d: Can't find maximum JPEG size in static metadata!", __FUNCTION__, mId);
+        ALOGE("%s: Camera %s: Can't find maximum JPEG size in static metadata!", __FUNCTION__,
+                mId.string());
         return BAD_VALUE;
     }
     maxJpegBufferSize = jpegBufMaxSize.data.i32[0];
@@ -482,8 +589,8 @@
     const int FLOATS_PER_POINT=4;
     camera_metadata_ro_entry maxPointCount = mDeviceInfo.find(ANDROID_DEPTH_MAX_DEPTH_SAMPLES);
     if (maxPointCount.count == 0) {
-        ALOGE("%s: Camera %d: Can't find maximum depth point cloud size in static metadata!",
-                __FUNCTION__, mId);
+        ALOGE("%s: Camera %s: Can't find maximum depth point cloud size in static metadata!",
+                __FUNCTION__, mId.string());
         return BAD_VALUE;
     }
     ssize_t maxBytesForPointCloud = sizeof(android_depth_points) +
@@ -500,8 +607,8 @@
         mDeviceInfo.find(ANDROID_SENSOR_OPAQUE_RAW_SIZE);
     size_t count = rawOpaqueSizes.count;
     if (count == 0 || (count % PER_CONFIGURATION_SIZE)) {
-        ALOGE("%s: Camera %d: bad opaque RAW size static metadata length(%zu)!",
-                __FUNCTION__, mId, count);
+        ALOGE("%s: Camera %s: bad opaque RAW size static metadata length(%zu)!",
+                __FUNCTION__, mId.string(), count);
         return BAD_VALUE;
     }
 
@@ -512,8 +619,8 @@
         }
     }
 
-    ALOGE("%s: Camera %d: cannot find size for %dx%d opaque RAW image!",
-            __FUNCTION__, mId, width, height);
+    ALOGE("%s: Camera %s: cannot find size for %dx%d opaque RAW image!",
+            __FUNCTION__, mId.string(), width, height);
     return BAD_VALUE;
 }
 
@@ -527,11 +634,11 @@
     bool gotLock = tryLockSpinRightRound(mLock);
 
     ALOGW_IF(!gotInterfaceLock,
-            "Camera %d: %s: Unable to lock interface lock, proceeding anyway",
-            mId, __FUNCTION__);
+            "Camera %s: %s: Unable to lock interface lock, proceeding anyway",
+            mId.string(), __FUNCTION__);
     ALOGW_IF(!gotLock,
-            "Camera %d: %s: Unable to lock main lock, proceeding anyway",
-            mId, __FUNCTION__);
+            "Camera %s: %s: Unable to lock main lock, proceeding anyway",
+            mId.string(), __FUNCTION__);
 
     bool dumpTemplates = false;
 
@@ -624,12 +731,11 @@
         };
 
         for (int i = 1; i < CAMERA3_TEMPLATE_COUNT; i++) {
-            const camera_metadata_t *templateRequest;
-            templateRequest =
-                mHal3Device->ops->construct_default_request_settings(
-                    mHal3Device, i);
+            camera_metadata_t *templateRequest = nullptr;
+            mInterface->constructDefaultRequestSettings(
+                    (camera3_request_template_t) i, &templateRequest);
             lines = String8::format("    HAL Request %s:\n", templateNames[i-1]);
-            if (templateRequest == NULL) {
+            if (templateRequest == nullptr) {
                 lines.append("       Not supported\n");
                 write(fd, lines.string(), lines.size());
             } else {
@@ -637,15 +743,16 @@
                 dump_indented_camera_metadata(templateRequest,
                         fd, /*verbosity*/2, /*indentation*/8);
             }
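+            // The template returned by constructDefaultRequestSettings() is an
+            // owned clone, so release it once it has been dumped.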
+            free_camera_metadata(templateRequest);
         }
     }
 
     mTagMonitor.dumpMonitoredMetadata(fd);
 
-    if (mHal3Device != NULL) {
+    if (mInterface->valid()) {
         lines = String8("    HAL device dump:\n");
         write(fd, lines.string(), lines.size());
-        mHal3Device->ops->dump(mHal3Device, fd);
+        mInterface->dump(fd);
     }
 
     if (gotLock) mLock.unlock();
@@ -686,29 +793,36 @@
 }
 
 status_t Camera3Device::convertMetadataListToRequestListLocked(
-        const List<const CameraMetadata> &metadataList, RequestList *requestList) {
+        const List<const CameraMetadata> &metadataList,
+        const std::list<const SurfaceMap> &surfaceMaps,
+        bool repeating,
+        RequestList *requestList) {
     if (requestList == NULL) {
         CLOGE("requestList cannot be NULL.");
         return BAD_VALUE;
     }
 
     int32_t burstId = 0;
-    for (List<const CameraMetadata>::const_iterator it = metadataList.begin();
-            it != metadataList.end(); ++it) {
-        sp<CaptureRequest> newRequest = setUpRequestLocked(*it);
+    List<const CameraMetadata>::const_iterator metadataIt = metadataList.begin();
+    std::list<const SurfaceMap>::const_iterator surfaceMapIt = surfaceMaps.begin();
+    for (; metadataIt != metadataList.end() && surfaceMapIt != surfaceMaps.end();
+            ++metadataIt, ++surfaceMapIt) {
+        sp<CaptureRequest> newRequest = setUpRequestLocked(*metadataIt, *surfaceMapIt);
         if (newRequest == 0) {
             CLOGE("Can't create capture request");
             return BAD_VALUE;
         }
 
+        newRequest->mRepeating = repeating;
+
         // Setup burst Id and request Id
         newRequest->mResultExtras.burstId = burstId++;
-        if (it->exists(ANDROID_REQUEST_ID)) {
-            if (it->find(ANDROID_REQUEST_ID).count == 0) {
+        if (metadataIt->exists(ANDROID_REQUEST_ID)) {
+            if (metadataIt->find(ANDROID_REQUEST_ID).count == 0) {
                 CLOGE("RequestID entry exists; but must not be empty in metadata");
                 return BAD_VALUE;
             }
-            newRequest->mResultExtras.requestId = it->find(ANDROID_REQUEST_ID).data.i32[0];
+            newRequest->mResultExtras.requestId = metadataIt->find(ANDROID_REQUEST_ID).data.i32[0];
         } else {
             CLOGE("RequestID does not exist in metadata");
             return BAD_VALUE;
@@ -718,6 +832,10 @@
 
         ALOGV("%s: requestId = %" PRId32, __FUNCTION__, newRequest->mResultExtras.requestId);
     }
+    if (metadataIt != metadataList.end() || surfaceMapIt != surfaceMaps.end()) {
+        ALOGE("%s: metadataList and surfaceMaps are not the same size!", __FUNCTION__);
+        return BAD_VALUE;
+    }
 
     // Setup batch size if this is a high speed video recording request.
     if (mIsConstrainedHighSpeedConfiguration && requestList->size() > 0) {
@@ -737,12 +855,31 @@
     ATRACE_CALL();
 
     List<const CameraMetadata> requests;
+    std::list<const SurfaceMap> surfaceMaps;
+    convertToRequestList(requests, surfaceMaps, request);
+
+    return captureList(requests, surfaceMaps, /*lastFrameNumber*/NULL);
+}
+
+void Camera3Device::convertToRequestList(List<const CameraMetadata>& requests,
+        std::list<const SurfaceMap>& surfaceMaps,
+        const CameraMetadata& request) {
     requests.push_back(request);
-    return captureList(requests, /*lastFrameNumber*/NULL);
+
+    SurfaceMap surfaceMap;
+    camera_metadata_ro_entry streams = request.find(ANDROID_REQUEST_OUTPUT_STREAMS);
+    // With no surface list passed in, streams and surfaces have a 1-to-1
+    // mapping, so the surface index is 0 for each stream in the surfaceMap.
+    for (size_t i = 0; i < streams.count; i++) {
+        surfaceMap[streams.data.i32[i]].push_back(0);
+    }
+    surfaceMaps.push_back(surfaceMap);
 }
 
 status_t Camera3Device::submitRequestsHelper(
-        const List<const CameraMetadata> &requests, bool repeating,
+        const List<const CameraMetadata> &requests,
+        const std::list<const SurfaceMap> &surfaceMaps,
+        bool repeating,
         /*out*/
         int64_t *lastFrameNumber) {
     ATRACE_CALL();
@@ -757,7 +894,8 @@
 
     RequestList requestList;
 
-    res = convertMetadataListToRequestListLocked(requests, /*out*/&requestList);
+    res = convertMetadataListToRequestListLocked(requests, surfaceMaps,
+            repeating, /*out*/&requestList);
     if (res != OK) {
         // error logged by previous call
         return res;
@@ -775,7 +913,7 @@
             SET_ERR_L("Can't transition to active in %f seconds!",
                     kActiveTimeout/1e9);
         }
-        ALOGV("Camera %d: Capture request %" PRId32 " enqueued", mId,
+        ALOGV("Camera %s: Capture request %" PRId32 " enqueued", mId.string(),
               (*(requestList.begin()))->mResultExtras.requestId);
     } else {
         CLOGE("Cannot queue request. Impossible.");
@@ -785,11 +923,150 @@
     return res;
 }
 
+hardware::Return<void> Camera3Device::processCaptureResult(
+        const device::V3_2::CaptureResult& result) {
+    camera3_capture_result r;
+    status_t res;
+    r.frame_number = result.frameNumber;
+    if (result.result.size() != 0) {
+        r.result = reinterpret_cast<const camera_metadata_t*>(result.result.data());
+        size_t expected_metadata_size = result.result.size();
+        if ((res = validate_camera_metadata_structure(r.result, &expected_metadata_size)) != OK) {
+            ALOGE("%s: Frame %d: Invalid camera metadata received by camera service from HAL: %s (%d)",
+                    __FUNCTION__, result.frameNumber, strerror(-res), res);
+            return hardware::Void();
+        }
+    } else {
+        r.result = nullptr;
+    }
+
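+    // Translate each HIDL StreamBuffer back into a legacy camera3_stream_buffer_t,
+    // pairing it with the buffer handle cached when the request was submitted.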
+    std::vector<camera3_stream_buffer_t> outputBuffers(result.outputBuffers.size());
+    std::vector<buffer_handle_t> outputBufferHandles(result.outputBuffers.size());
+    for (size_t i = 0; i < result.outputBuffers.size(); i++) {
+        auto& bDst = outputBuffers[i];
+        const StreamBuffer &bSrc = result.outputBuffers[i];
+
+        ssize_t idx = mOutputStreams.indexOfKey(bSrc.streamId);
+        if (idx == -1) {
+            ALOGE("%s: Frame %d: Buffer %zu: Invalid output stream id %d",
+                    __FUNCTION__, result.frameNumber, i, bSrc.streamId);
+            return hardware::Void();
+        }
+        bDst.stream = mOutputStreams.valueAt(idx)->asHalStream();
+
+        buffer_handle_t *buffer;
+        res = mInterface->popInflightBuffer(result.frameNumber, bSrc.streamId, &buffer);
+        if (res != OK) {
+            ALOGE("%s: Frame %d: Buffer %zu: No in-flight buffer for stream %d",
+                    __FUNCTION__, result.frameNumber, i, bSrc.streamId);
+            return hardware::Void();
+        }
+        bDst.buffer = buffer;
+        bDst.status = mapHidlBufferStatus(bSrc.status);
+        bDst.acquire_fence = -1;
+        if (bSrc.releaseFence == nullptr) {
+            bDst.release_fence = -1;
+        } else if (bSrc.releaseFence->numFds == 1) {
+            bDst.release_fence = dup(bSrc.releaseFence->data[0]);
+        } else {
+            ALOGE("%s: Frame %d: Invalid release fence for buffer %zu, fd count is %d, not 1",
+                    __FUNCTION__, result.frameNumber, i, bSrc.releaseFence->numFds);
+            return hardware::Void();
+        }
+    }
+    r.num_output_buffers = outputBuffers.size();
+    r.output_buffers = outputBuffers.data();
+
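+    // A streamId of -1 in the HIDL result means no input buffer accompanies
+    // this capture result.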
+    camera3_stream_buffer_t inputBuffer;
+    if (result.inputBuffer.streamId == -1) {
+        r.input_buffer = nullptr;
+    } else {
+        if (mInputStream->getId() != result.inputBuffer.streamId) {
+            ALOGE("%s: Frame %d: Invalid input stream id %d", __FUNCTION__,
+                    result.frameNumber, result.inputBuffer.streamId);
+            return hardware::Void();
+        }
+        inputBuffer.stream = mInputStream->asHalStream();
+        buffer_handle_t *buffer;
+        res = mInterface->popInflightBuffer(result.frameNumber, result.inputBuffer.streamId,
+                &buffer);
+        if (res != OK) {
+            ALOGE("%s: Frame %d: Input buffer: No in-flight buffer for stream %d",
+                    __FUNCTION__, result.frameNumber, result.inputBuffer.streamId);
+            return hardware::Void();
+        }
+        inputBuffer.buffer = buffer;
+        inputBuffer.status = mapHidlBufferStatus(result.inputBuffer.status);
+        inputBuffer.acquire_fence = -1;
+        if (result.inputBuffer.releaseFence == nullptr) {
+            inputBuffer.release_fence = -1;
+        } else if (result.inputBuffer.releaseFence->numFds == 1) {
+            inputBuffer.release_fence = dup(result.inputBuffer.releaseFence->data[0]);
+        } else {
+            ALOGE("%s: Frame %d: Invalid release fence for input buffer, fd count is %d, not 1",
+                    __FUNCTION__, result.frameNumber, result.inputBuffer.releaseFence->numFds);
+            return hardware::Void();
+        }
+        r.input_buffer = &inputBuffer;
+    }
+
+    r.partial_result = result.partialResult;
+
+    processCaptureResult(&r);
+
+    return hardware::Void();
+}
+
+hardware::Return<void> Camera3Device::notify(
+        const NotifyMsg& msg) {
+    camera3_notify_msg m;
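+    // Translate the HIDL notification into the legacy camera3_notify_msg and
+    // dispatch it through the existing notify() path.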
+    switch (msg.type) {
+        case MsgType::ERROR:
+            m.type = CAMERA3_MSG_ERROR;
+            m.message.error.frame_number = msg.msg.error.frameNumber;
+            if (msg.msg.error.errorStreamId >= 0) {
+                ssize_t idx = mOutputStreams.indexOfKey(msg.msg.error.errorStreamId);
+                if (idx == -1) {
+                    ALOGE("%s: Frame %d: Invalid error stream id %d",
+                            __FUNCTION__, m.message.error.frame_number, msg.msg.error.errorStreamId);
+                    return hardware::Void();
+                }
+                m.message.error.error_stream = mOutputStreams.valueAt(idx)->asHalStream();
+            } else {
+                m.message.error.error_stream = nullptr;
+            }
+            switch (msg.msg.error.errorCode) {
+                case ErrorCode::ERROR_DEVICE:
+                    m.message.error.error_code = CAMERA3_MSG_ERROR_DEVICE;
+                    break;
+                case ErrorCode::ERROR_REQUEST:
+                    m.message.error.error_code = CAMERA3_MSG_ERROR_REQUEST;
+                    break;
+                case ErrorCode::ERROR_RESULT:
+                    m.message.error.error_code = CAMERA3_MSG_ERROR_RESULT;
+                    break;
+                case ErrorCode::ERROR_BUFFER:
+                    m.message.error.error_code = CAMERA3_MSG_ERROR_BUFFER;
+                    break;
+            }
+            break;
+        case MsgType::SHUTTER:
+            m.type = CAMERA3_MSG_SHUTTER;
+            m.message.shutter.frame_number = msg.msg.shutter.frameNumber;
+            m.message.shutter.timestamp = msg.msg.shutter.timestamp;
+            break;
+    }
+    notify(&m);
+
+    return hardware::Void();
+}
+
 status_t Camera3Device::captureList(const List<const CameraMetadata> &requests,
+                                    const std::list<const SurfaceMap> &surfaceMaps,
                                     int64_t *lastFrameNumber) {
     ATRACE_CALL();
 
-    return submitRequestsHelper(requests, /*repeating*/false, lastFrameNumber);
+    return submitRequestsHelper(requests, surfaceMaps, /*repeating*/false, lastFrameNumber);
 }
 
 status_t Camera3Device::setStreamingRequest(const CameraMetadata &request,
@@ -797,19 +1074,23 @@
     ATRACE_CALL();
 
     List<const CameraMetadata> requests;
-    requests.push_back(request);
-    return setStreamingRequestList(requests, /*lastFrameNumber*/NULL);
+    std::list<const SurfaceMap> surfaceMaps;
+    convertToRequestList(requests, surfaceMaps, request);
+
+    return setStreamingRequestList(requests, /*surfaceMap*/surfaceMaps,
+                                   /*lastFrameNumber*/NULL);
 }
 
 status_t Camera3Device::setStreamingRequestList(const List<const CameraMetadata> &requests,
+                                                const std::list<const SurfaceMap> &surfaceMaps,
                                                 int64_t *lastFrameNumber) {
     ATRACE_CALL();
 
-    return submitRequestsHelper(requests, /*repeating*/true, lastFrameNumber);
+    return submitRequestsHelper(requests, surfaceMaps, /*repeating*/true, lastFrameNumber);
 }
 
 sp<Camera3Device::CaptureRequest> Camera3Device::setUpRequestLocked(
-        const CameraMetadata &request) {
+        const CameraMetadata &request, const SurfaceMap &surfaceMap) {
     status_t res;
 
     if (mStatus == STATUS_UNCONFIGURED || mNeedConfig) {
@@ -825,7 +1106,7 @@
         }
     }
 
-    sp<CaptureRequest> newRequest = createCaptureRequest(request);
+    sp<CaptureRequest> newRequest = createCaptureRequest(request, surfaceMap);
     return newRequest;
 }
 
@@ -850,7 +1131,7 @@
             SET_ERR_L("Unexpected status: %d", mStatus);
             return INVALID_OPERATION;
     }
-    ALOGV("Camera %d: Clearing repeating request", mId);
+    ALOGV("Camera %s: Clearing repeating request", mId.string());
 
     return mRequestThread->clearRepeatingRequests(lastFrameNumber);
 }
@@ -867,8 +1148,8 @@
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
-    ALOGV("Camera %d: Creating new input stream %d: %d x %d, format %d",
-            mId, mNextStreamId, width, height, format);
+    ALOGV("Camera %s: Creating new input stream %d: %d x %d, format %d",
+            mId.string(), mNextStreamId, width, height, format);
 
     status_t res;
     bool wasActive = false;
@@ -924,7 +1205,7 @@
         internalResumeLocked();
     }
 
-    ALOGV("Camera %d: Created input stream", mId);
+    ALOGV("Camera %s: Created input stream", mId.string());
     return OK;
 }
 
@@ -938,8 +1219,8 @@
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
-    ALOGV("Camera %d: Creating ZSL stream %d: %d x %d, depth %d",
-            mId, mNextStreamId, width, height, depth);
+    ALOGV("Camera %s: Creating ZSL stream %d: %d x %d, depth %d",
+            mId.string(), mNextStreamId, width, height, depth);
 
     status_t res;
     bool wasActive = false;
@@ -1004,18 +1285,37 @@
         internalResumeLocked();
     }
 
-    ALOGV("Camera %d: Created ZSL stream", mId);
+    ALOGV("Camera %s: Created ZSL stream", mId.string());
     return OK;
 }
 
 status_t Camera3Device::createStream(sp<Surface> consumer,
-        uint32_t width, uint32_t height, int format, android_dataspace dataSpace,
-        camera3_stream_rotation_t rotation, int *id, int streamSetId, uint32_t consumerUsage) {
+            uint32_t width, uint32_t height, int format,
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+            int streamSetId, uint32_t consumerUsage) {
+    ATRACE_CALL();
+
+    if (consumer == nullptr) {
+        ALOGE("%s: consumer must not be null", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    std::vector<sp<Surface>> consumers;
+    consumers.push_back(consumer);
+
+    return createStream(consumers, /*hasDeferredConsumer*/ false, width, height,
+            format, dataSpace, rotation, id, streamSetId, consumerUsage);
+}
+
+status_t Camera3Device::createStream(const std::vector<sp<Surface>>& consumers,
+        bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+        android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+        int streamSetId, uint32_t consumerUsage) {
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
-    ALOGV("Camera %d: Creating new stream %d: %d x %d, format %d, dataspace %d rotation %d"
-            " consumer usage 0x%x", mId, mNextStreamId, width, height, format, dataSpace, rotation,
+    ALOGV("Camera %s: Creating new stream %d: %d x %d, format %d, dataspace %d rotation %d"
+            " consumer usage 0x%x", mId.string(), mNextStreamId, width, height, format, dataSpace, rotation,
             consumerUsage);
 
     status_t res;
@@ -1054,18 +1354,24 @@
         streamSetId = CAMERA3_STREAM_SET_ID_INVALID;
     }
 
+    if (consumers.size() == 0 && !hasDeferredConsumer) {
+        ALOGE("%s: Number of consumers cannot be smaller than 1", __FUNCTION__);
+        return BAD_VALUE;
+    }
     // HAL3.1 doesn't support deferred consumer stream creation as it requires buffer registration
     // which requires a consumer surface to be available.
-    if (consumer == nullptr && mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
+    if (hasDeferredConsumer && mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
         ALOGE("HAL3.1 doesn't support deferred consumer stream creation");
         return BAD_VALUE;
     }
 
-    if (consumer == nullptr && format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+    if (hasDeferredConsumer && format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
         ALOGE("Deferred consumer stream creation only support IMPLEMENTATION_DEFINED format");
         return BAD_VALUE;
     }
 
+    bool streamSharing = consumers.size() > 1 || (consumers.size() > 0 && hasDeferredConsumer);
+
     // Use legacy dataspace values for older HALs
     if (mDeviceVersion <= CAMERA_DEVICE_API_VERSION_3_3) {
         dataSpace = mapToLegacyDataspace(dataSpace);
@@ -1085,7 +1391,7 @@
                 return BAD_VALUE;
             }
         }
-        newStream = new Camera3OutputStream(mNextStreamId, consumer,
+        newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
                 width, height, blobBufferSize, format, dataSpace, rotation,
                 mTimestampOffset, streamSetId);
     } else if (format == HAL_PIXEL_FORMAT_RAW_OPAQUE) {
@@ -1094,15 +1400,19 @@
             SET_ERR_L("Invalid RAW opaque buffer size %zd", rawOpaqueBufferSize);
             return BAD_VALUE;
         }
-        newStream = new Camera3OutputStream(mNextStreamId, consumer,
+        newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
                 width, height, rawOpaqueBufferSize, format, dataSpace, rotation,
                 mTimestampOffset, streamSetId);
-    } else if (consumer == nullptr) {
+    } else if (consumers.size() == 0 && hasDeferredConsumer) {
         newStream = new Camera3OutputStream(mNextStreamId,
                 width, height, format, consumerUsage, dataSpace, rotation,
                 mTimestampOffset, streamSetId);
+    } else if (streamSharing) {
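+        // Multiple consumers (or one consumer plus a deferred one) share a
+        // single HAL stream via Camera3SharedOutputStream.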
+        newStream = new Camera3SharedOutputStream(mNextStreamId, consumers,
+                hasDeferredConsumer, width, height, format, consumerUsage,
+                dataSpace, rotation, mTimestampOffset, streamSetId);
     } else {
-        newStream = new Camera3OutputStream(mNextStreamId, consumer,
+        newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
                 width, height, format, dataSpace, rotation,
                 mTimestampOffset, streamSetId);
     }
@@ -1138,7 +1448,7 @@
         }
         internalResumeLocked();
     }
-    ALOGV("Camera %d: Created new stream", mId);
+    ALOGV("Camera %s: Created new stream", mId.string());
     return OK;
 }
 
@@ -1227,12 +1537,12 @@
     Mutex::Autolock l(mLock);
     status_t res;
 
-    ALOGV("%s: Camera %d: Deleting stream %d", __FUNCTION__, mId, id);
+    ALOGV("%s: Camera %s: Deleting stream %d", __FUNCTION__, mId.string(), id);
 
     // CameraDevice semantics require device to already be idle before
     // deleteStream is called, unlike for createStream.
     if (mStatus == STATUS_ACTIVE) {
-        ALOGV("%s: Camera %d: Device not idle", __FUNCTION__, mId);
+        ALOGV("%s: Camera %s: Device not idle", __FUNCTION__, mId.string());
         return -EBUSY;
     }
 
@@ -1339,18 +1649,20 @@
         return OK;
     }
 
-    const camera_metadata_t *rawRequest;
-    ATRACE_BEGIN("camera3->construct_default_request_settings");
-    rawRequest = mHal3Device->ops->construct_default_request_settings(
-        mHal3Device, templateId);
-    ATRACE_END();
-    if (rawRequest == NULL) {
+    camera_metadata_t *rawRequest;
+    status_t res = mInterface->constructDefaultRequestSettings(
+            (camera3_request_template_t) templateId, &rawRequest);
+    if (res == BAD_VALUE) {
         ALOGI("%s: template %d is not supported on this camera device",
               __FUNCTION__, templateId);
-        return BAD_VALUE;
+        return res;
+    } else if (res != OK) {
+        CLOGE("Unable to construct request template %d: %s (%d)",
+                templateId, strerror(-res), res);
+        return res;
     }
 
-    mRequestTemplateCache[templateId] = rawRequest;
+    mRequestTemplateCache[templateId].acquire(rawRequest);
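+    // CameraMetadata::acquire() takes ownership of the clone returned by the
+    // HAL interface, so the raw pointer is not freed explicitly here.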
 
     // Derive some new keys for backward compatibility
     if (mDerivePostRawSensKey && !mRequestTemplateCache[templateId].exists(
@@ -1390,7 +1702,7 @@
             return INVALID_OPERATION;
     }
 
-    ALOGV("%s: Camera %d: Waiting until idle", __FUNCTION__, mId);
+    ALOGV("%s: Camera %s: Waiting until idle", __FUNCTION__, mId.string());
     status_t res = waitUntilStateThenRelock(/*active*/ false, kShutdownTimeout);
     if (res != OK) {
         SET_ERR_L("Error waiting for HAL to drain: %s (%d)", strerror(-res),
@@ -1411,7 +1723,7 @@
     mRequestThread->setPaused(true);
     mPauseStateNotify = true;
 
-    ALOGV("%s: Camera %d: Internal wait until idle", __FUNCTION__, mId);
+    ALOGV("%s: Camera %s: Internal wait until idle", __FUNCTION__, mId.string());
     status_t res = waitUntilStateThenRelock(/*active*/ false, kShutdownTimeout);
     if (res != OK) {
         SET_ERR_L("Can't idle device in %f seconds!",
@@ -1510,8 +1822,8 @@
         if (res == TIMED_OUT) {
             return res;
         } else if (res != OK) {
-            ALOGW("%s: Camera %d: No frame in %" PRId64 " ns: %s (%d)",
-                    __FUNCTION__, mId, timeout, strerror(-res), res);
+            ALOGW("%s: Camera %s: No frame in %" PRId64 " ns: %s (%d)",
+                    __FUNCTION__, mId.string(), timeout, strerror(-res), res);
             return res;
         }
     }
@@ -1613,7 +1925,7 @@
 
 status_t Camera3Device::flush(int64_t *frameNumber) {
     ATRACE_CALL();
-    ALOGV("%s: Camera %d: Flushing all requests", __FUNCTION__, mId);
+    ALOGV("%s: Camera %s: Flushing all requests", __FUNCTION__, mId.string());
     Mutex::Autolock il(mInterfaceLock);
 
     {
@@ -1622,7 +1934,7 @@
     }
 
     status_t res;
-    if (mHal3Device->common.version >= CAMERA_DEVICE_API_VERSION_3_1) {
+    if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_1) {
         res = mRequestThread->flush();
     } else {
         Mutex::Autolock l(mLock);
@@ -1638,7 +1950,7 @@
 
 status_t Camera3Device::prepare(int maxCount, int streamId) {
     ATRACE_CALL();
-    ALOGV("%s: Camera %d: Preparing stream %d", __FUNCTION__, mId, streamId);
+    ALOGV("%s: Camera %s: Preparing stream %d", __FUNCTION__, mId.string(), streamId);
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
 
@@ -1666,15 +1978,15 @@
 
 status_t Camera3Device::tearDown(int streamId) {
     ATRACE_CALL();
-    ALOGV("%s: Camera %d: Tearing down stream %d", __FUNCTION__, mId, streamId);
+    ALOGV("%s: Camera %s: Tearing down stream %d", __FUNCTION__, mId.string(), streamId);
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
 
     // Teardown can only be accomplished on devices that don't require register_stream_buffers,
     // since we cannot call register_stream_buffers except right after configure_streams.
-    if (mHal3Device->common.version < CAMERA_DEVICE_API_VERSION_3_2) {
+    if (mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
         ALOGE("%s: Unable to tear down streams on device HAL v%x",
-                __FUNCTION__, mHal3Device->common.version);
+                __FUNCTION__, mDeviceVersion);
         return NO_INIT;
     }
 
@@ -1698,7 +2010,7 @@
 status_t Camera3Device::addBufferListenerForStream(int streamId,
         wp<Camera3StreamBufferListener> listener) {
     ATRACE_CALL();
-    ALOGV("%s: Camera %d: Adding buffer listener for stream %d", __FUNCTION__, mId, streamId);
+    ALOGV("%s: Camera %s: Adding buffer listener for stream %d", __FUNCTION__, mId.string(), streamId);
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
 
@@ -1736,7 +2048,7 @@
         if (mStatus != STATUS_ACTIVE && mStatus != STATUS_CONFIGURED) {
             return;
         }
-        ALOGV("%s: Camera %d: Now %s", __FUNCTION__, mId,
+        ALOGV("%s: Camera %s: Now %s", __FUNCTION__, mId.string(),
                 idle ? "idle" : "active");
         internalUpdateStatusLocked(idle ? STATUS_CONFIGURED : STATUS_ACTIVE);
 
@@ -1757,7 +2069,7 @@
 
 status_t Camera3Device::setConsumerSurface(int streamId, sp<Surface> consumer) {
     ATRACE_CALL();
-    ALOGV("%s: Camera %d: set consumer surface for stream %d", __FUNCTION__, mId, streamId);
+    ALOGV("%s: Camera %s: set consumer surface for stream %d", __FUNCTION__, mId.string(), streamId);
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
 
@@ -1778,16 +2090,18 @@
         return res;
     }
 
-    if (!stream->isConfiguring()) {
-        CLOGE("Stream %d was already fully configured.", streamId);
-        return INVALID_OPERATION;
-    }
+    if (stream->isConsumerConfigurationDeferred()) {
+        if (!stream->isConfiguring()) {
+            CLOGE("Stream %d was already fully configured.", streamId);
+            return INVALID_OPERATION;
+        }
 
-    res = stream->finishConfiguration(mHal3Device);
-    if (res != OK) {
-        SET_ERR_L("Can't finish configuring output stream %d: %s (%d)",
-                stream->getId(), strerror(-res), res);
-        return res;
+        res = stream->finishConfiguration();
+        if (res != OK) {
+            SET_ERR_L("Can't finish configuring output stream %d: %s (%d)",
+                   stream->getId(), strerror(-res), res);
+            return res;
+        }
     }
 
     return OK;
@@ -1798,7 +2112,7 @@
  */
 
 sp<Camera3Device::CaptureRequest> Camera3Device::createCaptureRequest(
-        const CameraMetadata &request) {
+        const CameraMetadata &request, const SurfaceMap &surfaceMap) {
     ATRACE_CALL();
     status_t res;
 
@@ -1817,7 +2131,7 @@
         // Lazy completion of stream configuration (allocation/registration)
         // on first use
         if (mInputStream->isConfiguring()) {
-            res = mInputStream->finishConfiguration(mHal3Device);
+            res = mInputStream->finishConfiguration();
             if (res != OK) {
                 SET_ERR_L("Unable to finish configuring input stream %d:"
                         " %s (%d)",
@@ -1853,16 +2167,23 @@
                 mOutputStreams.editValueAt(idx);
 
         // It is illegal to include a deferred consumer output stream into a request
-        if (stream->isConsumerConfigurationDeferred()) {
-            CLOGE("Stream %d hasn't finished configuration yet due to deferred consumer",
-                    stream->getId());
-            return NULL;
+        auto iter = surfaceMap.find(streams.data.i32[i]);
+        if (iter != surfaceMap.end()) {
+            const std::vector<size_t>& surfaces = iter->second;
+            for (const auto& surface : surfaces) {
+                if (stream->isConsumerConfigurationDeferred(surface)) {
+                    CLOGE("Stream %d surface %zu hasn't finished configuration yet "
+                          "due to deferred consumer", stream->getId(), surface);
+                    return NULL;
+                }
+            }
+            newRequest->mOutputSurfaces[i] = surfaces;
         }
 
         // Lazy completion of stream configuration (allocation/registration)
         // on first use
         if (stream->isConfiguring()) {
-            res = stream->finishConfiguration(mHal3Device);
+            res = stream->finishConfiguration();
             if (res != OK) {
                 SET_ERR_L("Unable to finish configuring stream %d: %s (%d)",
                         stream->getId(), strerror(-res), res);
@@ -1945,7 +2266,7 @@
     }
 
     // Start configuring the streams
-    ALOGV("%s: Camera %d: Starting stream configuration", __FUNCTION__, mId);
+    ALOGV("%s: Camera %s: Starting stream configuration", __FUNCTION__, mId.string());
 
     camera3_stream_configuration config;
     config.operation_mode = mIsConstrainedHighSpeedConfiguration ?
@@ -1991,9 +2312,8 @@
 
     // Do the HAL configuration; will potentially touch stream
     // max_buffers, usage, priv fields.
-    ATRACE_BEGIN("camera3->configure_streams");
-    res = mHal3Device->ops->configure_streams(mHal3Device, &config);
-    ATRACE_END();
+
+    res = mInterface->configureStreams(&config);
 
     if (res == BAD_VALUE) {
         // HAL rejected this set of streams as unsupported, clean up config
@@ -2014,7 +2334,7 @@
     // faster
 
     if (mInputStream != NULL && mInputStream->isConfiguring()) {
-        res = mInputStream->finishConfiguration(mHal3Device);
+        res = mInputStream->finishConfiguration();
         if (res != OK) {
             CLOGE("Can't finish configuring input stream %d: %s (%d)",
                     mInputStream->getId(), strerror(-res), res);
@@ -2027,7 +2347,7 @@
         sp<Camera3OutputStreamInterface> outputStream =
             mOutputStreams.editValueAt(i);
         if (outputStream->isConfiguring() && !outputStream->isConsumerConfigurationDeferred()) {
-            res = outputStream->finishConfiguration(mHal3Device);
+            res = outputStream->finishConfiguration();
             if (res != OK) {
                 CLOGE("Can't finish configuring output stream %d: %s (%d)",
                         outputStream->getId(), strerror(-res), res);
@@ -2064,7 +2384,7 @@
     internalUpdateStatusLocked((mDummyStreamId == NO_STREAM) ?
             STATUS_CONFIGURED : STATUS_UNCONFIGURED);
 
-    ALOGV("%s: Camera %d: Stream configuration complete", __FUNCTION__, mId);
+    ALOGV("%s: Camera %s: Stream configuration complete", __FUNCTION__, mId.string());
 
     // tear down the deleted streams after configure streams.
     mDeletedStreams.clear();
@@ -2079,12 +2399,12 @@
     if (mDummyStreamId != NO_STREAM) {
         // Should never be adding a second dummy stream when one is already
         // active
-        SET_ERR_L("%s: Camera %d: A dummy stream already exists!",
-                __FUNCTION__, mId);
+        SET_ERR_L("%s: Camera %s: A dummy stream already exists!",
+                __FUNCTION__, mId.string());
         return INVALID_OPERATION;
     }
 
-    ALOGV("%s: Camera %d: Adding a dummy stream", __FUNCTION__, mId);
+    ALOGV("%s: Camera %s: Adding a dummy stream", __FUNCTION__, mId.string());
 
     sp<Camera3OutputStreamInterface> dummyStream =
             new Camera3DummyStream(mNextStreamId);
@@ -2108,7 +2428,7 @@
     if (mDummyStreamId == NO_STREAM) return OK;
     if (mOutputStreams.size() == 1) return OK;
 
-    ALOGV("%s: Camera %d: Removing the dummy stream", __FUNCTION__, mId);
+    ALOGV("%s: Camera %s: Removing the dummy stream", __FUNCTION__, mId.string());
 
     // Ok, have a dummy stream and there's at least one other output stream,
     // so remove the dummy
@@ -2162,7 +2482,7 @@
 void Camera3Device::setErrorStateLockedV(const char *fmt, va_list args) {
     // Print out all error messages to log
     String8 errorCause = String8::formatV(fmt, args);
-    ALOGE("Camera %d: %s", mId, errorCause.string());
+    ALOGE("Camera %s: %s", mId.string(), errorCause.string());
 
     // But only do error state transition steps for the first error
     if (mStatus == STATUS_ERROR || mStatus == STATUS_UNINITIALIZED) return;
@@ -2635,8 +2955,8 @@
                 Camera3Stream::cast(msg.error_stream);
         streamId = stream->getId();
     }
-    ALOGV("Camera %d: %s: HAL error, frame %d, stream %d: %d",
-            mId, __FUNCTION__, msg.frame_number,
+    ALOGV("Camera %s: %s: HAL error, frame %d, stream %d: %d",
+            mId.string(), __FUNCTION__, msg.frame_number,
             streamId, msg.error_code);
 
     CaptureResultExtras resultExtras;
@@ -2657,8 +2977,8 @@
                     resultExtras = r.resultExtras;
                 } else {
                     resultExtras.frameNumber = msg.frame_number;
-                    ALOGE("Camera %d: %s: cannot find in-flight request on "
-                            "frame %" PRId64 " error", mId, __FUNCTION__,
+                    ALOGE("Camera %s: %s: cannot find in-flight request on "
+                            "frame %" PRId64 " error", mId.string(), __FUNCTION__,
                             resultExtras.frameNumber);
                 }
             }
@@ -2666,7 +2986,7 @@
             if (listener != NULL) {
                 listener->notifyError(errorCode, resultExtras);
             } else {
-                ALOGE("Camera %d: %s: no listener available", mId, __FUNCTION__);
+                ALOGE("Camera %s: %s: no listener available", mId.string(), __FUNCTION__);
             }
             break;
         default:
@@ -2711,8 +3031,8 @@
                 }
             }
 
-            ALOGVV("Camera %d: %s: Shutter fired for frame %d (id %d) at %" PRId64,
-                    mId, __FUNCTION__,
+            ALOGVV("Camera %s: %s: Shutter fired for frame %d (id %d) at %" PRId64,
+                    mId.string(), __FUNCTION__,
                     msg.frame_number, r.resultExtras.requestId, msg.timestamp);
             // Call listener, if any
             if (listener != NULL) {
@@ -2758,17 +3078,407 @@
 }
 
 /**
+ * HalInterface inner class methods
+ */
+
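+// HalInterface wraps either a legacy camera3_device_t or a HIDL
+// ICameraDeviceSession, so callers do not need to know which HAL transport
+// is in use.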
+Camera3Device::HalInterface::HalInterface(camera3_device_t *device) :
+        mHal3Device(device) {}
+
+Camera3Device::HalInterface::HalInterface(sp<ICameraDeviceSession> &session) :
+        mHal3Device(nullptr),
+        mHidlSession(session) {}
+
+Camera3Device::HalInterface::HalInterface() :
+        mHal3Device(nullptr) {}
+
+Camera3Device::HalInterface::HalInterface(const HalInterface& other) :
+        mHal3Device(other.mHal3Device), mHidlSession(other.mHidlSession) {}
+
+bool Camera3Device::HalInterface::valid() {
+    return (mHal3Device != nullptr) || (mHidlSession != nullptr);
+}
+
+void Camera3Device::HalInterface::clear() {
+    mHal3Device = nullptr;
+    mHidlSession.clear();
+}
+
+status_t Camera3Device::HalInterface::constructDefaultRequestSettings(
+        camera3_request_template_t templateId,
+        /*out*/ camera_metadata_t **requestTemplate) {
+    ATRACE_NAME("CameraHal::constructDefaultRequestSettings");
+    if (!valid()) return INVALID_OPERATION;
+    status_t res = OK;
+
+    if (mHal3Device != nullptr) {
+        const camera_metadata *r;
+        r = mHal3Device->ops->construct_default_request_settings(
+                mHal3Device, templateId);
+        if (r == nullptr) return BAD_VALUE;
+        *requestTemplate = clone_camera_metadata(r);
+        if (*requestTemplate == nullptr) {
+            ALOGE("%s: Unable to clone camera metadata received from HAL",
+                    __FUNCTION__);
+            return INVALID_OPERATION;
+        }
+    } else {
+        common::V1_0::Status status;
+        RequestTemplate id;
+        switch (templateId) {
+            case CAMERA3_TEMPLATE_PREVIEW:
+                id = RequestTemplate::PREVIEW;
+                break;
+            case CAMERA3_TEMPLATE_STILL_CAPTURE:
+                id = RequestTemplate::STILL_CAPTURE;
+                break;
+            case CAMERA3_TEMPLATE_VIDEO_RECORD:
+                id = RequestTemplate::VIDEO_RECORD;
+                break;
+            case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT:
+                id = RequestTemplate::VIDEO_SNAPSHOT;
+                break;
+            case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
+                id = RequestTemplate::ZERO_SHUTTER_LAG;
+                break;
+            case CAMERA3_TEMPLATE_MANUAL:
+                id = RequestTemplate::MANUAL;
+                break;
+            default:
+                // Unknown template ID
+                return BAD_VALUE;
+        }
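+        // The HIDL callback runs synchronously, so status and *requestTemplate
+        // are populated before this call returns.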
+        mHidlSession->constructDefaultRequestSettings(id,
+                [&status, &requestTemplate]
+                (common::V1_0::Status s, const device::V3_2::CameraMetadata& request) {
+                    status = s;
+                    if (status == common::V1_0::Status::OK) {
+                        const camera_metadata *r =
+                                reinterpret_cast<const camera_metadata_t*>(request.data());
+                        size_t expectedSize = request.size();
+                        int ret = validate_camera_metadata_structure(r, &expectedSize);
+                        if (ret == OK) {
+                            *requestTemplate = clone_camera_metadata(r);
+                            if (*requestTemplate == nullptr) {
+                                ALOGE("%s: Unable to clone camera metadata received from HAL",
+                                        __FUNCTION__);
+                                status = common::V1_0::Status::INTERNAL_ERROR;
+                            }
+                        } else {
+                            ALOGE("%s: Malformed camera metadata received from HAL", __FUNCTION__);
+                            status = common::V1_0::Status::INTERNAL_ERROR;
+                        }
+                    }
+                });
+        res = CameraProviderManager::mapToStatusT(status);
+    }
+    return res;
+}
+
+status_t Camera3Device::HalInterface::configureStreams(camera3_stream_configuration *config) {
+    ATRACE_NAME("CameraHal::configureStreams");
+    if (!valid()) return INVALID_OPERATION;
+    status_t res = OK;
+
+    if (mHal3Device != nullptr) {
+        res = mHal3Device->ops->configure_streams(mHal3Device, config);
+    } else {
+        // Convert stream config to HIDL
+        std::set<int> activeStreams;
+        StreamConfiguration requestedConfiguration;
+        requestedConfiguration.streams.resize(config->num_streams);
+        for (size_t i = 0; i < config->num_streams; i++) {
+            Stream &dst = requestedConfiguration.streams[i];
+            camera3_stream_t *src = config->streams[i];
+
+            int streamId = Camera3Stream::cast(src)->getId();
+            StreamType streamType;
+            switch (src->stream_type) {
+                case CAMERA3_STREAM_OUTPUT:
+                    streamType = StreamType::OUTPUT;
+                    break;
+                case CAMERA3_STREAM_INPUT:
+                    streamType = StreamType::INPUT;
+                    break;
+                default:
+                    ALOGE("%s: Stream %d: Unsupported stream type %d",
+                            __FUNCTION__, streamId, config->streams[i]->stream_type);
+                    return BAD_VALUE;
+            }
+            dst.id = streamId;
+            dst.streamType = streamType;
+            dst.width = src->width;
+            dst.height = src->height;
+            dst.format = mapToPixelFormat(src->format);
+            dst.usage = mapToConsumerUsage(src->usage);
+            dst.dataSpace = mapToHidlDataspace(src->data_space);
+            dst.rotation = mapToStreamRotation((camera3_stream_rotation_t) src->rotation);
+
+            activeStreams.insert(streamId);
+            // Create Buffer ID map if necessary
+            if (mBufferIdMaps.count(streamId) == 0) {
+                mBufferIdMaps.emplace(streamId, BufferIdMap{});
+            }
+        }
+        // remove BufferIdMap for deleted streams
+        for (auto it = mBufferIdMaps.begin(); it != mBufferIdMaps.end();) {
+            int streamId = it->first;
+            bool active = activeStreams.count(streamId) > 0;
+            if (!active) {
+                it = mBufferIdMaps.erase(it);
+            } else {
+                ++it;
+            }
+        }
+
+        requestedConfiguration.operationMode = mapToStreamConfigurationMode(
+                (camera3_stream_configuration_mode_t) config->operation_mode);
+
+        // Invoke configureStreams
+
+        HalStreamConfiguration finalConfiguration;
+        common::V1_0::Status status;
+        mHidlSession->configureStreams(requestedConfiguration,
+                [&status, &finalConfiguration]
+                (common::V1_0::Status s, const HalStreamConfiguration& halConfiguration) {
+                    finalConfiguration = halConfiguration;
+                    status = s;
+                });
+        if (status != common::V1_0::Status::OK ) {
+            return CameraProviderManager::mapToStatusT(status);
+        }
+
+        // And convert output stream configuration from HIDL
+
+        for (size_t i = 0; i < config->num_streams; i++) {
+            camera3_stream_t *dst = config->streams[i];
+            int streamId = Camera3Stream::cast(dst)->getId();
+
+            // Start scan at i, with the assumption that the stream order matches
+            size_t realIdx = i;
+            bool found = false;
+            for (size_t idx = 0; idx < finalConfiguration.streams.size(); idx++) {
+                if (finalConfiguration.streams[realIdx].id == streamId) {
+                    found = true;
+                    break;
+                }
+                realIdx = (realIdx >= finalConfiguration.streams.size() - 1) ? 0 : realIdx + 1;
+            }
+            if (!found) {
+                ALOGE("%s: Stream %d not found in stream configuration response from HAL",
+                        __FUNCTION__, streamId);
+                return INVALID_OPERATION;
+            }
+            HalStream &src = finalConfiguration.streams[realIdx];
+
+            int overrideFormat = mapToFrameworkFormat(src.overrideFormat);
+            if (dst->format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+                if (dst->format != overrideFormat) {
+                    ALOGE("%s: Stream %d: Format override not allowed for format 0x%x", __FUNCTION__,
+                            streamId, dst->format);
+                }
+            } else {
+                // Override allowed with IMPLEMENTATION_DEFINED
+                dst->format = overrideFormat;
+            }
+
+            if (dst->stream_type == CAMERA3_STREAM_INPUT) {
+                if (src.producerUsage != 0) {
+                    ALOGE("%s: Stream %d: INPUT streams must have 0 for producer usage",
+                            __FUNCTION__, streamId);
+                    return INVALID_OPERATION;
+                }
+                dst->usage = mapConsumerToFrameworkUsage(src.consumerUsage);
+            } else {
+                // OUTPUT
+                if (src.consumerUsage != 0) {
+                    ALOGE("%s: Stream %d: OUTPUT streams must have 0 for consumer usage",
+                            __FUNCTION__, streamId);
+                    return INVALID_OPERATION;
+                }
+                dst->usage = mapProducerToFrameworkUsage(src.producerUsage);
+            }
+            dst->max_buffers = src.maxBuffers;
+        }
+    }
+    return res;
+}
+
+status_t Camera3Device::HalInterface::processCaptureRequest(
+        camera3_capture_request_t *request) {
+    ATRACE_NAME("CameraHal::processCaptureRequest");
+    if (!valid()) return INVALID_OPERATION;
+    status_t res = OK;
+
+    if (mHal3Device != nullptr) {
+        res = mHal3Device->ops->process_capture_request(mHal3Device, request);
+    } else {
+        device::V3_2::CaptureRequest captureRequest;
+        captureRequest.frameNumber = request->frame_number;
+        std::vector<native_handle_t*> handlesCreated;
+        // A null request settings maps to a size-0 CameraMetadata
+        if (request->settings != nullptr) {
+            captureRequest.settings.setToExternal(
+                    reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(request->settings)),
+                    get_camera_metadata_size(request->settings));
+        }
+
+        {
+            std::lock_guard<std::mutex> lock(mInflightLock);
+            if (request->input_buffer != nullptr) {
+                int32_t streamId = Camera3Stream::cast(request->input_buffer->stream)->getId();
+                buffer_handle_t buf = *(request->input_buffer->buffer);
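+                // Only the first use of a buffer on a stream sends the full
+                // handle over HIDL; subsequent requests reuse the cached bufferId.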
+                auto pair = getBufferId(buf, streamId);
+                bool isNewBuffer = pair.first;
+                uint64_t bufferId = pair.second;
+                captureRequest.inputBuffer.streamId = streamId;
+                captureRequest.inputBuffer.bufferId = bufferId;
+                captureRequest.inputBuffer.buffer = (isNewBuffer) ? buf : nullptr;
+                captureRequest.inputBuffer.status = BufferStatus::OK;
+                native_handle_t *acquireFence = nullptr;
+                if (request->input_buffer->acquire_fence != -1) {
+                    acquireFence = native_handle_create(1, 0);
+                    acquireFence->data[0] = request->input_buffer->acquire_fence;
+                    handlesCreated.push_back(acquireFence);
+                }
+                captureRequest.inputBuffer.acquireFence = acquireFence;
+                captureRequest.inputBuffer.releaseFence = nullptr;
+
+                pushInflightBufferLocked(captureRequest.frameNumber, streamId,
+                        request->input_buffer->buffer,
+                        request->input_buffer->acquire_fence);
+            } else {
+                captureRequest.inputBuffer.streamId = -1;
+                captureRequest.inputBuffer.bufferId = BUFFER_ID_NO_BUFFER;
+            }
+
+            captureRequest.outputBuffers.resize(request->num_output_buffers);
+            for (size_t i = 0; i < request->num_output_buffers; i++) {
+                const camera3_stream_buffer_t *src = request->output_buffers + i;
+                StreamBuffer &dst = captureRequest.outputBuffers[i];
+                int32_t streamId = Camera3Stream::cast(src->stream)->getId();
+                buffer_handle_t buf = *(src->buffer);
+                auto pair = getBufferId(buf, streamId);
+                bool isNewBuffer = pair.first;
+                dst.streamId = streamId;
+                dst.bufferId = pair.second;
+                dst.buffer = isNewBuffer ? buf : nullptr;
+                dst.status = BufferStatus::OK;
+                native_handle_t *acquireFence = nullptr;
+                if (src->acquire_fence != -1) {
+                    acquireFence = native_handle_create(1, 0);
+                    acquireFence->data[0] = src->acquire_fence;
+                    handlesCreated.push_back(acquireFence);
+                }
+                dst.acquireFence = acquireFence;
+                dst.releaseFence = nullptr;
+
+                pushInflightBufferLocked(captureRequest.frameNumber, streamId,
+                        src->buffer, src->acquire_fence);
+            }
+        }
+        common::V1_0::Status status = mHidlSession->processCaptureRequest(captureRequest);
+
+        for (auto& handle : handlesCreated) {
+            native_handle_delete(handle);
+        }
+        res = CameraProviderManager::mapToStatusT(status);
+    }
+    return res;
+}
+
+status_t Camera3Device::HalInterface::flush() {
+    ATRACE_NAME("CameraHal::flush");
+    if (!valid()) return INVALID_OPERATION;
+    status_t res = OK;
+
+    if (mHal3Device != nullptr) {
+        res = mHal3Device->ops->flush(mHal3Device);
+    } else {
+        res = CameraProviderManager::mapToStatusT(mHidlSession->flush());
+    }
+    return res;
+}
+
+status_t Camera3Device::HalInterface::dump(int fd) {
+    ATRACE_NAME("CameraHal::dump");
+    if (!valid()) return INVALID_OPERATION;
+    status_t res = OK;
+
+    if (mHal3Device != nullptr) {
+        mHal3Device->ops->dump(mHal3Device, fd);
+    } else {
+        // Handled by CameraProviderManager::dump
+    }
+    return res;
+}
+
+status_t Camera3Device::HalInterface::close() {
+    ATRACE_NAME("CameraHal::close()");
+    if (!valid()) return INVALID_OPERATION;
+    status_t res = OK;
+
+    if (mHal3Device != nullptr) {
+        mHal3Device->common.close(&mHal3Device->common);
+    } else {
+        mHidlSession->close();
+    }
+    return res;
+}
+
+status_t Camera3Device::HalInterface::pushInflightBufferLocked(
+        int32_t frameNumber, int32_t streamId, buffer_handle_t *buffer, int acquireFence) {
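+    // Pack the frame number (upper 32 bits) and stream id (lower 32 bits) into
+    // one key so the buffer can be looked up when the HAL returns it.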
+    uint64_t key = static_cast<uint64_t>(frameNumber) << 32 | static_cast<uint64_t>(streamId);
+    auto pair = std::make_pair(buffer, acquireFence);
+    mInflightBufferMap[key] = pair;
+    return OK;
+}
+
+status_t Camera3Device::HalInterface::popInflightBuffer(
+        int32_t frameNumber, int32_t streamId,
+        /*out*/ buffer_handle_t **buffer) {
+    std::lock_guard<std::mutex> lock(mInflightLock);
+
+    uint64_t key = static_cast<uint64_t>(frameNumber) << 32 | static_cast<uint64_t>(streamId);
+    auto it = mInflightBufferMap.find(key);
+    if (it == mInflightBufferMap.end()) return NAME_NOT_FOUND;
+    auto pair = it->second;
+    *buffer = pair.first;
+    int acquireFence = pair.second;
+    if (acquireFence > 0) {
+        ::close(acquireFence);
+    }
+    mInflightBufferMap.erase(it);
+    return OK;
+}
+
+std::pair<bool, uint64_t> Camera3Device::HalInterface::getBufferId(
+        const buffer_handle_t& buf, int streamId) {
+    std::lock_guard<std::mutex> lock(mBufferIdMapLock);
+
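+    // A buffer seen for the first time on this stream gets a fresh id and
+    // (true, id) is returned so the caller sends the full handle; otherwise the
+    // cached id is returned with false.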
+    BufferIdMap& bIdMap = mBufferIdMaps.at(streamId);
+    auto it = bIdMap.find(buf);
+    if (it == bIdMap.end()) {
+        bIdMap[buf] = mNextBufferId++;
+        return std::make_pair(true, mNextBufferId - 1);
+    } else {
+        return std::make_pair(false, it->second);
+    }
+}
+
+/**
  * RequestThread inner class methods
  */
 
 Camera3Device::RequestThread::RequestThread(wp<Camera3Device> parent,
         sp<StatusTracker> statusTracker,
-        camera3_device_t *hal3Device,
+        HalInterface* interface,
+        uint32_t deviceVersion,
         bool aeLockAvailable) :
         Thread(/*canCallJava*/false),
         mParent(parent),
         mStatusTracker(statusTracker),
-        mHal3Device(hal3Device),
+        mInterface(interface),
+        mDeviceVersion(deviceVersion),
         mListener(nullptr),
         mId(getId(parent)),
         mReconfigured(false),
@@ -2785,6 +3495,8 @@
     mStatusId = statusTracker->addComponent();
 }
 
+Camera3Device::RequestThread::~RequestThread() {}
+
 void Camera3Device::RequestThread::setNotificationListener(
         wp<NotificationListener> listener) {
     Mutex::Autolock l(mRequestLock);
@@ -2839,10 +3551,11 @@
     return OK;
 }
 
-int Camera3Device::RequestThread::getId(const wp<Camera3Device> &device) {
+const String8& Camera3Device::RequestThread::getId(const wp<Camera3Device> &device) {
+    static String8 deadId("<DeadDevice>");
     sp<Camera3Device> d = device.promote();
-    if (d != NULL) return d->mId;
-    return 0;
+    if (d != nullptr) return d->mId;
+    return deadId;
 }
 
 status_t Camera3Device::RequestThread::queueTriggerLocked(
@@ -2972,8 +3685,8 @@
     ATRACE_CALL();
     Mutex::Autolock l(mFlushLock);
 
-    if (mHal3Device->common.version >= CAMERA_DEVICE_API_VERSION_3_1) {
-        return mHal3Device->ops->flush(mHal3Device);
+    if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_1) {
+        return mInterface->flush();
     }
 
     return -ENOTSUP;
@@ -3021,7 +3734,7 @@
     request->mAeTriggerCancelOverride.applyAeLock = false;
     request->mAeTriggerCancelOverride.applyAePrecaptureTrigger = false;
 
-    if (mHal3Device->common.version > CAMERA_DEVICE_API_VERSION_3_2) {
+    if (mDeviceVersion > CAMERA_DEVICE_API_VERSION_3_2) {
         return;
     }
 
@@ -3161,9 +3874,7 @@
     for (auto& nextRequest : mNextRequests) {
         // Submit request and block until ready for next one
         ATRACE_ASYNC_BEGIN("frame capture", nextRequest.halRequest.frame_number);
-        ATRACE_BEGIN("camera3->process_capture_request");
-        res = mHal3Device->ops->process_capture_request(mHal3Device, &nextRequest.halRequest);
-        ATRACE_END();
+        res = mInterface->processCaptureRequest(&nextRequest.halRequest);
 
         if (res != OK) {
             // Should only get a failure here for malformed requests or device-level
@@ -3337,6 +4048,14 @@
                 return TIMED_OUT;
             }
             halRequest->num_output_buffers++;
+
+            res = outputStream->notifyRequestedSurfaces(halRequest->frame_number,
+                    captureRequest->mOutputSurfaces[i]);
+            if (res != OK) {
+                ALOGE("RequestThread: Cannot register output surfaces: %s (%d)",
+                      strerror(-res), res);
+                return INVALID_OPERATION;
+            }
         }
         totalNumBuffers += halRequest->num_output_buffers;
 
@@ -3553,6 +4272,12 @@
                 mRequestQueue.begin();
         nextRequest = *firstRequest;
         mRequestQueue.erase(firstRequest);
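+        // If this was the last queued request and it is not repeating, let the
+        // listener know the request queue has drained.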
+        if (mRequestQueue.empty() && !nextRequest->mRepeating) {
+            sp<NotificationListener> listener = mListener.promote();
+            if (listener != NULL) {
+                listener->notifyRequestQueueEmpty();
+            }
+        }
     }
 
     // In case we've been unpaused by setPaused clearing mDoPause, need to
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 87c43f3..9b869a9 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -17,6 +17,9 @@
 #ifndef ANDROID_SERVERS_CAMERA3DEVICE_H
 #define ANDROID_SERVERS_CAMERA3DEVICE_H
 
+#include <utility>
+#include <unordered_map>
+
 #include <utils/Condition.h>
 #include <utils/Errors.h>
 #include <utils/List.h>
@@ -24,7 +27,12 @@
 #include <utils/Thread.h>
 #include <utils/KeyedVector.h>
 #include <utils/Timers.h>
+
+#include <android/hardware/camera/device/3.2/ICameraDevice.h>
+#include <android/hardware/camera/device/3.2/ICameraDeviceSession.h>
+#include <android/hardware/camera/device/3.2/ICameraDeviceCallback.h>
 #include <hardware/camera3.h>
+
 #include <camera/CaptureResult.h>
 
 #include "common/CameraDeviceBase.h"
@@ -55,17 +63,18 @@
 class Camera3OutputStreamInterface;
 class Camera3StreamInterface;
 
-}
+} // namespace camera3
 
 /**
  * CameraDevice for HAL devices with version CAMERA_DEVICE_API_VERSION_3_0 or higher.
  */
 class Camera3Device :
             public CameraDeviceBase,
+            virtual public hardware::camera::device::V3_2::ICameraDeviceCallback,
             private camera3_callback_ops {
   public:
 
-    explicit Camera3Device(int id);
+    explicit Camera3Device(const String8& id);
 
     virtual ~Camera3Device();
 
@@ -73,91 +82,100 @@
      * CameraDeviceBase interface
      */
 
-    virtual int      getId() const;
+    const String8& getId() const override;
 
     // Transitions to idle state on success.
-    virtual status_t initialize(CameraModule *module);
-    virtual status_t disconnect();
-    virtual status_t dump(int fd, const Vector<String16> &args);
-    virtual const CameraMetadata& info() const;
+    status_t initialize(CameraModule *module) override;
+    status_t initialize(sp<CameraProviderManager> manager) override;
+    status_t disconnect() override;
+    status_t dump(int fd, const Vector<String16> &args) override;
+    const CameraMetadata& info() const override;
 
     // Capture and setStreamingRequest will configure streams if currently in
     // idle state
-    virtual status_t capture(CameraMetadata &request, int64_t *lastFrameNumber = NULL);
-    virtual status_t captureList(const List<const CameraMetadata> &requests,
-                                 int64_t *lastFrameNumber = NULL);
-    virtual status_t setStreamingRequest(const CameraMetadata &request,
-                                         int64_t *lastFrameNumber = NULL);
-    virtual status_t setStreamingRequestList(const List<const CameraMetadata> &requests,
-                                             int64_t *lastFrameNumber = NULL);
-    virtual status_t clearStreamingRequest(int64_t *lastFrameNumber = NULL);
+    status_t capture(CameraMetadata &request, int64_t *lastFrameNumber = NULL) override;
+    status_t captureList(const List<const CameraMetadata> &requests,
+            const std::list<const SurfaceMap> &surfaceMaps,
+            int64_t *lastFrameNumber = NULL) override;
+    status_t setStreamingRequest(const CameraMetadata &request,
+            int64_t *lastFrameNumber = NULL) override;
+    status_t setStreamingRequestList(const List<const CameraMetadata> &requests,
+            const std::list<const SurfaceMap> &surfaceMaps,
+            int64_t *lastFrameNumber = NULL) override;
+    status_t clearStreamingRequest(int64_t *lastFrameNumber = NULL) override;
 
-    virtual status_t waitUntilRequestReceived(int32_t requestId, nsecs_t timeout);
+    status_t waitUntilRequestReceived(int32_t requestId, nsecs_t timeout) override;
 
     // Actual stream creation/deletion is delayed until first request is submitted
     // If adding streams while actively capturing, will pause device before adding
     // stream, reconfiguring device, and unpausing. If the client creates a stream
     // with nullptr consumer surface, the client must then call setConsumer()
     // and finish the stream configuration before starting output streaming.
-    virtual status_t createStream(sp<Surface> consumer,
+    status_t createStream(sp<Surface> consumer,
             uint32_t width, uint32_t height, int format,
             android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
             int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
-            uint32_t consumerUsage = 0);
-    virtual status_t createInputStream(
+            uint32_t consumerUsage = 0) override;
+    status_t createStream(const std::vector<sp<Surface>>& consumers,
+            bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+            int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
+            uint32_t consumerUsage = 0) override;
+
+    status_t createInputStream(
             uint32_t width, uint32_t height, int format,
-            int *id);
-    virtual status_t createZslStream(
+            int *id) override;
+    status_t createZslStream(
             uint32_t width, uint32_t height,
             int depth,
             /*out*/
             int *id,
             sp<camera3::Camera3ZslStream>* zslStream);
-    virtual status_t createReprocessStreamFromStream(int outputId, int *id);
+    status_t createReprocessStreamFromStream(int outputId, int *id) override;
 
-    virtual status_t getStreamInfo(int id,
+    status_t getStreamInfo(int id,
             uint32_t *width, uint32_t *height,
-            uint32_t *format, android_dataspace *dataSpace);
-    virtual status_t setStreamTransform(int id, int transform);
+            uint32_t *format, android_dataspace *dataSpace) override;
+    status_t setStreamTransform(int id, int transform) override;
 
-    virtual status_t deleteStream(int id);
-    virtual status_t deleteReprocessStream(int id);
+    status_t deleteStream(int id) override;
+    status_t deleteReprocessStream(int id) override;
 
-    virtual status_t configureStreams(bool isConstraiedHighSpeed = false);
-    virtual status_t getInputBufferProducer(
-            sp<IGraphicBufferProducer> *producer);
+    status_t configureStreams(bool isConstraiedHighSpeed = false) override;
+    status_t getInputBufferProducer(
+            sp<IGraphicBufferProducer> *producer) override;
 
-    virtual status_t createDefaultRequest(int templateId, CameraMetadata *request);
+    status_t createDefaultRequest(int templateId, CameraMetadata *request) override;
 
     // Transitions to the idle state on success
-    virtual status_t waitUntilDrained();
+    status_t waitUntilDrained() override;
 
-    virtual status_t setNotifyCallback(wp<NotificationListener> listener);
-    virtual bool     willNotify3A();
-    virtual status_t waitForNextFrame(nsecs_t timeout);
-    virtual status_t getNextResult(CaptureResult *frame);
+    status_t setNotifyCallback(wp<NotificationListener> listener) override;
+    bool     willNotify3A() override;
+    status_t waitForNextFrame(nsecs_t timeout) override;
+    status_t getNextResult(CaptureResult *frame) override;
 
-    virtual status_t triggerAutofocus(uint32_t id);
-    virtual status_t triggerCancelAutofocus(uint32_t id);
-    virtual status_t triggerPrecaptureMetering(uint32_t id);
+    status_t triggerAutofocus(uint32_t id) override;
+    status_t triggerCancelAutofocus(uint32_t id) override;
+    status_t triggerPrecaptureMetering(uint32_t id) override;
 
-    virtual status_t pushReprocessBuffer(int reprocessStreamId,
-            buffer_handle_t *buffer, wp<BufferReleasedListener> listener);
+    status_t pushReprocessBuffer(int reprocessStreamId,
+            buffer_handle_t *buffer, wp<BufferReleasedListener> listener) override;
 
-    virtual status_t flush(int64_t *lastFrameNumber = NULL);
+    status_t flush(int64_t *lastFrameNumber = NULL) override;
 
-    virtual status_t prepare(int streamId);
+    status_t prepare(int streamId) override;
 
-    virtual status_t tearDown(int streamId);
+    status_t tearDown(int streamId) override;
 
-    virtual status_t addBufferListenerForStream(int streamId,
-            wp<camera3::Camera3StreamBufferListener> listener);
+    status_t addBufferListenerForStream(int streamId,
+            wp<camera3::Camera3StreamBufferListener> listener) override;
 
-    virtual status_t prepare(int maxCount, int streamId);
+    status_t prepare(int maxCount, int streamId) override;
 
-    virtual uint32_t getDeviceVersion();
+    uint32_t getDeviceVersion() override;
 
-    virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const;
+    ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const override;
     ssize_t getPointCloudBufferSize() const;
     ssize_t getRawOpaqueBufferSize(int32_t width, int32_t height) const;
 
@@ -168,7 +186,7 @@
      * Set the deferred consumer surface to the output stream and finish the deferred
      * consumer configuration.
      */
-    virtual status_t setConsumerSurface(int streamId, sp<Surface> consumer);
+    status_t setConsumerSurface(int streamId, sp<Surface> consumer) override;
 
   private:
     static const size_t        kDumpLockAttempts  = 10;
@@ -198,14 +216,106 @@
     Mutex                      mLock;
 
     // Camera device ID
-    const int                  mId;
+    const String8              mId;
 
     // Flag indicating whether the currently active stream configuration is constrained high speed.
     bool                       mIsConstrainedHighSpeedConfiguration;
 
     /**** Scope for mLock ****/
 
-    camera3_device_t          *mHal3Device;
+    /**
+     * Adapter for legacy HAL / HIDL HAL interface calls; calls either into legacy HALv3 or the
+     * HIDL HALv3 interfaces.
+     */
+    class HalInterface {
+      public:
+        HalInterface(camera3_device_t *device);
+        HalInterface(sp<hardware::camera::device::V3_2::ICameraDeviceSession> &session);
+        HalInterface(const HalInterface &other);
+        HalInterface();
+
+        // Returns true if constructed with a valid device or session, and not yet cleared
+        bool valid();
+
+        // Reset this HalInterface object (does not call close())
+        void clear();
+
+        // Calls into the HAL interface
+
+        // Caller takes ownership of requestTemplate
+        status_t constructDefaultRequestSettings(camera3_request_template_t templateId,
+                /*out*/ camera_metadata_t **requestTemplate);
+        status_t configureStreams(/*inout*/ camera3_stream_configuration *config);
+        status_t processCaptureRequest(camera3_capture_request_t *request);
+        status_t flush();
+        status_t dump(int fd);
+        status_t close();
+
+        // Find a buffer_handle_t based on frame number and stream ID
+        status_t popInflightBuffer(int32_t frameNumber, int32_t streamId,
+                /*out*/ buffer_handle_t **buffer);
+
+      private:
+        camera3_device_t *mHal3Device;
+        sp<hardware::camera::device::V3_2::ICameraDeviceSession> mHidlSession;
+
+        std::mutex mInflightLock;
+
+        status_t pushInflightBufferLocked(int32_t frameNumber, int32_t streamId,
+                buffer_handle_t *buffer, int acquireFence);
+        // Cache of buffer handles keyed off (frameNumber << 32 | streamId)
+        // value is a pair of (buffer_handle_t*, acquire_fence FD)
+        std::unordered_map<uint64_t, std::pair<buffer_handle_t*, int>> mInflightBufferMap;
+
+        struct BufferHasher {
+            size_t operator()(const buffer_handle_t& buf) const {
+                if (buf == nullptr)
+                    return 0;
+
+                size_t result = 1;
+                result = 31 * result + buf->numFds;
+                result = 31 * result + buf->numInts;
+                int length = buf->numFds + buf->numInts;
+                for (int i = 0; i < length; i++) {
+                    result = 31 * result + buf->data[i];
+                }
+                return result;
+            }
+        };
+
+        struct BufferComparator {
+            bool operator()(const buffer_handle_t& buf1, const buffer_handle_t& buf2) const {
+                if (buf1->numFds == buf2->numFds && buf1->numInts == buf2->numInts) {
+                    int length = buf1->numFds + buf1->numInts;
+                    for (int i = 0; i < length; i++) {
+                        if (buf1->data[i] != buf2->data[i]) {
+                            return false;
+                        }
+                    }
+                    return true;
+                }
+                return false;
+            }
+        };
+
+        std::mutex mBufferIdMapLock; // protecting mBufferIdMaps and mNextBufferId
+        typedef std::unordered_map<const buffer_handle_t, uint64_t,
+                BufferHasher, BufferComparator> BufferIdMap;
+        // stream ID -> per stream buffer ID map
+        std::unordered_map<int, BufferIdMap> mBufferIdMaps;
+        uint64_t mNextBufferId = 1; // 0 means no buffer
+        static const uint64_t BUFFER_ID_NO_BUFFER = 0;
+
+        // method to extract buffer's unique ID
+        // TODO: we should switch to use gralloc mapper's getBackingStore API
+        //       once we run in binderized gralloc mode, but before that is ready,
+        //       we need to rely on the conventional buffer queue behavior where
+        //       buffer_handle_t's FD won't change.
+        // return pair of (newlySeenBuffer?, bufferId)
+        std::pair<bool, uint64_t> getBufferId(const buffer_handle_t& buf, int streamId);
+    };
+
+    std::unique_ptr<HalInterface> mInterface;
 
     CameraMetadata             mDeviceInfo;
 
@@ -288,6 +398,7 @@
         camera3_stream_buffer_t             mInputBuffer;
         Vector<sp<camera3::Camera3OutputStreamInterface> >
                                             mOutputStreams;
+        SurfaceMap                          mOutputSurfaces;
         CaptureResultExtras                 mResultExtras;
         // Used to cancel AE precapture trigger for devices that don't support
         // CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL
@@ -297,6 +408,8 @@
         // requests will be submitted to HAL at a time. The batch size for
         // the following 7 requests will be ignored by the request thread.
         int                                 mBatchSize;
+        // Whether this request comes from a repeating request or a repeating burst.
+        bool                                mRepeating;
     };
     typedef List<sp<CaptureRequest> > RequestList;
 
@@ -304,16 +417,41 @@
 
     status_t convertMetadataListToRequestListLocked(
             const List<const CameraMetadata> &metadataList,
+            const std::list<const SurfaceMap> &surfaceMaps,
+            bool repeating,
             /*out*/
             RequestList *requestList);
 
-    status_t submitRequestsHelper(const List<const CameraMetadata> &requests, bool repeating,
+    void convertToRequestList(List<const CameraMetadata>& requests,
+            std::list<const SurfaceMap>& surfaceMaps,
+            const CameraMetadata& request);
+
+    status_t submitRequestsHelper(const List<const CameraMetadata> &requests,
+                                  const std::list<const SurfaceMap> &surfaceMaps,
+                                  bool repeating,
                                   int64_t *lastFrameNumber = NULL);
 
+
+    /**
+     * Implementation of android::hardware::camera::device::V3_2::ICameraDeviceCallback
+     */
+
+    hardware::Return<void> processCaptureResult(
+            const hardware::camera::device::V3_2::CaptureResult& result) override;
+    hardware::Return<void> notify(
+            const hardware::camera::device::V3_2::NotifyMsg& msg) override;
+
+    /**
+     * Common initialization code shared by both HAL paths
+     *
+     * Must be called with mLock and mInterfaceLock held.
+     */
+    status_t initializeCommonLocked();
+
     /**
      * Get the last request submitted to the hal by the request thread.
      *
-     * Takes mLock.
+     * Must be called with mLock held.
      */
     virtual CameraMetadata getLatestRequestLocked();
 
@@ -362,13 +500,15 @@
      * Do common work for setting up a streaming or single capture request.
      * On success, will transition to ACTIVE if in IDLE.
      */
-    sp<CaptureRequest> setUpRequestLocked(const CameraMetadata &request);
+    sp<CaptureRequest> setUpRequestLocked(const CameraMetadata &request,
+                                          const SurfaceMap &surfaceMap);
 
     /**
      * Build a CaptureRequest request from the CameraDeviceBase request
      * settings.
      */
-    sp<CaptureRequest> createCaptureRequest(const CameraMetadata &request);
+    sp<CaptureRequest> createCaptureRequest(const CameraMetadata &request,
+                                            const SurfaceMap &surfaceMap);
 
     /**
      * Take the currently-defined set of streams and configure the HAL to use
@@ -431,6 +571,24 @@
      */
     static android_dataspace mapToLegacyDataspace(android_dataspace dataSpace);
 
+    /**
+     * Helper functions to map between framework and HIDL values
+     */
+    static hardware::graphics::common::V1_0::PixelFormat mapToPixelFormat(int frameworkFormat);
+    static hardware::camera::device::V3_2::DataspaceFlags mapToHidlDataspace(
+            android_dataspace dataSpace);
+    static hardware::camera::device::V3_2::ConsumerUsageFlags mapToConsumerUsage(uint32_t usage);
+    static hardware::camera::device::V3_2::StreamRotation mapToStreamRotation(
+            camera3_stream_rotation_t rotation);
+    static hardware::camera::device::V3_2::StreamConfigurationMode mapToStreamConfigurationMode(
+            camera3_stream_configuration_mode_t operationMode);
+    static camera3_buffer_status_t mapHidlBufferStatus(hardware::camera::device::V3_2::BufferStatus status);
+    static int mapToFrameworkFormat(hardware::graphics::common::V1_0::PixelFormat pixelFormat);
+    static uint32_t mapConsumerToFrameworkUsage(
+            hardware::camera::device::V3_2::ConsumerUsageFlags usage);
+    static uint32_t mapProducerToFrameworkUsage(
+            hardware::camera::device::V3_2::ProducerUsageFlags usage);
+
     struct RequestTrigger {
         // Metadata tag number, e.g. android.control.aePrecaptureTrigger
         uint32_t metadataTag;
@@ -457,8 +615,10 @@
 
         RequestThread(wp<Camera3Device> parent,
                 sp<camera3::StatusTracker> statusTracker,
-                camera3_device_t *hal3Device,
+                HalInterface* interface,
+                uint32_t deviceVersion,
                 bool aeLockAvailable);
+        ~RequestThread();
 
         void     setNotificationListener(wp<NotificationListener> listener);
 
@@ -538,7 +698,7 @@
         virtual bool threadLoop();
 
       private:
-        static int         getId(const wp<Camera3Device> &device);
+        static const String8& getId(const wp<Camera3Device> &device);
 
         status_t           queueTriggerLocked(RequestTrigger trigger);
         // Mix-in queued triggers into this request
@@ -599,11 +759,12 @@
 
         wp<Camera3Device>  mParent;
         wp<camera3::StatusTracker>  mStatusTracker;
-        camera3_device_t  *mHal3Device;
+        HalInterface*      mInterface;
+        uint32_t           mDeviceVersion;
 
         wp<NotificationListener> mListener;
 
-        const int          mId;       // The camera ID
+        const String8&     mId;       // The camera ID
         int                mStatusId; // The RequestThread's component ID for
                                       // status tracking
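
The HalInterface declaration above caches inflight buffers keyed off (frameNumber << 32 | streamId). A hedged sketch of how push/pop helpers might compose and use that key; the map shape mirrors mInflightBufferMap, but the function bodies are illustrative rather than the actual implementation:

    // Sketch: build the 64-bit key described in the comments above.
    uint64_t inflightKey(int32_t frameNumber, int32_t streamId) {
        // Widen via uint32_t first so negative values do not sign-extend.
        return (static_cast<uint64_t>(static_cast<uint32_t>(frameNumber)) << 32) |
                static_cast<uint32_t>(streamId);
    }

    std::unordered_map<uint64_t, std::pair<buffer_handle_t*, int>> inflight;

    void pushInflight(int32_t frame, int32_t stream, buffer_handle_t* buf, int fence) {
        inflight[inflightKey(frame, stream)] = std::make_pair(buf, fence);
    }

    bool popInflight(int32_t frame, int32_t stream, buffer_handle_t** buf) {
        auto it = inflight.find(inflightKey(frame, stream));
        if (it == inflight.end()) return false;
        *buf = it->second.first;
        inflight.erase(it);
        return true;
    }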
 
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
index 5123785..7f61c7a 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -83,6 +83,14 @@
     return OK;
 }
 
+status_t Camera3DummyStream::notifyRequestedSurfaces(uint32_t frame_number,
+        const std::vector<size_t>& surface_ids) {
+    (void) frame_number;
+    (void) surface_ids;
+    // Do nothing
+    return OK;
+}
+
 status_t Camera3DummyStream::configureQueueLocked() {
     // Do nothing
     return OK;
@@ -103,7 +111,7 @@
     return false;
 }
 
-bool Camera3DummyStream::isConsumerConfigurationDeferred() const {
+bool Camera3DummyStream::isConsumerConfigurationDeferred(size_t /*surface_id*/) const {
     return false;
 }
 
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h
index 18e8a23..37efbbb 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.h
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h
@@ -56,6 +56,9 @@
 
     virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
 
+    virtual status_t notifyRequestedSurfaces(uint32_t frame_number,
+            const std::vector<size_t>& surface_ids);
+
     /**
      * Return if this output stream is for video encoding.
      */
@@ -64,7 +67,7 @@
     /**
      * Return if the consumer configuration of this stream is deferred.
      */
-    virtual bool isConsumerConfigurationDeferred() const;
+    virtual bool isConsumerConfigurationDeferred(size_t surface_id) const;
 
     /**
      * Set the consumer surface to the output stream.
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 7229929..1e76a27 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -124,6 +124,7 @@
                                          int format,
                                          android_dataspace dataSpace,
                                          camera3_stream_rotation_t rotation,
+                                         uint32_t consumerUsage, nsecs_t timestampOffset,
                                          int setId) :
         Camera3IOStreamBase(id, type, width, height,
                             /*maxSize*/0,
@@ -132,7 +133,8 @@
         mTraceFirstBuffer(true),
         mUseMonoTimestamp(false),
         mUseBufferManager(false),
-        mConsumerUsage(0) {
+        mTimestampOffset(timestampOffset),
+        mConsumerUsage(consumerUsage) {
 
     if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
         mBufferReleasedListener = new BufferReleasedListener(this);
@@ -373,6 +375,24 @@
         return res;
     }
 
+    if ((res = configureConsumerQueueLocked()) != OK) {
+        return res;
+    }
+
+    // Set dequeueBuffer/attachBuffer timeout if the consumer is not hw composer or hw texture.
+    // We need to skip these cases because a timeout would disable the non-blocking (async) mode.
+    if (!(isConsumedByHWComposer() || isConsumedByHWTexture())) {
+        mConsumer->setDequeueTimeout(kDequeueBufferTimeout);
+    }
+
+    return OK;
+}
+
+status_t Camera3OutputStream::configureConsumerQueueLocked() {
+    status_t res;
+
+    mTraceFirstBuffer = true;
+
     ALOG_ASSERT(mConsumer != 0, "mConsumer should never be NULL");
 
     // Configure consumer-side ANativeWindow interface. The listener may be used
@@ -470,12 +490,7 @@
     if (res != OK) {
         ALOGE("%s: Unable to configure stream transform to %x: %s (%d)",
                 __FUNCTION__, mTransform, strerror(-res), res);
-    }
-
-    // Set dequeueBuffer/attachBuffer timeout if the consumer is not hw composer or hw texture.
-    // We need skip these cases as timeout will disable the non-blocking (async) mode.
-    if (!(isConsumedByHWComposer() || isConsumedByHWTexture())) {
-        mConsumer->setDequeueTimeout(kDequeueBufferTimeout);
+        return res;
     }
 
     /**
@@ -568,14 +583,24 @@
 status_t Camera3OutputStream::getEndpointUsage(uint32_t *usage) const {
 
     status_t res;
-    int32_t u = 0;
+
     if (mConsumer == nullptr) {
         // mConsumerUsage was sanitized before the Camera3OutputStream was constructed.
         *usage = mConsumerUsage;
         return OK;
     }
 
-    res = static_cast<ANativeWindow*>(mConsumer.get())->query(mConsumer.get(),
+    res = getEndpointUsageForSurface(usage, mConsumer);
+
+    return res;
+}
+
+status_t Camera3OutputStream::getEndpointUsageForSurface(uint32_t *usage,
+        const sp<Surface>& surface) const {
+    status_t res;
+    int32_t u = 0;
+
+    res = static_cast<ANativeWindow*>(surface.get())->query(surface.get(),
             NATIVE_WINDOW_CONSUMER_USAGE_BITS, &u);
 
     // If an opaque output stream's endpoint is ImageReader, add
@@ -587,8 +612,8 @@
     //     3. GRALLOC_USAGE_HW_COMPOSER
     //     4. GRALLOC_USAGE_HW_VIDEO_ENCODER
     if (camera3_stream::format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
-            (u & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_COMPOSER |
-            GRALLOC_USAGE_HW_VIDEO_ENCODER)) == 0) {
+            (u & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER |
+            GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_VIDEO_ENCODER)) == 0) {
         u |= GRALLOC_USAGE_HW_CAMERA_ZSL;
     }
 
@@ -676,8 +701,17 @@
     return OK;
 }
 
-bool Camera3OutputStream::isConsumerConfigurationDeferred() const {
+status_t Camera3OutputStream::notifyRequestedSurfaces(uint32_t /*frame_number*/,
+        const std::vector<size_t>& /*surface_ids*/) {
+    return OK;
+}
+
+bool Camera3OutputStream::isConsumerConfigurationDeferred(size_t surface_id) const {
     Mutex::Autolock l(mLock);
+
+    if (surface_id != 0) {
+        ALOGE("%s: surface_id for Camera3OutputStream should be 0!", __FUNCTION__);
+    }
     return mConsumer == nullptr;
 }
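
getEndpointUsageForSurface() above queries the consumer usage bits from the surface and adds the ZSL flag when an implementation-defined stream has no hardware consumer. A hedged, self-contained restatement of that logic; the helper name is illustrative and the constants are the gralloc/native-window ones already referenced in the hunk:

    // Sketch only: mirrors the usage adjustment above, not a drop-in replacement.
    uint32_t usageWithZslFallback(const sp<Surface>& surface, int halFormat) {
        int32_t u = 0;
        static_cast<ANativeWindow*>(surface.get())->query(surface.get(),
                NATIVE_WINDOW_CONSUMER_USAGE_BITS, &u);
        const int32_t hwBits = GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER |
                GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_VIDEO_ENCODER;
        if (halFormat == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED && (u & hwBits) == 0) {
            u |= GRALLOC_USAGE_HW_CAMERA_ZSL;  // endpoint is likely an ImageReader
        }
        return static_cast<uint32_t>(u);
    }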
 
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 12d497e..26ea63f 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -135,7 +135,7 @@
     /**
      * Return if the consumer configuration of this stream is deferred.
      */
-    virtual bool isConsumerConfigurationDeferred() const;
+    virtual bool isConsumerConfigurationDeferred(size_t surface_id) const;
 
     /**
      * Set the consumer surface to the output stream.
@@ -158,6 +158,9 @@
 
     virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
 
+    virtual status_t notifyRequestedSurfaces(uint32_t frame_number,
+            const std::vector<size_t>& surface_ids);
+
     /**
      * Set the graphic buffer manager to get/return the stream buffers.
      *
@@ -169,6 +172,7 @@
     Camera3OutputStream(int id, camera3_stream_type_t type,
             uint32_t width, uint32_t height, int format,
             android_dataspace dataSpace, camera3_stream_rotation_t rotation,
+            uint32_t consumerUsage = 0, nsecs_t timestampOffset = 0,
             int setId = CAMERA3_STREAM_SET_ID_INVALID);
 
     /**
@@ -183,12 +187,19 @@
 
     virtual status_t disconnectLocked();
 
+    status_t getEndpointUsageForSurface(uint32_t *usage,
+            const sp<Surface>& surface) const;
+    status_t configureConsumerQueueLocked();
+
+    // Consumer surface serving as the camera HAL output
     sp<Surface> mConsumer;
 
-  private:
+    uint32_t getPresetConsumerUsage() const { return mConsumerUsage; }
 
     static const nsecs_t       kDequeueBufferTimeout   = 1000000000; // 1 sec
 
+  private:
+
     int               mTransform;
 
     virtual status_t  setTransformLocked(int transform);
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
index 3f83c89..6a911c6 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
@@ -43,7 +43,7 @@
     /**
      * Return if the consumer configuration of this stream is deferred.
      */
-    virtual bool isConsumerConfigurationDeferred() const = 0;
+    virtual bool isConsumerConfigurationDeferred(size_t surface_id = 0) const = 0;
 
     /**
      * Set the consumer surface to the output stream.
@@ -59,6 +59,20 @@
      *
      */
     virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd) = 0;
+
+    /**
+     * Notify which surfaces are requested for a particular frame number.
+     *
+     * Multiple surfaces may share the same output stream, but a request may
+     * target only a subset of them. In that case, the
+     * Camera3OutputStreamInterface object needs to manage the output surfaces
+     * on a per-request basis.
+     *
+     * If there is only one surface for this output stream, calling this
+     * function is a no-op.
+     */
+    virtual status_t notifyRequestedSurfaces(uint32_t frame_number,
+            const std::vector<size_t>& surface_ids) = 0;
 };
 
 } // namespace camera3
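
The notifyRequestedSurfaces() contract above implies per-request routing state in streams that back multiple surfaces. A minimal sketch of the FIFO bookkeeping such a stream might keep (this parallels what Camera3StreamSplitter does later in this change; the container and helper names are illustrative):

    // Sketch: remember which surface ids each queued request should go to.
    std::list<std::vector<size_t>> requestedSurfaceFifo;

    void recordRequestedSurfaces(const std::vector<size_t>& surfaceIds) {
        requestedSurfaceFifo.push_back(surfaceIds);  // one entry per capture request
    }

    // When a buffer arrives from the input queue, consume the oldest entry to
    // decide which outputs receive it.
    std::vector<size_t> takeNextRouting() {
        std::vector<size_t> ids = std::move(requestedSurfaceFifo.front());
        requestedSurfaceFifo.pop_front();
        return ids;
    }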
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
new file mode 100644
index 0000000..b419e06
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Camera3SharedOutputStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+Camera3SharedOutputStream::Camera3SharedOutputStream(int id,
+        const std::vector<sp<Surface>>& surfaces,
+        bool hasDeferredSurface,
+        uint32_t width, uint32_t height, int format,
+        uint32_t consumerUsage, android_dataspace dataSpace,
+        camera3_stream_rotation_t rotation,
+        nsecs_t timestampOffset, int setId) :
+        Camera3OutputStream(id, CAMERA3_STREAM_OUTPUT, width, height,
+                            format, dataSpace, rotation, consumerUsage,
+                            timestampOffset, setId),
+        mSurfaces(surfaces),
+        mDeferred(hasDeferredSurface) {
+}
+
+Camera3SharedOutputStream::~Camera3SharedOutputStream() {
+    disconnectLocked();
+}
+
+status_t Camera3SharedOutputStream::connectStreamSplitterLocked() {
+    status_t res = OK;
+
+    mStreamSplitter = new Camera3StreamSplitter();
+
+    uint32_t usage;
+    getEndpointUsage(&usage);
+
+    res = mStreamSplitter->connect(mSurfaces, usage, camera3_stream::max_buffers, mConsumer);
+    if (res != OK) {
+        ALOGE("%s: Failed to connect to stream splitter: %s(%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    return res;
+}
+
+status_t Camera3SharedOutputStream::notifyRequestedSurfaces(uint32_t /*frame_number*/,
+        const std::vector<size_t>& surface_ids) {
+    Mutex::Autolock l(mLock);
+    status_t res = OK;
+
+    if (mStreamSplitter != nullptr) {
+        res = mStreamSplitter->notifyRequestedSurfaces(surface_ids);
+    }
+
+    return res;
+}
+
+bool Camera3SharedOutputStream::isConsumerConfigurationDeferred(size_t surface_id) const {
+    Mutex::Autolock l(mLock);
+    return (mDeferred && surface_id >= mSurfaces.size());
+}
+
+status_t Camera3SharedOutputStream::setConsumer(sp<Surface> surface) {
+    if (surface == nullptr) {
+        ALOGE("%s: it's illegal to set a null consumer surface!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    if (!mDeferred) {
+        ALOGE("%s: Current stream isn't deferred!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    mSurfaces.push_back(surface);
+
+    return mStreamSplitter->addOutput(surface, camera3_stream::max_buffers);
+}
+
+status_t Camera3SharedOutputStream::configureQueueLocked() {
+    status_t res;
+
+    if ((res = Camera3IOStreamBase::configureQueueLocked()) != OK) {
+        return res;
+    }
+
+    res = connectStreamSplitterLocked();
+    if (res != OK) {
+        ALOGE("Cannot connect to stream splitter: %s(%d)", strerror(-res), res);
+        return res;
+    }
+
+    res = configureConsumerQueueLocked();
+    if (res != OK) {
+        ALOGE("Failed to configureConsumerQueueLocked: %s(%d)", strerror(-res), res);
+        return res;
+    }
+
+    return OK;
+}
+
+status_t Camera3SharedOutputStream::disconnectLocked() {
+    status_t res;
+    res = Camera3OutputStream::disconnectLocked();
+
+    if (mStreamSplitter != nullptr) {
+        mStreamSplitter->disconnect();
+    }
+
+    return res;
+}
+
+status_t Camera3SharedOutputStream::getEndpointUsage(uint32_t *usage) const {
+
+    status_t res;
+    uint32_t u = 0;
+
+    if (mConsumer == nullptr) {
+        // Called before shared buffer queue is constructed.
+        *usage = getPresetConsumerUsage();
+
+        for (auto surface : mSurfaces) {
+            if (surface != nullptr) {
+                res = getEndpointUsageForSurface(&u, surface);
+                *usage |= u;
+            }
+        }
+    } else {
+        // Called after shared buffer queue is constructed.
+        res = getEndpointUsageForSurface(&u, mConsumer);
+        *usage |= u;
+    }
+
+    return res;
+}
+
+} // namespace camera3
+
+} // namespace android
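
connectStreamSplitterLocked() above wires the shared stream to its splitter; a hedged sketch of that hookup from the caller's side. The locals (appSurfaces, usage, maxHalBuffers, halConsumer) are illustrative, while connect() is the method declared in this change:

    // Sketch only, assuming appSurfaces, usage and maxHalBuffers are already known.
    sp<Camera3StreamSplitter> splitter = new Camera3StreamSplitter();
    sp<Surface> halConsumer;  // filled in by connect()
    status_t res = splitter->connect(appSurfaces, usage, maxHalBuffers, halConsumer);
    if (res == OK) {
        // halConsumer now wraps the splitter's input BufferQueue: the HAL queues
        // captured buffers into it and the splitter fans them out to appSurfaces.
    }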
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
new file mode 100644
index 0000000..1b37d7c
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_SHARED_OUTPUT_STREAM_H
+#define ANDROID_SERVERS_CAMERA3_SHARED_OUTPUT_STREAM_H
+
+#include "Camera3StreamSplitter.h"
+#include "Camera3OutputStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+class Camera3SharedOutputStream :
+        public Camera3OutputStream {
+public:
+    /**
+     * Set up a stream for formats that have 2 dimensions, with multiple
+     * surfaces. A valid stream set id needs to be set to support buffer
+     * sharing between multiple streams.
+     */
+    Camera3SharedOutputStream(int id, const std::vector<sp<Surface>>& surfaces,
+            bool hasDeferredSurface, uint32_t width, uint32_t height, int format,
+            uint32_t consumerUsage, android_dataspace dataSpace,
+            camera3_stream_rotation_t rotation, nsecs_t timestampOffset,
+            int setId = CAMERA3_STREAM_SET_ID_INVALID);
+
+    virtual ~Camera3SharedOutputStream();
+
+    virtual status_t notifyRequestedSurfaces(uint32_t frame_number,
+            const std::vector<size_t>& surface_ids);
+
+    virtual bool isConsumerConfigurationDeferred(size_t surface_id) const;
+
+    virtual status_t setConsumer(sp<Surface> consumer);
+
+private:
+    // Surfaces passed in by the app via the constructor
+    std::vector<sp<Surface> > mSurfaces;
+
+    /**
+     * The Camera3StreamSplitter object this stream uses for stream
+     * sharing.
+     */
+    sp<Camera3StreamSplitter> mStreamSplitter;
+
+    /**
+     * Initialize stream splitter.
+     */
+    status_t connectStreamSplitterLocked();
+
+    virtual status_t configureQueueLocked();
+
+    virtual status_t disconnectLocked();
+
+    virtual status_t getEndpointUsage(uint32_t *usage) const;
+
+    bool mDeferred;
+
+}; // class Camera3SharedOutputStream
+
+} // namespace camera3
+
+} // namespace android
+
+#endif // ANDROID_SERVERS_CAMERA3_SHARED_OUTPUT_STREAM_H
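
For orientation, a hedged usage sketch built from the constructor declared above; every literal value is illustrative, and surface0/surface1 are assumed to be valid app surfaces:

    std::vector<sp<Surface>> surfaces = { surface0, surface1 };
    sp<Camera3SharedOutputStream> stream = new Camera3SharedOutputStream(
            /*id*/ 0, surfaces, /*hasDeferredSurface*/ false,
            /*width*/ 1920, /*height*/ 1080,
            HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
            /*consumerUsage*/ 0, HAL_DATASPACE_UNKNOWN,
            CAMERA3_STREAM_ROTATION_0, /*timestampOffset*/ 0);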
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 3ffd9d1..c3b7565 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -166,7 +166,7 @@
     return (mState == STATE_IN_CONFIG) || (mState == STATE_IN_RECONFIG);
 }
 
-status_t Camera3Stream::finishConfiguration(camera3_device *hal3Device) {
+status_t Camera3Stream::finishConfiguration() {
     ATRACE_CALL();
     Mutex::Autolock l(mLock);
     switch (mState) {
@@ -216,14 +216,6 @@
         return res;
     }
 
-    res = registerBuffersLocked(hal3Device);
-    if (res != OK) {
-        ALOGE("%s: Unable to register stream buffers with HAL: %s (%d)",
-                __FUNCTION__, strerror(-res), res);
-        mState = STATE_ERROR;
-        return res;
-    }
-
     mState = STATE_CONFIGURED;
 
     return res;
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 1ff215d..471b393 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -144,6 +144,10 @@
     int               getFormat() const;
     android_dataspace getDataSpace() const;
 
+    camera3_stream*   asHalStream() override {
+        return this;
+    }
+
     /**
      * Start the stream configuration process. Returns a handle to the stream's
      * information to be passed into the HAL device's configure_streams call.
@@ -165,11 +169,10 @@
     bool             isConfiguring() const;
 
     /**
-     * Completes the stream configuration process. During this call, the stream
-     * may call the device's register_stream_buffers() method. The stream
-     * information structure returned by startConfiguration() may no longer be
-     * modified after this call, but can still be read until the destruction of
-     * the stream.
+     * Completes the stream configuration process. The stream information
+     * structure returned by startConfiguration() may no longer be modified
+     * after this call, but can still be read until the destruction of the
+     * stream.
      *
      * Returns:
      *   OK on a successful configuration
@@ -178,7 +181,7 @@
      *   INVALID_OPERATION in case connecting to the consumer failed or consumer
      *       doesn't exist yet.
      */
-    status_t         finishConfiguration(camera3_device *hal3Device);
+    status_t         finishConfiguration();
 
     /**
      * Cancels the stream configuration process. This returns the stream to the
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index 6cb7a54..ceea08a 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -72,6 +72,11 @@
     virtual android_dataspace getDataSpace() const = 0;
 
     /**
+     * Get a HAL3 handle for the stream, without starting stream configuration.
+     */
+    virtual camera3_stream* asHalStream() = 0;
+
+    /**
      * Start the stream configuration process. Returns a handle to the stream's
      * information to be passed into the HAL device's configure_streams call.
      *
@@ -104,7 +109,7 @@
      *   NO_MEMORY in case of an error registering buffers
      *   INVALID_OPERATION in case connecting to the consumer failed
      */
-    virtual status_t finishConfiguration(camera3_device *hal3Device) = 0;
+    virtual status_t finishConfiguration() = 0;
 
     /**
      * Cancels the stream configuration process. This returns the stream to the
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
new file mode 100644
index 0000000..b935141
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
@@ -0,0 +1,441 @@
+/*
+ * Copyright 2014,2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+#define LOG_TAG "Camera3StreamSplitter"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <gui/BufferItem.h>
+#include <gui/IGraphicBufferConsumer.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/BufferQueue.h>
+#include <gui/Surface.h>
+
+#include <ui/GraphicBuffer.h>
+
+#include <binder/ProcessState.h>
+
+#include <utils/Trace.h>
+
+#include "Camera3StreamSplitter.h"
+
+namespace android {
+
+status_t Camera3StreamSplitter::connect(const std::vector<sp<Surface> >& surfaces,
+                                           uint32_t consumerUsage, size_t hal_max_buffers,
+                                           sp<Surface>& consumer) {
+    if (consumer != nullptr) {
+        ALOGE("%s: output Surface is not NULL", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock lock(mMutex);
+    status_t res = OK;
+
+    if (mOutputs.size() > 0 || mConsumer != nullptr) {
+        ALOGE("%s: StreamSplitter already connected", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    // Add output surfaces. This has to be before creating internal buffer queue
+    // in order to get max consumer side buffers.
+    for (size_t i = 0; i < surfaces.size(); i++) {
+        if (surfaces[i] != nullptr) {
+            res = addOutputLocked(surfaces[i], hal_max_buffers,
+                    OutputType::NonDeferred);
+            if (res != OK) {
+                ALOGE("%s: Failed to add output surface: %s(%d)",
+                        __FUNCTION__, strerror(-res), res);
+                return res;
+            }
+        }
+    }
+
+    // Create buffer queue for input
+    BufferQueue::createBufferQueue(&mProducer, &mConsumer);
+
+    mBufferItemConsumer = new BufferItemConsumer(mConsumer, consumerUsage,
+                                                 mMaxConsumerBuffers);
+    if (mBufferItemConsumer == nullptr) {
+        return NO_MEMORY;
+    }
+    mConsumer->setConsumerName(getUniqueConsumerName());
+
+    mSurface = new Surface(mProducer);
+    if (mSurface == nullptr) {
+        return NO_MEMORY;
+    }
+    consumer = mSurface;
+
+    res = mConsumer->consumerConnect(this, /* controlledByApp */ false);
+
+    return res;
+}
+
+void Camera3StreamSplitter::disconnect() {
+    Mutex::Autolock lock(mMutex);
+
+    for (auto& output : mOutputs) {
+        output->disconnect(NATIVE_WINDOW_API_CAMERA);
+    }
+    mOutputs.clear();
+
+    if (mConsumer != nullptr) {
+        mConsumer->consumerDisconnect();
+        mConsumer.clear();
+    }
+
+    if (mBuffers.size() > 0) {
+        ALOGI("%zu buffers still being tracked", mBuffers.size());
+    }
+}
+
+Camera3StreamSplitter::~Camera3StreamSplitter() {
+    disconnect();
+}
+
+status_t Camera3StreamSplitter::addOutput(
+        sp<Surface>& outputQueue, size_t hal_max_buffers) {
+    Mutex::Autolock lock(mMutex);
+    return addOutputLocked(outputQueue, hal_max_buffers, OutputType::Deferred);
+}
+
+status_t Camera3StreamSplitter::addOutputLocked(
+        const sp<Surface>& outputQueue, size_t hal_max_buffers,
+        OutputType outputType) {
+    if (outputQueue == nullptr) {
+        ALOGE("addOutput: outputQueue must not be NULL");
+        return BAD_VALUE;
+    }
+    if (hal_max_buffers < 1) {
+        ALOGE("%s: Camera HAL requested max_buffer count: %zu, requires at least 1",
+                __FUNCTION__, hal_max_buffers);
+        return BAD_VALUE;
+    }
+
+    sp<IGraphicBufferProducer> gbp = outputQueue->getIGraphicBufferProducer();
+    // Connect to the buffer producer
+    IGraphicBufferProducer::QueueBufferOutput queueBufferOutput;
+    sp<OutputListener> listener(new OutputListener(this, gbp));
+    IInterface::asBinder(gbp)->linkToDeath(listener);
+    status_t status = gbp->connect(listener, NATIVE_WINDOW_API_CAMERA,
+            /* producerControlledByApp */ true, &queueBufferOutput);
+    if (status != NO_ERROR) {
+        ALOGE("addOutput: failed to connect (%d)", status);
+        return status;
+    }
+
+    // Query consumer side buffer count, and update overall buffer count
+    int maxConsumerBuffers = 0;
+    status = static_cast<ANativeWindow*>(outputQueue.get())->query(
+            outputQueue.get(),
+            NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxConsumerBuffers);
+    if (status != OK) {
+        ALOGE("%s: Unable to query consumer undequeued buffer count"
+              " for surface", __FUNCTION__);
+        return status;
+    }
+
+    if (maxConsumerBuffers > mMaxConsumerBuffers) {
+        if (outputType == OutputType::Deferred) {
+            ALOGE("%s: Fatal: Deferred surface has higher consumer buffer count"
+                  " %d than what's already configured %d", __FUNCTION__,
+                  maxConsumerBuffers, mMaxConsumerBuffers);
+            return BAD_VALUE;
+        }
+        mMaxConsumerBuffers = maxConsumerBuffers;
+    }
+
+    ALOGV("%s: Consumer wants %d buffers, HAL wants %zu", __FUNCTION__,
+            maxConsumerBuffers, hal_max_buffers);
+    size_t totalBufferCount = maxConsumerBuffers + hal_max_buffers;
+    status = native_window_set_buffer_count(outputQueue.get(),
+            totalBufferCount);
+    if (status != OK) {
+        ALOGE("%s: Unable to set buffer count for surface %p",
+                __FUNCTION__, outputQueue.get());
+        return status;
+    }
+
+    // Set dequeueBuffer/attachBuffer timeout if the consumer is not hw composer or hw texture.
+    // We need skip these cases as timeout will disable the non-blocking (async) mode.
+    int32_t usage = 0;
+    static_cast<ANativeWindow*>(outputQueue.get())->query(
+            outputQueue.get(),
+            NATIVE_WINDOW_CONSUMER_USAGE_BITS, &usage);
+    if (!(usage & (GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_TEXTURE))) {
+        outputQueue->setDequeueTimeout(kDequeueBufferTimeout);
+    }
+
+    status = gbp->allowAllocation(false);
+    if (status != OK) {
+        ALOGE("%s: Failed to turn off allocation for outputQueue", __FUNCTION__);
+        return status;
+    }
+
+    // Add new entry into mOutputs
+    mOutputs.push_back(gbp);
+    return NO_ERROR;
+}
+
+String8 Camera3StreamSplitter::getUniqueConsumerName() {
+    static volatile int32_t counter = 0;
+    return String8::format("Camera3StreamSplitter-%d", android_atomic_inc(&counter));
+}
+
+status_t Camera3StreamSplitter::notifyRequestedSurfaces(
+        const std::vector<size_t>& surfaces) {
+    ATRACE_CALL();
+    Mutex::Autolock lock(mMutex);
+
+    mRequestedSurfaces.push_back(surfaces);
+    return OK;
+}
+
+
+void Camera3StreamSplitter::onFrameAvailable(const BufferItem& /* item */) {
+    ATRACE_CALL();
+    Mutex::Autolock lock(mMutex);
+
+    // The current policy is that if any one consumer is consuming buffers too
+    // slowly, the splitter will stall the rest of the outputs by not acquiring
+    // any more buffers from the input. This will cause back pressure on the
+    // input queue, slowing down its producer.
+
+    // If there are too many outstanding buffers, we block until a buffer is
+    // released back to the input in onBufferReleased
+    while (mOutstandingBuffers >= mMaxConsumerBuffers) {
+        mReleaseCondition.wait(mMutex);
+
+        // If the splitter is abandoned while we are waiting, the release
+        // condition variable will be broadcast, and we should just return
+        // without attempting to do anything more (since the input queue will
+        // also be abandoned).
+        if (mIsAbandoned) {
+            return;
+        }
+    }
+    // If the splitter is abandoned without reaching mMaxConsumerBuffers, just
+    // return without attempting to do anything more.
+    if (mIsAbandoned) {
+        return;
+    }
+
+    ++mOutstandingBuffers;
+
+    // Acquire and detach the buffer from the input
+    BufferItem bufferItem;
+    status_t status = mConsumer->acquireBuffer(&bufferItem, /* presentWhen */ 0);
+    LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+            "acquiring buffer from input failed (%d)", status);
+
+    ALOGV("acquired buffer %#" PRIx64 " from input",
+            bufferItem.mGraphicBuffer->getId());
+
+    status = mConsumer->detachBuffer(bufferItem.mSlot);
+    LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+            "detaching buffer from input failed (%d)", status);
+
+    IGraphicBufferProducer::QueueBufferInput queueInput(
+            bufferItem.mTimestamp, bufferItem.mIsAutoTimestamp,
+            bufferItem.mDataSpace, bufferItem.mCrop,
+            static_cast<int32_t>(bufferItem.mScalingMode),
+            bufferItem.mTransform, bufferItem.mFence);
+
+    // Attach and queue the buffer to each of the outputs
+    std::vector<std::vector<size_t> >::iterator surfaces = mRequestedSurfaces.begin();
+    if (surfaces != mRequestedSurfaces.end()) {
+
+        LOG_ALWAYS_FATAL_IF(surfaces->size() == 0,
+                "requested surface ids shouldn't be empty");
+
+        // Initialize our reference count for this buffer
+        mBuffers[bufferItem.mGraphicBuffer->getId()] =
+                std::unique_ptr<BufferTracker>(
+                new BufferTracker(bufferItem.mGraphicBuffer, surfaces->size()));
+
+        for (auto id : *surfaces) {
+
+            LOG_ALWAYS_FATAL_IF(id >= mOutputs.size(),
+                    "requested surface id exceeding max registered ids");
+
+            int slot = BufferItem::INVALID_BUFFER_SLOT;
+            status = mOutputs[id]->attachBuffer(&slot, bufferItem.mGraphicBuffer);
+            if (status == NO_INIT) {
+                // If we just discovered that this output has been abandoned, note
+                // that, decrement the reference count so that we still release this
+                // buffer eventually, and move on to the next output
+                onAbandonedLocked();
+                mBuffers[bufferItem.mGraphicBuffer->getId()]->
+                        decrementReferenceCountLocked();
+                continue;
+            } else if (status == WOULD_BLOCK) {
+                // If the output is async, attachBuffer may return WOULD_BLOCK
+                // indicating number of dequeued buffers has reached limit. In
+                // this case, simply decrement the reference count, and move on
+                // to the next output.
+                // TODO: Do we need to report BUFFER_ERROR for this result?
+                mBuffers[bufferItem.mGraphicBuffer->getId()]->
+                        decrementReferenceCountLocked();
+                continue;
+            } else if (status == TIMED_OUT) {
+                // If attachBuffer times out due to the value set by
+                // setDequeueTimeout, simply decrement the reference count, and
+                // move on to the next output.
+                // TODO: Do we need to report BUFFER_ERROR for this result?
+                mBuffers[bufferItem.mGraphicBuffer->getId()]->
+                        decrementReferenceCountLocked();
+                continue;
+            } else {
+                LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+                        "attaching buffer to output failed (%d)", status);
+            }
+
+            IGraphicBufferProducer::QueueBufferOutput queueOutput;
+            status = mOutputs[id]->queueBuffer(slot, queueInput, &queueOutput);
+            if (status == NO_INIT) {
+                // If we just discovered that this output has been abandoned, note
+                // that, decrement the reference count so that we still release this
+                // buffer eventually, and move on to the next output
+                onAbandonedLocked();
+                mBuffers[bufferItem.mGraphicBuffer->getId()]->
+                        decrementReferenceCountLocked();
+                continue;
+            } else {
+                LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+                        "queueing buffer to output failed (%d)", status);
+            }
+
+            ALOGV("queued buffer %#" PRIx64 " to output %p",
+                    bufferItem.mGraphicBuffer->getId(), mOutputs[id].get());
+        }
+
+        mRequestedSurfaces.erase(surfaces);
+    }
+}
+
+void Camera3StreamSplitter::onBufferReleasedByOutput(
+        const sp<IGraphicBufferProducer>& from) {
+    ATRACE_CALL();
+    Mutex::Autolock lock(mMutex);
+
+    sp<GraphicBuffer> buffer;
+    sp<Fence> fence;
+    status_t status = from->detachNextBuffer(&buffer, &fence);
+    if (status == NO_INIT) {
+        // If we just discovered that this output has been abandoned, note that,
+        // but we can't do anything else, since buffer is invalid
+        onAbandonedLocked();
+        return;
+    } else {
+        LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+                "detaching buffer from output failed (%d)", status);
+    }
+
+    ALOGV("detached buffer %#" PRIx64 " from output %p",
+          buffer->getId(), from.get());
+
+    BufferTracker& tracker = *(mBuffers[buffer->getId()]);
+
+    // Merge the release fence of the incoming buffer so that the fence we send
+    // back to the input includes all of the outputs' fences
+    tracker.mergeFence(fence);
+
+    // Check to see if this is the last outstanding reference to this buffer
+    size_t referenceCount = tracker.decrementReferenceCountLocked();
+    ALOGV("buffer %#" PRIx64 " reference count %zu", buffer->getId(),
+            referenceCount);
+    if (referenceCount > 0) {
+        return;
+    }
+
+    // If we've been abandoned, we can't return the buffer to the input, so just
+    // stop tracking it and move on
+    if (mIsAbandoned) {
+        mBuffers.erase(buffer->getId());
+        return;
+    }
+
+    // Attach and release the buffer back to the input
+    int consumerSlot = BufferItem::INVALID_BUFFER_SLOT;
+    status = mConsumer->attachBuffer(&consumerSlot, tracker.getBuffer());
+    LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+            "attaching buffer to input failed (%d)", status);
+
+    status = mConsumer->releaseBuffer(consumerSlot, /* frameNumber */ 0,
+            EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, tracker.getMergedFence());
+    LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+            "releasing buffer to input failed (%d)", status);
+
+    ALOGV("released buffer %#" PRIx64 " to input", buffer->getId());
+
+    // We no longer need to track the buffer once it has been returned to the
+    // input
+    mBuffers.erase(buffer->getId());
+
+    // Notify any waiting onFrameAvailable calls
+    --mOutstandingBuffers;
+    mReleaseCondition.signal();
+}
+
+void Camera3StreamSplitter::onAbandonedLocked() {
+    ALOGE("one of my outputs has abandoned me");
+    if (!mIsAbandoned && mConsumer != nullptr) {
+        mConsumer->consumerDisconnect();
+    }
+    mIsAbandoned = true;
+    mReleaseCondition.broadcast();
+}
+
+Camera3StreamSplitter::OutputListener::OutputListener(
+        wp<Camera3StreamSplitter> splitter,
+        wp<IGraphicBufferProducer> output)
+      : mSplitter(splitter), mOutput(output) {}
+
+void Camera3StreamSplitter::OutputListener::onBufferReleased() {
+    sp<Camera3StreamSplitter> splitter = mSplitter.promote();
+    sp<IGraphicBufferProducer> output = mOutput.promote();
+    if (splitter != nullptr && output != nullptr) {
+        splitter->onBufferReleasedByOutput(output);
+    }
+}
+
+void Camera3StreamSplitter::OutputListener::binderDied(const wp<IBinder>& /* who */) {
+    sp<Camera3StreamSplitter> splitter = mSplitter.promote();
+    if (splitter != nullptr) {
+        Mutex::Autolock lock(splitter->mMutex);
+        splitter->onAbandonedLocked();
+    }
+}
+
+Camera3StreamSplitter::BufferTracker::BufferTracker(
+        const sp<GraphicBuffer>& buffer, size_t referenceCount)
+      : mBuffer(buffer), mMergedFence(Fence::NO_FENCE),
+        mReferenceCount(referenceCount) {}
+
+void Camera3StreamSplitter::BufferTracker::mergeFence(const sp<Fence>& with) {
+    mMergedFence = Fence::merge(String8("Camera3StreamSplitter"), mMergedFence, with);
+}
+
+size_t Camera3StreamSplitter::BufferTracker::decrementReferenceCountLocked() {
+    if (mReferenceCount > 0)
+        --mReferenceCount;
+    return mReferenceCount;
+}
+
+} // namespace android
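
The splitter above returns a buffer to the input only after every requested output has released it. A condensed, hedged sketch of that per-buffer lifecycle; TrackerSketch and releaseFromOutput() are illustrative stand-ins for BufferTracker and the logic in onBufferReleasedByOutput():

    // Each buffer detached from the input starts with a reference count equal to
    // the number of requested surfaces for its frame.
    struct TrackerSketch {
        sp<GraphicBuffer> buffer;
        sp<Fence> mergedFence = Fence::NO_FENCE;
        size_t refCount;
    };

    // Called once per output release; returns true when the buffer is ready to be
    // attached and released back to the input queue with the merged fence.
    bool releaseFromOutput(TrackerSketch& t, const sp<Fence>& outputFence) {
        t.mergedFence = Fence::merge(String8("sketch"), t.mergedFence, outputFence);
        if (t.refCount > 0) --t.refCount;
        return t.refCount == 0;
    }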
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.h b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
new file mode 100644
index 0000000..5a25712
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2014,2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_STREAMSPLITTER_H
+#define ANDROID_SERVERS_STREAMSPLITTER_H
+
+#include <gui/IConsumerListener.h>
+#include <gui/IProducerListener.h>
+#include <gui/BufferItemConsumer.h>
+
+#include <utils/Condition.h>
+#include <utils/Mutex.h>
+#include <utils/StrongPointer.h>
+#include <utils/Timers.h>
+
+namespace android {
+
+class GraphicBuffer;
+class IGraphicBufferConsumer;
+class IGraphicBufferProducer;
+
+// Camera3StreamSplitter is an autonomous class that manages one input BufferQueue
+// and multiple output BufferQueues. By using the buffer attach and detach logic
+// in BufferQueue, it is able to present the illusion of a single split
+// BufferQueue, where each buffer queued to the input is available to be
+// acquired by each of the outputs, and is able to be dequeued by the input
+// again only once all of the outputs have released it.
+class Camera3StreamSplitter : public BnConsumerListener {
+public:
+
+    // Constructor
+    Camera3StreamSplitter() = default;
+
+    // Connect to the stream splitter by creating buffer queue and connecting it
+    // with output surfaces.
+    status_t connect(const std::vector<sp<Surface> >& surfaces,
+            uint32_t consumerUsage, size_t hal_max_buffers,
+            sp<Surface>& consumer);
+
+    // addOutput adds an output BufferQueue to the splitter. The splitter
+    // connects to outputQueue as a CPU producer, and any buffers queued
+    // to the input will be queued to each output. It is assumed that all of the
+    // outputs are added before any buffers are queued on the input. If any
+    // output is abandoned by its consumer, the splitter will abandon its input
+    // queue (see onAbandoned).
+    //
+    // A return value other than NO_ERROR means that an error has occurred and
+    // outputQueue has not been added to the splitter. BAD_VALUE is returned if
+    // outputQueue is NULL. See IGraphicBufferProducer::connect for explanations
+    // of other error codes.
+    status_t addOutput(sp<Surface>& outputQueue, size_t hal_max_buffers);
+
+    // Request surfaces for a particular frame number. The requested surfaces
+    // are stored in a FIFO queue. When a buffer becomes available from the
+    // input queue, the registered surfaces are used to decide which outputs the
+    // buffer is sent to.
+    status_t notifyRequestedSurfaces(const std::vector<size_t>& surfaces);
+
+    // Disconnect the buffer queue from output surfaces.
+    void disconnect();
+
+private:
+    // From IConsumerListener
+    //
+    // During this callback, we store some tracking information, detach the
+    // buffer from the input, and attach it to each of the outputs. This call
+    // can block if there are too many outstanding buffers. If it blocks, it
+    // will resume when onBufferReleasedByOutput releases a buffer back to the
+    // input.
+    void onFrameAvailable(const BufferItem& item) override;
+
+    // From IConsumerListener
+    // We don't care about released buffers because we detach each buffer as
+    // soon as we acquire it. See the comment for onBufferReleased below for
+    // some clarifying notes about the name.
+    void onBuffersReleased() override {}
+
+    // From IConsumerListener
+    // We don't care about sideband streams, since we won't be splitting them
+    void onSidebandStreamChanged() override {}
+
+    // This is the implementation of the onBufferReleased callback from
+    // IProducerListener. It gets called from an OutputListener (see below), and
+    // 'from' identifies the producer interface from which the callback was received.
+    //
+    // During this callback, we detach the buffer from the output queue that
+    // generated the callback, update our state tracking to see if this is the
+    // last output releasing the buffer, and if so, release it to the input.
+    // If we release the buffer to the input, we allow a blocked
+    // onFrameAvailable call to proceed.
+    void onBufferReleasedByOutput(const sp<IGraphicBufferProducer>& from);
+
+    // When this is called, the splitter disconnects from (i.e., abandons) its
+    // input queue and signals any waiting onFrameAvailable calls to wake up.
+    // It still processes callbacks from other outputs, but only detaches their
+    // buffers so they can continue operating until they run out of buffers to
+    // acquire. This must be called with mMutex locked.
+    void onAbandonedLocked();
+
+    // This is a thin wrapper class that lets us determine which BufferQueue
+    // the IProducerListener::onBufferReleased callback is associated with. We
+    // create one of these per output BufferQueue, and then pass the producer
+    // into onBufferReleasedByOutput above.
+    class OutputListener : public BnProducerListener,
+                           public IBinder::DeathRecipient {
+    public:
+        OutputListener(wp<Camera3StreamSplitter> splitter,
+                wp<IGraphicBufferProducer> output);
+        virtual ~OutputListener() = default;
+
+        // From IProducerListener
+        void onBufferReleased() override;
+
+        // From IBinder::DeathRecipient
+        void binderDied(const wp<IBinder>& who) override;
+
+    private:
+        wp<Camera3StreamSplitter> mSplitter;
+        wp<IGraphicBufferProducer> mOutput;
+    };
+
+    class BufferTracker {
+    public:
+        BufferTracker(const sp<GraphicBuffer>& buffer, size_t referenceCount);
+        ~BufferTracker() = default;
+
+        const sp<GraphicBuffer>& getBuffer() const { return mBuffer; }
+        const sp<Fence>& getMergedFence() const { return mMergedFence; }
+
+        void mergeFence(const sp<Fence>& with);
+
+        // Returns the new value
+        // Only called while mMutex is held
+        size_t decrementReferenceCountLocked();
+
+    private:
+
+        // Disallow copying
+        BufferTracker(const BufferTracker& other);
+        BufferTracker& operator=(const BufferTracker& other);
+
+        sp<GraphicBuffer> mBuffer; // One instance that holds this native handle
+        sp<Fence> mMergedFence;
+        size_t mReferenceCount;
+    };
+
+    // A deferred output is an output added to the splitter after the connect()
+    // call, whereas a non-deferred output is added within the connect() call.
+    enum class OutputType { NonDeferred, Deferred };
+
+    // Must be accessed through RefBase
+    virtual ~Camera3StreamSplitter();
+
+    status_t addOutputLocked(const sp<Surface>& outputQueue,
+                             size_t hal_max_buffers, OutputType outputType);
+
+    // Get unique name for the buffer queue consumer
+    static String8 getUniqueConsumerName();
+
+    // Max consumer side buffers for deferred surface. This will be used as a
+    // lower bound for overall consumer side max buffers.
+    static const int MAX_BUFFERS_DEFERRED_OUTPUT = 2;
+    int mMaxConsumerBuffers = MAX_BUFFERS_DEFERRED_OUTPUT;
+
+    static const nsecs_t kDequeueBufferTimeout   = s2ns(1); // 1 sec
+
+    // mIsAbandoned is set to true when an output dies. Once the Camera3StreamSplitter
+    // has been abandoned, it will continue to detach buffers from other
+    // outputs, but it will disconnect from the input and not attempt to
+    // communicate with it further.
+    bool mIsAbandoned = false;
+
+    Mutex mMutex;
+    Condition mReleaseCondition;
+    int mOutstandingBuffers = 0;
+
+    sp<IGraphicBufferProducer> mProducer;
+    sp<IGraphicBufferConsumer> mConsumer;
+    sp<BufferItemConsumer> mBufferItemConsumer;
+    sp<Surface> mSurface;
+
+    std::vector<sp<IGraphicBufferProducer> > mOutputs;
+    // Tracks which outputs each input buffer should be attached and
+    // queued to.
+    std::vector<std::vector<size_t> > mRequestedSurfaces;
+
+    // Map of GraphicBuffer IDs (GraphicBuffer::getId()) to buffer tracking
+    // objects (which are mostly for counting how many outputs have released the
+    // buffer, but also contain merged release fences).
+    std::unordered_map<uint64_t, std::unique_ptr<BufferTracker> > mBuffers;
+};
+
+} // namespace android
+
+#endif
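
The splitter's public surface above is small: connect(), addOutput(), notifyRequestedSurfaces(), and disconnect(). A minimal caller-side sketch of the intended flow follows; it is not part of the patch, and the surface names, buffer count, and usage flag are illustrative assumptions rather than values taken from the camera device code.

    // Sketch only: drive the splitter with two pre-existing output surfaces.
    sp<Camera3StreamSplitter> splitter = new Camera3StreamSplitter();
    std::vector<sp<Surface>> outputSurfaces = { previewSurface, recordSurface }; // assumed to exist
    sp<Surface> halSurface; // filled in by connect(); the HAL dequeues/queues buffers here

    status_t res = splitter->connect(outputSurfaces,
            GRALLOC_USAGE_HW_CAMERA_WRITE /* consumerUsage, illustrative */,
            /* hal_max_buffers */ 4, halSurface);
    if (res == OK) {
        // Route the next input buffer only to the first output (index 0).
        splitter->notifyRequestedSurfaces({0});
    }
    // ...
    splitter->disconnect();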
diff --git a/services/camera/libcameraservice/tests/Android.mk b/services/camera/libcameraservice/tests/Android.mk
new file mode 100644
index 0000000..179643b
--- /dev/null
+++ b/services/camera/libcameraservice/tests/Android.mk
@@ -0,0 +1,38 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= $(call all-cpp-files-under, .)
+
+LOCAL_SHARED_LIBRARIES := \
+    libcutils \
+    libcameraservice \
+    libhidlbase \
+    liblog \
+    libutils \
+    android.hardware.camera.common@1.0 \
+    android.hardware.camera.provider@2.4 \
+    android.hardware.camera.device@1.0 \
+    android.hardware.camera.device@3.2
+
+LOCAL_C_INCLUDES += \
+
+LOCAL_CFLAGS += -Wall -Wextra -Werror
+
+LOCAL_MODULE:= cameraservice_test
+LOCAL_MODULE_TAGS := tests
+
+include $(BUILD_NATIVE_TEST)
diff --git a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
new file mode 100644
index 0000000..eb934ba
--- /dev/null
+++ b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "CameraProviderManagerTest"
+
+#include "../common/CameraProviderManager.h"
+#include <android/hidl/manager/1.0/IServiceManager.h>
+#include <android/hidl/manager/1.0/IServiceNotification.h>
+
+#include <gtest/gtest.h>
+
+using namespace android;
+using namespace android::hardware::camera;
+using android::hardware::camera::common::V1_0::Status;
+
+/**
+ * Basic test implementation of a camera provider
+ */
+struct TestICameraProvider : virtual public provider::V2_4::ICameraProvider {
+    sp<provider::V2_4::ICameraProviderCallbacks> mCallbacks;
+
+    std::vector<hardware::hidl_string> mDeviceNames;
+
+    TestICameraProvider() {
+        mDeviceNames.push_back("device@3.2/test/0");
+        mDeviceNames.push_back("device@1.0/test/0");
+        mDeviceNames.push_back("device@3.2/test/1");
+    }
+
+    virtual hardware::Return<Status> setCallbacks(
+            const sp<provider::V2_4::ICameraProviderCallbacks>& callbacks) override {
+        mCallbacks = callbacks;
+        return hardware::Return<Status>(Status::OK);
+    }
+
+    using getVendorTags_cb = std::function<void(Status status,
+            const hardware::hidl_vec<common::V1_0::VendorTagSection>& sections)>;
+    virtual hardware::Return<void> getVendorTags(getVendorTags_cb _hidl_cb) override {
+        hardware::hidl_vec<common::V1_0::VendorTagSection> sections;
+        _hidl_cb(Status::OK, sections);
+        return hardware::Void();
+    }
+
+    using getCameraIdList_cb = std::function<void(Status status,
+            const hardware::hidl_vec<hardware::hidl_string>& cameraDeviceNames)>;
+    virtual hardware::Return<void> getCameraIdList(getCameraIdList_cb _hidl_cb) override {
+        _hidl_cb(Status::OK, mDeviceNames);
+        return hardware::Void();
+    }
+
+    using getCameraDeviceInterface_V1_x_cb = std::function<void(Status status,
+            const sp<device::V1_0::ICameraDevice>& device)>;
+    virtual hardware::Return<void> getCameraDeviceInterface_V1_x(
+            const hardware::hidl_string& cameraDeviceName,
+            getCameraDeviceInterface_V1_x_cb _hidl_cb) override {
+        (void) cameraDeviceName;
+        _hidl_cb(Status::OK, nullptr);
+        return hardware::Void();
+    }
+
+    using getCameraDeviceInterface_V3_x_cb = std::function<void(Status status,
+            const sp<device::V3_2::ICameraDevice>& device)>;
+    virtual hardware::Return<void> getCameraDeviceInterface_V3_x(
+            const hardware::hidl_string& cameraDeviceName,
+            getCameraDeviceInterface_V3_x_cb _hidl_cb) override {
+        (void) cameraDeviceName;
+        _hidl_cb(Status::OK, nullptr);
+        return hardware::Void();
+    }
+
+};
+
+/**
+ * Simple test version of the interaction proxy, to use to inject onRegistered calls to the
+ * CameraProviderManager
+ */
+struct TestInteractionProxy : public CameraProviderManager::ServiceInteractionProxy {
+    sp<hidl::manager::V1_0::IServiceNotification> mManagerNotificationInterface;
+    const sp<TestICameraProvider> mTestCameraProvider;
+
+    TestInteractionProxy() :
+            mTestCameraProvider(new TestICameraProvider()) {
+
+    }
+    std::string mLastRequestedServiceName;
+
+    virtual ~TestInteractionProxy() {}
+
+    virtual bool registerForNotifications(
+            const std::string &serviceName,
+            const sp<hidl::manager::V1_0::IServiceNotification> &notification) override {
+        (void) serviceName;
+        mManagerNotificationInterface = notification;
+        return true;
+    }
+
+    virtual sp<hardware::camera::provider::V2_4::ICameraProvider> getService(
+            const std::string &serviceName) override {
+        mLastRequestedServiceName = serviceName;
+        return mTestCameraProvider;
+    }
+
+};
+
+TEST(CameraProviderManagerTest, InitializeTest) {
+
+    status_t res;
+    sp<CameraProviderManager> providerManager = new CameraProviderManager();
+    TestInteractionProxy serviceProxy{};
+
+    res = providerManager->initialize(&serviceProxy);
+    ASSERT_EQ(res, OK) << "Unable to initialize provider manager";
+
+    hardware::hidl_string legacyInstanceName = "legacy/0";
+    ASSERT_EQ(serviceProxy.mLastRequestedServiceName, legacyInstanceName) <<
+            "Legacy instance not requested from service manager";
+
+    hardware::hidl_string testProviderFqInterfaceName =
+            "android.hardware.camera.provider@2.4::ICameraProvider";
+    hardware::hidl_string testProviderInstanceName = "test/0";
+    serviceProxy.mManagerNotificationInterface->onRegistration(
+            testProviderFqInterfaceName,
+            testProviderInstanceName, false);
+
+    ASSERT_EQ(serviceProxy.mLastRequestedServiceName, testProviderInstanceName) <<
+            "Incorrect instance requested from service manager";
+}
diff --git a/services/mediaanalytics/Android.mk b/services/mediaanalytics/Android.mk
new file mode 100644
index 0000000..76a5c1c
--- /dev/null
+++ b/services/mediaanalytics/Android.mk
@@ -0,0 +1,32 @@
+# Media Statistics service
+#
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+	main_mediaanalytics.cpp
+
+LOCAL_SHARED_LIBRARIES := \
+	libcutils \
+	liblog \
+	libmedia \
+	libmediaanalyticsservice \
+	libutils \
+	libbinder \
+	libicuuc
+
+LOCAL_STATIC_LIBRARIES := \
+        libicuandroid_utils \
+        libregistermsext
+
+LOCAL_C_INCLUDES := \
+    frameworks/av/media/libmediaanalyticsservice
+
+LOCAL_MODULE:= mediaanalytics
+
+LOCAL_INIT_RC := mediaanalytics.rc
+
+LOCAL_CFLAGS := -Werror -Wall
+
+include $(BUILD_EXECUTABLE)
diff --git a/services/mediaanalytics/main_mediaanalytics.cpp b/services/mediaanalytics/main_mediaanalytics.cpp
new file mode 100644
index 0000000..672d13d
--- /dev/null
+++ b/services/mediaanalytics/main_mediaanalytics.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "mediaanalytics"
+//#define LOG_NDEBUG 0
+
+#include <binder/IPCThreadState.h>
+#include <binder/ProcessState.h>
+#include <binder/IServiceManager.h>
+#include <utils/Log.h>
+//#include "RegisterExtensions.h"
+
+// from LOCAL_C_INCLUDES
+#include "IcuUtils.h"
+#include "MediaAnalyticsService.h"
+
+using namespace android;
+
+int main(int argc __unused, char **argv __unused)
+{
+    signal(SIGPIPE, SIG_IGN);
+
+    // To match the service name, we replace "/system/bin/mediaanalytics" with
+    // "media.analytics": we add a "." but discard the path components, so we
+    // finish with a shorter string.
+    strcpy(argv[0], "media.analytics");
+
+    sp<ProcessState> proc(ProcessState::self());
+    sp<IServiceManager> sm(defaultServiceManager());
+    ALOGI("ServiceManager: %p", sm.get());
+
+    InitializeIcuOrDie();
+    MediaAnalyticsService::instantiate();
+    ProcessState::self()->startThreadPool();
+    IPCThreadState::self()->joinThreadPool();
+}
diff --git a/services/mediaanalytics/mediaanalytics.rc b/services/mediaanalytics/mediaanalytics.rc
new file mode 100644
index 0000000..0af69f5
--- /dev/null
+++ b/services/mediaanalytics/mediaanalytics.rc
@@ -0,0 +1,5 @@
+service mediaanalytics /system/bin/mediaanalytics
+    class main
+    user media
+    ioprio rt 4
+    writepid /dev/cpuset/foreground/tasks /dev/stune/foreground/tasks
diff --git a/services/mediadrm/Android.mk b/services/mediadrm/Android.mk
index 4ce5c38..73109e1 100644
--- a/services/mediadrm/Android.mk
+++ b/services/mediadrm/Android.mk
@@ -23,9 +23,7 @@
 
 LOCAL_SHARED_LIBRARIES:= \
     libbinder \
-    libcutils \
     liblog \
-    libmedia \
     libmediadrm \
     libutils \
 
diff --git a/services/mediadrm/tests/Android.mk b/services/mediadrm/tests/Android.mk
index 8cbf782..e2f7399 100644
--- a/services/mediadrm/tests/Android.mk
+++ b/services/mediadrm/tests/Android.mk
@@ -19,7 +19,6 @@
 	frameworks/av/media/libmediaplayerservice \
 
 LOCAL_CFLAGS += -Werror -Wall
-LOCAL_CLANG := true
 
 LOCAL_32_BIT_ONLY := true
 
diff --git a/services/mediaextractor/Android.mk b/services/mediaextractor/Android.mk
index a9a2d3c..4e337a0 100644
--- a/services/mediaextractor/Android.mk
+++ b/services/mediaextractor/Android.mk
@@ -5,19 +5,20 @@
 LOCAL_SRC_FILES := MediaExtractorService.cpp
 LOCAL_SHARED_LIBRARIES := libmedia libstagefright libbinder libutils liblog
 LOCAL_MODULE:= libmediaextractorservice
-LOCAL_32_BIT_ONLY := true
 include $(BUILD_SHARED_LIBRARY)
 
 
 # service executable
 include $(CLEAR_VARS)
+# seccomp filters are defined for the following architectures:
 LOCAL_REQUIRED_MODULES_arm := mediaextractor-seccomp.policy
+LOCAL_REQUIRED_MODULES_arm64 := mediaextractor-seccomp.policy
 LOCAL_REQUIRED_MODULES_x86 := mediaextractor-seccomp.policy
+# TODO add seccomp filter for x86_64.
 LOCAL_SRC_FILES := main_extractorservice.cpp minijail/minijail.cpp
 LOCAL_SHARED_LIBRARIES := libmedia libmediaextractorservice libbinder libutils liblog libicuuc libminijail
 LOCAL_STATIC_LIBRARIES := libicuandroid_utils
 LOCAL_MODULE:= mediaextractor
-LOCAL_32_BIT_ONLY := true
 LOCAL_INIT_RC := mediaextractor.rc
 LOCAL_C_INCLUDES := frameworks/av/media/libmedia
 include $(BUILD_EXECUTABLE)
diff --git a/services/mediaextractor/minijail/Android.mk b/services/mediaextractor/minijail/Android.mk
index 0cf8eff..6b01e77 100644
--- a/services/mediaextractor/minijail/Android.mk
+++ b/services/mediaextractor/minijail/Android.mk
@@ -1,18 +1,12 @@
 LOCAL_PATH := $(call my-dir)
 
-ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), arm arm64 x86 x86_64))
+# TODO add filter for x86_64
+ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), arm arm64 x86))
 include $(CLEAR_VARS)
 LOCAL_MODULE := mediaextractor-seccomp.policy
 LOCAL_MODULE_CLASS := ETC
 LOCAL_MODULE_PATH := $(TARGET_OUT)/etc/seccomp_policy
-
-# mediaextractor runs in 32-bit combatibility mode. For 64 bit architectures,
-# use the 32 bit policy
-ifdef TARGET_2ND_ARCH
-    LOCAL_SRC_FILES := $(LOCAL_PATH)/seccomp_policy/mediaextractor-seccomp-$(TARGET_2ND_ARCH).policy
-else
-    LOCAL_SRC_FILES := $(LOCAL_PATH)/seccomp_policy/mediaextractor-seccomp-$(TARGET_ARCH).policy
-endif
+LOCAL_SRC_FILES := $(LOCAL_PATH)/seccomp_policy/mediaextractor-seccomp-$(TARGET_ARCH).policy
 
 # allow device specific additions to the syscall whitelist
 LOCAL_SRC_FILES += $(wildcard $(foreach dir, $(BOARD_SECCOMP_POLICY), \
diff --git a/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm64.policy b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm64.policy
new file mode 100644
index 0000000..7e7b858
--- /dev/null
+++ b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm64.policy
@@ -0,0 +1,38 @@
+# Organized by frequency of system call - in descending order for
+# best performance.
+ioctl: 1
+futex: 1
+prctl: 1
+write: 1
+getpriority: 1
+close: 1
+dup: 1
+mmap: 1
+munmap: 1
+openat: 1
+mprotect: 1
+madvise: 1
+getuid: 1
+fstat: 1
+read: 1
+setpriority: 1
+sigaltstack: 1
+clone: 1
+lseek: 1
+newfstatat: 1
+faccessat: 1
+restart_syscall: 1
+exit: 1
+exit_group: 1
+rt_sigreturn: 1
+getrlimit: 1
+
+# for attaching to debuggerd on process crash
+tgkill: 1
+rt_sigprocmask: 1
+rt_sigaction: 1
+# socket: arg0 == AF_LOCAL
+socket: arg0 == 1
+connect: 1
+rt_tgsigqueueinfo: 1
+writev: 1
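
These policy files are consumed by libminijail at service startup; the mediaextractor executable above builds minijail/minijail.cpp and links libminijail, presumably for that purpose. As a rough sketch only (the helper's real code is not shown in this patch, and the function and path argument here are illustrative), installing such a filter boils down to:

    #include <libminijail.h>

    static void applySeccompPolicy(const char* policyPath) {
        struct minijail* jail = minijail_new();
        minijail_no_new_privs(jail);                // required before a seccomp filter can be installed
        minijail_log_seccomp_filter_failures(jail); // log violations instead of silently killing; useful during bring-up
        minijail_use_seccomp_filter(jail);
        minijail_parse_seccomp_filters(jail, policyPath);
        minijail_enter(jail);                       // apply the jail to the calling process
        minijail_destroy(jail);
    }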
diff --git a/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-x86.policy b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-x86.policy
index 67976ff..189855c 100644
--- a/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-x86.policy
+++ b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-x86.policy
@@ -35,6 +35,7 @@
 getgid32: 1
 getegid32: 1
 getgroups32: 1
+nanosleep: 1
 
 # for attaching to debuggerd on process crash
 socketcall: 1
diff --git a/services/medialog/Android.mk b/services/medialog/Android.mk
index a1da63d..423b186 100644
--- a/services/medialog/Android.mk
+++ b/services/medialog/Android.mk
@@ -4,7 +4,7 @@
 
 LOCAL_SRC_FILES := MediaLogService.cpp IMediaLogService.cpp
 
-LOCAL_SHARED_LIBRARIES := libbinder libutils liblog libnbaio
+LOCAL_SHARED_LIBRARIES := libbinder libutils liblog libnbaio libaudioutils
 
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
diff --git a/services/medialog/MediaLogService.cpp b/services/medialog/MediaLogService.cpp
index f85aa13..ab2f925 100644
--- a/services/medialog/MediaLogService.cpp
+++ b/services/medialog/MediaLogService.cpp
@@ -35,7 +35,7 @@
             shared->size() < NBLog::Timeline::sharedSize(size)) {
         return;
     }
-    sp<NBLog::Reader> reader(new NBLog::Reader(size, shared));
+    sp<NBLog::Reader> reader(new NBLog::Reader(shared, size));
     NamedReader namedReader(reader, name);
     Mutex::Autolock _l(mLock);
     mNamedReaders.add(namedReader);
diff --git a/services/mediaresourcemanager/Android.mk b/services/mediaresourcemanager/Android.mk
index b72230f..c9cd8cc 100644
--- a/services/mediaresourcemanager/Android.mk
+++ b/services/mediaresourcemanager/Android.mk
@@ -4,7 +4,7 @@
 
 LOCAL_SRC_FILES := ResourceManagerService.cpp ServiceLog.cpp
 
-LOCAL_SHARED_LIBRARIES := libmedia libstagefright libbinder libutils liblog
+LOCAL_SHARED_LIBRARIES := libmedia libmediautils libbinder libutils liblog
 
 LOCAL_MODULE:= libresourcemanagerservice
 
@@ -14,7 +14,6 @@
     $(TOPDIR)frameworks/av/include
 
 LOCAL_CFLAGS += -Werror -Wall
-LOCAL_CLANG := true
 
 include $(BUILD_SHARED_LIBRARY)
 
diff --git a/services/mediaresourcemanager/test/Android.mk b/services/mediaresourcemanager/test/Android.mk
index 3b4ef0d..6abcf92 100644
--- a/services/mediaresourcemanager/test/Android.mk
+++ b/services/mediaresourcemanager/test/Android.mk
@@ -21,7 +21,6 @@
   frameworks/av/services/mediaresourcemanager \
 
 LOCAL_CFLAGS += -Werror -Wall
-LOCAL_CLANG := true
 
 LOCAL_32_BIT_ONLY := true
 
@@ -47,7 +46,6 @@
   frameworks/av/services/mediaresourcemanager \
 
 LOCAL_CFLAGS += -Werror -Wall
-LOCAL_CLANG := true
 
 LOCAL_32_BIT_ONLY := true
 
diff --git a/services/oboeservice/Android.mk b/services/oboeservice/Android.mk
new file mode 100644
index 0000000..07b4d76
--- /dev/null
+++ b/services/oboeservice/Android.mk
@@ -0,0 +1,52 @@
+LOCAL_PATH:= $(call my-dir)
+
+# Oboe Service
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := oboeservice
+LOCAL_MODULE_TAGS := optional
+
+LIBOBOE_DIR := ../../media/liboboe
+LIBOBOE_SRC_DIR := $(LIBOBOE_DIR)/src
+
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/native/include \
+    system/core/base/include \
+    $(TOP)/frameworks/native/media/liboboe/include/include \
+    $(TOP)/frameworks/av/media/liboboe/include \
+    frameworks/native/include \
+    $(TOP)/external/tinyalsa/include \
+    $(TOP)/frameworks/av/media/liboboe/src \
+    $(TOP)/frameworks/av/media/liboboe/src/binding \
+    $(TOP)/frameworks/av/media/liboboe/src/client \
+    $(TOP)/frameworks/av/media/liboboe/src/core \
+    $(TOP)/frameworks/av/media/liboboe/src/fifo \
+    $(TOP)/frameworks/av/media/liboboe/src/utility
+
+# TODO These could be in a liboboe_common library
+LOCAL_SRC_FILES += \
+    $(LIBOBOE_SRC_DIR)/utility/HandleTracker.cpp \
+    $(LIBOBOE_SRC_DIR)/utility/OboeUtilities.cpp \
+    $(LIBOBOE_SRC_DIR)/fifo/FifoBuffer.cpp \
+    $(LIBOBOE_SRC_DIR)/fifo/FifoControllerBase.cpp \
+    $(LIBOBOE_SRC_DIR)/binding/SharedMemoryParcelable.cpp \
+    $(LIBOBOE_SRC_DIR)/binding/SharedRegionParcelable.cpp \
+    $(LIBOBOE_SRC_DIR)/binding/RingBufferParcelable.cpp \
+    $(LIBOBOE_SRC_DIR)/binding/AudioEndpointParcelable.cpp \
+    $(LIBOBOE_SRC_DIR)/binding/OboeStreamRequest.cpp \
+    $(LIBOBOE_SRC_DIR)/binding/OboeStreamConfiguration.cpp \
+    $(LIBOBOE_SRC_DIR)/binding/IOboeAudioService.cpp \
+    SharedRingBuffer.cpp \
+    FakeAudioHal.cpp \
+    OboeAudioService.cpp \
+    OboeServiceStreamBase.cpp \
+    OboeServiceStreamFakeHal.cpp \
+    OboeServiceMain.cpp
+
+LOCAL_CFLAGS += -Wno-unused-parameter
+LOCAL_CFLAGS += -Wall -Werror
+
+LOCAL_SHARED_LIBRARIES :=  libbinder libcutils libutils liblog libtinyalsa
+
+include $(BUILD_EXECUTABLE)
diff --git a/services/oboeservice/FakeAudioHal.cpp b/services/oboeservice/FakeAudioHal.cpp
new file mode 100644
index 0000000..7fa2eef
--- /dev/null
+++ b/services/oboeservice/FakeAudioHal.cpp
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Simple fake HAL that supports ALSA MMAP/NOIRQ mode.
+ */
+
+#include <iostream>
+#include <math.h>
+#include <limits>
+#include <string.h>
+#include <unistd.h>
+
+#define __force
+#define __bitwise
+#define __user
+#include <sound/asound.h>
+
+#include "tinyalsa/asoundlib.h"
+
+#include "FakeAudioHal.h"
+
+//using namespace oboe;
+
+using sample_t = int16_t;
+using std::cout;
+using std::endl;
+
+#undef SNDRV_PCM_IOCTL_SYNC_PTR
+#define SNDRV_PCM_IOCTL_SYNC_PTR 0xc0884123
+#define PCM_ERROR_MAX 128
+
+const int SAMPLE_RATE = 48000;       // Hz
+const int CHANNEL_COUNT = 2;
+
+struct pcm {
+    int fd;
+    unsigned int flags;
+    int running:1;
+    int prepared:1;
+    int underruns;
+    unsigned int buffer_size;
+    unsigned int boundary;
+    char error[PCM_ERROR_MAX];
+    struct pcm_config config;
+    struct snd_pcm_mmap_status *mmap_status;
+    struct snd_pcm_mmap_control *mmap_control;
+    struct snd_pcm_sync_ptr *sync_ptr;
+    void *mmap_buffer;
+    unsigned int noirq_frames_per_msec;
+    int wait_for_avail_min;
+};
+
+static int pcm_sync_ptr(struct pcm *pcm, int flags) {
+    if (pcm->sync_ptr) {
+        pcm->sync_ptr->flags = flags;
+        if (ioctl(pcm->fd, SNDRV_PCM_IOCTL_SYNC_PTR, pcm->sync_ptr) < 0)
+            return -1;
+    }
+    return 0;
+}
+
+int pcm_get_hw_ptr(struct pcm* pcm, unsigned int* hw_ptr) {
+    if (!hw_ptr || !pcm) return -EINVAL;
+
+    int result = pcm_sync_ptr(pcm, SNDRV_PCM_SYNC_PTR_HWSYNC);
+    if (!result) {
+        *hw_ptr = pcm->sync_ptr->s.status.hw_ptr;
+    }
+
+    return result;
+}
+
+typedef struct stream_tracker {
+    struct pcm * pcm;
+    int          framesPerBurst;
+    sample_t   * hwBuffer;
+    int32_t      capacityInFrames;
+    int32_t      capacityInBytes;
+} stream_tracker_t;
+
+#define FRAMES_PER_BURST_QUALCOMM 192
+#define FRAMES_PER_BURST_NVIDIA   128
+
+int fake_hal_open(int card_id, int device_id, fake_hal_stream_ptr *streamPP) {
+    int framesPerBurst = FRAMES_PER_BURST_QUALCOMM; // TODO update as needed
+    int periodCount = 32;
+    unsigned int offset1;
+    unsigned int frames1;
+    void *area = nullptr;
+    int mmapAvail = 0;
+
+    // Configuration for an ALSA stream.
+    pcm_config cfg;
+    memset(&cfg, 0, sizeof(cfg));
+    cfg.channels = CHANNEL_COUNT;
+    cfg.format = PCM_FORMAT_S16_LE;
+    cfg.rate = SAMPLE_RATE;
+    cfg.period_count = periodCount;
+    cfg.period_size = framesPerBurst;
+    cfg.start_threshold = 0; // for NOIRQ, should just start; was framesPerBurst
+    cfg.stop_threshold = INT32_MAX;
+    cfg.silence_size = 0;
+    cfg.silence_threshold = 0;
+    cfg.avail_min = framesPerBurst;
+
+    stream_tracker_t *streamTracker = (stream_tracker_t *) malloc(sizeof(stream_tracker_t));
+    if (streamTracker == nullptr) {
+        return -1;
+    }
+    memset(streamTracker, 0, sizeof(stream_tracker_t));
+
+    streamTracker->pcm = pcm_open(card_id, device_id, PCM_OUT | PCM_MMAP | PCM_NOIRQ, &cfg);
+    if (streamTracker->pcm == nullptr) {
+        cout << "Could not open device." << endl;
+        free(streamTracker);
+        return -1;
+    }
+
+    streamTracker->framesPerBurst = cfg.period_size; // Get from ALSA
+    streamTracker->capacityInFrames = pcm_get_buffer_size(streamTracker->pcm);
+    streamTracker->capacityInBytes = pcm_frames_to_bytes(streamTracker->pcm, streamTracker->capacityInFrames);
+    std::cout << "fake_hal_open() streamTracker->framesPerBurst = " << streamTracker->framesPerBurst << std::endl;
+    std::cout << "fake_hal_open() streamTracker->capacityInFrames = " << streamTracker->capacityInFrames << std::endl;
+
+    if (pcm_is_ready(streamTracker->pcm) < 0) {
+        cout << "Device is not ready." << endl;
+        goto error;
+    }
+
+    if (pcm_prepare(streamTracker->pcm) < 0) {
+        cout << "Device could not be prepared." << endl;
+        cout << "For Marlin, please enter:" << endl;
+        cout << "   adb shell" << endl;
+        cout << "   tinymix \"QUAT_MI2S_RX Audio Mixer MultiMedia8\" 1" << endl;
+        goto error;
+    }
+    mmapAvail = pcm_mmap_avail(streamTracker->pcm);
+    if (mmapAvail <= 0) {
+        cout << "fake_hal_open() mmap_avail is <=0" << endl;
+        goto error;
+    }
+    cout << "fake_hal_open() mmap_avail = " << mmapAvail << endl;
+
+    // Where is the memory mapped area?
+    if (pcm_mmap_begin(streamTracker->pcm, &area, &offset1, &frames1) < 0)  {
+        cout << "fake_hal_open() pcm_mmap_begin failed" << endl;
+        goto error;
+    }
+
+    // Clear the buffer.
+    memset((sample_t*) area, 0, streamTracker->capacityInBytes);
+    streamTracker->hwBuffer = (sample_t*) area;
+    streamTracker->hwBuffer[0] = 32000; // impulse
+
+    // Prime the buffer so it can start.
+    if (pcm_mmap_commit(streamTracker->pcm, 0, framesPerBurst) < 0) {
+        cout << "fake_hal_open() pcm_mmap_commit failed" << endl;
+        goto error;
+    }
+
+    *streamPP = streamTracker;
+    return 1;
+
+error:
+    fake_hal_close(streamTracker);
+    return -1;
+}
+
+int fake_hal_get_mmap_info(fake_hal_stream_ptr stream, mmap_buffer_info *info) {
+    stream_tracker_t *streamTracker = (stream_tracker_t *) stream;
+    info->fd = streamTracker->pcm->fd; // TODO use tinyalsa function
+    info->hw_buffer = streamTracker->hwBuffer;
+    info->burst_size_in_frames = streamTracker->framesPerBurst;
+    info->buffer_capacity_in_frames = streamTracker->capacityInFrames;
+    info->buffer_capacity_in_bytes = streamTracker->capacityInBytes;
+    info->sample_rate = SAMPLE_RATE;
+    info->channel_count = CHANNEL_COUNT;
+    return 0;
+}
+
+int fake_hal_start(fake_hal_stream_ptr stream) {
+    stream_tracker_t *streamTracker = (stream_tracker_t *) stream;
+    if (pcm_start(streamTracker->pcm) < 0) {
+        cout << "fake_hal_start failed" << endl;
+        return -1;
+    }
+    return 0;
+}
+
+int fake_hal_pause(fake_hal_stream_ptr stream) {
+    stream_tracker_t *streamTracker = (stream_tracker_t *) stream;
+    if (pcm_stop(streamTracker->pcm) < 0) {
+        cout << "fake_hal_stop failed" << endl;
+        return -1;
+    }
+    return 0;
+}
+
+int fake_hal_get_frame_counter(fake_hal_stream_ptr stream, int *frame_counter) {
+    stream_tracker_t *streamTracker = (stream_tracker_t *) stream;
+    if (pcm_get_hw_ptr(streamTracker->pcm, (unsigned int *)frame_counter) < 0) {
+        cout << "fake_hal_get_frame_counter failed" << endl;
+        return -1;
+    }
+    return 0;
+}
+
+int fake_hal_close(fake_hal_stream_ptr stream) {
+    stream_tracker_t *streamTracker = (stream_tracker_t *) stream;
+    pcm_close(streamTracker->pcm);
+    free(streamTracker);
+    return 0;
+}
+
diff --git a/services/oboeservice/FakeAudioHal.h b/services/oboeservice/FakeAudioHal.h
new file mode 100644
index 0000000..d6f28b2
--- /dev/null
+++ b/services/oboeservice/FakeAudioHal.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Simple fake HAL that supports ALSA MMAP/NOIRQ mode.
+ */
+
+#ifndef FAKE_AUDIO_HAL_H
+#define FAKE_AUDIO_HAL_H
+
+//namespace oboe {
+
+using sample_t = int16_t;
+struct mmap_buffer_info {
+    int       fd;
+    int32_t   burst_size_in_frames;
+    int32_t   buffer_capacity_in_frames;
+    int32_t   buffer_capacity_in_bytes;
+    int32_t   sample_rate;
+    int32_t   channel_count;
+    sample_t *hw_buffer;
+};
+
+typedef void *fake_hal_stream_ptr;
+
+//extern "C"
+//{
+
+int fake_hal_open(int card_id, int device_id, fake_hal_stream_ptr *stream_pp);
+
+int fake_hal_get_mmap_info(fake_hal_stream_ptr stream, mmap_buffer_info *info);
+
+int fake_hal_start(fake_hal_stream_ptr stream);
+
+int fake_hal_pause(fake_hal_stream_ptr stream);
+
+int fake_hal_get_frame_counter(fake_hal_stream_ptr stream, int *frame_counter);
+
+int fake_hal_close(fake_hal_stream_ptr stream);
+
+//} /* "C" */
+
+//} /* namespace oboe */
+
+#endif // FAKE_AUDIO_HAL_H
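
The header above is the whole contract of the fake HAL. A sketch of the expected call sequence (the card/device ids 0 and 19 are the hard-coded Marlin values used by OboeServiceStreamFakeHal later in this patch; everything else is illustrative):

    static int demoFakeHal() {
        fake_hal_stream_ptr stream = nullptr;
        if (fake_hal_open(/* card_id */ 0, /* device_id */ 19, &stream) < 0) {
            return -1;                                       // device missing or not prepared
        }
        mmap_buffer_info info;
        fake_hal_get_mmap_info(stream, &info);               // shared-memory fd, burst size, capacity
        fake_hal_start(stream);
        int frameCounter = 0;
        fake_hal_get_frame_counter(stream, &frameCounter);   // hardware read pointer, in frames
        fake_hal_pause(stream);
        return fake_hal_close(stream);
    }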
diff --git a/services/oboeservice/OboeAudioService.cpp b/services/oboeservice/OboeAudioService.cpp
new file mode 100644
index 0000000..caddc1d
--- /dev/null
+++ b/services/oboeservice/OboeAudioService.cpp
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "OboeService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <time.h>
+#include <pthread.h>
+
+#include <oboe/OboeDefinitions.h>
+
+#include "HandleTracker.h"
+#include "IOboeAudioService.h"
+#include "OboeService.h"
+#include "OboeAudioService.h"
+#include "OboeServiceStreamFakeHal.h"
+
+using namespace android;
+using namespace oboe;
+
+typedef enum
+{
+    OBOE_HANDLE_TYPE_STREAM,
+    OBOE_HANDLE_TYPE_COUNT
+} oboe_service_handle_type_t;
+static_assert(OBOE_HANDLE_TYPE_COUNT <= HANDLE_TRACKER_MAX_TYPES, "Too many handle types.");
+
+oboe_handle_t OboeAudioService::openStream(oboe::OboeStreamRequest &request,
+                                                oboe::OboeStreamConfiguration &configuration) {
+    OboeServiceStreamBase *serviceStream =  new OboeServiceStreamFakeHal();
+    ALOGD("OboeAudioService::openStream(): created serviceStream = %p", serviceStream);
+    oboe_result_t result = serviceStream->open(request, configuration);
+    if (result < 0) {
+        ALOGE("OboeAudioService::openStream(): open returned %d", result);
+        return result;
+    } else {
+        OboeStream handle = mHandleTracker.put(OBOE_HANDLE_TYPE_STREAM, serviceStream);
+        ALOGD("OboeAudioService::openStream(): handle = 0x%08X", handle);
+        if (handle < 0) {
+            delete serviceStream;
+        }
+        return handle;
+    }
+}
+
+oboe_result_t OboeAudioService::closeStream(oboe_handle_t streamHandle) {
+    OboeServiceStreamBase *serviceStream = (OboeServiceStreamBase *)
+            mHandleTracker.remove(OBOE_HANDLE_TYPE_STREAM,
+                                  streamHandle);
+    ALOGI("OboeAudioService.closeStream(0x%08X)", streamHandle);
+    if (serviceStream != nullptr) {
+        ALOGD("OboeAudioService::closeStream(): deleting serviceStream = %p", serviceStream);
+        delete serviceStream;
+        return OBOE_OK;
+    }
+    return OBOE_ERROR_INVALID_HANDLE;
+}
+
+OboeServiceStreamBase *OboeAudioService::convertHandleToServiceStream(
+        oboe_handle_t streamHandle) const {
+    return (OboeServiceStreamBase *) mHandleTracker.get(OBOE_HANDLE_TYPE_STREAM,
+                              (oboe_handle_t)streamHandle);
+}
+
+oboe_result_t OboeAudioService::getStreamDescription(
+                oboe_handle_t streamHandle,
+                oboe::AudioEndpointParcelable &parcelable) {
+    ALOGI("OboeAudioService::getStreamDescriptor(), streamHandle = 0x%08x", streamHandle);
+    OboeServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+    ALOGI("OboeAudioService::getStreamDescriptor(), serviceStream = %p", serviceStream);
+    if (serviceStream == nullptr) {
+        return OBOE_ERROR_INVALID_HANDLE;
+    }
+    return serviceStream->getDescription(parcelable);
+}
+
+oboe_result_t OboeAudioService::startStream(oboe_handle_t streamHandle) {
+    OboeServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+    ALOGI("OboeAudioService::startStream(), serviceStream = %p", serviceStream);
+    if (serviceStream == nullptr) {
+        return OBOE_ERROR_INVALID_HANDLE;
+    }
+    mLatestHandle = streamHandle;
+    return serviceStream->start();
+}
+
+oboe_result_t OboeAudioService::pauseStream(oboe_handle_t streamHandle) {
+    OboeServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+    ALOGI("OboeAudioService::pauseStream(), serviceStream = %p", serviceStream);
+    if (serviceStream == nullptr) {
+        return OBOE_ERROR_INVALID_HANDLE;
+    }
+    return serviceStream->pause();
+}
+
+oboe_result_t OboeAudioService::flushStream(oboe_handle_t streamHandle) {
+    OboeServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+    ALOGI("OboeAudioService::flushStream(), serviceStream = %p", serviceStream);
+    if (serviceStream == nullptr) {
+        return OBOE_ERROR_INVALID_HANDLE;
+    }
+    return serviceStream->flush();
+}
+
+void OboeAudioService::tickle() {
+    OboeServiceStreamBase *serviceStream = convertHandleToServiceStream(mLatestHandle);
+    //ALOGI("OboeAudioService::tickle(), serviceStream = %p", serviceStream);
+    if (serviceStream != nullptr) {
+        serviceStream->tickle();
+    }
+}
+
+oboe_result_t OboeAudioService::registerAudioThread(oboe_handle_t streamHandle,
+                                                         pid_t clientThreadId,
+                                                         oboe_nanoseconds_t periodNanoseconds) {
+    OboeServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+    ALOGI("OboeAudioService::registerAudioThread(), serviceStream = %p", serviceStream);
+    if (serviceStream == nullptr) {
+        ALOGE("OboeAudioService::registerAudioThread(), serviceStream == nullptr");
+        return OBOE_ERROR_INVALID_HANDLE;
+    }
+    if (serviceStream->getRegisteredThread() != OboeServiceStreamBase::ILLEGAL_THREAD_ID) {
+        ALOGE("OboeAudioService::registerAudioThread(), thread already registered");
+        return OBOE_ERROR_INVALID_ORDER;
+    }
+    serviceStream->setRegisteredThread(clientThreadId);
+    // Boost client thread to SCHED_FIFO
+    struct sched_param sp;
+    memset(&sp, 0, sizeof(sp));
+    sp.sched_priority = 2; // TODO use 'requestPriority' function from frameworks/av/media/utils
+    int err = sched_setscheduler(clientThreadId, SCHED_FIFO, &sp);
+    if (err != 0) {
+        ALOGE("OboeAudioService::sched_setscheduler() failed, errno = %d, priority = %d",
+              errno, sp.sched_priority);
+        return OBOE_ERROR_INTERNAL;
+    } else {
+        return OBOE_OK;
+    }
+}
+
+oboe_result_t OboeAudioService::unregisterAudioThread(oboe_handle_t streamHandle,
+                                                           pid_t clientThreadId) {
+    OboeServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+    ALOGI("OboeAudioService::unregisterAudioThread(), serviceStream = %p", serviceStream);
+    if (serviceStream == nullptr) {
+        ALOGE("OboeAudioService::unregisterAudioThread(), serviceStream == nullptr");
+        return OBOE_ERROR_INVALID_HANDLE;
+    }
+    if (serviceStream->getRegisteredThread() != clientThreadId) {
+        ALOGE("OboeAudioService::unregisterAudioThread(), wrong thread");
+        return OBOE_ERROR_ILLEGAL_ARGUMENT;
+    }
+    serviceStream->setRegisteredThread(0);
+    return OBOE_OK;
+}
diff --git a/services/oboeservice/OboeAudioService.h b/services/oboeservice/OboeAudioService.h
new file mode 100644
index 0000000..df3cbf8
--- /dev/null
+++ b/services/oboeservice/OboeAudioService.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_OBOE_AUDIO_SERVICE_H
+#define OBOE_OBOE_AUDIO_SERVICE_H
+
+#include <time.h>
+#include <pthread.h>
+
+#include <binder/BinderService.h>
+
+#include <oboe/OboeDefinitions.h>
+#include <oboe/OboeAudio.h>
+#include "HandleTracker.h"
+#include "IOboeAudioService.h"
+#include "OboeService.h"
+#include "OboeServiceStreamBase.h"
+
+using namespace android;
+namespace oboe {
+
+class OboeAudioService :
+    public BinderService<OboeAudioService>,
+    public BnOboeAudioService
+{
+    friend class BinderService<OboeAudioService>;   // for OboeAudioService()
+public:
+// TODO why does this fail?    static const char* getServiceName() ANDROID_API { return "media.audio_oboe"; }
+    static const char* getServiceName() { return "media.audio_oboe"; }
+
+    virtual oboe_handle_t openStream(OboeStreamRequest &request,
+                                     OboeStreamConfiguration &configuration);
+
+    virtual oboe_result_t closeStream(oboe_handle_t streamHandle);
+
+    virtual oboe_result_t getStreamDescription(
+                oboe_handle_t streamHandle,
+                AudioEndpointParcelable &parcelable);
+
+    virtual oboe_result_t startStream(oboe_handle_t streamHandle);
+
+    virtual oboe_result_t pauseStream(oboe_handle_t streamHandle);
+
+    virtual oboe_result_t flushStream(oboe_handle_t streamHandle);
+
+    virtual oboe_result_t registerAudioThread(oboe_handle_t streamHandle,
+                                              pid_t pid, oboe_nanoseconds_t periodNanoseconds) ;
+
+    virtual oboe_result_t unregisterAudioThread(oboe_handle_t streamHandle, pid_t pid);
+
+    virtual void tickle();
+
+private:
+
+    OboeServiceStreamBase *convertHandleToServiceStream(oboe_handle_t streamHandle) const;
+
+    HandleTracker mHandleTracker;
+    oboe_handle_t mLatestHandle = OBOE_ERROR_INVALID_HANDLE; // TODO until we have service threads
+};
+
+} /* namespace oboe */
+
+#endif //OBOE_OBOE_AUDIO_SERVICE_H
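
A sketch of the call order this interface expects from a client of the service (binder plumbing through IOboeAudioService is elided; request, configuration, clientTid, and periodNanos are assumed to be prepared by the caller):

    oboe_handle_t handle = service.openStream(request, configuration);
    if (handle >= 0) {                                   // negative values are oboe_result_t errors
        AudioEndpointParcelable endpoint;
        service.getStreamDescription(handle, endpoint);  // shared queues and buffers for the stream
        service.registerAudioThread(handle, clientTid, periodNanos);
        service.startStream(handle);
        // ... audio flows through the shared-memory queues ...
        service.pauseStream(handle);
        service.unregisterAudioThread(handle, clientTid);
        service.closeStream(handle);
    }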
diff --git a/services/oboeservice/OboeService.h b/services/oboeservice/OboeService.h
new file mode 100644
index 0000000..a24f525
--- /dev/null
+++ b/services/oboeservice/OboeService.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_OBOE_SERVICE_H
+#define OBOE_OBOE_SERVICE_H
+
+#include <stdint.h>
+
+#include <oboe/OboeAudio.h>
+
+#include "binding/RingBufferParcelable.h"
+
+namespace oboe {
+
+// TODO move this an "include" folder for the service.
+
+struct OboeMessageTimestamp {
+    oboe_position_frames_t position;
+    int64_t                deviceOffset; // add to client position to get device position
+    oboe_nanoseconds_t     timestamp;
+};
+
+typedef enum oboe_service_event_e : uint32_t {
+    OBOE_SERVICE_EVENT_STARTED,
+    OBOE_SERVICE_EVENT_PAUSED,
+    OBOE_SERVICE_EVENT_FLUSHED,
+    OBOE_SERVICE_EVENT_CLOSED,
+    OBOE_SERVICE_EVENT_DISCONNECTED
+} oboe_service_event_t;
+
+struct OboeMessageEvent {
+    oboe_service_event_t event;
+    int32_t data1;
+    int64_t data2;
+};
+
+typedef struct OboeServiceMessage_s {
+    enum class code : uint32_t {
+        NOTHING,
+        TIMESTAMP,
+        EVENT,
+    };
+
+    code what;
+    union {
+        OboeMessageTimestamp timestamp;
+        OboeMessageEvent event;
+    };
+} OboeServiceMessage;
+
+
+} /* namespace oboe */
+
+#endif //OBOE_OBOE_SERVICE_H
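
A sketch of how a service stream might fill in one of these messages before pushing it up to the client; it mirrors OboeServiceStreamBase::sendServiceEvent() elsewhere in this patch, and positionFrames / nowNanos are assumed to be supplied by the caller:

    OboeServiceMessage command;
    command.what = OboeServiceMessage::code::TIMESTAMP;
    command.timestamp.position = positionFrames;          // frames played so far
    command.timestamp.deviceOffset = 0;                   // no extra device offset in the fake-HAL case
    command.timestamp.timestamp = nowNanos;               // CLOCK_MONOTONIC time, in nanoseconds
    mUpMessageQueue->getFifoBuffer()->write(&command, 1);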
diff --git a/services/oboeservice/OboeServiceMain.cpp b/services/oboeservice/OboeServiceMain.cpp
new file mode 100644
index 0000000..18bcf2b
--- /dev/null
+++ b/services/oboeservice/OboeServiceMain.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "OboeService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <math.h>
+
+#include <utils/RefBase.h>
+#include <binder/TextOutput.h>
+
+#include <binder/IInterface.h>
+#include <binder/IBinder.h>
+#include <binder/ProcessState.h>
+#include <binder/IServiceManager.h>
+#include <binder/IPCThreadState.h>
+
+#include <cutils/ashmem.h>
+#include <sys/mman.h>
+
+#include "OboeService.h"
+#include "IOboeAudioService.h"
+#include "OboeAudioService.h"
+
+using namespace android;
+using namespace oboe;
+
+/**
+ * This is used to test the OboeService as a standalone application.
+ * It is not used when the OboeService is integrated with AudioFlinger.
+ */
+int main(int argc, char **argv) {
+    printf("Test OboeService %s\n", argv[1]);
+    ALOGD("This is the OboeAudioService");
+
+    defaultServiceManager()->addService(String16("OboeAudioService"), new OboeAudioService());
+    android::ProcessState::self()->startThreadPool();
+    printf("OboeAudioService service is now ready\n");
+    IPCThreadState::self()->joinThreadPool();
+    printf("OboeAudioService service thread joined\n");
+
+    return 0;
+}
diff --git a/services/oboeservice/OboeServiceStreamBase.cpp b/services/oboeservice/OboeServiceStreamBase.cpp
new file mode 100644
index 0000000..6b7e4e5
--- /dev/null
+++ b/services/oboeservice/OboeServiceStreamBase.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "OboeService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include "IOboeAudioService.h"
+#include "OboeService.h"
+#include "OboeServiceStreamBase.h"
+#include "AudioEndpointParcelable.h"
+
+using namespace android;
+using namespace oboe;
+
+/**
+ * Construct the AudioCommandQueues and the AudioDataQueue
+ * and fill in the endpoint parcelable.
+ */
+
+OboeServiceStreamBase::OboeServiceStreamBase()
+        : mUpMessageQueue(nullptr)
+{
+    // TODO could fail so move out of constructor
+    mUpMessageQueue = new SharedRingBuffer();
+    mUpMessageQueue->allocate(sizeof(OboeServiceMessage), QUEUE_UP_CAPACITY_COMMANDS);
+}
+
+OboeServiceStreamBase::~OboeServiceStreamBase() {
+    delete mUpMessageQueue;
+}
+
+void OboeServiceStreamBase::sendServiceEvent(oboe_service_event_t event,
+                              int32_t data1,
+                              int64_t data2) {
+    OboeServiceMessage command;
+    command.what = OboeServiceMessage::code::EVENT;
+    command.event.event = event;
+    command.event.data1 = data1;
+    command.event.data2 = data2;
+    mUpMessageQueue->getFifoBuffer()->write(&command, 1);
+}
+
+
diff --git a/services/oboeservice/OboeServiceStreamBase.h b/services/oboeservice/OboeServiceStreamBase.h
new file mode 100644
index 0000000..736c754
--- /dev/null
+++ b/services/oboeservice/OboeServiceStreamBase.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_OBOE_SERVICE_STREAM_BASE_H
+#define OBOE_OBOE_SERVICE_STREAM_BASE_H
+
+#include "IOboeAudioService.h"
+#include "OboeService.h"
+#include "AudioStream.h"
+#include "fifo/FifoBuffer.h"
+#include "SharedRingBuffer.h"
+#include "AudioEndpointParcelable.h"
+
+namespace oboe {
+
+// We expect the queue to only have a few commands.
+// This should be way more than we need.
+#define QUEUE_UP_CAPACITY_COMMANDS (128)
+
+class OboeServiceStreamBase  {
+
+public:
+    OboeServiceStreamBase();
+    virtual ~OboeServiceStreamBase();
+
+    enum {
+        ILLEGAL_THREAD_ID = 0
+    };
+
+    /**
+     * Fill in a parcelable description of stream.
+     */
+    virtual oboe_result_t getDescription(oboe::AudioEndpointParcelable &parcelable) = 0;
+
+    /**
+     * Open the device.
+     */
+    virtual oboe_result_t open(oboe::OboeStreamRequest &request,
+                               oboe::OboeStreamConfiguration &configuration) = 0;
+
+    /**
+     * Start the flow of data.
+     */
+    virtual oboe_result_t start() = 0;
+
+    /**
+     * Stop the flow of data so that start() can resume without loss of data.
+     */
+    virtual oboe_result_t pause() = 0;
+
+    /**
+     *  Discard any data held by the underlying HAL or Service.
+     */
+    virtual oboe_result_t flush() = 0;
+
+    virtual oboe_result_t close() = 0;
+
+    virtual void tickle() = 0;
+
+    virtual void sendServiceEvent(oboe_service_event_t event,
+                                  int32_t data1 = 0,
+                                  int64_t data2 = 0);
+
+    virtual void setRegisteredThread(pid_t pid) {
+        mRegisteredClientThread = pid;
+    }
+    virtual pid_t getRegisteredThread() {
+        return mRegisteredClientThread;
+    }
+
+protected:
+
+    pid_t                    mRegisteredClientThread = ILLEGAL_THREAD_ID;
+
+    SharedRingBuffer *       mUpMessageQueue;
+
+    oboe_sample_rate_t       mSampleRate = 0;
+    oboe_size_bytes_t        mBytesPerFrame = 0;
+    oboe_size_frames_t       mFramesPerBurst = 0;
+    oboe_size_frames_t       mCapacityInFrames = 0;
+    oboe_size_bytes_t        mCapacityInBytes = 0;
+};
+
+} /* namespace oboe */
+
+#endif //OBOE_OBOE_SERVICE_STREAM_BASE_H
diff --git a/services/oboeservice/OboeServiceStreamFakeHal.cpp b/services/oboeservice/OboeServiceStreamFakeHal.cpp
new file mode 100644
index 0000000..dbbc860
--- /dev/null
+++ b/services/oboeservice/OboeServiceStreamFakeHal.cpp
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "OboeService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include "AudioClock.h"
+#include "AudioEndpointParcelable.h"
+
+#include "OboeServiceStreamBase.h"
+#include "OboeServiceStreamFakeHal.h"
+
+#include "FakeAudioHal.h"
+
+using namespace android;
+using namespace oboe;
+
+// HACK values for Marlin
+#define CARD_ID              0
+#define DEVICE_ID            19
+
+/**
+ * Construct the audio message queues.
+ */
+
+OboeServiceStreamFakeHal::OboeServiceStreamFakeHal()
+        : OboeServiceStreamBase()
+        , mStreamId(nullptr)
+        , mPreviousFrameCounter(0)
+{
+}
+
+OboeServiceStreamFakeHal::~OboeServiceStreamFakeHal() {
+    ALOGD("OboeServiceStreamFakeHal::~OboeServiceStreamFakeHal() call close()");
+    close();
+}
+
+oboe_result_t OboeServiceStreamFakeHal::open(oboe::OboeStreamRequest &request,
+                                             oboe::OboeStreamConfiguration &configuration) {
+    // Open stream on HAL and pass information about the ring buffer to the client.
+    mmap_buffer_info mmapInfo;
+    oboe_result_t error;
+
+    // Open HAL
+    error = fake_hal_open(CARD_ID, DEVICE_ID, &mStreamId);
+    if (error < 0) {
+        ALOGE("Could not open card %d, device %d", CARD_ID, DEVICE_ID);
+        return error;
+    }
+
+    // Get information about the shared audio buffer.
+    error = fake_hal_get_mmap_info(mStreamId, &mmapInfo);
+    if (error < 0) {
+        ALOGE("fake_hal_get_mmap_info returned %d", error);
+        fake_hal_close(mStreamId);
+        mStreamId = nullptr;
+        return error;
+    }
+    mHalFileDescriptor = mmapInfo.fd;
+    mFramesPerBurst = mmapInfo.burst_size_in_frames;
+    mCapacityInFrames = mmapInfo.buffer_capacity_in_frames;
+    mCapacityInBytes = mmapInfo.buffer_capacity_in_bytes;
+    mSampleRate = mmapInfo.sample_rate;
+    mBytesPerFrame = mmapInfo.channel_count * sizeof(int16_t); // FIXME based on data format
+    ALOGD("OboeServiceStreamFakeHal::open() mmapInfo.burst_size_in_frames = %d",
+         mmapInfo.burst_size_in_frames);
+    ALOGD("OboeServiceStreamFakeHal::open() mmapInfo.buffer_capacity_in_frames = %d",
+         mmapInfo.buffer_capacity_in_frames);
+    ALOGD("OboeServiceStreamFakeHal::open() mmapInfo.buffer_capacity_in_bytes = %d",
+         mmapInfo.buffer_capacity_in_bytes);
+
+    // Fill in OboeStreamConfiguration
+    configuration.setSampleRate(mSampleRate);
+    configuration.setSamplesPerFrame(mmapInfo.channel_count);
+    configuration.setAudioFormat(OBOE_AUDIO_FORMAT_PCM16);
+    return OBOE_OK;
+}
+
+/**
+ * Get an immutable description of the in-memory queues
+ * used to communicate with the underlying HAL or Service.
+ */
+oboe_result_t OboeServiceStreamFakeHal::getDescription(AudioEndpointParcelable &parcelable) {
+    // Gather information on the message queue.
+    mUpMessageQueue->fillParcelable(parcelable,
+                                    parcelable.mUpMessageQueueParcelable);
+
+    // Gather information on the data queue.
+    // TODO refactor into a SharedRingBuffer?
+    int fdIndex = parcelable.addFileDescriptor(mHalFileDescriptor, mCapacityInBytes);
+    parcelable.mDownDataQueueParcelable.setupMemory(fdIndex, 0, mCapacityInBytes);
+    parcelable.mDownDataQueueParcelable.setBytesPerFrame(mBytesPerFrame);
+    parcelable.mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
+    parcelable.mDownDataQueueParcelable.setCapacityInFrames(mCapacityInFrames);
+    return OBOE_OK;
+}
+
+/**
+ * Start the flow of data.
+ */
+oboe_result_t OboeServiceStreamFakeHal::start() {
+    if (mStreamId == nullptr) return OBOE_ERROR_NULL;
+    oboe_result_t result = fake_hal_start(mStreamId);
+    sendServiceEvent(OBOE_SERVICE_EVENT_STARTED);
+    mState = OBOE_STREAM_STATE_STARTED;
+    return result;
+}
+
+/**
+ * Stop the flow of data such that start() can resume without loss of data.
+ */
+oboe_result_t OboeServiceStreamFakeHal::pause() {
+    if (mStreamId == nullptr) return OBOE_ERROR_NULL;
+    sendCurrentTimestamp();
+    oboe_result_t result = fake_hal_pause(mStreamId);
+    sendServiceEvent(OBOE_SERVICE_EVENT_PAUSED);
+    mState = OBOE_STREAM_STATE_PAUSED;
+    mFramesRead.reset32();
+    ALOGD("OboeServiceStreamFakeHal::pause() sent OBOE_SERVICE_EVENT_PAUSED");
+    return result;
+}
+
+/**
+ *  Discard any data held by the underlying HAL or Service.
+ */
+oboe_result_t OboeServiceStreamFakeHal::flush() {
+    if (mStreamId == nullptr) return OBOE_ERROR_NULL;
+    // TODO how do we flush an MMAP/NOIRQ buffer? sync pointers?
+    ALOGD("OboeServiceStreamFakeHal::flush() send OBOE_SERVICE_EVENT_FLUSHED");
+    sendServiceEvent(OBOE_SERVICE_EVENT_FLUSHED);
+    mState = OBOE_STREAM_STATE_FLUSHED;
+    return OBOE_OK;
+}
+
+oboe_result_t OboeServiceStreamFakeHal::close() {
+    oboe_result_t result = OBOE_OK;
+    if (mStreamId != nullptr) {
+        result = fake_hal_close(mStreamId);
+        mStreamId = nullptr;
+    }
+    return result;
+}
+
+void OboeServiceStreamFakeHal::sendCurrentTimestamp() {
+    int frameCounter = 0;
+    int error = fake_hal_get_frame_counter(mStreamId, &frameCounter);
+    if (error < 0) {
+        ALOGE("OboeServiceStreamFakeHal::sendCurrentTimestamp() error %d",
+                error);
+    } else if (frameCounter != mPreviousFrameCounter) {
+        OboeServiceMessage command;
+        command.what = OboeServiceMessage::code::TIMESTAMP;
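+        // update32() extends the HAL's wrapping 32-bit frame counter into the
+        // 64-bit monotonic position that is reported to the client.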
+        mFramesRead.update32(frameCounter);
+        command.timestamp.position = mFramesRead.get();
+        ALOGV("OboeServiceStreamFakeHal::sendCurrentTimestamp() HAL frames = %d, pos = %d",
+                frameCounter, (int)mFramesRead.get());
+        command.timestamp.timestamp = AudioClock::getNanoseconds();
+        mUpMessageQueue->getFifoBuffer()->write(&command, 1);
+        mPreviousFrameCounter = frameCounter;
+    }
+}
+
+void OboeServiceStreamFakeHal::tickle() {
+    if (mStreamId != nullptr) {
+        switch (mState) {
+            case OBOE_STREAM_STATE_STARTING:
+            case OBOE_STREAM_STATE_STARTED:
+            case OBOE_STREAM_STATE_PAUSING:
+            case OBOE_STREAM_STATE_STOPPING:
+                sendCurrentTimestamp();
+                break;
+            default:
+                break;
+        }
+    }
+}
+
diff --git a/services/oboeservice/OboeServiceStreamFakeHal.h b/services/oboeservice/OboeServiceStreamFakeHal.h
new file mode 100644
index 0000000..b026d34
--- /dev/null
+++ b/services/oboeservice/OboeServiceStreamFakeHal.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_OBOE_SERVICE_STREAM_FAKE_HAL_H
+#define OBOE_OBOE_SERVICE_STREAM_FAKE_HAL_H
+
+#include "OboeService.h"
+#include "OboeServiceStreamBase.h"
+#include "FakeAudioHal.h"
+#include "MonotonicCounter.h"
+#include "AudioEndpointParcelable.h"
+
+namespace oboe {
+
+class OboeServiceStreamFakeHal : public OboeServiceStreamBase {
+
+public:
+    OboeServiceStreamFakeHal();
+    virtual ~OboeServiceStreamFakeHal();
+
+    virtual oboe_result_t getDescription(AudioEndpointParcelable &parcelable) override;
+
+    virtual oboe_result_t open(oboe::OboeStreamRequest &request,
+                               oboe::OboeStreamConfiguration &configuration) override;
+
+    /**
+     * Start the flow of data.
+     */
+    virtual oboe_result_t start() override;
+
+    /**
+     * Stop the flow of data such that start() can resume without loss of data.
+     */
+    virtual oboe_result_t pause() override;
+
+    /**
+     *  Discard any data held by the underlying HAL or Service.
+     */
+    virtual oboe_result_t flush() override;
+
+    virtual oboe_result_t close() override;
+
+    virtual void tickle() override;
+
+protected:
+
+    void sendCurrentTimestamp();
+
+private:
+    fake_hal_stream_ptr    mStreamId; // Move to HAL
+
+    MonotonicCounter       mFramesWritten;
+    MonotonicCounter       mFramesRead;
+    int                    mHalFileDescriptor = -1;
+    int                    mPreviousFrameCounter = 0;   // from HAL
+
+    oboe_stream_state_t    mState = OBOE_STREAM_STATE_UNINITIALIZED;
+};
+
+} // namespace oboe
+
+#endif //OBOE_OBOE_SERVICE_STREAM_FAKE_HAL_H
diff --git a/services/oboeservice/SharedRingBuffer.cpp b/services/oboeservice/SharedRingBuffer.cpp
new file mode 100644
index 0000000..c3df5ce
--- /dev/null
+++ b/services/oboeservice/SharedRingBuffer.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "OboeService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include "AudioClock.h"
+#include "AudioEndpointParcelable.h"
+
+//#include "OboeServiceStreamBase.h"
+//#include "OboeServiceStreamFakeHal.h"
+
+#include "SharedRingBuffer.h"
+
+using namespace android;
+using namespace oboe;
+
+SharedRingBuffer::~SharedRingBuffer()
+{
+    if (mSharedMemory != nullptr) {
+        delete mFifoBuffer;
+        munmap(mSharedMemory, mSharedMemorySizeInBytes);
+        close(mFileDescriptor);
+        mSharedMemory = nullptr;
+    }
+}
+
+oboe_result_t SharedRingBuffer::allocate(fifo_frames_t   bytesPerFrame,
+                                         fifo_frames_t   capacityInFrames) {
+    mCapacityInFrames = capacityInFrames;
+
+    // Create shared memory large enough to hold the data and the read and write counters.
+    mDataMemorySizeInBytes = bytesPerFrame * capacityInFrames;
+    mSharedMemorySizeInBytes = mDataMemorySizeInBytes + (2 * (sizeof(fifo_counter_t)));
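+    // For example (illustrative numbers only): with 4 bytes per frame and a
+    // capacity of 1024 frames the data region is 4096 bytes, and the total adds
+    // room for the two fifo_counter_t read/write counters placed in front of it.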
+    mFileDescriptor = ashmem_create_region("OboeSharedRingBuffer", mSharedMemorySizeInBytes);
+    if (mFileDescriptor < 0) {
+        ALOGE("SharedRingBuffer::allocate() ashmem_create_region() failed %d", errno);
+        return OBOE_ERROR_INTERNAL;
+    }
+    int err = ashmem_set_prot_region(mFileDescriptor, PROT_READ|PROT_WRITE); // TODO error handling?
+    if (err < 0) {
+        ALOGE("SharedRingBuffer::allocate() ashmem_set_prot_region() failed %d", errno);
+        close(mFileDescriptor);
+        return OBOE_ERROR_INTERNAL; // TODO convert errno to a better OBOE_ERROR;
+    }
+
+    // Map the fd to memory addresses.
+    mSharedMemory = (uint8_t *) mmap(0, mSharedMemorySizeInBytes,
+                         PROT_READ|PROT_WRITE,
+                         MAP_SHARED,
+                         mFileDescriptor, 0);
+    if (mSharedMemory == MAP_FAILED) {
+        ALOGE("SharedRingBuffer::allocate() mmap() failed %d", errno);
+        close(mFileDescriptor);
+        return OBOE_ERROR_INTERNAL; // TODO convert errno to a better OBOE_ERROR;
+    }
+
+    // Get addresses for our counters and data from the shared memory.
+    fifo_counter_t *readCounterAddress =
+            (fifo_counter_t *) &mSharedMemory[SHARED_RINGBUFFER_READ_OFFSET];
+    fifo_counter_t *writeCounterAddress =
+            (fifo_counter_t *) &mSharedMemory[SHARED_RINGBUFFER_WRITE_OFFSET];
+    uint8_t *dataAddress = &mSharedMemory[SHARED_RINGBUFFER_DATA_OFFSET];
+
+    mFifoBuffer = new(std::nothrow) FifoBuffer(bytesPerFrame, capacityInFrames,
+                                 readCounterAddress, writeCounterAddress, dataAddress);
+    return (mFifoBuffer == nullptr) ? OBOE_ERROR_NO_MEMORY : OBOE_OK;
+}
+
+void SharedRingBuffer::fillParcelable(AudioEndpointParcelable &endpointParcelable,
+                    RingBufferParcelable &ringBufferParcelable) {
+    int fdIndex = endpointParcelable.addFileDescriptor(mFileDescriptor, mSharedMemorySizeInBytes);
+    ringBufferParcelable.setupMemory(fdIndex,
+                                     SHARED_RINGBUFFER_DATA_OFFSET,
+                                     mDataMemorySizeInBytes,
+                                     SHARED_RINGBUFFER_READ_OFFSET,
+                                     SHARED_RINGBUFFER_WRITE_OFFSET,
+                                     sizeof(fifo_counter_t));
+    ringBufferParcelable.setBytesPerFrame(mFifoBuffer->getBytesPerFrame());
+    ringBufferParcelable.setFramesPerBurst(1);
+    ringBufferParcelable.setCapacityInFrames(mCapacityInFrames);
+}
diff --git a/services/oboeservice/SharedRingBuffer.h b/services/oboeservice/SharedRingBuffer.h
new file mode 100644
index 0000000..3cc1c2d
--- /dev/null
+++ b/services/oboeservice/SharedRingBuffer.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_SHARED_RINGBUFFER_H
+#define OBOE_SHARED_RINGBUFFER_H
+
+#include <stdint.h>
+#include <cutils/ashmem.h>
+#include <sys/mman.h>
+
+#include "fifo/FifoBuffer.h"
+#include "RingBufferParcelable.h"
+#include "AudioEndpointParcelable.h"
+
+namespace oboe {
+
+// Determine the placement of the counters and data in shared memory.
+#define SHARED_RINGBUFFER_READ_OFFSET   0
+#define SHARED_RINGBUFFER_WRITE_OFFSET  sizeof(fifo_counter_t)
+#define SHARED_RINGBUFFER_DATA_OFFSET   (SHARED_RINGBUFFER_WRITE_OFFSET + sizeof(fifo_counter_t))
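+//
+// Resulting layout of the shared memory region (illustrative):
+//
+//   [ read counter ][ write counter ][ data ............................. ]
+//   offset 0         WRITE_OFFSET     DATA_OFFSET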
+
+/**
+ * Atomic FIFO that uses shared memory.
+ */
+class SharedRingBuffer {
+public:
+    SharedRingBuffer() {}
+
+    virtual ~SharedRingBuffer();
+
+    oboe_result_t allocate(fifo_frames_t bytesPerFrame, fifo_frames_t capacityInFrames);
+
+    void fillParcelable(AudioEndpointParcelable &endpointParcelable,
+                        RingBufferParcelable &ringBufferParcelable);
+
+    FifoBuffer * getFifoBuffer() {
+        return mFifoBuffer;
+    }
+
+private:
+    int            mFileDescriptor = -1;
+    FifoBuffer   * mFifoBuffer = nullptr;
+    uint8_t      * mSharedMemory = nullptr;
+    int32_t        mSharedMemorySizeInBytes = 0;
+    int32_t        mDataMemorySizeInBytes = 0;
+    fifo_frames_t  mCapacityInFrames = 0;
+};
+
+} /* namespace oboe */
+
+#endif //OBOE_SHARED_RINGBUFFER_H
diff --git a/services/radio/Android.mk b/services/radio/Android.mk
index fc8f00c..1b50dc3 100644
--- a/services/radio/Android.mk
+++ b/services/radio/Android.mk
@@ -17,7 +17,7 @@
 include $(CLEAR_VARS)
 
 
-LOCAL_SRC_FILES:=               \
+LOCAL_SRC_FILES:= \
     RadioService.cpp
 
 LOCAL_SHARED_LIBRARIES:= \
@@ -30,6 +30,26 @@
     libradio \
     libradio_metadata
 
+ifeq ($(USE_LEGACY_LOCAL_AUDIO_HAL),true)
+# libhardware configuration
+LOCAL_SRC_FILES += \
+    RadioHalLegacy.cpp
+else
+# Treble configuration
+
+LOCAL_SRC_FILES += \
+    HidlUtils.cpp \
+    RadioHalHidl.cpp
+
+LOCAL_SHARED_LIBRARIES += \
+    libhwbinder \
+    libhidlbase \
+    libhidltransport \
+    libbase \
+    libaudiohal \
+    android.hardware.broadcastradio@1.0
+endif
+
 LOCAL_CFLAGS += -Wall -Wextra -Werror
 
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
diff --git a/services/radio/HidlUtils.cpp b/services/radio/HidlUtils.cpp
new file mode 100644
index 0000000..3b33386
--- /dev/null
+++ b/services/radio/HidlUtils.cpp
@@ -0,0 +1,176 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "HidlUtils"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/misc.h>
+#include <system/radio_metadata.h>
+
+#include "HidlUtils.h"
+
+namespace android {
+
+using android::hardware::broadcastradio::V1_0::MetadataType;
+using android::hardware::broadcastradio::V1_0::Band;
+using android::hardware::broadcastradio::V1_0::Deemphasis;
+using android::hardware::broadcastradio::V1_0::Rds;
+
+//static
+int HidlUtils::convertHalResult(Result result)
+{
+    switch (result) {
+        case Result::OK:
+            return 0;
+        case Result::INVALID_ARGUMENTS:
+            return -EINVAL;
+        case Result::INVALID_STATE:
+            return -ENOSYS;
+        case Result::TIMEOUT:
+            return -ETIMEDOUT;
+        case Result::NOT_INITIALIZED:
+        default:
+            return -ENODEV;
+    }
+}
+
+
+//static
+void HidlUtils::convertBandConfigToHal(BandConfig *halConfig,
+                                       const radio_hal_band_config_t *config)
+{
+    halConfig->type = static_cast<Band>(config->type);
+    halConfig->antennaConnected = config->antenna_connected;
+    halConfig->lowerLimit = config->lower_limit;
+    halConfig->upperLimit = config->upper_limit;
+    halConfig->spacings.setToExternal(const_cast<unsigned int *>(&config->spacings[0]),
+                                       config->num_spacings * sizeof(uint32_t));
+    // FIXME: transfer buffer ownership. should have a method for that in hidl_vec
+    halConfig->spacings.resize(config->num_spacings);
+
+    if (halConfig->type == Band::FM) {
+        halConfig->ext.fm.deemphasis = static_cast<Deemphasis>(config->fm.deemphasis);
+        halConfig->ext.fm.stereo = config->fm.stereo;
+        halConfig->ext.fm.rds = static_cast<Rds>(config->fm.rds);
+        halConfig->ext.fm.ta = config->fm.ta;
+        halConfig->ext.fm.af = config->fm.af;
+        halConfig->ext.fm.ea = config->fm.ea;
+    } else {
+        halConfig->ext.am.stereo = config->am.stereo;
+    }
+}
+
+//static
+void HidlUtils::convertPropertiesFromHal(radio_hal_properties_t *properties,
+                                         const Properties *halProperties)
+{
+    properties->class_id = static_cast<radio_class_t>(halProperties->classId);
+    strlcpy(properties->implementor, halProperties->implementor.c_str(), RADIO_STRING_LEN_MAX);
+    strlcpy(properties->product, halProperties->product.c_str(), RADIO_STRING_LEN_MAX);
+    strlcpy(properties->version, halProperties->version.c_str(), RADIO_STRING_LEN_MAX);
+    strlcpy(properties->serial, halProperties->serial.c_str(), RADIO_STRING_LEN_MAX);
+    properties->num_tuners = halProperties->numTuners;
+    properties->num_audio_sources = halProperties->numAudioSources;
+    properties->supports_capture = halProperties->supportsCapture;
+    properties->num_bands = halProperties->bands.size();
+
+    for (size_t i = 0; i < halProperties->bands.size(); i++) {
+        convertBandConfigFromHal(&properties->bands[i], &halProperties->bands[i]);
+    }
+}
+
+//static
+void HidlUtils::convertBandConfigFromHal(radio_hal_band_config_t *config,
+                                         const BandConfig *halConfig)
+{
+    config->type = static_cast<radio_band_t>(halConfig->type);
+    config->antenna_connected = halConfig->antennaConnected;
+    config->lower_limit = halConfig->lowerLimit;
+    config->upper_limit = halConfig->upperLimit;
+    config->num_spacings = halConfig->spacings.size();
+    if (config->num_spacings > RADIO_NUM_SPACINGS_MAX) {
+        config->num_spacings = RADIO_NUM_SPACINGS_MAX;
+    }
+    memcpy(config->spacings, halConfig->spacings.data(),
+           sizeof(uint32_t) * config->num_spacings);
+
+    if (halConfig->type == Band::FM) {
+        config->fm.deemphasis = static_cast<radio_deemphasis_t>(halConfig->ext.fm.deemphasis);
+        config->fm.stereo = halConfig->ext.fm.stereo;
+        config->fm.rds = static_cast<radio_rds_t>(halConfig->ext.fm.rds);
+        config->fm.ta = halConfig->ext.fm.ta;
+        config->fm.af = halConfig->ext.fm.af;
+        config->fm.ea = halConfig->ext.fm.ea;
+    } else {
+        config->am.stereo = halConfig->ext.am.stereo;
+    }
+}
+
+
+//static
+void HidlUtils::convertProgramInfoFromHal(radio_program_info_t *info,
+                                          const ProgramInfo *halInfo)
+{
+    info->channel = halInfo->channel;
+    info->sub_channel = halInfo->subChannel;
+    info->tuned = halInfo->tuned;
+    info->stereo = halInfo->stereo;
+    info->digital = halInfo->digital;
+    info->signal_strength = halInfo->signalStrength;
+    convertMetaDataFromHal(&info->metadata, halInfo->metadata,
+                           halInfo->channel, halInfo->subChannel);
+}
+
+//static
+void HidlUtils::convertMetaDataFromHal(radio_metadata_t **metadata,
+                                       const hidl_vec<MetaData>& halMetadata,
+                                       uint32_t channel,
+                                       uint32_t subChannel)
+{
+
+    radio_metadata_allocate(metadata, channel, subChannel);
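+    // The radio_metadata_add_*() helpers take a radio_metadata_t** because they
+    // may grow (reallocate) the buffer as entries are appended.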
+    for (size_t i = 0; i < halMetadata.size(); i++) {
+        radio_metadata_key_t key = static_cast<radio_metadata_key_t>(halMetadata[i].key);
+        radio_metadata_type_t type = static_cast<radio_metadata_type_t>(halMetadata[i].type);
+        radio_metadata_clock_t clock;
+
+        switch (type) {
+        case RADIO_METADATA_TYPE_INT:
+            radio_metadata_add_int(metadata, key, halMetadata[i].intValue);
+            break;
+        case RADIO_METADATA_TYPE_TEXT:
+            radio_metadata_add_text(metadata, key, halMetadata[i].stringValue.c_str());
+            break;
+        case RADIO_METADATA_TYPE_RAW:
+            radio_metadata_add_raw(metadata, key,
+                                   halMetadata[i].rawValue.data(),
+                                   halMetadata[i].rawValue.size());
+            break;
+        case RADIO_METADATA_TYPE_CLOCK:
+            clock.utc_seconds_since_epoch =
+                    halMetadata[i].clockValue.utcSecondsSinceEpoch;
+            clock.timezone_offset_in_minutes =
+                    halMetadata[i].clockValue.timezoneOffsetInMinutes;
+            radio_metadata_add_clock(metadata, key, &clock);
+            break;
+        default:
+            ALOGW("%s invalid metadata type %u",__FUNCTION__, halMetadata[i].type);
+            break;
+        }
+    }
+}
+
+}  // namespace android
diff --git a/services/radio/HidlUtils.h b/services/radio/HidlUtils.h
new file mode 100644
index 0000000..c771060
--- /dev/null
+++ b/services/radio/HidlUtils.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ANDROID_HARDWARE_RADIO_HAL_HIDL_UTILS_H
+#define ANDROID_HARDWARE_RADIO_HAL_HIDL_UTILS_H
+
+#include <android/hardware/broadcastradio/1.0/types.h>
+#include <hardware/radio.h>
+
+namespace android {
+
+using android::hardware::hidl_vec;
+using android::hardware::broadcastradio::V1_0::Result;
+using android::hardware::broadcastradio::V1_0::Properties;
+using android::hardware::broadcastradio::V1_0::BandConfig;
+using android::hardware::broadcastradio::V1_0::ProgramInfo;
+using android::hardware::broadcastradio::V1_0::MetaData;
+
+class HidlUtils {
+public:
+    static int convertHalResult(Result result);
+    static void convertBandConfigFromHal(radio_hal_band_config_t *config,
+                                         const BandConfig *halConfig);
+    static void convertPropertiesFromHal(radio_hal_properties_t *properties,
+                                         const Properties *halProperties);
+    static void convertBandConfigToHal(BandConfig *halConfig,
+                                       const radio_hal_band_config_t *config);
+    static void convertProgramInfoFromHal(radio_program_info_t *info,
+                                          const ProgramInfo *halInfo);
+    static void convertMetaDataFromHal(radio_metadata_t **metadata,
+                                       const hidl_vec<MetaData>& halMetadata,
+                                       uint32_t channel,
+                                       uint32_t subChannel);
+};
+
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_RADIO_HAL_HIDL_UTILS_H
diff --git a/services/radio/RadioHalHidl.cpp b/services/radio/RadioHalHidl.cpp
new file mode 100644
index 0000000..032d3fd
--- /dev/null
+++ b/services/radio/RadioHalHidl.cpp
@@ -0,0 +1,386 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "RadioHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <media/audiohal/hidl/HalDeathHandler.h>
+#include <utils/Log.h>
+#include <utils/misc.h>
+#include <system/radio_metadata.h>
+#include <android/hardware/broadcastradio/1.0/IBroadcastRadioFactory.h>
+
+#include "RadioHalHidl.h"
+#include "HidlUtils.h"
+
+namespace android {
+
+using android::hardware::broadcastradio::V1_0::IBroadcastRadioFactory;
+using android::hardware::broadcastradio::V1_0::Class;
+using android::hardware::broadcastradio::V1_0::Direction;
+using android::hardware::broadcastradio::V1_0::Properties;
+
+
+/* static */
+sp<RadioInterface> RadioInterface::connectModule(radio_class_t classId)
+{
+    return new RadioHalHidl(classId);
+}
+
+int RadioHalHidl::getProperties(radio_hal_properties_t *properties)
+{
+    ALOGV("%s IN", __FUNCTION__);
+    sp<IBroadcastRadio> module = getService();
+    if (module == 0) {
+        return -ENODEV;
+    }
+    Properties halProperties;
+    Result halResult = Result::NOT_INITIALIZED;
+    Return<void> hidlReturn =
+            module->getProperties([&](Result result, const Properties& properties) {
+                    halResult = result;
+                    if (result == Result::OK) {
+                        halProperties = properties;
+                    }
+                });
+
+    if (halResult == Result::OK) {
+        HidlUtils::convertPropertiesFromHal(properties, &halProperties);
+    }
+    return HidlUtils::convertHalResult(halResult);
+}
+
+int RadioHalHidl::openTuner(const radio_hal_band_config_t *config,
+                            bool audio,
+                            sp<TunerCallbackInterface> callback,
+                            sp<TunerInterface>& tuner)
+{
+    sp<IBroadcastRadio> module = getService();
+    if (module == 0) {
+        return -ENODEV;
+    }
+    sp<Tuner> tunerImpl = new Tuner(callback, this);
+
+    BandConfig halConfig;
+    Result halResult = Result::NOT_INITIALIZED;
+    sp<ITuner> halTuner;
+
+    HidlUtils::convertBandConfigToHal(&halConfig, config);
+    Return<void> hidlReturn =
+            module->openTuner(halConfig, audio, tunerImpl,
+                              [&](Result result, const sp<ITuner>& tuner) {
+                    halResult = result;
+                    if (result == Result::OK) {
+                        halTuner = tuner;
+                    }
+                });
+
+    if (halResult == Result::OK) {
+        tunerImpl->setHalTuner(halTuner);
+        tuner = tunerImpl;
+    }
+
+    return HidlUtils::convertHalResult(halResult);
+}
+
+int RadioHalHidl::closeTuner(sp<TunerInterface>& tuner)
+{
+    sp<Tuner> tunerImpl = static_cast<Tuner *>(tuner.get());
+    sp<ITuner> clearTuner;
+    tunerImpl->setHalTuner(clearTuner);
+    return 0;
+}
+
+RadioHalHidl::RadioHalHidl(radio_class_t classId)
+    : mClassId(classId)
+{
+}
+
+RadioHalHidl::~RadioHalHidl()
+{
+}
+
+sp<IBroadcastRadio> RadioHalHidl::getService()
+{
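+    // Lazily bind to the "broadcastradio" HIDL service and cache the module;
+    // clearService() drops the cached module after a HAL failure is reported.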
+    if (mHalModule == 0) {
+        sp<IBroadcastRadioFactory> factory = IBroadcastRadioFactory::getService("broadcastradio");
+        if (factory != 0) {
+            factory->connectModule(static_cast<Class>(mClassId),
+                               [&](Result retval, const ::android::sp<IBroadcastRadio>& result) {
+                if (retval == Result::OK) {
+                    mHalModule = result;
+                }
+            });
+        }
+    }
+    ALOGV("%s OUT module %p", __FUNCTION__, mHalModule.get());
+    return mHalModule;
+}
+
+void RadioHalHidl::clearService()
+{
+    ALOGV("%s IN module %p", __FUNCTION__, mHalModule.get());
+    mHalModule.clear();
+}
+
+
+int RadioHalHidl::Tuner::setConfiguration(const radio_hal_band_config_t *config)
+{
+    ALOGV("%s IN mHalTuner %p", __FUNCTION__, mHalTuner.get());
+
+    if (mHalTuner == 0) {
+        return -ENODEV;
+    }
+    BandConfig halConfig;
+    HidlUtils::convertBandConfigToHal(&halConfig, config);
+
+    Return<Result> hidlResult = mHalTuner->setConfiguration(halConfig);
+    return HidlUtils::convertHalResult(hidlResult);
+}
+
+int RadioHalHidl::Tuner::getConfiguration(radio_hal_band_config_t *config)
+{
+    ALOGV("%s IN mHalTuner %p", __FUNCTION__, mHalTuner.get());
+    if (mHalTuner == 0) {
+        return -ENODEV;
+    }
+    BandConfig halConfig;
+    Result halResult = Result::NOT_INITIALIZED;
+    Return<void> hidlReturn =
+            mHalTuner->getConfiguration([&](Result result, const BandConfig& config) {
+                    halResult = result;
+                    if (result == Result::OK) {
+                        halConfig = config;
+                    }
+                });
+    if (hidlReturn.isOk() && halResult == Result::OK) {
+        HidlUtils::convertBandConfigFromHal(config, &halConfig);
+    }
+    return HidlUtils::convertHalResult(halResult);
+}
+
+int RadioHalHidl::Tuner::scan(radio_direction_t direction, bool skip_sub_channel)
+{
+    ALOGV("%s IN mHalTuner %p", __FUNCTION__, mHalTuner.get());
+    if (mHalTuner == 0) {
+        return -ENODEV;
+    }
+    Return<Result> hidlResult =
+            mHalTuner->scan(static_cast<Direction>(direction), skip_sub_channel);
+    return HidlUtils::convertHalResult(hidlResult);
+}
+
+int RadioHalHidl::Tuner::step(radio_direction_t direction, bool skip_sub_channel)
+{
+    ALOGV("%s IN mHalTuner %p", __FUNCTION__, mHalTuner.get());
+    if (mHalTuner == 0) {
+        return -ENODEV;
+    }
+    Return<Result> hidlResult =
+            mHalTuner->step(static_cast<Direction>(direction), skip_sub_channel);
+    return HidlUtils::convertHalResult(hidlResult);
+}
+
+int RadioHalHidl::Tuner::tune(unsigned int channel, unsigned int sub_channel)
+{
+    ALOGV("%s IN mHalTuner %p", __FUNCTION__, mHalTuner.get());
+    if (mHalTuner == 0) {
+        return -ENODEV;
+    }
+    Return<Result> hidlResult =
+            mHalTuner->tune(channel, sub_channel);
+    return HidlUtils::convertHalResult(hidlResult);
+}
+
+int RadioHalHidl::Tuner::cancel()
+{
+    ALOGV("%s IN mHalTuner %p", __FUNCTION__, mHalTuner.get());
+    if (mHalTuner == 0) {
+        return -ENODEV;
+    }
+    Return<Result> hidlResult = mHalTuner->cancel();
+    return HidlUtils::convertHalResult(hidlResult);
+}
+
+int RadioHalHidl::Tuner::getProgramInformation(radio_program_info_t *info)
+{
+    ALOGV("%s IN mHalTuner %p", __FUNCTION__, mHalTuner.get());
+    if (mHalTuner == 0) {
+        return -ENODEV;
+    }
+    if (info == nullptr || info->metadata == nullptr) {
+        return BAD_VALUE;
+    }
+    ProgramInfo halInfo;
+    Result halResult = Result::NOT_INITIALIZED;
+    Return<void> hidlReturn = mHalTuner->getProgramInformation(
+        [&](Result result, const ProgramInfo& info) {
+            halResult = result;
+            if (result == Result::OK) {
+                halInfo = info;
+            }
+        });
+    if (hidlReturn.isOk() && halResult == Result::OK) {
+        HidlUtils::convertProgramInfoFromHal(info, &halInfo);
+    }
+    return HidlUtils::convertHalResult(halResult);
+}
+
+Return<void> RadioHalHidl::Tuner::hardwareFailure()
+{
+    ALOGV("%s IN", __FUNCTION__);
+    handleHwFailure();
+    return Return<void>();
+}
+
+Return<void> RadioHalHidl::Tuner::configChange(Result result, const BandConfig& config)
+{
+    ALOGV("%s IN", __FUNCTION__);
+    radio_hal_event_t event;
+    memset(&event, 0, sizeof(radio_hal_event_t));
+    event.type = RADIO_EVENT_CONFIG;
+    event.status = HidlUtils::convertHalResult(result);
+    HidlUtils::convertBandConfigFromHal(&event.config, &config);
+    onCallback(&event);
+    return Return<void>();
+}
+
+Return<void> RadioHalHidl::Tuner::tuneComplete(Result result, const ProgramInfo& info)
+{
+    ALOGV("%s IN", __FUNCTION__);
+    radio_hal_event_t event;
+    memset(&event, 0, sizeof(radio_hal_event_t));
+    event.type = RADIO_EVENT_TUNED;
+    event.status = HidlUtils::convertHalResult(result);
+    HidlUtils::convertProgramInfoFromHal(&event.info, &info);
+    onCallback(&event);
+    radio_metadata_deallocate(event.info.metadata);
+    return Return<void>();
+}
+
+Return<void> RadioHalHidl::Tuner::afSwitch(const ProgramInfo& info)
+{
+    ALOGV("%s IN", __FUNCTION__);
+    radio_hal_event_t event;
+    memset(&event, 0, sizeof(radio_hal_event_t));
+    event.type = RADIO_EVENT_AF_SWITCH;
+    HidlUtils::convertProgramInfoFromHal(&event.info, &info);
+    onCallback(&event);
+    if (event.info.metadata != NULL) {
+        radio_metadata_deallocate(event.info.metadata);
+    }
+    return Return<void>();
+}
+
+Return<void> RadioHalHidl::Tuner::antennaStateChange(bool connected)
+{
+    ALOGV("%s IN", __FUNCTION__);
+    radio_hal_event_t event;
+    memset(&event, 0, sizeof(radio_hal_event_t));
+    event.type = RADIO_EVENT_ANTENNA;
+    event.on = connected;
+    onCallback(&event);
+    return Return<void>();
+}
+Return<void> RadioHalHidl::Tuner::trafficAnnouncement(bool active)
+{
+    ALOGV("%s IN", __FUNCTION__);
+    radio_hal_event_t event;
+    memset(&event, 0, sizeof(radio_hal_event_t));
+    event.type = RADIO_EVENT_TA;
+    event.on = active;
+    onCallback(&event);
+    return Return<void>();
+}
+Return<void> RadioHalHidl::Tuner::emergencyAnnouncement(bool active)
+{
+    ALOGV("%s IN", __FUNCTION__);
+    radio_hal_event_t event;
+    memset(&event, 0, sizeof(radio_hal_event_t));
+    event.type = RADIO_EVENT_EA;
+    event.on = active;
+    onCallback(&event);
+    return Return<void>();
+}
+Return<void> RadioHalHidl::Tuner::newMetadata(uint32_t channel, uint32_t subChannel,
+                                          const ::android::hardware::hidl_vec<MetaData>& metadata)
+{
+    ALOGV("%s IN", __FUNCTION__);
+    radio_hal_event_t event;
+    memset(&event, 0, sizeof(radio_hal_event_t));
+    event.type = RADIO_EVENT_METADATA;
+    HidlUtils::convertMetaDataFromHal(&event.metadata, metadata, channel, subChannel);
+    onCallback(&event);
+    if (event.metadata != NULL) {
+        radio_metadata_deallocate(event.metadata);
+    }
+    return Return<void>();
+}
+
+
+RadioHalHidl::Tuner::Tuner(sp<TunerCallbackInterface> callback, sp<RadioHalHidl> module)
+    : TunerInterface(), mHalTuner(NULL), mCallback(callback), mParentModule(module)
+{
+    // Make sure the handler we are passing in only deals with const members,
+    // as it can be called on an arbitrary thread.
+    HalDeathHandler::getInstance()->registerAtExitHandler(
+            this, [this]() { sendHwFailureEvent(); });
+}
+
+
+RadioHalHidl::Tuner::~Tuner()
+{
+    HalDeathHandler::getInstance()->unregisterAtExitHandler(this);
+}
+
+void RadioHalHidl::Tuner::setHalTuner(sp<ITuner>& halTuner) {
+    if (mHalTuner != 0) {
+        mHalTuner->unlinkToDeath(HalDeathHandler::getInstance());
+    }
+    mHalTuner = halTuner;
+    if (mHalTuner != 0) {
+        mHalTuner->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
+    }
+}
+
+void RadioHalHidl::Tuner::handleHwFailure()
+{
+    ALOGV("%s IN", __FUNCTION__);
+    sp<RadioHalHidl> parentModule = mParentModule.promote();
+    if (parentModule != 0) {
+        parentModule->clearService();
+    }
+    sendHwFailureEvent();
+    mHalTuner.clear();
+}
+
+void RadioHalHidl::Tuner::sendHwFailureEvent() const
+{
+    radio_hal_event_t event;
+    memset(&event, 0, sizeof(radio_hal_event_t));
+    event.type = RADIO_EVENT_HW_FAILURE;
+    onCallback(&event);
+}
+
+void RadioHalHidl::Tuner::onCallback(radio_hal_event_t *halEvent) const
+{
+    if (mCallback != 0) {
+        mCallback->onEvent(halEvent);
+    }
+}
+
+} // namespace android
diff --git a/services/radio/RadioHalHidl.h b/services/radio/RadioHalHidl.h
new file mode 100644
index 0000000..38e181a
--- /dev/null
+++ b/services/radio/RadioHalHidl.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_RADIO_HAL_HIDL_H
+#define ANDROID_HARDWARE_RADIO_HAL_HIDL_H
+
+#include <utils/RefBase.h>
+#include <utils/threads.h>
+#include "RadioInterface.h"
+#include "TunerInterface.h"
+#include "TunerCallbackInterface.h"
+#include <android/hardware/broadcastradio/1.0/types.h>
+#include <android/hardware/broadcastradio/1.0/IBroadcastRadio.h>
+#include <android/hardware/broadcastradio/1.0/ITuner.h>
+#include <android/hardware/broadcastradio/1.0/ITunerCallback.h>
+
+namespace android {
+
+using android::hardware::Status;
+using android::hardware::Return;
+using android::hardware::broadcastradio::V1_0::Result;
+using android::hardware::broadcastradio::V1_0::IBroadcastRadio;
+using android::hardware::broadcastradio::V1_0::ITuner;
+using android::hardware::broadcastradio::V1_0::ITunerCallback;
+
+using android::hardware::broadcastradio::V1_0::BandConfig;
+using android::hardware::broadcastradio::V1_0::ProgramInfo;
+using android::hardware::broadcastradio::V1_0::MetaData;
+
+class RadioHalHidl : public RadioInterface
+{
+public:
+                    RadioHalHidl(radio_class_t classId);
+
+                    // RadioInterface
+        virtual int getProperties(radio_hal_properties_t *properties);
+        virtual int openTuner(const radio_hal_band_config_t *config,
+                              bool audio,
+                              sp<TunerCallbackInterface> callback,
+                              sp<TunerInterface>& tuner);
+        virtual int closeTuner(sp<TunerInterface>& tuner);
+
+        class Tuner  : public TunerInterface, public virtual ITunerCallback
+        {
+        public:
+                        Tuner(sp<TunerCallbackInterface> callback, sp<RadioHalHidl> module);
+
+                        // TunerInterface
+            virtual int setConfiguration(const radio_hal_band_config_t *config);
+            virtual int getConfiguration(radio_hal_band_config_t *config);
+            virtual int scan(radio_direction_t direction, bool skip_sub_channel);
+            virtual int step(radio_direction_t direction, bool skip_sub_channel);
+            virtual int tune(unsigned int channel, unsigned int sub_channel);
+            virtual int cancel();
+            virtual int getProgramInformation(radio_program_info_t *info);
+
+                        // ITunerCallback
+            virtual Return<void> hardwareFailure();
+            virtual Return<void> configChange(Result result, const BandConfig& config);
+            virtual Return<void> tuneComplete(Result result, const ProgramInfo& info);
+            virtual Return<void> afSwitch(const ProgramInfo& info);
+            virtual Return<void> antennaStateChange(bool connected);
+            virtual Return<void> trafficAnnouncement(bool active);
+            virtual Return<void> emergencyAnnouncement(bool active);
+            virtual Return<void> newMetadata(uint32_t channel, uint32_t subChannel,
+                                         const ::android::hardware::hidl_vec<MetaData>& metadata);
+
+            void setHalTuner(sp<ITuner>& halTuner);
+            sp<ITuner> getHalTuner() { return mHalTuner; }
+
+        private:
+            virtual          ~Tuner();
+
+                    void     onCallback(radio_hal_event_t *halEvent) const;
+                    void     handleHwFailure();
+                    void     sendHwFailureEvent() const;
+
+            sp<ITuner> mHalTuner;
+            const sp<TunerCallbackInterface> mCallback;
+            wp<RadioHalHidl> mParentModule;
+        };
+
+        sp<IBroadcastRadio> getService();
+        void clearService();
+
+private:
+        virtual         ~RadioHalHidl();
+
+                radio_class_t mClassId;
+                sp<IBroadcastRadio> mHalModule;
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_RADIO_HAL_HIDL_H
diff --git a/services/radio/RadioHalLegacy.cpp b/services/radio/RadioHalLegacy.cpp
new file mode 100644
index 0000000..d50ccd4
--- /dev/null
+++ b/services/radio/RadioHalLegacy.cpp
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "RadioHalLegacy"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/misc.h>
+#include "RadioHalLegacy.h"
+
+namespace android {
+
+const char *RadioHalLegacy::sClassModuleNames[] = {
+    RADIO_HARDWARE_MODULE_ID_FM, /* corresponds to RADIO_CLASS_AM_FM */
+    RADIO_HARDWARE_MODULE_ID_SAT,  /* corresponds to RADIO_CLASS_SAT */
+    RADIO_HARDWARE_MODULE_ID_DT,   /* corresponds to RADIO_CLASS_DT */
+};
+
+/* static */
+sp<RadioInterface> RadioInterface::connectModule(radio_class_t classId)
+{
+    return new RadioHalLegacy(classId);
+}
+
+RadioHalLegacy::RadioHalLegacy(radio_class_t classId)
+    : RadioInterface(), mClassId(classId), mHwDevice(NULL)
+{
+}
+
+void RadioHalLegacy::onFirstRef()
+{
+    const hw_module_t *mod;
+    int rc;
+    ALOGI("%s mClassId %d", __FUNCTION__, mClassId);
+
+    mHwDevice = NULL;
+
+    if ((mClassId < 0) ||
+            (mClassId >= NELEM(sClassModuleNames))) {
+        ALOGE("invalid class ID %d", mClassId);
+        return;
+    }
+
+    ALOGI("%s RADIO_HARDWARE_MODULE_ID %s %s",
+          __FUNCTION__, RADIO_HARDWARE_MODULE_ID, sClassModuleNames[mClassId]);
+
+    rc = hw_get_module_by_class(RADIO_HARDWARE_MODULE_ID, sClassModuleNames[mClassId], &mod);
+    if (rc != 0) {
+        ALOGE("couldn't load radio module %s.%s (%s)",
+              RADIO_HARDWARE_MODULE_ID, sClassModuleNames[mClassId], strerror(-rc));
+        return;
+    }
+    rc = radio_hw_device_open(mod, &mHwDevice);
+    if (rc != 0) {
+        ALOGE("couldn't open radio hw device in %s.%s (%s)",
+              RADIO_HARDWARE_MODULE_ID, "primary", strerror(-rc));
+        mHwDevice = NULL;
+        return;
+    }
+    if (mHwDevice->common.version != RADIO_DEVICE_API_VERSION_CURRENT) {
+        ALOGE("wrong radio hw device version %04x", mHwDevice->common.version);
+        radio_hw_device_close(mHwDevice);
+        mHwDevice = NULL;
+    }
+}
+
+RadioHalLegacy::~RadioHalLegacy()
+{
+    if (mHwDevice != NULL) {
+        radio_hw_device_close(mHwDevice);
+    }
+}
+
+int RadioHalLegacy::getProperties(radio_hal_properties_t *properties)
+{
+    if (mHwDevice == NULL) {
+        return -ENODEV;
+    }
+
+    int rc = mHwDevice->get_properties(mHwDevice, properties);
+    if (rc != 0) {
+        ALOGE("could not read implementation properties");
+    }
+
+    return rc;
+}
+
+int RadioHalLegacy::openTuner(const radio_hal_band_config_t *config,
+                bool audio,
+                sp<TunerCallbackInterface> callback,
+                sp<TunerInterface>& tuner)
+{
+    if (mHwDevice == NULL) {
+        return -ENODEV;
+    }
+    sp<Tuner> tunerImpl = new Tuner(callback);
+
+    const struct radio_tuner *halTuner;
+    int rc = mHwDevice->open_tuner(mHwDevice, config, audio,
+                                   RadioHalLegacy::Tuner::callback, tunerImpl.get(),
+                                   &halTuner);
+    if (rc == 0) {
+        tunerImpl->setHalTuner(halTuner);
+        tuner = tunerImpl;
+    }
+    return rc;
+}
+
+int RadioHalLegacy::closeTuner(sp<TunerInterface>& tuner)
+{
+    if (mHwDevice == NULL) {
+        return -ENODEV;
+    }
+    if (tuner == 0) {
+        return -EINVAL;
+    }
+    sp<Tuner> tunerImpl = (Tuner *)tuner.get();
+    return mHwDevice->close_tuner(mHwDevice, tunerImpl->getHalTuner());
+}
+
+int RadioHalLegacy::Tuner::setConfiguration(const radio_hal_band_config_t *config)
+{
+    if (mHalTuner == NULL) {
+        return -ENODEV;
+    }
+    return mHalTuner->set_configuration(mHalTuner, config);
+}
+
+int RadioHalLegacy::Tuner::getConfiguration(radio_hal_band_config_t *config)
+{
+    if (mHalTuner == NULL) {
+        return -ENODEV;
+    }
+    return mHalTuner->get_configuration(mHalTuner, config);
+}
+
+int RadioHalLegacy::Tuner::scan(radio_direction_t direction, bool skip_sub_channel)
+{
+    if (mHalTuner == NULL) {
+        return -ENODEV;
+    }
+    return mHalTuner->scan(mHalTuner, direction, skip_sub_channel);
+}
+
+int RadioHalLegacy::Tuner::step(radio_direction_t direction, bool skip_sub_channel)
+{
+    if (mHalTuner == NULL) {
+        return -ENODEV;
+    }
+    return mHalTuner->step(mHalTuner, direction, skip_sub_channel);
+}
+
+int RadioHalLegacy::Tuner::tune(unsigned int channel, unsigned int sub_channel)
+{
+    if (mHalTuner == NULL) {
+        return -ENODEV;
+    }
+    return mHalTuner->tune(mHalTuner, channel, sub_channel);
+}
+
+int RadioHalLegacy::Tuner::cancel()
+{
+    if (mHalTuner == NULL) {
+        return -ENODEV;
+    }
+    return mHalTuner->cancel(mHalTuner);
+}
+
+int RadioHalLegacy::Tuner::getProgramInformation(radio_program_info_t *info)
+{
+    if (mHalTuner == NULL) {
+        return -ENODEV;
+    }
+    return mHalTuner->get_program_information(mHalTuner, info);
+}
+
+void RadioHalLegacy::Tuner::onCallback(radio_hal_event_t *halEvent)
+{
+    if (mCallback != 0) {
+        mCallback->onEvent(halEvent);
+    }
+}
+
+//static
+void RadioHalLegacy::Tuner::callback(radio_hal_event_t *halEvent, void *cookie)
+{
+    wp<RadioHalLegacy::Tuner> weak = wp<RadioHalLegacy::Tuner>((RadioHalLegacy::Tuner *)cookie);
+    sp<RadioHalLegacy::Tuner> tuner = weak.promote();
+    if (tuner != 0) {
+        tuner->onCallback(halEvent);
+    }
+}
+
+RadioHalLegacy::Tuner::Tuner(sp<TunerCallbackInterface> callback)
+    : TunerInterface(), mHalTuner(NULL), mCallback(callback)
+{
+}
+
+
+RadioHalLegacy::Tuner::~Tuner()
+{
+}
+
+
+} // namespace android
diff --git a/services/radio/RadioHalLegacy.h b/services/radio/RadioHalLegacy.h
new file mode 100644
index 0000000..7d4831b
--- /dev/null
+++ b/services/radio/RadioHalLegacy.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_RADIO_HAL_LEGACY_H
+#define ANDROID_HARDWARE_RADIO_HAL_LEGACY_H
+
+#include <utils/RefBase.h>
+#include <hardware/radio.h>
+#include "RadioInterface.h"
+#include "TunerInterface.h"
+#include "TunerCallbackInterface.h"
+
+namespace android {
+
+class RadioHalLegacy : public RadioInterface
+{
+public:
+        RadioHalLegacy(radio_class_t classId);
+
+        // RadioInterface
+        virtual int getProperties(radio_hal_properties_t *properties);
+        virtual int openTuner(const radio_hal_band_config_t *config,
+                        bool audio,
+                        sp<TunerCallbackInterface> callback,
+                        sp<TunerInterface>& tuner);
+        virtual int closeTuner(sp<TunerInterface>& tuner);
+
+        // RefBase
+        virtual void onFirstRef();
+
+        class Tuner  : public TunerInterface
+        {
+        public:
+                        Tuner(sp<TunerCallbackInterface> callback);
+
+            virtual int setConfiguration(const radio_hal_band_config_t *config);
+            virtual int getConfiguration(radio_hal_band_config_t *config);
+            virtual int scan(radio_direction_t direction, bool skip_sub_channel);
+            virtual int step(radio_direction_t direction, bool skip_sub_channel);
+            virtual int tune(unsigned int channel, unsigned int sub_channel);
+            virtual int cancel();
+            virtual int getProgramInformation(radio_program_info_t *info);
+
+            static void callback(radio_hal_event_t *halEvent, void *cookie);
+                   void onCallback(radio_hal_event_t *halEvent);
+
+            void setHalTuner(const struct radio_tuner *halTuner) { mHalTuner = halTuner; }
+            const struct radio_tuner *getHalTuner() { return mHalTuner; }
+
+        private:
+            virtual      ~Tuner();
+
+            const struct radio_tuner    *mHalTuner;
+            sp<TunerCallbackInterface>  mCallback;
+        };
+
+protected:
+        virtual     ~RadioHalLegacy();
+
+private:
+        static const char * sClassModuleNames[];
+
+        radio_class_t mClassId;
+        struct radio_hw_device  *mHwDevice;
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_RADIO_HAL_LEGACY_H
diff --git a/services/radio/RadioInterface.h b/services/radio/RadioInterface.h
new file mode 100644
index 0000000..fcfb4d5
--- /dev/null
+++ b/services/radio/RadioInterface.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_RADIO_INTERFACE_H
+#define ANDROID_HARDWARE_RADIO_INTERFACE_H
+
+#include <utils/RefBase.h>
+#include <system/radio.h>
+#include "TunerInterface.h"
+#include "TunerCallbackInterface.h"
+
+namespace android {
+
+class RadioInterface : public virtual RefBase
+{
+public:
+        /* get a radio HAL module instance for the given class */
+        static sp<RadioInterface> connectModule(radio_class_t classId);
+
+        /*
+         * Retrieve implementation properties.
+         *
+         * arguments:
+         * - properties: where to return the module properties
+         *
+         * returns:
+         *  0 if no error
+         *  -EINVAL if invalid arguments are passed
+         */
+        virtual int getProperties(radio_hal_properties_t *properties) = 0;
+
+        /*
+         * Open a tuner interface for the requested configuration.
+         * If no other tuner is opened, this will activate the radio module.
+         *
+         * arguments:
+         * - config: the band configuration to apply
+         * - audio: this tuner will be used for live radio listening and should be connected to
+         * the radio audio source.
+         * - callback: the event callback
+         * - cookie: the cookie to pass when calling the callback
+         * - tuner: where to return the tuner interface
+         *
+         * returns:
+         *  0 if HW was powered up and configuration could be applied
+         *  -EINVAL if configuration requested is invalid
+         *  -ENOSYS if called out of sequence
+         *
+         * Callback function with event RADIO_EVENT_CONFIG MUST be called once the
+         * configuration is applied or a failure occurs or after a time out.
+         */
+        virtual int openTuner(const radio_hal_band_config_t *config,
+                        bool audio,
+                        sp<TunerCallbackInterface> callback,
+                        sp<TunerInterface>& tuner) = 0;
+
+        /*
+         * Close a tuner interface.
+         * If the last tuner is closed, the radio module is deactivated.
+         *
+         * arguments:
+         * - tuner: the tuner interface to close
+         *
+         * returns:
+         *  0 if powered down successfully.
+         *  -EINVAL if an invalid argument is passed
+         *  -ENOSYS if called out of sequence
+         */
+        virtual int closeTuner(sp<TunerInterface>& tuner) = 0;
+
+protected:
+        RadioInterface() {}
+        virtual     ~RadioInterface() {}
+};
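+
+// A minimal usage sketch (illustration only; error handling and the callback
+// implementation are assumed to exist in the caller):
+//
+//     sp<RadioInterface> radio = RadioInterface::connectModule(RADIO_CLASS_AM_FM);
+//     radio_hal_properties_t props;
+//     if (radio != 0 && radio->getProperties(&props) == 0) {
+//         sp<TunerInterface> tuner;
+//         radio->openTuner(&props.bands[0], true /*audio*/, callback, tuner);
+//         // ... use the tuner ...
+//         radio->closeTuner(tuner);
+//     }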
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_RADIO_INTERFACE_H
diff --git a/services/radio/RadioService.cpp b/services/radio/RadioService.cpp
index 5a3f750..a73ed8f 100644
--- a/services/radio/RadioService.cpp
+++ b/services/radio/RadioService.cpp
@@ -51,31 +51,15 @@
 
 void RadioService::onFirstRef()
 {
-    const hw_module_t *mod;
-    int rc;
-    struct radio_hw_device *dev;
-
     ALOGI("%s", __FUNCTION__);
 
-    rc = hw_get_module_by_class(RADIO_HARDWARE_MODULE_ID, RADIO_HARDWARE_MODULE_ID_FM, &mod);
-    if (rc != 0) {
-        ALOGE("couldn't load radio module %s.%s (%s)",
-              RADIO_HARDWARE_MODULE_ID, "primary", strerror(-rc));
-        return;
-    }
-    rc = radio_hw_device_open(mod, &dev);
-    if (rc != 0) {
-        ALOGE("couldn't open radio hw device in %s.%s (%s)",
-              RADIO_HARDWARE_MODULE_ID, "primary", strerror(-rc));
-        return;
-    }
-    if (dev->common.version != RADIO_DEVICE_API_VERSION_CURRENT) {
-        ALOGE("wrong radio hw device version %04x", dev->common.version);
-        return;
-    }
+    sp<RadioInterface> dev = RadioInterface::connectModule(RADIO_CLASS_AM_FM);
 
+    if (dev == 0) {
+        return;
+    }
     struct radio_hal_properties halProperties;
-    rc = dev->get_properties(dev, &halProperties);
+    int rc = dev->getProperties(&halProperties);
     if (rc != 0) {
         ALOGE("could not read implementation properties");
         return;
@@ -94,9 +78,6 @@
 
 RadioService::~RadioService()
 {
-    for (size_t i = 0; i < mModules.size(); i++) {
-        radio_hw_device_close(mModules.valueAt(i)->hwDevice());
-    }
 }
 
 status_t RadioService::listModules(struct radio_properties *properties,
@@ -108,7 +89,7 @@
     if (numModules == NULL || (*numModules != 0 && properties == NULL)) {
         return BAD_VALUE;
     }
-    size_t maxModules = *numModules;
+    uint32_t maxModules = *numModules;
     *numModules = mModules.size();
     for (size_t i = 0; i < mModules.size() && i < maxModules; i++) {
         properties[i] = mModules.valueAt(i)->properties();
@@ -192,16 +173,6 @@
 }
 
 
-// static
-void RadioService::callback(radio_hal_event_t *halEvent, void *cookie)
-{
-    CallbackThread *callbackThread = (CallbackThread *)cookie;
-    if (callbackThread == NULL) {
-        return;
-    }
-    callbackThread->sendEvent(halEvent);
-}
-
 /* static */
 void RadioService::convertProperties(radio_properties_t *properties,
                                      const radio_hal_properties_t *halProperties)
@@ -305,32 +276,40 @@
 {
     sp<IMemory> eventMemory;
 
-    size_t headerSize =
-            (sizeof(struct radio_event) + sizeof(unsigned int) - 1) /sizeof(unsigned int);
-    size_t metadataSize = 0;
+    // The event layout in shared memory is:
+    // sizeof(struct radio_event) bytes : the event itself
+    // 4 bytes                          : metadata size or 0
+    // N bytes                          : metadata if present
+    uint32_t metadataOffset = sizeof(struct radio_event) + sizeof(uint32_t);
+    uint32_t metadataSize = 0;
+
     switch (halEvent->type) {
     case RADIO_EVENT_TUNED:
     case RADIO_EVENT_AF_SWITCH:
         if (radio_metadata_check(halEvent->info.metadata) == 0) {
-            metadataSize = radio_metadata_get_size(halEvent->info.metadata);
+            metadataSize = (uint32_t)radio_metadata_get_size(halEvent->info.metadata);
         }
         break;
     case RADIO_EVENT_METADATA:
         if (radio_metadata_check(halEvent->metadata) != 0) {
             return eventMemory;
         }
-        metadataSize = radio_metadata_get_size(halEvent->metadata);
+        metadataSize = (uint32_t)radio_metadata_get_size(halEvent->metadata);
         break;
     default:
         break;
     }
-    size_t size = headerSize + metadataSize;
-    eventMemory = mMemoryDealer->allocate(size);
+
+    eventMemory = mMemoryDealer->allocate(metadataOffset + metadataSize);
     if (eventMemory == 0 || eventMemory->pointer() == NULL) {
         eventMemory.clear();
         return eventMemory;
     }
+
     struct radio_event *event = (struct radio_event *)eventMemory->pointer();
+
+    *(uint32_t *)((uint8_t *)event + metadataOffset - sizeof(uint32_t)) = metadataSize;
+
     event->type = halEvent->type;
     event->status = halEvent->status;
 
@@ -342,10 +321,7 @@
     case RADIO_EVENT_AF_SWITCH:
         event->info = halEvent->info;
         if (metadataSize != 0) {
-            memcpy((char *)event + headerSize, halEvent->info.metadata, metadataSize);
-            // replace meta data pointer by offset while in shared memory so that receiving side
-            // can restore the pointer in destination process.
-            event->info.metadata = (radio_metadata_t *)headerSize;
+            memcpy((uint8_t *)event + metadataOffset, halEvent->info.metadata, metadataSize);
         }
         break;
     case RADIO_EVENT_TA:
@@ -355,10 +331,9 @@
         event->on = halEvent->on;
         break;
     case RADIO_EVENT_METADATA:
-        memcpy((char *)event + headerSize, halEvent->metadata, metadataSize);
-        // replace meta data pointer by offset while in shared memory so that receiving side
-        // can restore the pointer in destination process.
-        event->metadata = (radio_metadata_t *)headerSize;
+        if (metadataSize != 0) {
+            memcpy((uint8_t *)event + metadataOffset, halEvent->metadata, metadataSize);
+        }
         break;
     case RADIO_EVENT_HW_FAILURE:
     default:
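For clarity, a hedged sketch of how a receiving process could walk the layout documented in the comment above; the reader below is illustrative and not part of the patch:

    // Illustration only: reading an event back out of the shared memory block.
    // [0 .. sizeof(radio_event))          the event itself
    // [sizeof(radio_event) .. +4)         metadata size, or 0
    // [sizeof(radio_event)+4 .. )         metadata bytes, if any
    const uint8_t *base = static_cast<const uint8_t *>(eventMemory->pointer());
    const struct radio_event *event = reinterpret_cast<const struct radio_event *>(base);
    uint32_t metadataSize;
    memcpy(&metadataSize, base + sizeof(struct radio_event), sizeof(metadataSize));
    const radio_metadata_t *metadata = (metadataSize != 0) ?
            reinterpret_cast<const radio_metadata_t *>(
                    base + sizeof(struct radio_event) + sizeof(uint32_t)) : NULL;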
@@ -385,12 +360,13 @@
 #undef LOG_TAG
 #define LOG_TAG "RadioService::Module"
 
-RadioService::Module::Module(radio_hw_device* hwDevice, radio_properties properties)
+RadioService::Module::Module(sp<RadioInterface> hwDevice, radio_properties properties)
  : mHwDevice(hwDevice), mProperties(properties), mMute(true)
 {
 }
 
 RadioService::Module::~Module() {
+    mHwDevice.clear();
     mModuleClients.clear();
 }
 
@@ -404,10 +380,15 @@
                                     bool audio)
 {
     ALOGV("addClient() %p config %p product %s", this, config, mProperties.product);
+
     AutoMutex lock(mLock);
     sp<ModuleClient> moduleClient;
     int ret;
 
+    if (mHwDevice == 0) {
+        return moduleClient;
+    }
+
     for (size_t i = 0; i < mModuleClients.size(); i++) {
         if (mModuleClients[i]->client() == client) {
             // client already connected: reject
@@ -464,7 +445,7 @@
         }
     }
 
-    const struct radio_tuner *halTuner;
+    sp<TunerInterface> halTuner;
     sp<ModuleClient> preemtedClient;
     if (audio) {
         if (allocatedAudio >= mProperties.num_audio_sources) {
@@ -484,18 +465,19 @@
     }
     if (preemtedClient != 0) {
         halTuner = preemtedClient->getTuner();
-        preemtedClient->setTuner(NULL);
-        mHwDevice->close_tuner(mHwDevice, halTuner);
+        sp<TunerInterface> clear;
+        preemtedClient->setTuner(clear);
+        mHwDevice->closeTuner(halTuner);
         if (preemtedClient->audio()) {
             notifyDeviceConnection(false, "");
         }
     }
 
-    ret = mHwDevice->open_tuner(mHwDevice, &halConfig, audio,
-                                RadioService::callback, moduleClient->callbackThread().get(),
-                                &halTuner);
+    ret = mHwDevice->openTuner(&halConfig, audio,
+                               moduleClient,
+                               halTuner);
     if (ret == 0) {
-        ALOGV("addClient() setTuner %p", halTuner);
+        ALOGV("addClient() setTuner %p", halTuner.get());
         moduleClient->setTuner(halTuner);
         mModuleClients.add(moduleClient);
         if (audio) {
@@ -527,12 +509,15 @@
     }
 
     mModuleClients.removeAt(index);
-    const struct radio_tuner *halTuner = moduleClient->getTuner();
+    sp<TunerInterface> halTuner = moduleClient->getTuner();
     if (halTuner == NULL) {
         return;
     }
 
-    mHwDevice->close_tuner(mHwDevice, halTuner);
+    if (mHwDevice != 0) {
+        mHwDevice->closeTuner(halTuner);
+    }
+
     if (moduleClient->audio()) {
         notifyDeviceConnection(false, "");
     }
@@ -543,6 +528,10 @@
         return;
     }
 
+    if (mHwDevice == 0) {
+        return;
+    }
+
     // Tuner reallocation logic:
     // When a client is removed and was controlling a tuner, this tuner will be allocated to a
     // previously preempted client. This client will be notified by a callback with
@@ -591,9 +580,9 @@
     ALOG_ASSERT(youngestClient != 0, "removeClient() removed client no candidate found for tuner");
 
     struct radio_hal_band_config halConfig = youngestClient->halConfig();
-    ret = mHwDevice->open_tuner(mHwDevice, &halConfig, youngestClient->audio(),
-                                RadioService::callback, moduleClient->callbackThread().get(),
-                                &halTuner);
+    ret = mHwDevice->openTuner(&halConfig, youngestClient->audio(),
+                                moduleClient,
+                                halTuner);
 
     if (ret == 0) {
         youngestClient->setTuner(halTuner);
@@ -646,7 +635,7 @@
                                          const sp<IRadioClient>& client,
                                          const struct radio_band_config *config,
                                          bool audio)
- : mModule(module), mClient(client), mConfig(*config), mAudio(audio), mTuner(NULL)
+ : mModule(module), mClient(client), mConfig(*config), mAudio(audio), mTuner(0)
 {
 }
 
@@ -666,6 +655,11 @@
     }
 }
 
+void RadioService::ModuleClient::onEvent(radio_hal_event_t *halEvent)
+{
+    mCallbackThread->sendEvent(halEvent);
+}
+
 status_t RadioService::ModuleClient::dump(int fd __unused,
                                              const Vector<String16>& args __unused) {
     String8 result;
@@ -696,14 +690,14 @@
     return mConfig.band;
 }
 
-const struct radio_tuner *RadioService::ModuleClient::getTuner() const
+sp<TunerInterface>& RadioService::ModuleClient::getTuner()
 {
     AutoMutex lock(mLock);
     ALOGV("%s locked", __FUNCTION__);
     return mTuner;
 }
 
-void RadioService::ModuleClient::setTuner(const struct radio_tuner *tuner)
+void RadioService::ModuleClient::setTuner(sp<TunerInterface>& tuner)
 {
     ALOGV("%s %p", __FUNCTION__, this);
 
@@ -714,7 +708,7 @@
     radio_hal_event_t event;
     event.type = RADIO_EVENT_CONTROL;
     event.status = 0;
-    event.on = mTuner != NULL;
+    event.on = mTuner != 0;
     mCallbackThread->sendEvent(&event);
     ALOGV("%s DONE", __FUNCTION__);
 
@@ -726,10 +720,10 @@
     status_t status = NO_ERROR;
     ALOGV("%s locked", __FUNCTION__);
 
-    if (mTuner != NULL) {
+    if (mTuner != 0) {
         struct radio_hal_band_config halConfig;
         halConfig = config->band;
-        status = (status_t)mTuner->set_configuration(mTuner, &halConfig);
+        status = (status_t)mTuner->setConfiguration(&halConfig);
         if (status == NO_ERROR) {
             mConfig = *config;
         }
@@ -747,9 +741,9 @@
     status_t status = NO_ERROR;
     ALOGV("%s locked", __FUNCTION__);
 
-    if (mTuner != NULL) {
+    if (mTuner != 0) {
         struct radio_hal_band_config halConfig;
-        status = (status_t)mTuner->get_configuration(mTuner, &halConfig);
+        status = (status_t)mTuner->getConfiguration(&halConfig);
         if (status == NO_ERROR) {
             mConfig.band = halConfig;
         }
@@ -765,7 +759,7 @@
     {
         Mutex::Autolock _l(mLock);
         ALOGV("%s locked", __FUNCTION__);
-        if (mTuner == NULL || !mAudio) {
+        if (mTuner == 0 || !mAudio) {
             return INVALID_OPERATION;
         }
         module = mModule.promote();
@@ -796,8 +790,8 @@
     AutoMutex lock(mLock);
     ALOGV("%s locked", __FUNCTION__);
     status_t status;
-    if (mTuner != NULL) {
-        status = (status_t)mTuner->scan(mTuner, direction, skipSubChannel);
+    if (mTuner != 0) {
+        status = (status_t)mTuner->scan(direction, skipSubChannel);
     } else {
         status = INVALID_OPERATION;
     }
@@ -809,21 +803,21 @@
     AutoMutex lock(mLock);
     ALOGV("%s locked", __FUNCTION__);
     status_t status;
-    if (mTuner != NULL) {
-        status = (status_t)mTuner->step(mTuner, direction, skipSubChannel);
+    if (mTuner != 0) {
+        status = (status_t)mTuner->step(direction, skipSubChannel);
     } else {
         status = INVALID_OPERATION;
     }
     return status;
 }
 
-status_t RadioService::ModuleClient::tune(unsigned int channel, unsigned int subChannel)
+status_t RadioService::ModuleClient::tune(uint32_t channel, uint32_t subChannel)
 {
     AutoMutex lock(mLock);
     ALOGV("%s locked", __FUNCTION__);
     status_t status;
-    if (mTuner != NULL) {
-        status = (status_t)mTuner->tune(mTuner, channel, subChannel);
+    if (mTuner != 0) {
+        status = (status_t)mTuner->tune(channel, subChannel);
     } else {
         status = INVALID_OPERATION;
     }
@@ -835,8 +829,8 @@
     AutoMutex lock(mLock);
     ALOGV("%s locked", __FUNCTION__);
     status_t status;
-    if (mTuner != NULL) {
-        status = (status_t)mTuner->cancel(mTuner);
+    if (mTuner != 0) {
+        status = (status_t)mTuner->cancel();
     } else {
         status = INVALID_OPERATION;
     }
@@ -849,10 +843,11 @@
     ALOGV("%s locked", __FUNCTION__);
     status_t status;
     if (mTuner != NULL) {
-        status = (status_t)mTuner->get_program_information(mTuner, info);
+        status = (status_t)mTuner->getProgramInformation(info);
     } else {
         status = INVALID_OPERATION;
     }
+
     return status;
 }
 
@@ -860,7 +855,7 @@
 {
     Mutex::Autolock lock(mLock);
     ALOGV("%s locked", __FUNCTION__);
-    *hasControl = mTuner != NULL;
+    *hasControl = mTuner != 0;
     return NO_ERROR;
 }
 
diff --git a/services/radio/RadioService.h b/services/radio/RadioService.h
index ac3481e..444eb7a 100644
--- a/services/radio/RadioService.h
+++ b/services/radio/RadioService.h
@@ -27,6 +27,9 @@
 #include <radio/IRadioClient.h>
 #include <system/radio.h>
 #include <hardware/radio.h>
+#include "RadioInterface.h"
+#include "TunerInterface.h"
+#include "TunerCallbackInterface.h"
 
 namespace android {
 
@@ -66,7 +69,7 @@
     class Module : public virtual RefBase {
     public:
 
-       Module(radio_hw_device* hwDevice,
+       Module(sp<RadioInterface> hwDevice,
               struct radio_properties properties);
 
        virtual ~Module();
@@ -83,7 +86,7 @@
 
        virtual status_t dump(int fd, const Vector<String16>& args);
 
-       const struct radio_hw_device *hwDevice() const { return mHwDevice; }
+       sp<RadioInterface> hwDevice() const { return mHwDevice; }
        const struct radio_properties properties() const { return mProperties; }
        const struct radio_band_config *getDefaultConfig() const ;
 
@@ -92,7 +95,7 @@
        void notifyDeviceConnection(bool connected, const char *address);
 
         Mutex                         mLock;          // protects  mModuleClients
-        const struct radio_hw_device  *mHwDevice;     // HAL hardware device
+        sp<RadioInterface>            mHwDevice;      // HAL hardware device
         const struct radio_properties mProperties;    // cached hardware module properties
         Vector< sp<ModuleClient> >    mModuleClients; // list of attached clients
         bool                          mMute;          // radio audio source state
@@ -128,7 +131,8 @@
     }; // class CallbackThread
 
     class ModuleClient : public BnRadio,
-                   public IBinder::DeathRecipient {
+                   public IBinder::DeathRecipient,
+                   public TunerCallbackInterface {
     public:
 
        ModuleClient(const sp<Module>& module,
@@ -167,8 +171,8 @@
                wp<Module> module() const { return mModule; }
                radio_hal_band_config_t halConfig() const;
                sp<CallbackThread> callbackThread() const { return mCallbackThread; }
-               void setTuner(const struct radio_tuner *tuner);
-               const struct radio_tuner *getTuner() const;
+               void setTuner(sp<TunerInterface>& tuner);
+               sp<TunerInterface>& getTuner();
                bool audio() const { return mAudio; }
 
                void onCallbackEvent(const sp<IMemory>& event);
@@ -179,6 +183,9 @@
        // IBinder::DeathRecipient implementation
        virtual void        binderDied(const wp<IBinder> &who);
 
+       // TunerCallbackInterface
+       virtual void onEvent(radio_hal_event_t *event);
+
     private:
 
         mutable Mutex               mLock;           // protects mClient, mConfig and mTuner
@@ -187,7 +194,7 @@
         radio_band_config_t         mConfig;         // current band configuration
         sp<CallbackThread>          mCallbackThread; // event callback thread
         const bool                  mAudio;
-        const struct radio_tuner    *mTuner;        // HAL tuner interface. NULL indicates that
+        sp<TunerInterface>          mTuner;        // HAL tuner interface. NULL indicates that
                                                     // this client does not have control on any
                                                     // tuner
     }; // class ModuleClient
diff --git a/services/radio/TunerCallbackInterface.h b/services/radio/TunerCallbackInterface.h
new file mode 100644
index 0000000..4973cce
--- /dev/null
+++ b/services/radio/TunerCallbackInterface.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_TUNER_CALLBACK_INTERFACE_H
+#define ANDROID_HARDWARE_TUNER_CALLBACK_INTERFACE_H
+
+#include <utils/RefBase.h>
+#include <system/radio.h>
+
+namespace android {
+
+class TunerCallbackInterface : public virtual RefBase
+{
+public:
+    virtual void onEvent(radio_hal_event_t *event) = 0;
+
+protected:
+                 TunerCallbackInterface() {}
+    virtual      ~TunerCallbackInterface() {}
+
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_TUNER_CALLBACK_INTERFACE_H
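A minimal sketch of an implementer, mirroring how ModuleClient in this patch hands the HAL event to a worker thread; the class name is illustrative and CallbackThread refers to the helper declared in RadioService.h:

    // Illustration only: forward HAL events to a worker that copies them out of the HAL context.
    class ForwardingTunerCallback : public TunerCallbackInterface {
    public:
        explicit ForwardingTunerCallback(const sp<CallbackThread>& thread) : mThread(thread) {}
        virtual void onEvent(radio_hal_event_t *event) {
            mThread->sendEvent(event);      // same shape as RadioService::ModuleClient::onEvent()
        }
    private:
        sp<CallbackThread> mThread;
    };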
diff --git a/services/radio/TunerInterface.h b/services/radio/TunerInterface.h
new file mode 100644
index 0000000..4e657d3
--- /dev/null
+++ b/services/radio/TunerInterface.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_TUNER_INTERFACE_H
+#define ANDROID_HARDWARE_TUNER_INTERFACE_H
+
+#include <utils/RefBase.h>
+#include <system/radio.h>
+
+namespace android {
+
+class TunerInterface : public virtual RefBase
+{
+public:
+        /*
+         * Apply current radio band configuration (band, range, channel spacing ...).
+         *
+         * arguments:
+         * - config: the band configuration to apply
+         *
+         * returns:
+         *  0 if configuration could be applied
+         *  -EINVAL if configuration requested is invalid
+         *
+         * Automatically cancels pending scan, step or tune.
+         *
+         * Callback function with event RADIO_EVENT_CONFIG MUST be called once the
+         * configuration is applied or a failure occurs or after a time out.
+         */
+    virtual int setConfiguration(const radio_hal_band_config_t *config) = 0;
+
+        /*
+         * Retrieve current radio band configuration.
+         *
+         * arguments:
+         * - config: where to return the band configuration
+         *
+         * returns:
+         *  0 if valid configuration is returned
+         *  -EINVAL if invalid arguments are passed
+         */
+    virtual int getConfiguration(radio_hal_band_config_t *config) = 0;
+
+        /*
+         * Start scanning up to next valid station.
+         * Must be called when a valid configuration has been applied.
+         *
+         * arguments:
+         * - direction: RADIO_DIRECTION_UP or RADIO_DIRECTION_DOWN
+         * - skip_sub_channel: valid for HD radio or digital radios only: ignore sub channels
+         *  (e.g SPS for HD radio).
+         *
+         * returns:
+         *  0 if scan successfully started
+         *  -ENOSYS if called out of sequence
+         *  -ENODEV if another error occurs
+         *
+         * Automatically cancels pending scan, step or tune.
+         *
+         *  Callback function with event RADIO_EVENT_TUNED MUST be called once
+         *  locked on a station or after a time out or full frequency scan if
+         *  no station found. The event status should indicate if a valid station
+         *  is tuned or not.
+         */
+    virtual int scan(radio_direction_t direction, bool skip_sub_channel) = 0;
+
+        /*
+         * Move one channel spacing up or down.
+         * Must be called when a valid configuration has been applied.
+         *
+         * arguments:
+         * - direction: RADIO_DIRECTION_UP or RADIO_DIRECTION_DOWN
+         * - skip_sub_channel: valid for HD radio or digital radios only: ignore sub channels
+         *  (e.g SPS for HD radio).
+         *
+         * returns:
+         *  0 if step successfully started
+         *  -ENOSYS if called out of sequence
+         *  -ENODEV if another error occurs
+         *
+         * Automatically cancels pending scan, step or tune.
+         *
+         * Callback function with event RADIO_EVENT_TUNED MUST be called once
+         * step completed or after a time out. The event status should indicate
+         * if a valid station is tuned or not.
+         */
+    virtual int step(radio_direction_t direction, bool skip_sub_channel) = 0;
+
+        /*
+         * Tune to specified frequency.
+         * Must be called when a valid configuration has been applied.
+         *
+         * arguments:
+         * - channel: channel to tune to. A frequency in kHz for AM/FM/HD Radio bands.
+         * - sub_channel: valid for HD radio or digital radios only: (e.g SPS number for HD radio).
+         *
+         * returns:
+         *  0 if tune successfully started
+         *  -ENOSYS if called out of sequence
+         *  -EINVAL if invalid arguments are passed
+         *  -ENODEV if another error occurs
+         *
+         * Automatically cancels pending scan, step or tune.
+         *
+         * Callback function with event RADIO_EVENT_TUNED MUST be called once
+         * tuned or after a time out. The event status should indicate
+         * if a valid station is tuned or not.
+         */
+    virtual int tune(unsigned int channel, unsigned int sub_channel) = 0;
+
+        /*
+         * Cancel a scan, step or tune operation.
+         * Must be called while a scan, step or tune operation is pending
+         * (callback not yet sent).
+         *
+         * returns:
+         *  0 if successful
+         *  -ENOSYS if called out of sequence
+         *  -ENODEV if another error occurs
+         *
+         * The callback is not sent.
+         */
+    virtual int cancel() = 0;
+
+        /*
+         * Retrieve current station information.
+         *
+         * arguments:
+         * - info: where to return the program info.
+         * If info->metadata is NULL, no metadata should be returned.
+         * If metadata must be returned, it should be added to or cloned into
+         * info->metadata, not passed back as a newly created metadata buffer.
+         *
+         * returns:
+         *  0 if tuned and information available
+         *  -EINVAL if invalid arguments are passed
+         *  -ENODEV if another error occurs
+         */
+    virtual int getProgramInformation(radio_program_info_t *info) = 0;
+
+protected:
+                TunerInterface() {}
+    virtual     ~TunerInterface() {}
+
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_TUNER_INTERFACE_H
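To make the call sequence concrete, a hedged sketch of a typical tune flow against this interface; halProperties, the frequency value and the zero-initialized program info are illustrative assumptions:

    // Illustration only: configure, tune and query a TunerInterface instance.
    radio_hal_band_config_t config = halProperties.bands[0];   // halProperties assumed from getProperties()
    if (tuner->setConfiguration(&config) != 0) {                // RADIO_EVENT_CONFIG reported via the callback
        return;
    }
    if (tuner->tune(99500 /*kHz*/, 0 /*sub_channel*/) == 0) {
        // RADIO_EVENT_TUNED is delivered through TunerCallbackInterface::onEvent()
    }
    radio_program_info_t info;
    memset(&info, 0, sizeof(info));                             // info.metadata == NULL: no metadata requested
    tuner->getProgramInformation(&info);
    tuner->cancel();                                            // aborts any pending scan, step or tune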
diff --git a/services/soundtrigger/Android.mk b/services/soundtrigger/Android.mk
index 0f5bbba..3e7a7ce 100644
--- a/services/soundtrigger/Android.mk
+++ b/services/soundtrigger/Android.mk
@@ -16,7 +16,6 @@
 
 include $(CLEAR_VARS)
 
-
 ifeq ($(SOUND_TRIGGER_USE_STUB_MODULE), 1)
     LOCAL_CFLAGS += -DSOUND_TRIGGER_USE_STUB_MODULE
 endif
@@ -34,12 +33,33 @@
     libaudioclient \
     libserviceutility
 
+
+ifeq ($(USE_LEGACY_LOCAL_AUDIO_HAL),true)
+# libhardware configuration
+LOCAL_SRC_FILES +=               \
+    SoundTriggerHalLegacy.cpp
+else
+# Treble configuration
+LOCAL_SRC_FILES +=               \
+    SoundTriggerHalHidl.cpp
+
+LOCAL_SHARED_LIBRARIES += \
+    libhwbinder \
+    libhidlbase \
+    libhidltransport \
+    libbase \
+    libaudiohal \
+    android.hardware.soundtrigger@2.0 \
+    android.hardware.audio.common@2.0
+endif
+
+
 LOCAL_C_INCLUDES += \
     $(TOPDIR)frameworks/av/services/audioflinger
 
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
-LOCAL_CFLAGS := -Wall -Werror
+LOCAL_CFLAGS += -Wall -Werror
 
 LOCAL_MODULE:= libsoundtriggerservice
 
diff --git a/services/soundtrigger/SoundTriggerHalHidl.cpp b/services/soundtrigger/SoundTriggerHalHidl.cpp
new file mode 100644
index 0000000..7cc8a2b
--- /dev/null
+++ b/services/soundtrigger/SoundTriggerHalHidl.cpp
@@ -0,0 +1,604 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "SoundTriggerHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <media/audiohal/hidl/HalDeathHandler.h>
+#include <utils/Log.h>
+#include "SoundTriggerHalHidl.h"
+#include <hwbinder/IPCThreadState.h>
+#include <hwbinder/ProcessState.h>
+
+namespace android {
+
+using android::hardware::Return;
+using android::hardware::ProcessState;
+using android::hardware::audio::common::V2_0::AudioDevice;
+
+/* static */
+sp<SoundTriggerHalInterface> SoundTriggerHalInterface::connectModule(const char *moduleName)
+{
+    return new SoundTriggerHalHidl(moduleName);
+}
+
+int SoundTriggerHalHidl::getProperties(struct sound_trigger_properties *properties)
+{
+    sp<ISoundTriggerHw> soundtrigger = getService();
+    if (soundtrigger == 0) {
+        return -ENODEV;
+    }
+
+    ISoundTriggerHw::Properties halProperties;
+    Return<void> hidlReturn;
+    int ret;
+    {
+        AutoMutex lock(mHalLock);
+        hidlReturn = soundtrigger->getProperties([&](int rc, auto res) {
+            ret = rc;
+            halProperties = res;
+            ALOGI("getProperties res implementor %s", res.implementor.c_str());
+        });
+    }
+
+    if (hidlReturn.isOk()) {
+        if (ret == 0) {
+            convertPropertiesFromHal(properties, &halProperties);
+        }
+    } else {
+        ALOGE("getProperties error %s", hidlReturn.description().c_str());
+        return FAILED_TRANSACTION;
+    }
+    ALOGI("getProperties ret %d", ret);
+    return ret;
+}
+
+int SoundTriggerHalHidl::loadSoundModel(struct sound_trigger_sound_model *sound_model,
+                        sound_model_callback_t callback,
+                        void *cookie,
+                        sound_model_handle_t *handle)
+{
+    if (handle == NULL) {
+        return -EINVAL;
+    }
+
+    sp<ISoundTriggerHw> soundtrigger = getService();
+    if (soundtrigger == 0) {
+        return -ENODEV;
+    }
+
+    uint32_t modelId;
+    {
+        AutoMutex lock(mLock);
+        do {
+            modelId = nextUniqueId();
+            ALOGI("loadSoundModel modelId %u", modelId);
+            sp<SoundModel> model = mSoundModels.valueFor(modelId);
+            ALOGI("loadSoundModel model %p", model.get());
+        } while (mSoundModels.valueFor(modelId) != 0 && modelId != 0);
+    }
+    LOG_ALWAYS_FATAL_IF(modelId == 0,
+                        "loadSoundModel(): wrap around in sound model IDs, num loaded models %zd",
+                        mSoundModels.size());
+
+    ISoundTriggerHw::SoundModel *halSoundModel =
+            convertSoundModelToHal(sound_model);
+    if (halSoundModel == NULL) {
+        return -EINVAL;
+    }
+
+    Return<void> hidlReturn;
+    int ret;
+    SoundModelHandle halHandle;
+    {
+        AutoMutex lock(mHalLock);
+        if (sound_model->type == SOUND_MODEL_TYPE_KEYPHRASE) {
+            hidlReturn = soundtrigger->loadPhraseSoundModel(
+                    *(const ISoundTriggerHw::PhraseSoundModel *)halSoundModel,
+                    this, modelId, [&](int32_t retval, auto res) {
+                ret = retval;
+                halHandle = res;
+            });
+
+        } else {
+            hidlReturn = soundtrigger->loadSoundModel(*halSoundModel,
+                    this, modelId, [&](int32_t retval, auto res) {
+                ret = retval;
+                halHandle = res;
+            });
+        }
+    }
+
+    delete halSoundModel;
+
+    if (hidlReturn.isOk()) {
+        if (ret == 0) {
+            AutoMutex lock(mLock);
+            *handle = (sound_model_handle_t)modelId;
+            sp<SoundModel> model = new SoundModel(*handle, callback, cookie, halHandle);
+            mSoundModels.add(*handle, model);
+        }
+    } else {
+        ALOGE("loadSoundModel error %s", hidlReturn.description().c_str());
+        return FAILED_TRANSACTION;
+    }
+
+    return ret;
+}
+
+int SoundTriggerHalHidl::unloadSoundModel(sound_model_handle_t handle)
+{
+    sp<ISoundTriggerHw> soundtrigger = getService();
+    if (soundtrigger == 0) {
+        return -ENODEV;
+    }
+
+    sp<SoundModel> model = removeModel(handle);
+    if (model == 0) {
+        ALOGE("unloadSoundModel model not found for handle %u", handle);
+        return -EINVAL;
+    }
+
+    Return<int32_t> hidlReturn(0);
+    {
+        AutoMutex lock(mHalLock);
+        hidlReturn = soundtrigger->unloadSoundModel(model->mHalHandle);
+    }
+
+    if (!hidlReturn.isOk()) {
+        ALOGE("unloadSoundModel error %s", hidlReturn.description().c_str());
+        return FAILED_TRANSACTION;
+    }
+
+    return hidlReturn;
+}
+
+int SoundTriggerHalHidl::startRecognition(sound_model_handle_t handle,
+                         const struct sound_trigger_recognition_config *config,
+                         recognition_callback_t callback,
+                         void *cookie)
+{
+    sp<ISoundTriggerHw> soundtrigger = getService();
+    if (soundtrigger == 0) {
+        return -ENODEV;
+    }
+
+    sp<SoundModel> model = getModel(handle);
+    if (model == 0) {
+        ALOGE("startRecognition model not found for handle %u", handle);
+        return -EINVAL;
+    }
+
+    model->mRecognitionCallback = callback;
+    model->mRecognitionCookie = cookie;
+
+    ISoundTriggerHw::RecognitionConfig *halConfig =
+            convertRecognitionConfigToHal(config);
+
+    Return<int32_t> hidlReturn(0);
+    {
+        AutoMutex lock(mHalLock);
+        hidlReturn = soundtrigger->startRecognition(model->mHalHandle, *halConfig, this, handle);
+    }
+
+    delete halConfig;
+
+    if (!hidlReturn.isOk()) {
+        ALOGE("startRecognition error %s", hidlReturn.description().c_str());
+        return FAILED_TRANSACTION;
+    }
+    return hidlReturn;
+}
+
+int SoundTriggerHalHidl::stopRecognition(sound_model_handle_t handle)
+{
+    sp<ISoundTriggerHw> soundtrigger = getService();
+    if (soundtrigger == 0) {
+        return -ENODEV;
+    }
+
+    sp<SoundModel> model = getModel(handle);
+    if (model == 0) {
+        ALOGE("stopRecognition model not found for handle %u", handle);
+        return -EINVAL;
+    }
+
+    Return<int32_t> hidlReturn(0);
+    {
+        AutoMutex lock(mHalLock);
+        hidlReturn = soundtrigger->stopRecognition(model->mHalHandle);
+    }
+
+    if (!hidlReturn.isOk()) {
+        ALOGE("stopRecognition error %s", hidlReturn.description().c_str());
+        return FAILED_TRANSACTION;
+    }
+    return hidlReturn;
+}
+
+int SoundTriggerHalHidl::stopAllRecognitions()
+{
+    sp<ISoundTriggerHw> soundtrigger = getService();
+    if (soundtrigger == 0) {
+        return -ENODEV;
+    }
+
+    Return<int32_t> hidlReturn(0);
+    {
+        AutoMutex lock(mHalLock);
+        hidlReturn = soundtrigger->stopAllRecognitions();
+    }
+
+    if (!hidlReturn.isOk()) {
+        ALOGE("stopAllRecognitions error %s", hidlReturn.description().c_str());
+        return FAILED_TRANSACTION;
+    }
+    return hidlReturn;
+}
+
+SoundTriggerHalHidl::SoundTriggerHalHidl(const char *moduleName)
+    : mModuleName(moduleName), mNextUniqueId(1)
+{
+}
+
+SoundTriggerHalHidl::~SoundTriggerHalHidl()
+{
+}
+
+sp<ISoundTriggerHw> SoundTriggerHalHidl::getService()
+{
+    AutoMutex lock(mLock);
+    if (mISoundTrigger == 0) {
+        if (mModuleName == NULL) {
+            mModuleName = "primary";
+        }
+        std::string serviceName = "sound_trigger.";
+        serviceName.append(mModuleName);
+        mISoundTrigger = ISoundTriggerHw::getService(serviceName);
+        if (mISoundTrigger != 0) {
+            mISoundTrigger->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
+        }
+    }
+    return mISoundTrigger;
+}
+
+sp<SoundTriggerHalHidl::SoundModel> SoundTriggerHalHidl::getModel(sound_model_handle_t handle)
+{
+    AutoMutex lock(mLock);
+    return mSoundModels.valueFor(handle);
+}
+
+sp<SoundTriggerHalHidl::SoundModel> SoundTriggerHalHidl::removeModel(sound_model_handle_t handle)
+{
+    AutoMutex lock(mLock);
+    sp<SoundModel> model = mSoundModels.valueFor(handle);
+    mSoundModels.removeItem(handle);
+    return model;
+}
+
+uint32_t SoundTriggerHalHidl::nextUniqueId()
+{
+    return (uint32_t) atomic_fetch_add_explicit(&mNextUniqueId,
+                (uint_fast32_t) 1, memory_order_acq_rel);
+}
+
+void SoundTriggerHalHidl::convertUuidToHal(Uuid *halUuid,
+                                           const sound_trigger_uuid_t *uuid)
+{
+    halUuid->timeLow = uuid->timeLow;
+    halUuid->timeMid = uuid->timeMid;
+    halUuid->versionAndTimeHigh = uuid->timeHiAndVersion;
+    halUuid->variantAndClockSeqHigh = uuid->clockSeq;
+    memcpy(halUuid->node.data(), &uuid->node[0], sizeof(uuid->node));
+}
+
+void SoundTriggerHalHidl::convertUuidFromHal(sound_trigger_uuid_t *uuid,
+                                             const Uuid *halUuid)
+{
+    uuid->timeLow = halUuid->timeLow;
+    uuid->timeMid = halUuid->timeMid;
+    uuid->timeHiAndVersion = halUuid->versionAndTimeHigh;
+    uuid->clockSeq = halUuid->variantAndClockSeqHigh;
+    memcpy(&uuid->node[0], halUuid->node.data(), sizeof(uuid->node));
+}
+
+void SoundTriggerHalHidl::convertPropertiesFromHal(
+        struct sound_trigger_properties *properties,
+        const ISoundTriggerHw::Properties *halProperties)
+{
+    strlcpy(properties->implementor,
+            halProperties->implementor.c_str(), SOUND_TRIGGER_MAX_STRING_LEN);
+    strlcpy(properties->description,
+            halProperties->description.c_str(), SOUND_TRIGGER_MAX_STRING_LEN);
+    properties->version = halProperties->version;
+    convertUuidFromHal(&properties->uuid, &halProperties->uuid);
+    properties->max_sound_models = halProperties->maxSoundModels;
+    properties->max_key_phrases = halProperties->maxKeyPhrases;
+    properties->max_users = halProperties->maxUsers;
+    properties->recognition_modes = halProperties->recognitionModes;
+    properties->capture_transition = (bool)halProperties->captureTransition;
+    properties->max_buffer_ms = halProperties->maxBufferMs;
+    properties->concurrent_capture = (bool)halProperties->concurrentCapture;
+    properties->trigger_in_event = (bool)halProperties->triggerInEvent;
+    properties->power_consumption_mw = halProperties->powerConsumptionMw;
+}
+
+void SoundTriggerHalHidl::convertTriggerPhraseToHal(
+        ISoundTriggerHw::Phrase *halTriggerPhrase,
+        const struct sound_trigger_phrase *triggerPhrase)
+{
+    halTriggerPhrase->id = triggerPhrase->id;
+    halTriggerPhrase->recognitionModes = triggerPhrase->recognition_mode;
+    halTriggerPhrase->users.setToExternal((uint32_t *)&triggerPhrase->users[0], triggerPhrase->num_users);
+    halTriggerPhrase->locale = triggerPhrase->locale;
+    halTriggerPhrase->text = triggerPhrase->text;
+}
+
+ISoundTriggerHw::SoundModel *SoundTriggerHalHidl::convertSoundModelToHal(
+        const struct sound_trigger_sound_model *soundModel)
+{
+    ISoundTriggerHw::SoundModel *halModel = NULL;
+    if (soundModel->type == SOUND_MODEL_TYPE_KEYPHRASE) {
+        ISoundTriggerHw::PhraseSoundModel *halKeyPhraseModel =
+                new ISoundTriggerHw::PhraseSoundModel();
+        struct sound_trigger_phrase_sound_model *keyPhraseModel =
+                (struct sound_trigger_phrase_sound_model *)soundModel;
+        ISoundTriggerHw::Phrase *halPhrases =
+                new ISoundTriggerHw::Phrase[keyPhraseModel->num_phrases];
+
+        for (unsigned int i = 0; i < keyPhraseModel->num_phrases; i++) {
+            convertTriggerPhraseToHal(&halPhrases[i],
+                                      &keyPhraseModel->phrases[i]);
+        }
+        halKeyPhraseModel->phrases.setToExternal(halPhrases, keyPhraseModel->num_phrases);
+        // FIXME: transfer buffer ownership. should have a method for that in hidl_vec
+        halKeyPhraseModel->phrases.resize(keyPhraseModel->num_phrases);
+
+        delete[] halPhrases;
+
+        halModel = (ISoundTriggerHw::SoundModel *)halKeyPhraseModel;
+    } else {
+        halModel = new ISoundTriggerHw::SoundModel();
+    }
+    halModel->type = (SoundModelType)soundModel->type;
+    convertUuidToHal(&halModel->uuid, &soundModel->uuid);
+    convertUuidToHal(&halModel->vendorUuid, &soundModel->vendor_uuid);
+    halModel->data.setToExternal((uint8_t *)soundModel + soundModel->data_offset, soundModel->data_size);
+    halModel->data.resize(soundModel->data_size);
+
+    return halModel;
+}
+
+void SoundTriggerHalHidl::convertPhraseRecognitionExtraToHal(
+        PhraseRecognitionExtra *halExtra,
+        const struct sound_trigger_phrase_recognition_extra *extra)
+{
+    halExtra->id = extra->id;
+    halExtra->recognitionModes = extra->recognition_modes;
+    halExtra->confidenceLevel = extra->confidence_level;
+    ConfidenceLevel *halLevels =
+            new ConfidenceLevel[extra->num_levels];
+    for (unsigned int i = 0; i < extra->num_levels; i++) {
+        halLevels[i].userId = extra->levels[i].user_id;
+        halLevels[i].levelPercent = extra->levels[i].level;
+    }
+    halExtra->levels.setToExternal(halLevels, extra->num_levels);
+    // FIXME: transfer buffer ownership. should have a method for that in hidl_vec
+    halExtra->levels.resize(extra->num_levels);
+
+    delete[] halLevels;
+}
+
+
+ISoundTriggerHw::RecognitionConfig *SoundTriggerHalHidl::convertRecognitionConfigToHal(
+        const struct sound_trigger_recognition_config *config)
+{
+    ISoundTriggerHw::RecognitionConfig *halConfig =
+            new ISoundTriggerHw::RecognitionConfig();
+
+    halConfig->captureHandle = config->capture_handle;
+    halConfig->captureDevice = (AudioDevice)config->capture_device;
+    halConfig->captureRequested = (uint32_t)config->capture_requested;
+
+    PhraseRecognitionExtra *halExtras =
+            new PhraseRecognitionExtra[config->num_phrases];
+
+    for (unsigned int i = 0; i < config->num_phrases; i++) {
+        convertPhraseRecognitionExtraToHal(&halExtras[i],
+                                  &config->phrases[i]);
+    }
+    halConfig->phrases.setToExternal(halExtras, config->num_phrases);
+    // FIXME: transfer buffer ownership. should have a method for that in hidl_vec
+    halConfig->phrases.resize(config->num_phrases);
+
+    delete[] halExtras;
+
+    halConfig->data.setToExternal((uint8_t *)config + config->data_offset, config->data_size);
+
+    return halConfig;
+}
+
+
+// ISoundTriggerHwCallback
+::android::hardware::Return<void> SoundTriggerHalHidl::recognitionCallback(
+        const ISoundTriggerHwCallback::RecognitionEvent& halEvent,
+        CallbackCookie cookie)
+{
+    sp<SoundModel> model;
+    {
+        AutoMutex lock(mLock);
+        model = mSoundModels.valueFor((SoundModelHandle)cookie);
+        if (model == 0) {
+            return Return<void>();
+        }
+    }
+    struct sound_trigger_recognition_event *event = convertRecognitionEventFromHal(&halEvent);
+    if (event == NULL) {
+        return Return<void>();
+    }
+    event->model = model->mHandle;
+    model->mRecognitionCallback(event, model->mRecognitionCookie);
+
+    free(event);
+
+    return Return<void>();
+}
+
+::android::hardware::Return<void> SoundTriggerHalHidl::phraseRecognitionCallback(
+        const ISoundTriggerHwCallback::PhraseRecognitionEvent& halEvent,
+        CallbackCookie cookie)
+{
+    sp<SoundModel> model;
+    {
+        AutoMutex lock(mLock);
+        model = mSoundModels.valueFor((SoundModelHandle)cookie);
+        if (model == 0) {
+            return Return<void>();
+        }
+    }
+
+    struct sound_trigger_recognition_event *event = convertRecognitionEventFromHal(
+                                   (const ISoundTriggerHwCallback::RecognitionEvent *)&halEvent);
+    if (event == NULL) {
+        return Return<void>();
+    }
+
+    event->model = model->mHandle;
+    model->mRecognitionCallback(event, model->mRecognitionCookie);
+
+    free(event);
+
+    return Return<void>();
+}
+
+::android::hardware::Return<void> SoundTriggerHalHidl::soundModelCallback(
+        const ISoundTriggerHwCallback::ModelEvent& halEvent,
+        CallbackCookie cookie)
+{
+    sp<SoundModel> model;
+    {
+        AutoMutex lock(mLock);
+        model = mSoundModels.valueFor((SoundModelHandle)cookie);
+        if (model == 0) {
+            return Return<void>();
+        }
+    }
+
+    struct sound_trigger_model_event *event = convertSoundModelEventFromHal(&halEvent);
+    if (event == NULL) {
+        return Return<void>();
+    }
+
+    event->model = model->mHandle;
+    model->mSoundModelCallback(event, model->mSoundModelCookie);
+
+    free(event);
+
+    return Return<void>();
+}
+
+
+struct sound_trigger_model_event *SoundTriggerHalHidl::convertSoundModelEventFromHal(
+                                              const ISoundTriggerHwCallback::ModelEvent *halEvent)
+{
+    struct sound_trigger_model_event *event = (struct sound_trigger_model_event *)malloc(
+            sizeof(struct sound_trigger_model_event) +
+            halEvent->data.size());
+    if (event == NULL) {
+        return NULL;
+    }
+
+    event->status = (int)halEvent->status;
+    // event->model to be set by caller
+    event->data_offset = sizeof(struct sound_trigger_model_event);
+    event->data_size = halEvent->data.size();
+    uint8_t *dst = (uint8_t *)event + event->data_offset;
+    uint8_t *src = (uint8_t *)&halEvent->data[0];
+    memcpy(dst, src, halEvent->data.size());
+
+    return event;
+}
+
+void SoundTriggerHalHidl::convertPhraseRecognitionExtraFromHal(
+        struct sound_trigger_phrase_recognition_extra *extra,
+        const PhraseRecognitionExtra *halExtra)
+{
+    extra->id = halExtra->id;
+    extra->recognition_modes = halExtra->recognitionModes;
+    extra->confidence_level = halExtra->confidenceLevel;
+
+    size_t i;
+    for (i = 0; i < halExtra->levels.size() && i < SOUND_TRIGGER_MAX_USERS; i++) {
+        extra->levels[i].user_id = halExtra->levels[i].userId;
+        extra->levels[i].level = halExtra->levels[i].levelPercent;
+    }
+    extra->num_levels = (unsigned int)i;
+}
+
+
+struct sound_trigger_recognition_event *SoundTriggerHalHidl::convertRecognitionEventFromHal(
+        const ISoundTriggerHwCallback::RecognitionEvent *halEvent)
+{
+    struct sound_trigger_recognition_event *event;
+
+    if (halEvent->type == SoundModelType::KEYPHRASE) {
+        struct sound_trigger_phrase_recognition_event *phraseEvent =
+                (struct sound_trigger_phrase_recognition_event *)malloc(
+                        sizeof(struct sound_trigger_phrase_recognition_event) +
+                        halEvent->data.size());
+        if (phraseEvent == NULL) {
+            return NULL;
+        }
+        const ISoundTriggerHwCallback::PhraseRecognitionEvent *halPhraseEvent =
+                (const ISoundTriggerHwCallback::PhraseRecognitionEvent *)halEvent;
+
+        for (unsigned int i = 0; i < halPhraseEvent->phraseExtras.size(); i++) {
+            convertPhraseRecognitionExtraFromHal(&phraseEvent->phrase_extras[i],
+                                                 &halPhraseEvent->phraseExtras[i]);
+        }
+        phraseEvent->num_phrases = halPhraseEvent->phraseExtras.size();
+        event = (struct sound_trigger_recognition_event *)phraseEvent;
+        event->data_offset = sizeof(sound_trigger_phrase_recognition_event);
+    } else {
+        event = (struct sound_trigger_recognition_event *)malloc(
+                sizeof(struct sound_trigger_recognition_event) + halEvent->data.size());
+        if (event == NULL) {
+            return NULL;
+        }
+        event->data_offset = sizeof(sound_trigger_recognition_event);
+    }
+    event->status = (int)halEvent->status;
+    event->type = (sound_trigger_sound_model_type_t)halEvent->type;
+    // event->model to be set by caller
+    event->capture_available = (bool)halEvent->captureAvailable;
+    event->capture_session = halEvent->captureSession;
+    event->capture_delay_ms = halEvent->captureDelayMs;
+    event->capture_preamble_ms = halEvent->capturePreambleMs;
+    event->trigger_in_data = (bool)halEvent->triggerInData;
+    event->audio_config.sample_rate = halEvent->audioConfig.sampleRateHz;
+    event->audio_config.channel_mask = (audio_channel_mask_t)halEvent->audioConfig.channelMask;
+    event->audio_config.format = (audio_format_t)halEvent->audioConfig.format;
+
+    event->data_size = halEvent->data.size();
+    uint8_t *dst = (uint8_t *)event + event->data_offset;
+    uint8_t *src = (uint8_t *)&halEvent->data[0];
+    memcpy(dst, src, halEvent->data.size());
+
+    return event;
+}
+
+} // namespace android
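The HIDL methods above use two distinct error domains: the HAL status delivered through the results lambda, and the binder transport status carried by the Return<> object. A reduced, illustrative sketch of that calling convention (not part of the patch):

    // Illustration only: the synchronous-callback pattern used by getProperties()/loadSoundModel().
    int ret = -ENODEV;                                   // HAL-level status, set by the lambda
    ISoundTriggerHw::Properties halProperties;
    Return<void> hidlReturn = soundtrigger->getProperties(
            [&](int rc, const ISoundTriggerHw::Properties& res) {
                ret = rc;                                // 0 on success
                halProperties = res;                     // payload, meaningful only when rc == 0
            });
    if (!hidlReturn.isOk()) {
        return FAILED_TRANSACTION;                       // transport failure: the lambda may never have run
    }
    return ret;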
diff --git a/services/soundtrigger/SoundTriggerHalHidl.h b/services/soundtrigger/SoundTriggerHalHidl.h
new file mode 100644
index 0000000..916fcc4
--- /dev/null
+++ b/services/soundtrigger/SoundTriggerHalHidl.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_SOUNDTRIGGER_HAL_HIDL_H
+#define ANDROID_HARDWARE_SOUNDTRIGGER_HAL_HIDL_H
+
+#include <utils/RefBase.h>
+#include <utils/KeyedVector.h>
+#include <utils/Vector.h>
+#include <utils/threads.h>
+#include "SoundTriggerHalInterface.h"
+#include <android/hardware/soundtrigger/2.0/types.h>
+#include <android/hardware/soundtrigger/2.0/ISoundTriggerHw.h>
+#include <android/hardware/soundtrigger/2.0/ISoundTriggerHwCallback.h>
+
+namespace android {
+
+using android::hardware::audio::common::V2_0::Uuid;
+using android::hardware::soundtrigger::V2_0::ConfidenceLevel;
+using android::hardware::soundtrigger::V2_0::PhraseRecognitionExtra;
+using android::hardware::soundtrigger::V2_0::SoundModelType;
+using android::hardware::soundtrigger::V2_0::SoundModelHandle;
+using android::hardware::soundtrigger::V2_0::ISoundTriggerHw;
+using android::hardware::soundtrigger::V2_0::ISoundTriggerHwCallback;
+
+class SoundTriggerHalHidl : public SoundTriggerHalInterface,
+                            public virtual ISoundTriggerHwCallback
+
+{
+public:
+        virtual int getProperties(struct sound_trigger_properties *properties);
+
+        /*
+         * Load a sound model. Once loaded, recognition of this model can be started and stopped.
+         * Only one active recognition per model at a time. The SoundTrigger service will handle
+         * concurrent recognition requests by different users/applications on the same model.
+         * The implementation returns a unique handle used by other functions (unload_sound_model(),
+         * start_recognition(), etc.).
+         */
+        virtual int loadSoundModel(struct sound_trigger_sound_model *sound_model,
+                                sound_model_callback_t callback,
+                                void *cookie,
+                                sound_model_handle_t *handle);
+
+        /*
+         * Unload a sound model. A sound model can be unloaded to make room for a new one to overcome
+         * implementation limitations.
+         */
+        virtual int unloadSoundModel(sound_model_handle_t handle);
+
+        /* Start recognition on a given model. Only one recognition active at a time per model.
+         * Once recognition succeeds or fails, the callback is called.
+         * TODO: group recognition configuration parameters into one struct and add key phrase options.
+         */
+        virtual int startRecognition(sound_model_handle_t handle,
+                                 const struct sound_trigger_recognition_config *config,
+                                 recognition_callback_t callback,
+                                 void *cookie);
+
+        /* Stop recognition on a given model.
+         * The implementation does not have to call the callback when stopped via this method.
+         */
+        virtual int stopRecognition(sound_model_handle_t handle);
+
+        /* Stop recognition on all models.
+         * Only supported for device api versions SOUND_TRIGGER_DEVICE_API_VERSION_1_1 or above.
+         * If no implementation is provided, stop_recognition will be called for each running model.
+         */
+        virtual int stopAllRecognitions();
+
+        // ISoundTriggerHwCallback
+        virtual ::android::hardware::Return<void> recognitionCallback(
+                const ISoundTriggerHwCallback::RecognitionEvent& event, CallbackCookie cookie);
+        virtual ::android::hardware::Return<void> phraseRecognitionCallback(
+                const ISoundTriggerHwCallback::PhraseRecognitionEvent& event, int32_t cookie);
+        virtual ::android::hardware::Return<void> soundModelCallback(
+                const ISoundTriggerHwCallback::ModelEvent& event, CallbackCookie cookie);
+private:
+        class SoundModel : public RefBase {
+        public:
+            SoundModel(sound_model_handle_t handle, sound_model_callback_t callback,
+                       void *cookie, android::hardware::soundtrigger::V2_0::SoundModelHandle halHandle)
+                 : mHandle(handle), mHalHandle(halHandle),
+                   mSoundModelCallback(callback), mSoundModelCookie(cookie),
+                   mRecognitionCallback(NULL), mRecognitionCookie(NULL) {}
+            ~SoundModel() {}
+
+            sound_model_handle_t   mHandle;
+            android::hardware::soundtrigger::V2_0::SoundModelHandle mHalHandle;
+            sound_model_callback_t mSoundModelCallback;
+            void *                 mSoundModelCookie;
+            recognition_callback_t mRecognitionCallback;
+            void *                 mRecognitionCookie;
+        };
+
+        friend class SoundTriggerHalInterface;
+
+        explicit SoundTriggerHalHidl(const char *moduleName = NULL);
+        virtual  ~SoundTriggerHalHidl();
+
+        void convertUuidToHal(Uuid *halUuid,
+                              const sound_trigger_uuid_t *uuid);
+        void convertUuidFromHal(sound_trigger_uuid_t *uuid,
+                                const Uuid *halUuid);
+
+        void convertPropertiesFromHal(
+                struct sound_trigger_properties *properties,
+                const ISoundTriggerHw::Properties *halProperties);
+
+        void convertTriggerPhraseToHal(
+                ISoundTriggerHw::Phrase *halTriggerPhrase,
+                const struct sound_trigger_phrase *triggerPhrase);
+        ISoundTriggerHw::SoundModel *convertSoundModelToHal(
+                const struct sound_trigger_sound_model *soundModel);
+
+        void convertPhraseRecognitionExtraToHal(
+                PhraseRecognitionExtra *halExtra,
+                const struct sound_trigger_phrase_recognition_extra *extra);
+        ISoundTriggerHw::RecognitionConfig *convertRecognitionConfigToHal(
+                const struct sound_trigger_recognition_config *config);
+
+        struct sound_trigger_model_event *convertSoundModelEventFromHal(
+                                              const ISoundTriggerHwCallback::ModelEvent *halEvent);
+        void convertPhraseRecognitionExtraFromHal(
+                struct sound_trigger_phrase_recognition_extra *extra,
+                const PhraseRecognitionExtra *halExtra);
+        struct sound_trigger_recognition_event *convertRecognitionEventFromHal(
+                const ISoundTriggerHwCallback::RecognitionEvent *halEvent);
+
+        uint32_t nextUniqueId();
+        sp<ISoundTriggerHw> getService();
+        sp<SoundModel> getModel(sound_model_handle_t handle);
+        sp<SoundModel> removeModel(sound_model_handle_t handle);
+
+        static pthread_once_t sOnceControl;
+        static void sOnceInit();
+
+        Mutex mLock;
+        Mutex mHalLock;
+        const char *mModuleName;
+        volatile atomic_uint_fast32_t  mNextUniqueId;
+        // Loaded sound models, indexed by sound_model_handle_t
+        DefaultKeyedVector< sound_model_handle_t , sp<SoundModel> > mSoundModels;
+        sp<::android::hardware::soundtrigger::V2_0::ISoundTriggerHw> mISoundTrigger;
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_SOUNDTRIGGER_HAL_HIDL_H
diff --git a/services/soundtrigger/SoundTriggerHalInterface.h b/services/soundtrigger/SoundTriggerHalInterface.h
new file mode 100644
index 0000000..c083195
--- /dev/null
+++ b/services/soundtrigger/SoundTriggerHalInterface.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_SOUNDTRIGGER_HAL_INTERFACE_H
+#define ANDROID_HARDWARE_SOUNDTRIGGER_HAL_INTERFACE_H
+
+#include <utils/RefBase.h>
+#include <system/sound_trigger.h>
+#include <hardware/sound_trigger.h>
+
+namespace android {
+
+class SoundTriggerHalInterface : public virtual RefBase
+{
+public:
+        /* get a sound trigger HAL instance */
+        static sp<SoundTriggerHalInterface> connectModule(const char *moduleName);
+
+        virtual     ~SoundTriggerHalInterface() {}
+
+        virtual int getProperties(struct sound_trigger_properties *properties) = 0;
+
+        /*
+         * Load a sound model. Once loaded, recognition of this model can be started and stopped.
+         * Only one active recognition per model at a time. The SoundTrigger service will handle
+         * concurrent recognition requests by different users/applications on the same model.
+         * The implementation returns a unique handle used by other functions (unload_sound_model(),
+         * start_recognition(), etc.).
+         */
+        virtual int loadSoundModel(struct sound_trigger_sound_model *sound_model,
+                                sound_model_callback_t callback,
+                                void *cookie,
+                                sound_model_handle_t *handle) = 0;
+
+        /*
+         * Unload a sound model. A sound model can be unloaded to make room for a new one to overcome
+         * implementation limitations.
+         */
+        virtual int unloadSoundModel(sound_model_handle_t handle) = 0;
+
+        /* Start recognition on a given model. Only one recognition active at a time per model.
+         * Once recognition succeeds or fails, the callback is called.
+         * TODO: group recognition configuration parameters into one struct and add key phrase options.
+         */
+        virtual int startRecognition(sound_model_handle_t handle,
+                                 const struct sound_trigger_recognition_config *config,
+                                 recognition_callback_t callback,
+                                 void *cookie) = 0;
+
+        /* Stop recognition on a given model.
+         * The implementation does not have to call the callback when stopped via this method.
+         */
+        virtual int stopRecognition(sound_model_handle_t handle) = 0;
+
+        /* Stop recognition on all models.
+         * Only supported for device API versions SOUND_TRIGGER_DEVICE_API_VERSION_1_1 or above.
+         * Returns -ENOSYS if not supported; the caller then calls stopRecognition() for each running model.
+         */
+        virtual int stopAllRecognitions() = 0;
+
+protected:
+        SoundTriggerHalInterface() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_SOUNDTRIGGER_HAL_INTERFACE_H
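
For reference, a minimal usage sketch of the new SoundTriggerHalInterface (illustrative
only, not part of this patch; the module name, callbacks and model arguments below are
placeholders, and error handling is reduced to early returns):

    #include <errno.h>
    #include "SoundTriggerHalInterface.h"

    using android::sp;
    using android::SoundTriggerHalInterface;

    // Placeholder callbacks matching sound_model_callback_t / recognition_callback_t.
    static void onModelEvent(struct sound_trigger_model_event * /*event*/, void * /*cookie*/) {}
    static void onRecognition(struct sound_trigger_recognition_event * /*event*/, void * /*cookie*/) {}

    int runOnce(struct sound_trigger_sound_model *model,
                const struct sound_trigger_recognition_config *config) {
        sp<SoundTriggerHalInterface> hal = SoundTriggerHalInterface::connectModule("primary");
        if (hal == 0) return -ENODEV;

        struct sound_trigger_properties properties;
        int rc = hal->getProperties(&properties);
        if (rc != 0) return rc;              // e.g. -ENODEV if the HAL device failed to open

        sound_model_handle_t handle;
        rc = hal->loadSoundModel(model, onModelEvent, NULL, &handle);
        if (rc != 0) return rc;

        // Only one active recognition per model at a time.
        rc = hal->startRecognition(handle, config, onRecognition, NULL);
        if (rc == 0) {
            hal->stopRecognition(handle);
        }
        hal->unloadSoundModel(handle);
        return rc;
    }
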
diff --git a/services/soundtrigger/SoundTriggerHalLegacy.cpp b/services/soundtrigger/SoundTriggerHalLegacy.cpp
new file mode 100644
index 0000000..2b78818
--- /dev/null
+++ b/services/soundtrigger/SoundTriggerHalLegacy.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <utils/Log.h>
+#include "SoundTriggerHalLegacy.h"
+
+namespace android {
+
+/* static */
+sp<SoundTriggerHalInterface> SoundTriggerHalInterface::connectModule(const char *moduleName)
+{
+    return new SoundTriggerHalLegacy(moduleName);
+}
+
+SoundTriggerHalLegacy::SoundTriggerHalLegacy(const char *moduleName)
+    : mModuleName(moduleName), mHwDevice(NULL)
+{
+}
+
+void SoundTriggerHalLegacy::onFirstRef()
+{
+    const hw_module_t *mod;
+    int rc;
+
+    if (mModuleName == NULL) {
+        mModuleName = "primary";
+    }
+
+    rc = hw_get_module_by_class(SOUND_TRIGGER_HARDWARE_MODULE_ID, mModuleName, &mod);
+    if (rc != 0) {
+        ALOGE("couldn't load sound trigger module %s.%s (%s)",
+              SOUND_TRIGGER_HARDWARE_MODULE_ID, mModuleName, strerror(-rc));
+        return;
+    }
+    rc = sound_trigger_hw_device_open(mod, &mHwDevice);
+    if (rc != 0) {
+        ALOGE("couldn't open sound trigger hw device in %s.%s (%s)",
+              SOUND_TRIGGER_HARDWARE_MODULE_ID, mModuleName, strerror(-rc));
+        mHwDevice = NULL;
+        return;
+    }
+    if (mHwDevice->common.version < SOUND_TRIGGER_DEVICE_API_VERSION_1_0 ||
+            mHwDevice->common.version > SOUND_TRIGGER_DEVICE_API_VERSION_CURRENT) {
+        ALOGE("wrong sound trigger hw device version %04x", mHwDevice->common.version);
+        return;
+    }
+}
+
+SoundTriggerHalLegacy::~SoundTriggerHalLegacy()
+{
+    if (mHwDevice != NULL) {
+        sound_trigger_hw_device_close(mHwDevice);
+    }
+}
+
+int SoundTriggerHalLegacy::getProperties(struct sound_trigger_properties *properties)
+{
+    if (mHwDevice == NULL) {
+        return -ENODEV;
+    }
+    return mHwDevice->get_properties(mHwDevice, properties);
+}
+
+int SoundTriggerHalLegacy::loadSoundModel(struct sound_trigger_sound_model *sound_model,
+                        sound_model_callback_t callback,
+                        void *cookie,
+                        sound_model_handle_t *handle)
+{
+    if (mHwDevice == NULL) {
+        return -ENODEV;
+    }
+    return mHwDevice->load_sound_model(mHwDevice, sound_model, callback, cookie, handle);
+}
+
+int SoundTriggerHalLegacy::unloadSoundModel(sound_model_handle_t handle)
+{
+    if (mHwDevice == NULL) {
+        return -ENODEV;
+    }
+    return mHwDevice->unload_sound_model(mHwDevice, handle);
+}
+
+int SoundTriggerHalLegacy::startRecognition(sound_model_handle_t handle,
+                         const struct sound_trigger_recognition_config *config,
+                         recognition_callback_t callback,
+                         void *cookie)
+{
+    if (mHwDevice == NULL) {
+        return -ENODEV;
+    }
+    return mHwDevice->start_recognition(mHwDevice, handle, config, callback, cookie);
+}
+
+int SoundTriggerHalLegacy::stopRecognition(sound_model_handle_t handle)
+{
+    if (mHwDevice == NULL) {
+        return -ENODEV;
+    }
+    return mHwDevice->stop_recognition(mHwDevice, handle);
+}
+
+int SoundTriggerHalLegacy::stopAllRecognitions()
+{
+    if (mHwDevice == NULL) {
+        return -ENODEV;
+    }
+    if (mHwDevice->common.version >= SOUND_TRIGGER_DEVICE_API_VERSION_1_1 &&
+            mHwDevice->stop_all_recognitions) {
+        return mHwDevice->stop_all_recognitions(mHwDevice);
+    }
+    return -ENOSYS;
+}
+
+} // namespace android
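
A note on the design choice above (sketch only, not part of this patch): the HAL module is
opened in onFirstRef() rather than in the constructor, so the open runs exactly once, when
connectModule() wraps the object in its first strong pointer. The pattern in reduced form,
with placeholder names:

    #include <utils/RefBase.h>

    using android::RefBase;
    using android::sp;

    class LazyOpen : public RefBase {
    public:
        // Taking the first strong reference is what triggers onFirstRef().
        static sp<LazyOpen> create() { return new LazyOpen(); }
        bool opened() const { return mOpened; }
    private:
        LazyOpen() : mOpened(false) {}
        // Runs once when the first strong reference is taken; a safe place for
        // work (like opening a HAL device) that should not happen in the constructor.
        virtual void onFirstRef() { mOpened = true; }
        bool mOpened;
    };
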
diff --git a/services/soundtrigger/SoundTriggerHalLegacy.h b/services/soundtrigger/SoundTriggerHalLegacy.h
new file mode 100644
index 0000000..52488de
--- /dev/null
+++ b/services/soundtrigger/SoundTriggerHalLegacy.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_SOUNDTRIGGER_HAL_LEGACY_H
+#define ANDROID_HARDWARE_SOUNDTRIGGER_HAL_LEGACY_H
+
+#include "SoundTriggerHalInterface.h"
+
+namespace android {
+
+class SoundTriggerHalLegacy : public SoundTriggerHalInterface
+
+{
+public:
+        virtual             ~SoundTriggerHalLegacy();
+
+        virtual int getProperties(struct sound_trigger_properties *properties);
+
+        /*
+         * Load a sound model. Once loaded, recognition of this model can be started and stopped.
+         * Only one active recognition per model at a time. The SoundTrigger service will handle
+         * concurrent recognition requests by different users/applications on the same model.
+         * The implementation returns a unique handle used by other functions (unload_sound_model(),
+         * start_recognition(), etc.).
+         */
+        virtual int loadSoundModel(struct sound_trigger_sound_model *sound_model,
+                                sound_model_callback_t callback,
+                                void *cookie,
+                                sound_model_handle_t *handle);
+
+        /*
+         * Unload a sound model. A sound model can be unloaded to make room for a new one to overcome
+         * implementation limitations.
+         */
+        virtual int unloadSoundModel(sound_model_handle_t handle);
+
+        /* Start recognition on a given model. Only one recognition active at a time per model.
+         * Once recognition succeeds or fails, the callback is called.
+         * TODO: group recognition configuration parameters into one struct and add key phrase options.
+         */
+        virtual int startRecognition(sound_model_handle_t handle,
+                                 const struct sound_trigger_recognition_config *config,
+                                 recognition_callback_t callback,
+                                 void *cookie);
+
+        /* Stop recognition on a given model.
+         * The implementation does not have to call the callback when stopped via this method.
+         */
+        virtual int stopRecognition(sound_model_handle_t handle);
+
+        /* Stop recognition on all models.
+         * Only supported for device API versions SOUND_TRIGGER_DEVICE_API_VERSION_1_1 or above.
+         * Returns -ENOSYS if the underlying HAL does not implement stop_all_recognitions().
+         */
+        virtual int stopAllRecognitions();
+
+        // RefBase
+        virtual     void        onFirstRef();
+
+private:
+
+        friend class SoundTriggerHalInterface;
+
+        explicit SoundTriggerHalLegacy(const char *moduleName = NULL);
+
+        const char *mModuleName;
+        struct sound_trigger_hw_device*        mHwDevice;
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_SOUNDTRIGGER_HAL_LEGACY_H
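
Because connectModule() is the single place that picks the concrete class, an alternative
backend only needs to mirror this header's shape: derive from SoundTriggerHalInterface,
keep the constructor private, and befriend the interface so the factory can construct it.
A hypothetical no-hardware backend might look like the sketch below (SoundTriggerHalStub
is a made-up name, not part of this patch):

    #include <errno.h>
    #include "SoundTriggerHalInterface.h"

    namespace android {

    class SoundTriggerHalStub : public SoundTriggerHalInterface {
    public:
        virtual ~SoundTriggerHalStub() {}
        // Every entry point reports that no device is present.
        virtual int getProperties(struct sound_trigger_properties *) { return -ENODEV; }
        virtual int loadSoundModel(struct sound_trigger_sound_model *,
                                   sound_model_callback_t, void *,
                                   sound_model_handle_t *) { return -ENODEV; }
        virtual int unloadSoundModel(sound_model_handle_t) { return -ENODEV; }
        virtual int startRecognition(sound_model_handle_t,
                                     const struct sound_trigger_recognition_config *,
                                     recognition_callback_t, void *) { return -ENODEV; }
        virtual int stopRecognition(sound_model_handle_t) { return -ENODEV; }
        // -ENOSYS tells callers to fall back to stopping each model individually.
        virtual int stopAllRecognitions() { return -ENOSYS; }
    private:
        friend class SoundTriggerHalInterface;
        SoundTriggerHalStub() {}
    };

    } // namespace android
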
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index 6a52b9c..54f9b95 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -32,17 +32,16 @@
 #include <binder/IServiceManager.h>
 #include <binder/MemoryBase.h>
 #include <binder/MemoryHeapBase.h>
-#include <hardware/sound_trigger.h>
+#include <system/sound_trigger.h>
 #include <ServiceUtilities.h>
 #include "SoundTriggerHwService.h"
 
-namespace android {
-
 #ifdef SOUND_TRIGGER_USE_STUB_MODULE
 #define HW_MODULE_PREFIX "stub"
 #else
 #define HW_MODULE_PREFIX "primary"
 #endif
+namespace android {
 
 SoundTriggerHwService::SoundTriggerHwService()
     : BnSoundTriggerHwService(),
@@ -54,30 +53,17 @@
 
 void SoundTriggerHwService::onFirstRef()
 {
-    const hw_module_t *mod;
     int rc;
-    sound_trigger_hw_device *dev;
 
-    rc = hw_get_module_by_class(SOUND_TRIGGER_HARDWARE_MODULE_ID, HW_MODULE_PREFIX, &mod);
-    if (rc != 0) {
-        ALOGE("couldn't load sound trigger module %s.%s (%s)",
-              SOUND_TRIGGER_HARDWARE_MODULE_ID, HW_MODULE_PREFIX, strerror(-rc));
-        return;
-    }
-    rc = sound_trigger_hw_device_open(mod, &dev);
-    if (rc != 0) {
-        ALOGE("couldn't open sound trigger hw device in %s.%s (%s)",
-              SOUND_TRIGGER_HARDWARE_MODULE_ID, HW_MODULE_PREFIX, strerror(-rc));
-        return;
-    }
-    if (dev->common.version < SOUND_TRIGGER_DEVICE_API_VERSION_1_0 ||
-        dev->common.version > SOUND_TRIGGER_DEVICE_API_VERSION_CURRENT) {
-        ALOGE("wrong sound trigger hw device version %04x", dev->common.version);
-        return;
-    }
+    sp<SoundTriggerHalInterface> halInterface =
+            SoundTriggerHalInterface::connectModule(HW_MODULE_PREFIX);
 
+    if (halInterface == 0) {
+        ALOGW("could not connect to HAL");
+        return;
+    }
     sound_trigger_module_descriptor descriptor;
-    rc = dev->get_properties(dev, &descriptor.properties);
+    rc = halInterface->getProperties(&descriptor.properties);
     if (rc != 0) {
         ALOGE("could not read implementation properties");
         return;
@@ -87,8 +73,7 @@
     ALOGI("loaded default module %s, handle %d", descriptor.properties.description,
                                                  descriptor.handle);
 
-    sp<ISoundTriggerClient> client;
-    sp<Module> module = new Module(this, dev, descriptor, client);
+    sp<Module> module = new Module(this, halInterface, descriptor);
     mModules.add(descriptor.handle, module);
     mCallbackThread = new CallbackThread(this);
 }
@@ -98,9 +83,6 @@
     if (mCallbackThread != 0) {
         mCallbackThread->exit();
     }
-    for (size_t i = 0; i < mModules.size(); i++) {
-        sound_trigger_hw_device_close(mModules.valueAt(i)->hwDevice());
-    }
 }
 
 status_t SoundTriggerHwService::listModules(struct sound_trigger_module_descriptor *modules,
@@ -143,11 +125,13 @@
     }
     sp<Module> module = mModules.valueAt(index);
 
-    module->setClient(client);
-    IInterface::asBinder(client)->linkToDeath(module);
-    moduleInterface = module;
+    sp<ModuleClient> moduleClient = module->addClient(client);
+    if (moduleClient == 0) {
+        return NO_INIT;
+    }
 
-    module->setCaptureState_l(mCaptureState);
+    moduleClient->setCaptureState_l(mCaptureState);
+    moduleInterface = moduleClient;
 
     return NO_ERROR;
 }
@@ -164,14 +148,6 @@
 }
 
 
-void SoundTriggerHwService::detachModule(const sp<Module>& module)
-{
-    ALOGV("detachModule");
-    AutoMutex lock(mServiceLock);
-    module->clearClient();
-}
-
-
 static const int kDumpLockRetries = 50;
 static const int kDumpLockSleep = 60000;
 
@@ -293,8 +269,10 @@
          return;
      }
 
-     sendCallbackEvent_l(new CallbackEvent(CallbackEvent::TYPE_RECOGNITION,
-                                                  eventMemory, strongModule));
+    sp<CallbackEvent> callbackEvent = new CallbackEvent(CallbackEvent::TYPE_RECOGNITION,
+                                                        eventMemory);
+    callbackEvent->setModule(strongModule);
+    sendCallbackEvent_l(callbackEvent);
 }
 
 // static
@@ -346,8 +324,10 @@
     if (strongModule == 0) {
         return;
     }
-    sendCallbackEvent_l(new CallbackEvent(CallbackEvent::TYPE_SOUNDMODEL,
-                                                 eventMemory, strongModule));
+    sp<CallbackEvent> callbackEvent = new CallbackEvent(CallbackEvent::TYPE_SOUNDMODEL,
+                                                        eventMemory);
+    callbackEvent->setModule(strongModule);
+    sendCallbackEvent_l(callbackEvent);
 }
 
 
@@ -383,8 +363,23 @@
     if (strongModule == 0) {
         return;
     }
-    sendCallbackEvent_l(new CallbackEvent(CallbackEvent::TYPE_SERVICE_STATE,
-                                                 eventMemory, strongModule));
+    sp<CallbackEvent> callbackEvent = new CallbackEvent(CallbackEvent::TYPE_SERVICE_STATE,
+                                                        eventMemory);
+    callbackEvent->setModule(strongModule);
+    sendCallbackEvent_l(callbackEvent);
+}
+
+void SoundTriggerHwService::sendServiceStateEvent_l(sound_trigger_service_state_t state,
+                                                    ModuleClient *moduleClient)
+{
+    sp<IMemory> eventMemory = prepareServiceStateEvent_l(state);
+    if (eventMemory == 0) {
+        return;
+    }
+    sp<CallbackEvent> callbackEvent = new CallbackEvent(CallbackEvent::TYPE_SERVICE_STATE,
+                                                        eventMemory);
+    callbackEvent->setModuleClient(moduleClient);
+    sendCallbackEvent_l(callbackEvent);
 }
 
 // call with mServiceLock held
@@ -397,14 +392,25 @@
 {
     ALOGV("onCallbackEvent");
     sp<Module> module;
+    sp<ModuleClient> moduleClient;
     {
         AutoMutex lock(mServiceLock);
+        // CallbackEvent is either for Module or ModuleClient
         module = event->mModule.promote();
         if (module == 0) {
-            return;
+            moduleClient = event->mModuleClient.promote();
+            if (moduleClient == 0) {
+                return;
+            }
         }
     }
-    module->onCallbackEvent(event);
+    if (module != 0) {
+        ALOGV("onCallbackEvent for module");
+        module->onCallbackEvent(event);
+    } else if (moduleClient != 0) {
+        ALOGV("onCallbackEvent for moduleClient");
+        moduleClient->onCallbackEvent(event);
+    }
     {
         AutoMutex lock(mServiceLock);
         // clear now to execute with mServiceLock locked
@@ -474,9 +480,8 @@
     mCallbackCond.signal();
 }
 
-SoundTriggerHwService::CallbackEvent::CallbackEvent(event_type type, sp<IMemory> memory,
-                                                    wp<Module> module)
-    : mType(type), mMemory(memory), mModule(module)
+SoundTriggerHwService::CallbackEvent::CallbackEvent(event_type type, sp<IMemory> memory)
+    : mType(type), mMemory(memory)
 {
 }
 
@@ -489,52 +494,81 @@
 #define LOG_TAG "SoundTriggerHwService::Module"
 
 SoundTriggerHwService::Module::Module(const sp<SoundTriggerHwService>& service,
-                                      sound_trigger_hw_device* hwDevice,
-                                      sound_trigger_module_descriptor descriptor,
-                                      const sp<ISoundTriggerClient>& client)
- : mService(service), mHwDevice(hwDevice), mDescriptor(descriptor),
-   mClient(client), mServiceState(SOUND_TRIGGER_STATE_NO_INIT)
+                                      const sp<SoundTriggerHalInterface>& halInterface,
+                                      sound_trigger_module_descriptor descriptor)
+ : mService(service), mHalInterface(halInterface), mDescriptor(descriptor),
+   mServiceState(SOUND_TRIGGER_STATE_NO_INIT)
 {
 }
 
 SoundTriggerHwService::Module::~Module() {
+    mModuleClients.clear();
 }
 
-void SoundTriggerHwService::Module::detach() {
-    ALOGV("detach()");
-    if (!captureHotwordAllowed()) {
-        return;
-    }
-    {
-        AutoMutex lock(mLock);
-        for (size_t i = 0; i < mModels.size(); i++) {
-            sp<Model> model = mModels.valueAt(i);
-            ALOGV("detach() unloading model %d", model->mHandle);
-            if (model->mState == Model::STATE_ACTIVE) {
-                mHwDevice->stop_recognition(mHwDevice, model->mHandle);
-            }
-            mHwDevice->unload_sound_model(mHwDevice, model->mHandle);
+sp<SoundTriggerHwService::ModuleClient>
+SoundTriggerHwService::Module::addClient(const sp<ISoundTriggerClient>& client)
+{
+    AutoMutex lock(mLock);
+    sp<ModuleClient> moduleClient;
+
+    for (size_t i = 0; i < mModuleClients.size(); i++) {
+        if (mModuleClients[i]->client() == client) {
+            // Client already present, reuse client
+            return mModuleClients[i];
         }
-        mModels.clear();
     }
-    if (mClient != 0) {
-        IInterface::asBinder(mClient)->unlinkToDeath(this);
+    moduleClient = new ModuleClient(this, client);
+
+    ALOGV("addClient() client %p", moduleClient.get());
+    mModuleClients.add(moduleClient);
+
+    return moduleClient;
+}
+
+void SoundTriggerHwService::Module::detach(const sp<ModuleClient>& moduleClient)
+{
+    ALOGV("Module::detach()");
+    AutoMutex lock(mLock);
+    ssize_t index = -1;
+
+    for (size_t i = 0; i < mModuleClients.size(); i++) {
+        if (mModuleClients[i] == moduleClient) {
+            index = i;
+            break;
+        }
     }
-    sp<SoundTriggerHwService> service = mService.promote();
-    if (service == 0) {
+    if (index == -1) {
         return;
     }
-    service->detachModule(this);
+
+    ALOGV("remove client %p", moduleClient.get());
+    mModuleClients.removeAt(index);
+
+    for (size_t i = 0; i < mModels.size();) {
+        sp<Model> model = mModels.valueAt(i);
+        if (moduleClient == model->mModuleClient) {
+            mModels.removeItemsAt(i);
+            ALOGV("detach() unloading model %d", model->mHandle);
+            if (mHalInterface != 0) {
+                if (model->mState == Model::STATE_ACTIVE) {
+                    mHalInterface->stopRecognition(model->mHandle);
+                }
+                mHalInterface->unloadSoundModel(model->mHandle);
+            }
+            AudioSystem::releaseSoundTriggerSession(model->mCaptureSession);
+        } else {
+            i++;
+        }
+    }
 }
 
 status_t SoundTriggerHwService::Module::loadSoundModel(const sp<IMemory>& modelMemory,
-                                sound_model_handle_t *handle)
+                                                       sp<ModuleClient> moduleClient,
+                                                       sound_model_handle_t *handle)
 {
     ALOGV("loadSoundModel() handle");
-    if (!captureHotwordAllowed()) {
-        return PERMISSION_DENIED;
+    if (mHalInterface == 0) {
+        return NO_INIT;
     }
-
     if (modelMemory == 0 || modelMemory->pointer() == NULL) {
         ALOGE("loadSoundModel() modelMemory is 0 or has NULL pointer()");
         return BAD_VALUE;
@@ -566,7 +600,7 @@
         return INVALID_OPERATION;
     }
 
-    status_t status = mHwDevice->load_sound_model(mHwDevice, sound_model,
+    status_t status = mHalInterface->loadSoundModel(sound_model,
                                                   SoundTriggerHwService::soundModelCallback,
                                                   this, handle);
 
@@ -582,7 +616,8 @@
         return status;
     }
 
-    sp<Model> model = new Model(*handle, session, ioHandle, device, sound_model->type);
+    sp<Model> model = new Model(*handle, session, ioHandle, device, sound_model->type,
+                                moduleClient);
     mModels.replaceValueFor(*handle, model);
 
     return status;
@@ -591,16 +626,15 @@
 status_t SoundTriggerHwService::Module::unloadSoundModel(sound_model_handle_t handle)
 {
     ALOGV("unloadSoundModel() model handle %d", handle);
-    if (!captureHotwordAllowed()) {
-        return PERMISSION_DENIED;
-    }
-
     AutoMutex lock(mLock);
     return unloadSoundModel_l(handle);
 }
 
 status_t SoundTriggerHwService::Module::unloadSoundModel_l(sound_model_handle_t handle)
 {
+    if (mHalInterface == 0) {
+        return NO_INIT;
+    }
     ssize_t index = mModels.indexOfKey(handle);
     if (index < 0) {
         return BAD_VALUE;
@@ -608,21 +642,20 @@
     sp<Model> model = mModels.valueAt(index);
     mModels.removeItem(handle);
     if (model->mState == Model::STATE_ACTIVE) {
-        mHwDevice->stop_recognition(mHwDevice, model->mHandle);
+        mHalInterface->stopRecognition(model->mHandle);
         model->mState = Model::STATE_IDLE;
     }
     AudioSystem::releaseSoundTriggerSession(model->mCaptureSession);
-    return mHwDevice->unload_sound_model(mHwDevice, handle);
+    return mHalInterface->unloadSoundModel(handle);
 }
 
 status_t SoundTriggerHwService::Module::startRecognition(sound_model_handle_t handle,
                                  const sp<IMemory>& dataMemory)
 {
     ALOGV("startRecognition() model handle %d", handle);
-    if (!captureHotwordAllowed()) {
-        return PERMISSION_DENIED;
+    if (mHalInterface == 0) {
+        return NO_INIT;
     }
-
     if (dataMemory == 0 || dataMemory->pointer() == NULL) {
         ALOGE("startRecognition() dataMemory is 0 or has NULL pointer()");
         return BAD_VALUE;
@@ -657,7 +690,7 @@
     //TODO: get capture handle and device from audio policy service
     config->capture_handle = model->mCaptureIOHandle;
     config->capture_device = model->mCaptureDevice;
-    status_t status = mHwDevice->start_recognition(mHwDevice, handle, config,
+    status_t status = mHalInterface->startRecognition(handle, config,
                                         SoundTriggerHwService::recognitionCallback,
                                         this);
 
@@ -672,10 +705,9 @@
 status_t SoundTriggerHwService::Module::stopRecognition(sound_model_handle_t handle)
 {
     ALOGV("stopRecognition() model handle %d", handle);
-    if (!captureHotwordAllowed()) {
-        return PERMISSION_DENIED;
+    if (mHalInterface == 0) {
+        return NO_INIT;
     }
-
     AutoMutex lock(mLock);
     sp<Model> model = getModel(handle);
     if (model == 0) {
@@ -685,12 +717,11 @@
     if (model->mState != Model::STATE_ACTIVE) {
         return INVALID_OPERATION;
     }
-    mHwDevice->stop_recognition(mHwDevice, handle);
+    mHalInterface->stopRecognition(handle);
     model->mState = Model::STATE_IDLE;
     return NO_ERROR;
 }
 
-
 void SoundTriggerHwService::Module::onCallbackEvent(const sp<CallbackEvent>& event)
 {
     ALOGV("onCallbackEvent type %d", event->mType);
@@ -700,8 +731,8 @@
     if (eventMemory == 0 || eventMemory->pointer() == NULL) {
         return;
     }
-    if (mClient == 0) {
-        ALOGI("%s mClient == 0", __func__);
+    if (mModuleClients.isEmpty()) {
+        ALOGI("%s no clients", __func__);
         return;
     }
 
@@ -724,7 +755,7 @@
 
             recognitionEvent->capture_session = model->mCaptureSession;
             model->mState = Model::STATE_IDLE;
-            client = mClient;
+            client = model->mModuleClient->client();
         }
         if (client != 0) {
             client->onRecognitionEvent(eventMemory);
@@ -741,20 +772,24 @@
                 ALOGW("%s model == 0", __func__);
                 return;
             }
-            client = mClient;
+            client = model->mModuleClient->client();
         }
         if (client != 0) {
             client->onSoundModelEvent(eventMemory);
         }
     } break;
     case CallbackEvent::TYPE_SERVICE_STATE: {
-        sp<ISoundTriggerClient> client;
+        Vector< sp<ISoundTriggerClient> > clients;
         {
             AutoMutex lock(mLock);
-            client = mClient;
+            for (size_t i = 0; i < mModuleClients.size(); i++) {
+                if (mModuleClients[i] != 0) {
+                    clients.add(mModuleClients[i]->client());
+                }
+            }
         }
-        if (client != 0) {
-            client->onServiceStateChange(eventMemory);
+        for (size_t i = 0; i < clients.size(); i++) {
+            clients[i]->onServiceStateChange(eventMemory);
         }
     } break;
     default:
@@ -773,12 +808,6 @@
     return model;
 }
 
-void SoundTriggerHwService::Module::binderDied(
-    const wp<IBinder> &who __unused) {
-    ALOGW("client binder died for module %d", mDescriptor.handle);
-    detach();
-}
-
 // Called with mServiceLock held
 void SoundTriggerHwService::Module::setCaptureState_l(bool active)
 {
@@ -808,18 +837,13 @@
         }
 
         const bool supports_stop_all =
-            (mHwDevice->common.version >= SOUND_TRIGGER_DEVICE_API_VERSION_1_1 &&
-             mHwDevice->stop_all_recognitions);
-
-        if (supports_stop_all) {
-            mHwDevice->stop_all_recognitions(mHwDevice);
-        }
+                (mHalInterface != 0) && (mHalInterface->stopAllRecognitions() == 0);
 
         for (size_t i = 0; i < mModels.size(); i++) {
             sp<Model> model = mModels.valueAt(i);
             if (model->mState == Model::STATE_ACTIVE) {
-                if (!supports_stop_all) {
-                    mHwDevice->stop_recognition(mHwDevice, model->mHandle);
+                if (mHalInterface != 0 && !supports_stop_all) {
+                    mHalInterface->stopRecognition(model->mHandle);
                 }
                 // keep model in ACTIVE state so that event is processed by onCallbackEvent()
                 if (model->mType == SOUND_MODEL_TYPE_KEYPHRASE) {
@@ -867,8 +891,10 @@
     }
 
     for (size_t i = 0; i < events.size(); i++) {
-        service->sendCallbackEvent_l(new CallbackEvent(CallbackEvent::TYPE_RECOGNITION, events[i],
-                                                     this));
+        sp<CallbackEvent> callbackEvent = new CallbackEvent(CallbackEvent::TYPE_RECOGNITION,
+                                                            events[i]);
+        callbackEvent->setModule(this);
+        service->sendCallbackEvent_l(callbackEvent);
     }
 
 exit:
@@ -878,17 +904,170 @@
 
 SoundTriggerHwService::Model::Model(sound_model_handle_t handle, audio_session_t session,
                                     audio_io_handle_t ioHandle, audio_devices_t device,
-                                    sound_trigger_sound_model_type_t type) :
+                                    sound_trigger_sound_model_type_t type,
+                                    sp<ModuleClient>& moduleClient) :
     mHandle(handle), mState(STATE_IDLE), mCaptureSession(session),
-    mCaptureIOHandle(ioHandle), mCaptureDevice(device), mType(type)
+    mCaptureIOHandle(ioHandle), mCaptureDevice(device), mType(type),
+    mModuleClient(moduleClient)
 {
-
 }
 
-status_t SoundTriggerHwService::Module::dump(int fd __unused,
-                                             const Vector<String16>& args __unused) {
+#undef LOG_TAG
+#define LOG_TAG "SoundTriggerHwService::ModuleClient"
+
+SoundTriggerHwService::ModuleClient::ModuleClient(const sp<Module>& module,
+                                                  const sp<ISoundTriggerClient>& client)
+ : mModule(module), mClient(client)
+{
+}
+
+void SoundTriggerHwService::ModuleClient::onFirstRef()
+{
+    IInterface::asBinder(mClient)->linkToDeath(this);
+}
+
+SoundTriggerHwService::ModuleClient::~ModuleClient()
+{
+}
+
+status_t SoundTriggerHwService::ModuleClient::dump(int fd __unused,
+                                                   const Vector<String16>& args __unused) {
     String8 result;
     return NO_ERROR;
 }
 
+void SoundTriggerHwService::ModuleClient::detach() {
+    ALOGV("detach()");
+    if (!captureHotwordAllowed()) {
+        return;
+    }
+
+    {
+        AutoMutex lock(mLock);
+        if (mClient != 0) {
+            IInterface::asBinder(mClient)->unlinkToDeath(this);
+            mClient.clear();
+        }
+    }
+
+    sp<Module> module = mModule.promote();
+    if (module == 0) {
+        return;
+    }
+    module->detach(this);
+}
+
+status_t SoundTriggerHwService::ModuleClient::loadSoundModel(const sp<IMemory>& modelMemory,
+                                sound_model_handle_t *handle)
+{
+    ALOGV("loadSoundModel() handle");
+    if (!captureHotwordAllowed()) {
+        return PERMISSION_DENIED;
+    }
+
+    sp<Module> module = mModule.promote();
+    if (module == 0) {
+        return NO_INIT;
+    }
+    return module->loadSoundModel(modelMemory, this, handle);
+}
+
+status_t SoundTriggerHwService::ModuleClient::unloadSoundModel(sound_model_handle_t handle)
+{
+    ALOGV("unloadSoundModel() model handle %d", handle);
+    if (!captureHotwordAllowed()) {
+        return PERMISSION_DENIED;
+    }
+
+    sp<Module> module = mModule.promote();
+    if (module == 0) {
+        return NO_INIT;
+    }
+    return module->unloadSoundModel(handle);
+}
+
+status_t SoundTriggerHwService::ModuleClient::startRecognition(sound_model_handle_t handle,
+                                 const sp<IMemory>& dataMemory)
+{
+    ALOGV("startRecognition() model handle %d", handle);
+    if (!captureHotwordAllowed()) {
+        return PERMISSION_DENIED;
+    }
+
+    sp<Module> module = mModule.promote();
+    if (module == 0) {
+        return NO_INIT;
+    }
+    return module->startRecognition(handle, dataMemory);
+}
+
+status_t SoundTriggerHwService::ModuleClient::stopRecognition(sound_model_handle_t handle)
+{
+    ALOGV("stopRecognition() model handle %d", handle);
+    if (!captureHotwordAllowed()) {
+        return PERMISSION_DENIED;
+    }
+
+    sp<Module> module = mModule.promote();
+    if (module == 0) {
+        return NO_INIT;
+    }
+    return module->stopRecognition(handle);
+}
+
+void SoundTriggerHwService::ModuleClient::setCaptureState_l(bool active)
+{
+    ALOGV("ModuleClient::setCaptureState_l %d", active);
+    sp<SoundTriggerHwService> service;
+    sound_trigger_service_state_t state;
+
+    sp<Module> module = mModule.promote();
+    if (module == 0) {
+        return;
+    }
+    {
+        AutoMutex lock(mLock);
+        state = (active && !module->isConcurrentCaptureAllowed()) ?
+                                        SOUND_TRIGGER_STATE_DISABLED : SOUND_TRIGGER_STATE_ENABLED;
+
+        service = module->service().promote();
+        if (service == 0) {
+            return;
+        }
+    }
+    service->sendServiceStateEvent_l(state, this);
+}
+
+void SoundTriggerHwService::ModuleClient::onCallbackEvent(const sp<CallbackEvent>& event)
+{
+    ALOGV("ModuleClient onCallbackEvent type %d", event->mType);
+
+    sp<IMemory> eventMemory = event->mMemory;
+
+    if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+        return;
+    }
+
+    switch (event->mType) {
+    case CallbackEvent::TYPE_SERVICE_STATE: {
+        sp<ISoundTriggerClient> client;
+        {
+            AutoMutex lock(mLock);
+            client = mClient;
+        }
+        if (client != 0) {
+            client->onServiceStateChange(eventMemory);
+        }
+    } break;
+    default:
+        LOG_ALWAYS_FATAL("onCallbackEvent unknown event type %d", event->mType);
+    }
+}
+
+void SoundTriggerHwService::ModuleClient::binderDied(
+    const wp<IBinder> &who __unused) {
+    ALOGW("client binder died for client %p", this);
+    detach();
+}
+
 }; // namespace android
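
The net effect of the Module/ModuleClient split on event delivery, in reduced form
(illustrative sketch, not part of this patch): a CallbackEvent now carries weak references
to at most one of the two, and onCallbackEvent() promotes whichever was set before
dispatching, so events for an already-destroyed target are simply dropped.

    #include "SoundTriggerHwService.h"

    namespace android {

    // Reduced sketch of the dispatch performed in SoundTriggerHwService::onCallbackEvent().
    static void dispatchSketch(const sp<SoundTriggerHwService::CallbackEvent>& event) {
        sp<SoundTriggerHwService::Module> module = event->mModule.promote();
        if (module != 0) {
            module->onCallbackEvent(event);        // recognition and sound model events
            return;
        }
        sp<SoundTriggerHwService::ModuleClient> client = event->mModuleClient.promote();
        if (client != 0) {
            client->onCallbackEvent(event);        // per-client service state events
        }
    }

    } // namespace android
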
diff --git a/services/soundtrigger/SoundTriggerHwService.h b/services/soundtrigger/SoundTriggerHwService.h
index 13a577a..60ebb35 100644
--- a/services/soundtrigger/SoundTriggerHwService.h
+++ b/services/soundtrigger/SoundTriggerHwService.h
@@ -26,7 +26,7 @@
 #include <soundtrigger/ISoundTrigger.h>
 #include <soundtrigger/ISoundTriggerClient.h>
 #include <system/sound_trigger.h>
-#include <hardware/sound_trigger.h>
+#include "SoundTriggerHalInterface.h"
 
 namespace android {
 
@@ -39,6 +39,7 @@
     friend class BinderService<SoundTriggerHwService>;
 public:
     class Module;
+    class ModuleClient;
 
     static char const* getServiceName() { return "media.sound_trigger_hw"; }
 
@@ -69,7 +70,8 @@
         };
 
         Model(sound_model_handle_t handle, audio_session_t session, audio_io_handle_t ioHandle,
-              audio_devices_t device, sound_trigger_sound_model_type_t type);
+              audio_devices_t device, sound_trigger_sound_model_type_t type,
+              sp<ModuleClient>& moduleClient);
         ~Model() {}
 
         sound_model_handle_t    mHandle;
@@ -79,6 +81,7 @@
         audio_devices_t         mCaptureDevice;
         sound_trigger_sound_model_type_t mType;
         struct sound_trigger_recognition_config mConfig;
+        sp<ModuleClient>        mModuleClient;
     };
 
     class CallbackEvent : public RefBase {
@@ -88,27 +91,76 @@
             TYPE_SOUNDMODEL,
             TYPE_SERVICE_STATE,
         } event_type;
-        CallbackEvent(event_type type, sp<IMemory> memory, wp<Module> module);
+        CallbackEvent(event_type type, sp<IMemory> memory);
 
         virtual             ~CallbackEvent();
 
+        void setModule(wp<Module> module) { mModule = module; }
+        void setModuleClient(wp<ModuleClient> moduleClient) { mModuleClient = moduleClient; }
+
         event_type mType;
         sp<IMemory> mMemory;
         wp<Module> mModule;
+        wp<ModuleClient> mModuleClient;
     };
 
-    class Module : public virtual RefBase,
-                   public BnSoundTrigger,
-                   public IBinder::DeathRecipient     {
+    class Module : public RefBase {
     public:
 
        Module(const sp<SoundTriggerHwService>& service,
-              sound_trigger_hw_device* hwDevice,
-              sound_trigger_module_descriptor descriptor,
-              const sp<ISoundTriggerClient>& client);
+              const sp<SoundTriggerHalInterface>& halInterface,
+              sound_trigger_module_descriptor descriptor);
 
        virtual ~Module();
 
+       virtual status_t loadSoundModel(const sp<IMemory>& modelMemory,
+                                       sp<ModuleClient> moduleClient,
+                                       sound_model_handle_t *handle);
+
+       virtual status_t unloadSoundModel(sound_model_handle_t handle);
+
+       virtual status_t startRecognition(sound_model_handle_t handle,
+                                         const sp<IMemory>& dataMemory);
+       virtual status_t stopRecognition(sound_model_handle_t handle);
+
+       sp<SoundTriggerHalInterface> halInterface() const { return mHalInterface; }
+       struct sound_trigger_module_descriptor descriptor() { return mDescriptor; }
+       wp<SoundTriggerHwService> service() const { return mService; }
+       bool isConcurrentCaptureAllowed() const { return mDescriptor.properties.concurrent_capture; }
+
+       sp<Model> getModel(sound_model_handle_t handle);
+
+       void setCaptureState_l(bool active);
+
+       sp<ModuleClient> addClient(const sp<ISoundTriggerClient>& client);
+
+       void detach(const sp<ModuleClient>& moduleClient);
+
+       void onCallbackEvent(const sp<CallbackEvent>& event);
+
+    private:
+
+        status_t unloadSoundModel_l(sound_model_handle_t handle);
+
+        Mutex                                  mLock;
+        wp<SoundTriggerHwService>              mService;
+        sp<SoundTriggerHalInterface>           mHalInterface;
+        struct sound_trigger_module_descriptor mDescriptor;
+        Vector< sp<ModuleClient> >             mModuleClients;
+        DefaultKeyedVector< sound_model_handle_t, sp<Model> >     mModels;
+        sound_trigger_service_state_t          mServiceState;
+    }; // class Module
+
+    class ModuleClient : public virtual RefBase,
+                         public BnSoundTrigger,
+                         public IBinder::DeathRecipient {
+    public:
+
+       ModuleClient(const sp<Module>& module,
+              const sp<ISoundTriggerClient>& client);
+
+       virtual ~ModuleClient();
+
        virtual void detach();
 
        virtual status_t loadSoundModel(const sp<IMemory>& modelMemory,
@@ -122,36 +174,23 @@
 
        virtual status_t dump(int fd, const Vector<String16>& args);
 
-
-       sound_trigger_hw_device *hwDevice() const { return mHwDevice; }
-       struct sound_trigger_module_descriptor descriptor() { return mDescriptor; }
-       void setClient(const sp<ISoundTriggerClient>& client) { mClient = client; }
-       void clearClient() { mClient.clear(); }
-       sp<ISoundTriggerClient> client() const { return mClient; }
-       wp<SoundTriggerHwService> service() const { return mService; }
-
-       void onCallbackEvent(const sp<CallbackEvent>& event);
-
-       sp<Model> getModel(sound_model_handle_t handle);
-
-       void setCaptureState_l(bool active);
+       virtual void onFirstRef();
 
        // IBinder::DeathRecipient implementation
        virtual void        binderDied(const wp<IBinder> &who);
 
+       void onCallbackEvent(const sp<CallbackEvent>& event);
+
+       void setCaptureState_l(bool active);
+
+       sp<ISoundTriggerClient> client() const { return mClient; }
+
     private:
 
-       status_t unloadSoundModel_l(sound_model_handle_t handle);
-
-
-        Mutex                                  mLock;
-        wp<SoundTriggerHwService>              mService;
-        struct sound_trigger_hw_device*        mHwDevice;
-        struct sound_trigger_module_descriptor mDescriptor;
-        sp<ISoundTriggerClient>                mClient;
-        DefaultKeyedVector< sound_model_handle_t, sp<Model> >     mModels;
-        sound_trigger_service_state_t          mServiceState;
-    }; // class Module
+        mutable Mutex               mLock;
+        wp<Module>                  mModule;
+        sp<ISoundTriggerClient>     mClient;
+    }; // class ModuleClient
 
     class CallbackThread : public Thread {
     public:
@@ -176,8 +215,6 @@
         Vector< sp<CallbackEvent> > mEventQueue;
     };
 
-           void detachModule(const sp<Module>& module);
-
     static void recognitionCallback(struct sound_trigger_recognition_event *event, void *cookie);
            sp<IMemory> prepareRecognitionEvent_l(struct sound_trigger_recognition_event *event);
            void sendRecognitionEvent(struct sound_trigger_recognition_event *event, Module *module);
@@ -188,6 +225,8 @@
 
            sp<IMemory> prepareServiceStateEvent_l(sound_trigger_service_state_t state);
            void sendServiceStateEvent_l(sound_trigger_service_state_t state, Module *module);
+           void sendServiceStateEvent_l(sound_trigger_service_state_t state,
+                                        ModuleClient *moduleClient);
 
            void sendCallbackEvent_l(const sp<CallbackEvent>& event);
            void onCallbackEvent(const sp<CallbackEvent>& event);