Merge "Rename symbols to be more consistent"
diff --git a/camera/Camera.cpp b/camera/Camera.cpp
index bf9904c..c6c35ef 100644
--- a/camera/Camera.cpp
+++ b/camera/Camera.cpp
@@ -240,6 +240,14 @@
     c->releaseRecordingFrameHandle(handle);
 }
 
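+// release a batch of recording frame handles back to the remote camera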
+void Camera::releaseRecordingFrameHandleBatch(
+        const std::vector<native_handle_t*> handles) {
+    ALOGV("releaseRecordingFrameHandleBatch");
+    sp <::android::hardware::ICamera> c = mCamera;
+    if (c == 0) return;
+    c->releaseRecordingFrameHandleBatch(handles);
+}
+
 // get preview state
 bool Camera::previewEnabled()
 {
@@ -418,6 +426,37 @@
     }
 }
 
+void Camera::recordingFrameHandleCallbackTimestampBatch(
+        const std::vector<nsecs_t>& timestamps,
+        const std::vector<native_handle_t*>& handles)
+{
+    // If a recording proxy listener is registered, forward the frames and return.
+    // The other listener (mListener) is ignored because the receiver needs to
+    // call releaseRecordingFrameHandleBatch.
+    sp<ICameraRecordingProxyListener> proxylistener;
+    {
+        Mutex::Autolock _l(mLock);
+        proxylistener = mRecordingProxyListener;
+    }
+    if (proxylistener != NULL) {
+        proxylistener->recordingFrameHandleCallbackTimestampBatch(timestamps, handles);
+        return;
+    }
+
+    sp<CameraListener> listener;
+    {
+        Mutex::Autolock _l(mLock);
+        listener = mListener;
+    }
+
+    if (listener != NULL) {
+        listener->postRecordingFrameHandleTimestampBatch(timestamps, handles);
+    } else {
+        ALOGW("No listener was set. Drop a batch of recording frames.");
+        releaseRecordingFrameHandleBatch(handles);
+    }
+}
+
 sp<ICameraRecordingProxy> Camera::getRecordingProxy() {
     ALOGV("getProxy");
     return new RecordingProxy(this);
@@ -448,6 +487,12 @@
     mCamera->releaseRecordingFrameHandle(handle);
 }
 
+void Camera::RecordingProxy::releaseRecordingFrameHandleBatch(
+        const std::vector<native_handle_t*>& handles) {
+    ALOGV("RecordingProxy::releaseRecordingFrameHandleBatch");
+    mCamera->releaseRecordingFrameHandleBatch(handles);
+}
+
 Camera::RecordingProxy::RecordingProxy(const sp<Camera>& camera)
 {
     mCamera = camera;
diff --git a/camera/CameraMetadata.cpp b/camera/CameraMetadata.cpp
index 373b94e..e143e05 100644
--- a/camera/CameraMetadata.cpp
+++ b/camera/CameraMetadata.cpp
@@ -170,7 +170,7 @@
 }
 
 status_t CameraMetadata::checkType(uint32_t tag, uint8_t expectedType) {
-    int tagType = get_camera_metadata_tag_type(tag);
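+    // The get_local_* helpers resolve vendor tags against this buffer's
+    // vendor provider instead of the process-global vendor tag ops.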
+    int tagType = get_local_camera_metadata_tag_type(tag, mBuffer);
     if ( CC_UNLIKELY(tagType == -1)) {
         ALOGE("Update metadata entry: Unknown tag %d", tag);
         return INVALID_OPERATION;
@@ -178,7 +178,7 @@
     if ( CC_UNLIKELY(tagType != expectedType) ) {
         ALOGE("Mismatched tag type when updating entry %s (%d) of type %s; "
                 "got type %s data instead ",
-                get_camera_metadata_tag_name(tag), tag,
+                get_local_camera_metadata_tag_name(tag, mBuffer), tag,
                 camera_metadata_type_names[tagType],
                 camera_metadata_type_names[expectedType]);
         return INVALID_OPERATION;
@@ -297,7 +297,7 @@
         ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
         return INVALID_OPERATION;
     }
-    int type = get_camera_metadata_tag_type(tag);
+    int type = get_local_camera_metadata_tag_type(tag, mBuffer);
     if (type == -1) {
         ALOGE("%s: Tag %d not found", __FUNCTION__, tag);
         return BAD_VALUE;
@@ -332,8 +332,9 @@
 
     if (res != OK) {
         ALOGE("%s: Unable to update metadata entry %s.%s (%x): %s (%d)",
-                __FUNCTION__, get_camera_metadata_section_name(tag),
-                get_camera_metadata_tag_name(tag), tag, strerror(-res), res);
+                __FUNCTION__, get_local_camera_metadata_section_name(tag, mBuffer),
+                get_local_camera_metadata_tag_name(tag, mBuffer), tag,
+                strerror(-res), res);
     }
 
     IF_ALOGV() {
@@ -392,16 +393,18 @@
     } else if (res != OK) {
         ALOGE("%s: Error looking for entry %s.%s (%x): %s %d",
                 __FUNCTION__,
-                get_camera_metadata_section_name(tag),
-                get_camera_metadata_tag_name(tag), tag, strerror(-res), res);
+                get_local_camera_metadata_section_name(tag, mBuffer),
+                get_local_camera_metadata_tag_name(tag, mBuffer),
+                tag, strerror(-res), res);
         return res;
     }
     res = delete_camera_metadata_entry(mBuffer, entry.index);
     if (res != OK) {
         ALOGE("%s: Error deleting entry %s.%s (%x): %s %d",
                 __FUNCTION__,
-                get_camera_metadata_section_name(tag),
-                get_camera_metadata_tag_name(tag), tag, strerror(-res), res);
+                get_local_camera_metadata_section_name(tag, mBuffer),
+                get_local_camera_metadata_tag_name(tag, mBuffer),
+                tag, strerror(-res), res);
     }
     return res;
 }
diff --git a/camera/ICamera.cpp b/camera/ICamera.cpp
index 2bf956d..f0945c7 100644
--- a/camera/ICamera.cpp
+++ b/camera/ICamera.cpp
@@ -55,6 +55,7 @@
     SET_VIDEO_BUFFER_MODE,
     SET_VIDEO_BUFFER_TARGET,
     RELEASE_RECORDING_FRAME_HANDLE,
+    RELEASE_RECORDING_FRAME_HANDLE_BATCH,
 };
 
 class BpCamera: public BpInterface<ICamera>
@@ -172,6 +173,24 @@
         native_handle_delete(handle);
     }
 
+    void releaseRecordingFrameHandleBatch(const std::vector<native_handle_t*>& handles) {
+        ALOGV("releaseRecordingFrameHandleBatch");
+        Parcel data, reply;
+        data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
+        uint32_t n = handles.size();
+        data.writeUint32(n);
+        for (auto& handle : handles) {
+            data.writeNativeHandle(handle);
+        }
+        remote()->transact(RELEASE_RECORDING_FRAME_HANDLE_BATCH, data, &reply);
+
+        // Close the native handles because the remote camera received dup copies.
+        for (auto& handle : handles) {
+            native_handle_close(handle);
+            native_handle_delete(handle);
+        }
+    }
+
     status_t setVideoBufferMode(int32_t videoBufferMode)
     {
         ALOGV("setVideoBufferMode: %d", videoBufferMode);
@@ -378,6 +397,19 @@
             releaseRecordingFrameHandle(data.readNativeHandle());
             return NO_ERROR;
         } break;
+        case RELEASE_RECORDING_FRAME_HANDLE_BATCH: {
+            ALOGV("RELEASE_RECORDING_FRAME_HANDLE_BATCH");
+            CHECK_INTERFACE(ICamera, data, reply);
+            // releaseRecordingFrameHandleBatch is responsible for closing the native handles.
+            uint32_t n = data.readUint32();
+            std::vector<native_handle_t*> handles;
+            handles.reserve(n);
+            for (uint32_t i = 0; i < n; i++) {
+                handles.push_back(data.readNativeHandle());
+            }
+            releaseRecordingFrameHandleBatch(handles);
+            return NO_ERROR;
+        } break;
         case SET_VIDEO_BUFFER_MODE: {
             ALOGV("SET_VIDEO_BUFFER_MODE");
             CHECK_INTERFACE(ICamera, data, reply);
diff --git a/camera/ICameraClient.cpp b/camera/ICameraClient.cpp
index 1b6fac4..7e6297c 100644
--- a/camera/ICameraClient.cpp
+++ b/camera/ICameraClient.cpp
@@ -32,6 +32,7 @@
     DATA_CALLBACK,
     DATA_CALLBACK_TIMESTAMP,
     RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP,
+    RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH,
 };
 
 class BpCameraClient: public BpInterface<ICameraClient>
@@ -91,6 +92,29 @@
         remote()->transact(RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP, data, &reply,
                 IBinder::FLAG_ONEWAY);
     }
+
+    void recordingFrameHandleCallbackTimestampBatch(
+            const std::vector<nsecs_t>& timestamps,
+            const std::vector<native_handle_t*>& handles) {
+        ALOGV("recordingFrameHandleCallbackTimestampBatch");
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraClient::getInterfaceDescriptor());
+        uint32_t n = timestamps.size();
+        if (n != handles.size()) {
+            ALOGE("%s: size of timestamps(%zu) and handles(%zu) mismatch!",
+                    __FUNCTION__, timestamps.size(), handles.size());
+            return;
+        }
+        data.writeUint32(n);
+        for (auto ts : timestamps) {
+            data.writeInt64(ts);
+        }
+        for (auto& handle : handles) {
+            data.writeNativeHandle(handle);
+        }
+        remote()->transact(RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH, data, &reply,
+                IBinder::FLAG_ONEWAY);
+    }
 };
 
 IMPLEMENT_META_INTERFACE(CameraClient, "android.hardware.ICameraClient");
@@ -154,6 +178,41 @@
             recordingFrameHandleCallbackTimestamp(timestamp, handle);
             return NO_ERROR;
         } break;
+        case RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH: {
+            ALOGV("RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH");
+            CHECK_INTERFACE(ICameraClient, data, reply);
+            uint32_t n = 0;
+            status_t res = data.readUint32(&n);
+            if (res != OK) {
+                ALOGE("%s: Failed to read batch size: %s (%d)", __FUNCTION__, strerror(-res), res);
+                return BAD_VALUE;
+            }
+            std::vector<nsecs_t> timestamps;
+            std::vector<native_handle_t*> handles;
+            timestamps.resize(n);
+            handles.reserve(n);
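+            // Read n timestamps followed by n handles, matching the write order
+            // in BpCameraClient::recordingFrameHandleCallbackTimestampBatch.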
+            for (uint32_t i = 0; i < n; i++) {
+                res = data.readInt64(&timestamps[i]);
+                if (res != OK) {
+                    ALOGE("%s: Failed to read timestamp[%d]: %s (%d)",
+                            __FUNCTION__, i, strerror(-res), res);
+                    return BAD_VALUE;
+                }
+            }
+            for (uint32_t i = 0; i < n; i++) {
+                native_handle_t* handle = data.readNativeHandle();
+                if (handle == nullptr) {
+                    ALOGE("%s: Received a null native handle at handles[%d]",
+                            __FUNCTION__, i);
+                    return BAD_VALUE;
+                }
+                handles.push_back(handle);
+            }
+
+            // The native handles will be freed in BpCamera::releaseRecordingFrameHandleBatch.
+            recordingFrameHandleCallbackTimestampBatch(timestamps, handles);
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/camera/ICameraRecordingProxy.cpp b/camera/ICameraRecordingProxy.cpp
index c9f8b5c..bd6af75 100644
--- a/camera/ICameraRecordingProxy.cpp
+++ b/camera/ICameraRecordingProxy.cpp
@@ -32,6 +32,7 @@
     STOP_RECORDING,
     RELEASE_RECORDING_FRAME,
     RELEASE_RECORDING_FRAME_HANDLE,
+    RELEASE_RECORDING_FRAME_HANDLE_BATCH,
 };
 
 
@@ -82,6 +83,24 @@
         native_handle_close(handle);
         native_handle_delete(handle);
     }
+
+    void releaseRecordingFrameHandleBatch(const std::vector<native_handle_t*>& handles) {
+        ALOGV("releaseRecordingFrameHandleBatch");
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraRecordingProxy::getInterfaceDescriptor());
+        uint32_t n = handles.size();
+        data.writeUint32(n);
+        for (auto& handle : handles) {
+            data.writeNativeHandle(handle);
+        }
+        remote()->transact(RELEASE_RECORDING_FRAME_HANDLE_BATCH, data, &reply);
+
+        // Close the native handles because the remote camera received dup copies.
+        for (auto& handle : handles) {
+            native_handle_close(handle);
+            native_handle_delete(handle);
+        }
+    }
 };
 
 IMPLEMENT_META_INTERFACE(CameraRecordingProxy, "android.hardware.ICameraRecordingProxy");
@@ -121,6 +140,31 @@
             releaseRecordingFrameHandle(data.readNativeHandle());
             return NO_ERROR;
         } break;
+        case RELEASE_RECORDING_FRAME_HANDLE_BATCH: {
+            ALOGV("RELEASE_RECORDING_FRAME_HANDLE_BATCH");
+            CHECK_INTERFACE(ICameraRecordingProxy, data, reply);
+            uint32_t n = 0;
+            status_t res = data.readUint32(&n);
+            if (res != OK) {
+                ALOGE("%s: Failed to read batch size: %s (%d)", __FUNCTION__, strerror(-res), res);
+                return BAD_VALUE;
+            }
+            std::vector<native_handle_t*> handles;
+            handles.reserve(n);
+            for (uint32_t i = 0; i < n; i++) {
+                native_handle_t* handle = data.readNativeHandle();
+                if (handle == nullptr) {
+                    ALOGE("%s: Received a null native handle at handles[%d]",
+                            __FUNCTION__, i);
+                    return BAD_VALUE;
+                }
+                handles.push_back(handle);
+            }
+
+            // releaseRecordingFrameHandleBatch is responsible for closing the native handles.
+            releaseRecordingFrameHandleBatch(handles);
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/camera/ICameraRecordingProxyListener.cpp b/camera/ICameraRecordingProxyListener.cpp
index 8529d3e..c954241 100644
--- a/camera/ICameraRecordingProxyListener.cpp
+++ b/camera/ICameraRecordingProxyListener.cpp
@@ -28,6 +28,7 @@
 enum {
     DATA_CALLBACK_TIMESTAMP = IBinder::FIRST_CALL_TRANSACTION,
     RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP,
+    RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH
 };
 
 class BpCameraRecordingProxyListener: public BpInterface<ICameraRecordingProxyListener>
@@ -62,6 +63,36 @@
         native_handle_close(handle);
         native_handle_delete(handle);
     }
+
+    void recordingFrameHandleCallbackTimestampBatch(
+            const std::vector<nsecs_t>& timestamps,
+            const std::vector<native_handle_t*>& handles) {
+        ALOGV("recordingFrameHandleCallbackTimestampBatch");
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraRecordingProxyListener::getInterfaceDescriptor());
+
+        uint32_t n = timestamps.size();
+        if (n != handles.size()) {
+            ALOGE("%s: size of timestamps(%zu) and handles(%zu) mismatch!",
+                    __FUNCTION__, timestamps.size(), handles.size());
+            return;
+        }
+        data.writeUint32(n);
+        for (auto ts : timestamps) {
+            data.writeInt64(ts);
+        }
+        for (auto& handle : handles) {
+            data.writeNativeHandle(handle);
+        }
+        remote()->transact(RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH, data, &reply,
+                IBinder::FLAG_ONEWAY);
+
+        // The native handles are dupped in ICameraClient, so we need to free them here.
+        for (auto& handle : handles) {
+            native_handle_close(handle);
+            native_handle_delete(handle);
+        }
+    }
 };
 
 IMPLEMENT_META_INTERFACE(CameraRecordingProxyListener, "android.hardware.ICameraRecordingProxyListener");
@@ -101,6 +132,41 @@
             recordingFrameHandleCallbackTimestamp(timestamp, handle);
             return NO_ERROR;
         } break;
+        case RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH: {
+            ALOGV("RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH");
+            CHECK_INTERFACE(ICameraRecordingProxyListener, data, reply);
+            uint32_t n = 0;
+            status_t res = data.readUint32(&n);
+            if (res != OK) {
+                ALOGE("%s: Failed to read batch size: %s (%d)", __FUNCTION__, strerror(-res), res);
+                return BAD_VALUE;
+            }
+            std::vector<nsecs_t> timestamps;
+            std::vector<native_handle_t*> handles;
+            timestamps.resize(n);
+            handles.reserve(n);
+            for (uint32_t i = 0; i < n; i++) {
+                res = data.readInt64(&timestamps[i]);
+                if (res != OK) {
+                    ALOGE("%s: Failed to read timestamp[%d]: %s (%d)",
+                            __FUNCTION__, i, strerror(-res), res);
+                    return BAD_VALUE;
+                }
+            }
+            for (uint32_t i = 0; i < n; i++) {
+                native_handle_t* handle = data.readNativeHandle();
+                if (handle == nullptr) {
+                    ALOGE("%s: Received a null native handle at handles[%d]",
+                            __FUNCTION__, i);
+                    return BAD_VALUE;
+                }
+                handles.push_back(handle);
+            }
+            // The native handles will be freed in
+            // BpCameraRecordingProxy::releaseRecordingFrameHandleBatch.
+            recordingFrameHandleCallbackTimestampBatch(timestamps, handles);
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/camera/VendorTagDescriptor.cpp b/camera/VendorTagDescriptor.cpp
index ed09b60..4c28789 100644
--- a/camera/VendorTagDescriptor.cpp
+++ b/camera/VendorTagDescriptor.cpp
@@ -29,6 +29,7 @@
 
 #include <stdio.h>
 #include <string.h>
+#include <inttypes.h>
 
 namespace android {
 
@@ -40,11 +41,22 @@
 static const char* vendor_tag_descriptor_get_tag_name(const vendor_tag_ops_t* v, uint32_t tag);
 static int vendor_tag_descriptor_get_tag_type(const vendor_tag_ops_t* v, uint32_t tag);
 
+static int vendor_tag_descriptor_cache_get_tag_count(metadata_vendor_id_t id);
+static void vendor_tag_descriptor_cache_get_all_tags(uint32_t* tagArray,
+        metadata_vendor_id_t id);
+static const char* vendor_tag_descriptor_cache_get_section_name(uint32_t tag,
+        metadata_vendor_id_t id);
+static const char* vendor_tag_descriptor_cache_get_tag_name(uint32_t tag,
+        metadata_vendor_id_t id);
+static int vendor_tag_descriptor_cache_get_tag_type(uint32_t tag,
+        metadata_vendor_id_t id);
+
 } /* extern "C" */
 
 
 static Mutex sLock;
 static sp<VendorTagDescriptor> sGlobalVendorTagDescriptor;
+static sp<VendorTagDescriptorCache> sGlobalVendorTagDescriptorCache;
 
 namespace hardware {
 namespace camera2 {
@@ -333,11 +345,166 @@
 
 }
 
+status_t VendorTagDescriptorCache::writeToParcel(Parcel* parcel) const {
+    status_t res = OK;
+    if (parcel == NULL) {
+        ALOGE("%s: parcel argument was NULL.", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    if ((res = parcel->writeInt32(mVendorMap.size())) != OK) {
+        return res;
+    }
+
+    for (const auto &iter : mVendorMap) {
+        if ((res = parcel->writeUint64(iter.first)) != OK) break;
+        if ((res = parcel->writeParcelable(*iter.second)) != OK) break;
+    }
+
+    return res;
+}
+
+
+status_t VendorTagDescriptorCache::readFromParcel(const Parcel* parcel) {
+    status_t res = OK;
+    if (parcel == NULL) {
+        ALOGE("%s: parcel argument was NULL.", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    int32_t vendorCount = 0;
+    if ((res = parcel->readInt32(&vendorCount)) != OK) {
+        ALOGE("%s: could not read vendor count from parcel", __FUNCTION__);
+        return res;
+    }
+
+    if (vendorCount < 0) {
+        ALOGE("%s: vendor count %d from parcel is invalid.", __FUNCTION__, vendorCount);
+        return BAD_VALUE;
+    }
+
+    metadata_vendor_id_t id;
+    for (int32_t i = 0; i < vendorCount; i++) {
+        if ((res = parcel->readUint64(&id)) != OK) {
+            ALOGE("%s: could not read vendor id from parcel for index %d",
+                  __FUNCTION__, i);
+            break;
+        }
+        sp<android::VendorTagDescriptor> desc = new android::VendorTagDescriptor();
+        if ((res = parcel->readParcelable(desc.get())) != OK) {
+            ALOGE("%s: could not read vendor tag descriptor from parcel for index %d rc = %d",
+                  __FUNCTION__, i, res);
+            break;
+        }
+
+        if ((res = addVendorDescriptor(id, desc)) != OK) {
+            ALOGE("%s: failed to add vendor tag descriptor for index: %d ",
+                  __FUNCTION__, i);
+            break;
+        }
+    }
+
+    return res;
+}
+
+int VendorTagDescriptorCache::getTagCount(metadata_vendor_id_t id) const {
+    int ret = 0;
+    auto desc = mVendorMap.find(id);
+    if (desc != mVendorMap.end()) {
+        ret = desc->second->getTagCount();
+    } else {
+        ALOGE("%s: Vendor descriptor id is missing!", __func__);
+    }
+
+    return ret;
+}
+
+void VendorTagDescriptorCache::getTagArray(uint32_t* tagArray,
+        metadata_vendor_id_t id) const {
+    auto desc = mVendorMap.find(id);
+    if (desc != mVendorMap.end()) {
+        desc->second->getTagArray(tagArray);
+    } else {
+        ALOGE("%s: Vendor descriptor id is missing!", __func__);
+    }
+}
+
+const char* VendorTagDescriptorCache::getSectionName(uint32_t tag,
+        metadata_vendor_id_t id) const {
+    const char *ret = nullptr;
+    auto desc = mVendorMap.find(id);
+    if (desc != mVendorMap.end()) {
+        ret = desc->second->getSectionName(tag);
+    } else {
+        ALOGE("%s: Vendor descriptor id is missing!", __func__);
+    }
+
+    return ret;
+}
+
+const char* VendorTagDescriptorCache::getTagName(uint32_t tag,
+        metadata_vendor_id_t id) const {
+    const char *ret = nullptr;
+    auto desc = mVendorMap.find(id);
+    if (desc != mVendorMap.end()) {
+        ret = desc->second->getTagName(tag);
+    } else {
+        ALOGE("%s: Vendor descriptor id is missing!", __func__);
+    }
+
+    return ret;
+}
+
+int VendorTagDescriptorCache::getTagType(uint32_t tag,
+        metadata_vendor_id_t id) const {
+    int ret = 0;
+    auto desc = mVendorMap.find(id);
+    if (desc != mVendorMap.end()) {
+        ret = desc->second->getTagType(tag);
+    } else {
+        ALOGE("%s: Vendor descriptor id is missing!", __func__);
+    }
+
+    return ret;
+}
+
+void VendorTagDescriptorCache::dump(int fd, int verbosity,
+        int indentation) const {
+    for (const auto &desc : mVendorMap) {
+        dprintf(fd, "%*sDumping vendor tag descriptors for vendor with"
+                " id %" PRIu64 " \n", indentation, "", desc.first);
+        desc.second->dump(fd, verbosity, indentation);
+    }
+}
+
+int32_t VendorTagDescriptorCache::addVendorDescriptor(metadata_vendor_id_t id,
+        sp<android::VendorTagDescriptor> desc) {
+    auto entry = mVendorMap.find(id);
+    if (entry != mVendorMap.end()) {
+        ALOGE("%s: Vendor descriptor with same id already present!", __func__);
+        return BAD_VALUE;
+    }
+
+    mVendorMap.emplace(id, desc);
+    return NO_ERROR;
+}
+
+int32_t VendorTagDescriptorCache::getVendorTagDescriptor(
+        metadata_vendor_id_t id, sp<android::VendorTagDescriptor> *desc /*out*/) {
+    auto entry = mVendorMap.find(id);
+    if (entry == mVendorMap.end()) {
+        return NAME_NOT_FOUND;
+    }
+
+    *desc = entry->second;
+
+    return NO_ERROR;
+}
+
 } // namespace params
 } // namespace camera2
 } // namespace hardware
 
-
 status_t VendorTagDescriptor::createDescriptorFromOps(const vendor_tag_ops_t* vOps,
             /*out*/
             sp<VendorTagDescriptor>& descriptor) {
@@ -451,6 +618,39 @@
     return sGlobalVendorTagDescriptor;
 }
 
+status_t VendorTagDescriptorCache::setAsGlobalVendorTagCache(
+        const sp<VendorTagDescriptorCache>& cache) {
+    status_t res = OK;
+    Mutex::Autolock al(sLock);
+    sGlobalVendorTagDescriptorCache = cache;
+
+    struct vendor_tag_cache_ops* opsPtr = NULL;
+    if (cache != NULL) {
+        opsPtr = &(cache->mVendorCacheOps);
+        opsPtr->get_tag_count = vendor_tag_descriptor_cache_get_tag_count;
+        opsPtr->get_all_tags = vendor_tag_descriptor_cache_get_all_tags;
+        opsPtr->get_section_name = vendor_tag_descriptor_cache_get_section_name;
+        opsPtr->get_tag_name = vendor_tag_descriptor_cache_get_tag_name;
+        opsPtr->get_tag_type = vendor_tag_descriptor_cache_get_tag_type;
+    }
+    if ((res = set_camera_metadata_vendor_cache_ops(opsPtr)) != OK) {
+        ALOGE("%s: Could not set vendor tag cache, received error %s (%d).",
+                __FUNCTION__, strerror(-res), res);
+    }
+    return res;
+}
+
+void VendorTagDescriptorCache::clearGlobalVendorTagCache() {
+    Mutex::Autolock al(sLock);
+    set_camera_metadata_vendor_cache_ops(NULL);
+    sGlobalVendorTagDescriptorCache.clear();
+}
+
+sp<VendorTagDescriptorCache> VendorTagDescriptorCache::getGlobalVendorTagCache() {
+    Mutex::Autolock al(sLock);
+    return sGlobalVendorTagDescriptorCache;
+}
+
 extern "C" {
 
 int vendor_tag_descriptor_get_tag_count(const vendor_tag_ops_t* /*v*/) {
@@ -498,5 +698,53 @@
     return sGlobalVendorTagDescriptor->getTagType(tag);
 }
 
+int vendor_tag_descriptor_cache_get_tag_count(metadata_vendor_id_t id) {
+    Mutex::Autolock al(sLock);
+    if (sGlobalVendorTagDescriptorCache == NULL) {
+        ALOGE("%s: Vendor tag descriptor cache not initialized.", __FUNCTION__);
+        return VENDOR_TAG_COUNT_ERR;
+    }
+    return sGlobalVendorTagDescriptorCache->getTagCount(id);
+}
+
+void vendor_tag_descriptor_cache_get_all_tags(uint32_t* tagArray,
+        metadata_vendor_id_t id) {
+    Mutex::Autolock al(sLock);
+    if (sGlobalVendorTagDescriptorCache == NULL) {
+        ALOGE("%s: Vendor tag descriptor cache not initialized.", __FUNCTION__);
+        return;
+    }
+    sGlobalVendorTagDescriptorCache->getTagArray(tagArray, id);
+}
+
+const char* vendor_tag_descriptor_cache_get_section_name(uint32_t tag,
+        metadata_vendor_id_t id) {
+    Mutex::Autolock al(sLock);
+    if (sGlobalVendorTagDescriptorCache == NULL) {
+        ALOGE("%s: Vendor tag descriptor cache not initialized.", __FUNCTION__);
+        return VENDOR_SECTION_NAME_ERR;
+    }
+    return sGlobalVendorTagDescriptorCache->getSectionName(tag, id);
+}
+
+const char* vendor_tag_descriptor_cache_get_tag_name(uint32_t tag,
+        metadata_vendor_id_t id) {
+    Mutex::Autolock al(sLock);
+    if (sGlobalVendorTagDescriptorCache == NULL) {
+        ALOGE("%s: Vendor tag descriptor cache not initialized.", __FUNCTION__);
+        return VENDOR_TAG_NAME_ERR;
+    }
+    return sGlobalVendorTagDescriptorCache->getTagName(tag, id);
+}
+
+int vendor_tag_descriptor_cache_get_tag_type(uint32_t tag,
+        metadata_vendor_id_t id) {
+    Mutex::Autolock al(sLock);
+    if (sGlobalVendorTagDescriptorCache == NULL) {
+        ALOGE("%s: Vendor tag descriptor cache not initialized.", __FUNCTION__);
+        return VENDOR_TAG_TYPE_ERR;
+    }
+    return sGlobalVendorTagDescriptorCache->getTagType(tag, id);
+}
+
 } /* extern "C" */
 } /* namespace android */
diff --git a/camera/aidl/android/hardware/ICameraService.aidl b/camera/aidl/android/hardware/ICameraService.aidl
index 99c479c..9c0f28b 100644
--- a/camera/aidl/android/hardware/ICameraService.aidl
+++ b/camera/aidl/android/hardware/ICameraService.aidl
@@ -21,6 +21,7 @@
 import android.hardware.camera2.ICameraDeviceUser;
 import android.hardware.camera2.ICameraDeviceCallbacks;
 import android.hardware.camera2.params.VendorTagDescriptor;
+import android.hardware.camera2.params.VendorTagDescriptorCache;
 import android.hardware.camera2.impl.CameraMetadataNative;
 import android.hardware.ICameraServiceListener;
 import android.hardware.CameraInfo;
@@ -130,6 +131,14 @@
     VendorTagDescriptor getCameraVendorTagDescriptor();
 
     /**
+     * Retrieve the vendor tag descriptor cache which can have multiple vendor
+     * providers.
+     * Intended to be used by the native code of CameraMetadataNative to correctly
+     * interpret camera metadata with vendor tags.
+     */
+    VendorTagDescriptorCache getCameraVendorTagCache();
+
+    /**
      * Read the legacy camera1 parameters into a String
      */
     String getLegacyParameters(int cameraId);
diff --git a/camera/aidl/android/hardware/camera2/params/VendorTagDescriptorCache.aidl b/camera/aidl/android/hardware/camera2/params/VendorTagDescriptorCache.aidl
new file mode 100644
index 0000000..d212207
--- /dev/null
+++ b/camera/aidl/android/hardware/camera2/params/VendorTagDescriptorCache.aidl
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.camera2.params;
+
+/** @hide */
+parcelable VendorTagDescriptorCache cpp_header "camera/VendorTagDescriptor.h";
diff --git a/camera/include/camera/Camera.h b/camera/include/camera/Camera.h
index 57dc228..430aa1c 100644
--- a/camera/include/camera/Camera.h
+++ b/camera/include/camera/Camera.h
@@ -44,6 +44,9 @@
                           camera_frame_metadata_t *metadata) = 0;
     virtual void postDataTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr) = 0;
     virtual void postRecordingFrameHandleTimestamp(nsecs_t timestamp, native_handle_t* handle) = 0;
+    virtual void postRecordingFrameHandleTimestampBatch(
+            const std::vector<nsecs_t>& timestamps,
+            const std::vector<native_handle_t*>& handles) = 0;
 };
 
 class Camera;
@@ -118,6 +121,10 @@
             // release a recording frame handle
             void        releaseRecordingFrameHandle(native_handle_t *handle);
 
+            // release a batch of recording frame handles
+            void        releaseRecordingFrameHandleBatch(
+                    const std::vector<native_handle_t*> handles);
+
             // autoFocus - status returned from callback
             status_t    autoFocus();
 
@@ -166,6 +173,10 @@
                                      camera_frame_metadata_t *metadata);
     virtual void        dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr);
     virtual void        recordingFrameHandleCallbackTimestamp(nsecs_t timestamp, native_handle_t* handle);
+    virtual void        recordingFrameHandleCallbackTimestampBatch(
+                                const std::vector<nsecs_t>& timestamps,
+                                const std::vector<native_handle_t*>& handles);
+
 
     class RecordingProxy : public BnCameraRecordingProxy
     {
@@ -177,6 +188,8 @@
         virtual void stopRecording();
         virtual void releaseRecordingFrame(const sp<IMemory>& mem);
         virtual void releaseRecordingFrameHandle(native_handle_t* handle);
+        virtual void releaseRecordingFrameHandleBatch(
+                const std::vector<native_handle_t*>& handles);
 
     private:
         sp<Camera>         mCamera;
diff --git a/camera/include/camera/ICameraRecordingProxy.h b/camera/include/camera/ICameraRecordingProxy.h
index cb6824a..02af2f3 100644
--- a/camera/include/camera/ICameraRecordingProxy.h
+++ b/camera/include/camera/ICameraRecordingProxy.h
@@ -17,6 +17,7 @@
 #ifndef ANDROID_HARDWARE_ICAMERA_RECORDING_PROXY_H
 #define ANDROID_HARDWARE_ICAMERA_RECORDING_PROXY_H
 
+#include <vector>
 #include <binder/IInterface.h>
 #include <cutils/native_handle.h>
 #include <utils/RefBase.h>
@@ -85,6 +86,8 @@
     virtual void            stopRecording() = 0;
     virtual void            releaseRecordingFrame(const sp<IMemory>& mem) = 0;
     virtual void            releaseRecordingFrameHandle(native_handle_t *handle) = 0;
+    virtual void            releaseRecordingFrameHandleBatch(
+                                    const std::vector<native_handle_t*>& handles) = 0;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/camera/include/camera/ICameraRecordingProxyListener.h b/camera/include/camera/ICameraRecordingProxyListener.h
index 1fee5b9..da03c56 100644
--- a/camera/include/camera/ICameraRecordingProxyListener.h
+++ b/camera/include/camera/ICameraRecordingProxyListener.h
@@ -17,6 +17,7 @@
 #ifndef ANDROID_HARDWARE_ICAMERA_RECORDING_PROXY_LISTENER_H
 #define ANDROID_HARDWARE_ICAMERA_RECORDING_PROXY_LISTENER_H
 
+#include <vector>
 #include <binder/IInterface.h>
 #include <cutils/native_handle.h>
 #include <stdint.h>
@@ -38,6 +39,10 @@
 
     virtual void recordingFrameHandleCallbackTimestamp(nsecs_t timestamp,
                                                        native_handle_t* handle) = 0;
+
+    virtual void recordingFrameHandleCallbackTimestampBatch(
+            const std::vector<nsecs_t>& timestamps,
+            const std::vector<native_handle_t*>& handles) = 0;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/camera/include/camera/VendorTagDescriptor.h b/camera/include/camera/VendorTagDescriptor.h
index adfc8c7..904fba2 100644
--- a/camera/include/camera/VendorTagDescriptor.h
+++ b/camera/include/camera/VendorTagDescriptor.h
@@ -22,7 +22,7 @@
 #include <utils/String8.h>
 #include <utils/RefBase.h>
 #include <system/camera_vendor_tags.h>
-
+#include <unordered_map>
 #include <stdint.h>
 
 namespace android {
@@ -166,8 +166,84 @@
 
 };
 
-} /* namespace android */
+namespace hardware {
+namespace camera2 {
+namespace params {
 
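+/**
+ * A parcelable collection of VendorTagDescriptor objects, keyed by the
+ * vendor provider id (metadata_vendor_id_t) that defines each set of tags.
+ */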
+class VendorTagDescriptorCache : public Parcelable {
+  public:
+
+    VendorTagDescriptorCache() {};
+
+    int32_t addVendorDescriptor(metadata_vendor_id_t id,
+            sp<android::VendorTagDescriptor> desc);
+
+    int32_t getVendorTagDescriptor(
+            metadata_vendor_id_t id,
+            sp<android::VendorTagDescriptor> *desc /*out*/);
+
+    // Parcelable interface
+    status_t writeToParcel(Parcel* parcel) const override;
+    status_t readFromParcel(const Parcel* parcel) override;
+
+    // Returns the number of vendor tags defined.
+    int getTagCount(metadata_vendor_id_t id) const;
+
+    // Returns an array containing the id's of vendor tags defined.
+    void getTagArray(uint32_t* tagArray, metadata_vendor_id_t id) const;
+
+    // Returns the section name string for a given vendor tag id.
+    const char* getSectionName(uint32_t tag, metadata_vendor_id_t id) const;
+
+    // Returns the tag name string for a given vendor tag id.
+    const char* getTagName(uint32_t tag, metadata_vendor_id_t id) const;
+
+    // Returns the tag type for a given vendor tag id.
+    int getTagType(uint32_t tag, metadata_vendor_id_t id) const;
+
+    /**
+     * Dump the currently configured vendor tags to a file descriptor.
+     */
+    void dump(int fd, int verbosity, int indentation) const;
+
+  protected:
+    std::unordered_map<metadata_vendor_id_t, sp<android::VendorTagDescriptor>> mVendorMap;
+    struct vendor_tag_cache_ops mVendorCacheOps;
+};
+
+} /* namespace params */
+} /* namespace camera2 */
+} /* namespace hardware */
+
+class VendorTagDescriptorCache :
+        public ::android::hardware::camera2::params::VendorTagDescriptorCache,
+        public LightRefBase<VendorTagDescriptorCache> {
+  public:
+
+    /**
+     * Sets the global vendor tag descriptor cache to use for this process.
+     * Camera metadata operations that access vendor tags will use the
+     * vendor tag definitions set this way.
+     *
+     * Returns OK on success, or a negative error code.
+     */
+    static status_t setAsGlobalVendorTagCache(
+            const sp<VendorTagDescriptorCache>& cache);
+
+    /**
+     * Returns the global vendor tag cache used by this process.
+     * This will contain NULL if no vendor tags are defined.
+     */
+    static sp<VendorTagDescriptorCache> getGlobalVendorTagCache();
+
+    /**
+     * Clears the global vendor tag cache used by this process.
+     */
+    static void clearGlobalVendorTagCache();
+
+};
+
+} /* namespace android */
 
 #define VENDOR_TAG_DESCRIPTOR_H
 #endif /* VENDOR_TAG_DESCRIPTOR_H */
diff --git a/camera/include/camera/android/hardware/ICamera.h b/camera/include/camera/android/hardware/ICamera.h
index 315669e..80823d6 100644
--- a/camera/include/camera/android/hardware/ICamera.h
+++ b/camera/include/camera/android/hardware/ICamera.h
@@ -101,6 +101,11 @@
     // ICameraClient::recordingFrameHandleCallbackTimestamp.
     virtual void            releaseRecordingFrameHandle(native_handle_t *handle) = 0;
 
+    // Release a batch of recording frame handles that were received via
+    // ICameraClient::recordingFrameHandleCallbackTimestampBatch.
+    virtual void            releaseRecordingFrameHandleBatch(
+            const std::vector<native_handle_t*>& handles) = 0;
+
     // auto focus
     virtual status_t        autoFocus() = 0;
 
diff --git a/camera/include/camera/android/hardware/ICameraClient.h b/camera/include/camera/android/hardware/ICameraClient.h
index f6ee311..8e46d17 100644
--- a/camera/include/camera/android/hardware/ICameraClient.h
+++ b/camera/include/camera/android/hardware/ICameraClient.h
@@ -41,6 +41,13 @@
     // ICamera::releaseRecordingFrameHandle to release the frame handle.
     virtual void            recordingFrameHandleCallbackTimestamp(nsecs_t timestamp,
                                          native_handle_t* handle) = 0;
+
+    // Invoked to send a batch of recording frame handles with timestamps. Call
+    // ICamera::releaseRecordingFrameHandleBatch to release the frame handles.
+    // The sizes of timestamps and handles must match.
+    virtual void            recordingFrameHandleCallbackTimestampBatch(
+                                        const std::vector<nsecs_t>& timestamps,
+                                        const std::vector<native_handle_t*>& handles) = 0;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/camera/ndk/Android.bp b/camera/ndk/Android.bp
index c5fc646..ade0d72 100644
--- a/camera/ndk/Android.bp
+++ b/camera/ndk/Android.bp
@@ -17,7 +17,7 @@
 // frameworks/av/include.
 
 ndk_library {
-    name: "libcamera2ndk.ndk",
+    name: "libcamera2ndk",
     symbol_file: "libcamera2ndk.map.txt",
     first_version: "24",
     unversioned_until: "current",
diff --git a/camera/ndk/impl/ACameraManager.cpp b/camera/ndk/impl/ACameraManager.cpp
index ba2100c..3f64bcc 100644
--- a/camera/ndk/impl/ACameraManager.cpp
+++ b/camera/ndk/impl/ACameraManager.cpp
@@ -131,10 +131,36 @@
         binder::Status ret = mCameraService->getCameraVendorTagDescriptor(/*out*/desc.get());
 
         if (ret.isOk()) {
-            status_t err = VendorTagDescriptor::setAsGlobalVendorTagDescriptor(desc);
-            if (err != OK) {
-                ALOGE("%s: Failed to set vendor tag descriptors, received error %s (%d)",
-                        __FUNCTION__, strerror(-err), err);
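+            // Use the single descriptor when it defines tags; otherwise fall
+            // back to the per-provider vendor tag cache.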
+            if (0 < desc->getTagCount()) {
+                status_t err = VendorTagDescriptor::setAsGlobalVendorTagDescriptor(desc);
+                if (err != OK) {
+                    ALOGE("%s: Failed to set vendor tag descriptors, received error %s (%d)",
+                            __FUNCTION__, strerror(-err), err);
+                }
+            } else {
+                sp<VendorTagDescriptorCache> cache =
+                        new VendorTagDescriptorCache();
+                binder::Status res =
+                        mCameraService->getCameraVendorTagCache(
+                                /*out*/cache.get());
+                if (res.serviceSpecificErrorCode() ==
+                        hardware::ICameraService::ERROR_DISCONNECTED) {
+                    // No camera module available, not an error on devices with no cameras
+                    VendorTagDescriptorCache::clearGlobalVendorTagCache();
+                } else if (res.isOk()) {
+                    status_t err =
+                            VendorTagDescriptorCache::setAsGlobalVendorTagCache(
+                                    cache);
+                    if (err != OK) {
+                        ALOGE("%s: Failed to set vendor tag cache,"
+                                "received error %s (%d)", __FUNCTION__,
+                                strerror(-err), err);
+                    }
+                } else {
+                    VendorTagDescriptorCache::clearGlobalVendorTagCache();
+                    ALOGE("%s: Failed to setup vendor tag cache: %s",
+                            __FUNCTION__, res.toString8().string());
+                }
             }
         } else if (ret.serviceSpecificErrorCode() ==
                 hardware::ICameraService::ERROR_DEPRECATED_HAL) {
diff --git a/camera/tests/CameraZSLTests.cpp b/camera/tests/CameraZSLTests.cpp
index 6c91fdc..ecca354 100644
--- a/camera/tests/CameraZSLTests.cpp
+++ b/camera/tests/CameraZSLTests.cpp
@@ -51,6 +51,9 @@
             const sp<IMemory>&) override {};
     void recordingFrameHandleCallbackTimestamp(nsecs_t,
             native_handle_t*) override {};
+    void recordingFrameHandleCallbackTimestampBatch(
+            const std::vector<nsecs_t>&,
+            const std::vector<native_handle_t*>&) override {};
 
     status_t waitForPreviewStart();
     status_t waitForEvent(Mutex &mutex, Condition &condition, bool &flag);
diff --git a/drm/libmediadrm/CasImpl.cpp b/drm/libmediadrm/CasImpl.cpp
index de15244..fcedd6b 100644
--- a/drm/libmediadrm/CasImpl.cpp
+++ b/drm/libmediadrm/CasImpl.cpp
@@ -49,13 +49,24 @@
     return result;
 }
 
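+// Hold the CasPlugin in a ref-counted wrapper so each binder call can pin the
+// plugin with a local sp<> while release() simply drops the holder, avoiding
+// races on a raw pointer.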
+struct CasImpl::PluginHolder : public RefBase {
+public:
+    explicit PluginHolder(CasPlugin *plugin) : mPlugin(plugin) {}
+    ~PluginHolder() { if (mPlugin != NULL) delete mPlugin; }
+    CasPlugin* get() { return mPlugin; }
+
+private:
+    CasPlugin *mPlugin;
+    DISALLOW_EVIL_CONSTRUCTORS(PluginHolder);
+};
+
 CasImpl::CasImpl(const sp<ICasListener> &listener)
-    : mPlugin(NULL), mListener(listener) {
-    ALOGV("CTOR: mPlugin=%p", mPlugin);
+    : mPluginHolder(NULL), mListener(listener) {
+    ALOGV("CTOR");
 }
 
 CasImpl::~CasImpl() {
-    ALOGV("DTOR: mPlugin=%p", mPlugin);
+    ALOGV("DTOR");
     release();
 }
 
@@ -76,7 +87,7 @@
 
 void CasImpl::init(const sp<SharedLibrary>& library, CasPlugin *plugin) {
     mLibrary = library;
-    mPlugin = plugin;
+    mPluginHolder = new PluginHolder(plugin);
 }
 
 void CasImpl::onEvent(
@@ -95,13 +106,20 @@
 
 Status CasImpl::setPrivateData(const CasData& pvtData) {
     ALOGV("setPrivateData");
-    return getBinderStatus(mPlugin->setPrivateData(pvtData));
+    sp<PluginHolder> holder = mPluginHolder;
+    if (holder == NULL) {
+        return getBinderStatus(INVALID_OPERATION);
+    }
+    return getBinderStatus(holder->get()->setPrivateData(pvtData));
 }
 
 Status CasImpl::openSession(int32_t program_number, CasSessionId* sessionId) {
     ALOGV("openSession: program_number=%d", program_number);
-
-    status_t err = mPlugin->openSession(program_number, sessionId);
+    sp<PluginHolder> holder = mPluginHolder;
+    if (holder == NULL) {
+        return getBinderStatus(INVALID_OPERATION);
+    }
+    status_t err = holder->get()->openSession(program_number, sessionId);
 
     ALOGV("openSession: session opened for program_number=%d, sessionId=%s",
             program_number, sessionIdToString(*sessionId).string());
@@ -115,8 +133,11 @@
         CasSessionId* sessionId) {
     ALOGV("openSession: program_number=%d, elementary_PID=%d",
             program_number, elementary_PID);
-
-    status_t err = mPlugin->openSession(
+    sp<PluginHolder> holder = mPluginHolder;
+    if (holder == NULL) {
+        return getBinderStatus(INVALID_OPERATION);
+    }
+    status_t err = holder->get()->openSession(
             program_number, elementary_PID, sessionId);
 
     ALOGV("openSession: session opened for "
@@ -131,69 +152,92 @@
         const CasSessionId &sessionId, const CasData& pvtData) {
     ALOGV("setSessionPrivateData: sessionId=%s",
             sessionIdToString(sessionId).string());
-
-    return getBinderStatus(mPlugin->setSessionPrivateData(sessionId, pvtData));
+    sp<PluginHolder> holder = mPluginHolder;
+    if (holder == NULL) {
+        return getBinderStatus(INVALID_OPERATION);
+    }
+    return getBinderStatus(holder->get()->setSessionPrivateData(sessionId, pvtData));
 }
 
 Status CasImpl::closeSession(const CasSessionId &sessionId) {
     ALOGV("closeSession: sessionId=%s",
             sessionIdToString(sessionId).string());
-
-    return getBinderStatus(mPlugin->closeSession(sessionId));
+    sp<PluginHolder> holder = mPluginHolder;
+    if (holder == NULL) {
+        return getBinderStatus(INVALID_OPERATION);
+    }
+    return getBinderStatus(holder->get()->closeSession(sessionId));
 }
 
 Status CasImpl::processEcm(const CasSessionId &sessionId, const ParcelableCasData& ecm) {
     ALOGV("processEcm: sessionId=%s",
             sessionIdToString(sessionId).string());
+    sp<PluginHolder> holder = mPluginHolder;
+    if (holder == NULL) {
+        return getBinderStatus(INVALID_OPERATION);
+    }
 
-    return getBinderStatus(mPlugin->processEcm(sessionId, ecm));
+    return getBinderStatus(holder->get()->processEcm(sessionId, ecm));
 }
 
 Status CasImpl::processEmm(const ParcelableCasData& emm) {
     ALOGV("processEmm");
+    sp<PluginHolder> holder = mPluginHolder;
+    if (holder == NULL) {
+        return getBinderStatus(INVALID_OPERATION);
+    }
 
-    return getBinderStatus(mPlugin->processEmm(emm));
+    return getBinderStatus(holder->get()->processEmm(emm));
 }
 
 Status CasImpl::sendEvent(
         int32_t event, int32_t arg, const ::std::unique_ptr<CasData> &eventData) {
     ALOGV("sendEvent");
+    sp<PluginHolder> holder = mPluginHolder;
+    if (holder == NULL) {
+        return getBinderStatus(INVALID_OPERATION);
+    }
 
     status_t err;
     if (eventData == nullptr) {
-        err = mPlugin->sendEvent(event, arg, CasData());
+        err = holder->get()->sendEvent(event, arg, CasData());
     } else {
-        err = mPlugin->sendEvent(event, arg, *eventData);
+        err = holder->get()->sendEvent(event, arg, *eventData);
     }
     return getBinderStatus(err);
 }
 
 Status CasImpl::provision(const String16& provisionString) {
     ALOGV("provision: provisionString=%s", String8(provisionString).string());
+    sp<PluginHolder> holder = mPluginHolder;
+    if (holder == NULL) {
+        return getBinderStatus(INVALID_OPERATION);
+    }
 
-    return getBinderStatus(mPlugin->provision(String8(provisionString)));
+    return getBinderStatus(holder->get()->provision(String8(provisionString)));
 }
 
 Status CasImpl::refreshEntitlements(
         int32_t refreshType, const ::std::unique_ptr<CasData> &refreshData) {
     ALOGV("refreshEntitlements");
+    sp<PluginHolder> holder = mPluginHolder;
+    if (holder == NULL) {
+        return getBinderStatus(INVALID_OPERATION);
+    }
 
     status_t err;
     if (refreshData == nullptr) {
-        err = mPlugin->refreshEntitlements(refreshType, CasData());
+        err = holder->get()->refreshEntitlements(refreshType, CasData());
     } else {
-        err = mPlugin->refreshEntitlements(refreshType, *refreshData);
+        err = holder->get()->refreshEntitlements(refreshType, *refreshData);
     }
     return getBinderStatus(err);
 }
 
 Status CasImpl::release() {
-    ALOGV("release: mPlugin=%p", mPlugin);
-
-    if (mPlugin != NULL) {
-        delete mPlugin;
-        mPlugin = NULL;
-    }
+    ALOGV("release: plugin=%p",
+            mPluginHolder == NULL ? mPluginHolder->get() : NULL);
+    mPluginHolder.clear();
     return Status::ok();
 }
 
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
index 221b74b..f4c3577 100644
--- a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
+++ b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
@@ -84,7 +84,7 @@
 ///////////////////////////////////////////////////////////////////////////////
 ClearKeyCasPlugin::ClearKeyCasPlugin(
         uint64_t appData, CasPluginCallback callback)
-    : mAppData(appData), mCallback(callback) {
+    : mCallback(callback), mAppData(appData) {
     ALOGV("CTOR");
 }
 
@@ -93,7 +93,7 @@
     ClearKeySessionLibrary::get()->destroyPlugin(this);
 }
 
-status_t ClearKeyCasPlugin::setPrivateData(const CasData &data) {
+status_t ClearKeyCasPlugin::setPrivateData(const CasData &/*data*/) {
     ALOGV("setPrivateData");
 
     return OK;
@@ -142,7 +142,7 @@
 }
 
 status_t ClearKeyCasPlugin::setSessionPrivateData(
-        const CasSessionId &sessionId, const CasData &data) {
+        const CasSessionId &sessionId, const CasData & /*data*/) {
     ALOGV("setSessionPrivateData: sessionId=%s",
             sessionIdToString(sessionId).string());
     sp<ClearKeyCasSession> session =
@@ -167,7 +167,7 @@
     return session->updateECM(mKeyFetcher.get(), (void*)ecm.data(), ecm.size());
 }
 
-status_t ClearKeyCasPlugin::processEmm(const CasEmm& emm) {
+status_t ClearKeyCasPlugin::processEmm(const CasEmm& /*emm*/) {
     ALOGV("processEmm");
     Mutex::Autolock lock(mKeyFetcherLock);
 
@@ -212,8 +212,8 @@
 }
 
 status_t ClearKeyCasPlugin::refreshEntitlements(
-        int32_t refreshType, const CasData &refreshData) {
-    ALOGV("refreshEntitlements");
+        int32_t refreshType, const CasData &/*refreshData*/) {
+    ALOGV("refreshEntitlements: refreshType=%d", refreshType);
     Mutex::Autolock lock(mKeyFetcherLock);
 
     return OK;
@@ -344,7 +344,7 @@
                 AES_BLOCK_SIZE * 8, &mKeyInfo[keyIndex].contentKey);
         mKeyInfo[keyIndex].valid = (result == 0);
         if (!mKeyInfo[keyIndex].valid) {
-            ALOGE("updateECM: failed to set key %d, key_id=%d",
+            ALOGE("updateECM: failed to set key %zu, key_id=%d",
                     keyIndex, keys[keyIndex].key_id);
         }
     }
@@ -356,6 +356,10 @@
         bool secure, DescramblerPlugin::ScramblingControl scramblingControl,
         size_t numSubSamples, const DescramblerPlugin::SubSample *subSamples,
         const void *srcPtr, void *dstPtr, AString * /* errorDetailMsg */) {
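+    // Secure (protected) buffers cannot be descrambled by this software
+    // clear-key plugin.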
+    if (secure) {
+        return ERROR_DRM_CANNOT_HANDLE;
+    }
+
     AES_KEY contentKey;
 
     if (scramblingControl != DescramblerPlugin::kScrambling_Unscrambled) {
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyFetcher.cpp b/drm/mediacas/plugins/clearkey/ClearKeyFetcher.cpp
index 210bab3..cb69f91 100644
--- a/drm/mediacas/plugins/clearkey/ClearKeyFetcher.cpp
+++ b/drm/mediacas/plugins/clearkey/ClearKeyFetcher.cpp
@@ -18,6 +18,7 @@
 #define LOG_TAG "ClearKeyFetcher"
 
 #include <algorithm>
+#include <inttypes.h>
 #include <string>
 
 #include "ClearKeyFetcher.h"
@@ -70,7 +71,7 @@
         bool same_parity = (((container.descriptor(0).id() & 0x01) ^
                 (container.descriptor(1).id() & 0x01)) == 0);
         if (same_parity) {
-            ALOGW("asset_id=%llu: malformed Ecm, "
+            ALOGW("asset_id=%" PRIu64 ": malformed Ecm, "
                     "content keys have same parity, id0=%d, id1=%d",
                     container.descriptor(0).ecm().asset_id(),
                     container.descriptor(0).id(),
@@ -88,7 +89,7 @@
     // asset_id change. If it sends an EcmContainer with 2 Ecms with different
     // asset_ids (old and new) then it might be best to prefetch the Emm.
     if ((asset_.id() != 0) && (*asset_id != asset_.id())) {
-        ALOGW("Asset_id change from %llu to %llu", asset_.id(), *asset_id);
+        ALOGW("Asset_id change from %" PRIu64 " to %" PRIu64, asset_.id(), *asset_id);
         asset_.Clear();
     }
 
diff --git a/drm/mediacas/plugins/clearkey/ecm_generator.cpp b/drm/mediacas/plugins/clearkey/ecm_generator.cpp
index f1aa973..7d29659 100644
--- a/drm/mediacas/plugins/clearkey/ecm_generator.cpp
+++ b/drm/mediacas/plugins/clearkey/ecm_generator.cpp
@@ -80,7 +80,7 @@
     CHECK(default_fields);
 
     if (ecm->size() < kTotalEcmSize) {
-        ALOGE("Short ECM: expected_length=%zu, actual_length=%zu",
+        ALOGE("Short ECM: expected_length=%d, actual_length=%zu",
                 kTotalEcmSize, ecm->size());
         return BAD_VALUE;
     }
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
index d5a47ca..58421b9 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
@@ -63,22 +63,28 @@
             Vector<uint8_t>& keySetId);
 
     virtual status_t removeKeys(const Vector<uint8_t>& sessionId) {
-        UNUSED(sessionId);
+        if (sessionId.size() == 0) {
+            return android::BAD_VALUE;
+        }
+
         return android::ERROR_DRM_CANNOT_HANDLE;
     }
 
     virtual status_t restoreKeys(
             const Vector<uint8_t>& sessionId,
             const Vector<uint8_t>& keySetId) {
-        UNUSED(sessionId);
-        UNUSED(keySetId);
+        if (sessionId.size() == 0 || keySetId.size() == 0) {
+            return android::BAD_VALUE;
+        }
         return android::ERROR_DRM_CANNOT_HANDLE;
     }
 
     virtual status_t queryKeyStatus(
             const Vector<uint8_t>& sessionId,
             KeyedVector<String8, String8>& infoMap) const {
-        UNUSED(sessionId);
+        if (sessionId.size() == 0) {
+            return android::BAD_VALUE;
+        }
         UNUSED(infoMap);
         return android::ERROR_DRM_CANNOT_HANDLE;
     }
@@ -179,10 +185,10 @@
             const Vector<uint8_t>& input,
             const Vector<uint8_t>& iv,
             Vector<uint8_t>& output) {
-        UNUSED(sessionId);
-        UNUSED(keyId);
-        UNUSED(input);
-        UNUSED(iv);
+        if (sessionId.size() == 0 || keyId.size() == 0 ||
+                input.size() == 0 || iv.size() == 0) {
+            return android::BAD_VALUE;
+        }
         UNUSED(output);
         return android::ERROR_DRM_CANNOT_HANDLE;
     }
@@ -193,10 +199,10 @@
             const Vector<uint8_t>& input,
             const Vector<uint8_t>& iv,
             Vector<uint8_t>& output) {
-        UNUSED(sessionId);
-        UNUSED(keyId);
-        UNUSED(input);
-        UNUSED(iv);
+        if (sessionId.size() == 0 || keyId.size() == 0 ||
+                input.size() == 0 || iv.size() == 0) {
+            return android::BAD_VALUE;
+        }
         UNUSED(output);
         return android::ERROR_DRM_CANNOT_HANDLE;
     }
@@ -206,9 +212,10 @@
             const Vector<uint8_t>& keyId,
             const Vector<uint8_t>& message,
             Vector<uint8_t>& signature) {
-        UNUSED(sessionId);
-        UNUSED(keyId);
-        UNUSED(message);
+        if (sessionId.size() == 0 || keyId.size() == 0 ||
+                message.size() == 0) {
+            return android::BAD_VALUE;
+        }
         UNUSED(signature);
         return android::ERROR_DRM_CANNOT_HANDLE;
     }
@@ -218,10 +225,10 @@
             const Vector<uint8_t>& keyId,
             const Vector<uint8_t>& message,
             const Vector<uint8_t>& signature, bool& match) {
-        UNUSED(sessionId);
-        UNUSED(keyId);
-        UNUSED(message);
-        UNUSED(signature);
+        if (sessionId.size() == 0 || keyId.size() == 0 ||
+                message.size() == 0 || signature.size() == 0) {
+            return android::BAD_VALUE;
+        }
         UNUSED(match);
         return android::ERROR_DRM_CANNOT_HANDLE;
     }
@@ -232,10 +239,10 @@
             const Vector<uint8_t>& message,
             const Vector<uint8_t>& wrappedKey,
             Vector<uint8_t>& signature) {
-        UNUSED(sessionId);
-        UNUSED(algorithm);
-        UNUSED(message);
-        UNUSED(wrappedKey);
+        if (sessionId.size() == 0 || algorithm.size() == 0 ||
+                message.size() == 0 || wrappedKey.size() == 0) {
+            return android::BAD_VALUE;
+        }
         UNUSED(signature);
         return android::ERROR_DRM_CANNOT_HANDLE;
     }
diff --git a/include/media/CasImpl.h b/include/media/CasImpl.h
index 80c901e..3c07092 100644
--- a/include/media/CasImpl.h
+++ b/include/media/CasImpl.h
@@ -84,8 +84,9 @@
     virtual Status release() override;
 
 private:
+    struct PluginHolder;
     sp<SharedLibrary> mLibrary;
-    CasPlugin *mPlugin;
+    sp<PluginHolder> mPluginHolder;
     sp<ICasListener> mListener;
 
     DISALLOW_EVIL_CONSTRUCTORS(CasImpl);
diff --git a/include/media/IMediaAnalyticsService.h b/include/media/IMediaAnalyticsService.h
index 97915e4..a596d60 120000
--- a/include/media/IMediaAnalyticsService.h
+++ b/include/media/IMediaAnalyticsService.h
@@ -1 +1 @@
-../../media/libmedia/include/IMediaAnalyticsService.h
\ No newline at end of file
+../../media/libmediametrics/include/IMediaAnalyticsService.h
\ No newline at end of file
diff --git a/include/media/MediaAnalyticsItem.h b/include/media/MediaAnalyticsItem.h
index 71957a5..e8124e0 120000
--- a/include/media/MediaAnalyticsItem.h
+++ b/include/media/MediaAnalyticsItem.h
@@ -1 +1 @@
-../../media/libmedia/include/MediaAnalyticsItem.h
\ No newline at end of file
+../../media/libmediametrics/include/MediaAnalyticsItem.h
\ No newline at end of file
diff --git a/include/ndk/NdkMediaDrm.h b/include/ndk/NdkMediaDrm.h
index 9dd6283..cba4380 100644
--- a/include/ndk/NdkMediaDrm.h
+++ b/include/ndk/NdkMediaDrm.h
@@ -159,8 +159,7 @@
  * to obtain or release keys used to decrypt encrypted content.
  * AMediaDrm_getKeyRequest is used to obtain an opaque key request byte array that
  * is delivered to the license server.  The opaque key request byte array is
- * returned in KeyRequest.data.  The recommended URL to deliver the key request to
- * is returned in KeyRequest.defaultUrl.
+ * returned in KeyRequest.data.
  *
  * After the app has received the key request response from the server,
  * it should deliver the response to the DRM engine plugin using the method
diff --git a/media/audioserver/Android.mk b/media/audioserver/Android.mk
index 63fa16b..afd1189 100644
--- a/media/audioserver/Android.mk
+++ b/media/audioserver/Android.mk
@@ -6,6 +6,7 @@
 	main_audioserver.cpp
 
 LOCAL_SHARED_LIBRARIES := \
+	libaaudioservice \
 	libaudioflinger \
 	libaudiopolicyservice \
 	libbinder \
@@ -18,6 +19,7 @@
 	libutils \
 	libhwbinder
 
+# TODO oboeservice is the old folder name for aaudioservice. It will be changed.
 LOCAL_C_INCLUDES := \
 	frameworks/av/services/audioflinger \
 	frameworks/av/services/audiopolicy \
@@ -26,8 +28,12 @@
 	frameworks/av/services/audiopolicy/engine/interface \
 	frameworks/av/services/audiopolicy/service \
 	frameworks/av/services/medialog \
+	frameworks/av/services/oboeservice \
 	frameworks/av/services/radio \
 	frameworks/av/services/soundtrigger \
+	frameworks/av/media/libaaudio/include \
+	frameworks/av/media/libaaudio/src \
+	frameworks/av/media/libaaudio/src/binding \
 	$(call include-path-for, audio-utils) \
 	external/sonic \
 
diff --git a/media/audioserver/main_audioserver.cpp b/media/audioserver/main_audioserver.cpp
index bcd0342..ee02d23 100644
--- a/media/audioserver/main_audioserver.cpp
+++ b/media/audioserver/main_audioserver.cpp
@@ -34,6 +34,7 @@
 // from LOCAL_C_INCLUDES
 #include "AudioFlinger.h"
 #include "AudioPolicyService.h"
+#include "AAudioService.h"
 #include "MediaLogService.h"
 #include "RadioService.h"
 #include "SoundTriggerHwService.h"
@@ -131,6 +132,7 @@
         ALOGI("ServiceManager: %p", sm.get());
         AudioFlinger::instantiate();
         AudioPolicyService::instantiate();
+        AAudioService::instantiate();
         RadioService::instantiate();
         SoundTriggerHwService::instantiate();
         ProcessState::self()->startThreadPool();
diff --git a/media/libaaudio/Android.bp b/media/libaaudio/Android.bp
index e41d62b..f539ba9 100644
--- a/media/libaaudio/Android.bp
+++ b/media/libaaudio/Android.bp
@@ -21,7 +21,7 @@
 }
 
 ndk_library {
-    name: "libaaudio.ndk",
+    name: "libaaudio",
     symbol_file: "libaaudio.map.txt",
     first_version: "26",
     unversioned_until: "current",
diff --git a/media/libaaudio/examples/input_monitor/Android.mk b/media/libaaudio/examples/input_monitor/Android.mk
new file mode 100644
index 0000000..b56328b
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/Android.mk
@@ -0,0 +1,6 @@
+# include $(call all-subdir-makefiles)
+
+# Just include static/ for now.
+LOCAL_PATH := $(call my-dir)
+#include $(LOCAL_PATH)/jni/Android.mk
+include $(LOCAL_PATH)/static/Android.mk
diff --git a/media/libaaudio/examples/input_monitor/README.md b/media/libaaudio/examples/input_monitor/README.md
new file mode 100644
index 0000000..3e54ef0
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/README.md
@@ -0,0 +1 @@
+Monitor input level and print value.
diff --git a/media/libaaudio/examples/input_monitor/jni/Android.mk b/media/libaaudio/examples/input_monitor/jni/Android.mk
new file mode 100644
index 0000000..51a5a85
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/jni/Android.mk
@@ -0,0 +1,35 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/liboboe/include
+
+LOCAL_SRC_FILES:= frameworks/av/media/liboboe/src/write_sine.cpp
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia libtinyalsa \
+        libbinder libcutils libutils
+LOCAL_STATIC_LIBRARIES := libsndfile
+LOCAL_MODULE := write_sine_ndk
+LOCAL_SHARED_LIBRARIES += liboboe_prebuilt
+include $(BUILD_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/liboboe/include
+
+LOCAL_SRC_FILES:= frameworks/av/media/liboboe/src/write_sine_threaded.cpp
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia libtinyalsa \
+        libbinder libcutils libutils
+LOCAL_STATIC_LIBRARIES := libsndfile
+LOCAL_MODULE := write_sine_threaded_ndk
+LOCAL_SHARED_LIBRARIES += liboboe_prebuilt
+include $(BUILD_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := liboboe_prebuilt
+LOCAL_SRC_FILES := liboboe.so
+LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
+include $(PREBUILT_SHARED_LIBRARY)
diff --git a/media/libaaudio/examples/input_monitor/jni/Application.mk b/media/libaaudio/examples/input_monitor/jni/Application.mk
new file mode 100644
index 0000000..e74475c
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/jni/Application.mk
@@ -0,0 +1,3 @@
+# TODO remove this when we support other architectures
+APP_ABI := arm64-v8a
+APP_CPPFLAGS += -std=c++11
diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
new file mode 100644
index 0000000..545496f
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Record input using AAudio and display the peak amplitudes.
+
+#include <new>
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
+
+#define SAMPLE_RATE        48000
+#define NUM_SECONDS        10
+#define NANOS_PER_MICROSECOND ((int64_t)1000)
+#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
+#define NANOS_PER_SECOND   (NANOS_PER_MILLISECOND * 1000)
+
+#define DECAY_FACTOR       0.999
+#define MIN_FRAMES_TO_READ 48  /* arbitrary, 1 msec at 48000 Hz */
+
+static const char *getSharingModeText(aaudio_sharing_mode_t mode) {
+    const char *modeText = "unknown";
+    switch (mode) {
+    case AAUDIO_SHARING_MODE_EXCLUSIVE:
+        modeText = "EXCLUSIVE";
+        break;
+    case AAUDIO_SHARING_MODE_SHARED:
+        modeText = "SHARED";
+        break;
+    default:
+        break;
+    }
+    return modeText;
+}
+
+int main(int argc, char **argv)
+{
+    (void)argc; // unused
+
+    aaudio_result_t result;
+
+    int actualSamplesPerFrame;
+    int actualSampleRate;
+    const aaudio_audio_format_t requestedDataFormat = AAUDIO_FORMAT_PCM_I16;
+    aaudio_audio_format_t actualDataFormat;
+
+    const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_SHARED;
+    aaudio_sharing_mode_t actualSharingMode;
+
+    AAudioStreamBuilder *aaudioBuilder = nullptr;
+    AAudioStream *aaudioStream = nullptr;
+    aaudio_stream_state_t state;
+    int32_t framesPerBurst = 0;
+    int32_t framesPerRead = 0;
+    int32_t framesToRecord = 0;
+    int32_t framesLeft = 0;
+    int32_t xRunCount = 0;
+    int16_t *data = nullptr;
+    float peakLevel = 0.0;
+    int loopCounter = 0;
+
+    // Make printf print immediately so that debug info is not stuck
+    // in a buffer if we hang or crash.
+    setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+    printf("%s - Monitor input level using AAudio\n", argv[0]);
+
+    // Use an AAudioStreamBuilder to contain requested parameters.
+    result = AAudio_createStreamBuilder(&aaudioBuilder);
+    if (result != AAUDIO_OK) {
+        goto finish;
+    }
+
+    // Request stream properties.
+    AAudioStreamBuilder_setDirection(aaudioBuilder, AAUDIO_DIRECTION_INPUT);
+    AAudioStreamBuilder_setFormat(aaudioBuilder, requestedDataFormat);
+    AAudioStreamBuilder_setSharingMode(aaudioBuilder, requestedSharingMode);
+
+    // Create an AAudioStream using the Builder.
+    result = AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream);
+    if (result != AAUDIO_OK) {
+        goto finish;
+    }
+
+    actualSamplesPerFrame = AAudioStream_getSamplesPerFrame(aaudioStream);
+    printf("SamplesPerFrame = %d\n", actualSamplesPerFrame);
+    actualSampleRate = AAudioStream_getSampleRate(aaudioStream);
+    printf("SamplesPerFrame = %d\n", actualSampleRate);
+
+    actualSharingMode = AAudioStream_getSharingMode(aaudioStream);
+    printf("SharingMode: requested = %s, actual = %s\n",
+            getSharingModeText(requestedSharingMode),
+            getSharingModeText(actualSharingMode));
+
+    // This is the number of frames that are written in one chunk by a DMA controller
+    // or a DSP.
+    framesPerBurst = AAudioStream_getFramesPerBurst(aaudioStream);
+    printf("DataFormat: framesPerBurst = %d\n",framesPerBurst);
+
+    // Some DMA might use very short bursts of 16 frames. We don't need to read such small
+    // buffers. But it helps to use a multiple of the burst size for predictable scheduling.
+    framesPerRead = framesPerBurst;
+    while (framesPerRead < MIN_FRAMES_TO_READ) {
+        framesPerRead *= 2;
+    }
+    printf("DataFormat: framesPerRead = %d\n",framesPerRead);
+
+    actualDataFormat = AAudioStream_getFormat(aaudioStream);
+    printf("DataFormat: requested = %d, actual = %d\n", requestedDataFormat, actualDataFormat);
+    // TODO handle other data formats
+    assert(actualDataFormat == AAUDIO_FORMAT_PCM_I16);
+
+    // Allocate a buffer for the audio data.
+    data = new(std::nothrow) int16_t[framesPerRead * actualSamplesPerFrame];
+    if (data == nullptr) {
+        fprintf(stderr, "ERROR - could not allocate data buffer\n");
+        result = AAUDIO_ERROR_NO_MEMORY;
+        goto finish;
+    }
+
+    // Start the stream.
+    printf("call AAudioStream_requestStart()\n");
+    result = AAudioStream_requestStart(aaudioStream);
+    if (result != AAUDIO_OK) {
+        fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d\n", result);
+        goto finish;
+    }
+
+    state = AAudioStream_getState(aaudioStream);
+    printf("after start, state = %s\n", AAudio_convertStreamStateToText(state));
+
+    // Record for a while.
+    framesToRecord = actualSampleRate * NUM_SECONDS;
+    framesLeft = framesToRecord;
+    while (framesLeft > 0) {
+        // Read audio data from the stream.
+        int64_t timeoutNanos = 100 * NANOS_PER_MILLISECOND;
+        int minFrames = (framesToRecord < framesPerRead) ? framesToRecord : framesPerRead;
+        int actual = AAudioStream_read(aaudioStream, data, minFrames, timeoutNanos);
+        if (actual < 0) {
+            fprintf(stderr, "ERROR - AAudioStream_read() returned %zd\n", actual);
+            goto finish;
+        } else if (actual == 0) {
+            fprintf(stderr, "WARNING - AAudioStream_read() returned %zd\n", actual);
+            goto finish;
+        }
+        framesLeft -= actual;
+
+        // Peak follower.
+        for (int frameIndex = 0; frameIndex < actual; frameIndex++) {
+            float sample = data[frameIndex * actualSamplesPerFrame] * (1.0/32768);
+            peakLevel *= DECAY_FACTOR;
+            if (sample > peakLevel) {
+                peakLevel = sample;
+            }
+        }
+
+        // Display the level as stars, e.g. "******".
+        if ((loopCounter++ % 10) == 0) {
+            printf("%5.3f ", peakLevel);
+            int numStars = (int)(peakLevel * 50);
+            for (int i = 0; i < numStars; i++) {
+                printf("*");
+            }
+            printf("\n");
+        }
+    }
+
+    xRunCount = AAudioStream_getXRunCount(aaudioStream);
+    printf("AAudioStream_getXRunCount %d\n", xRunCount);
+
+finish:
+    delete[] data;
+    AAudioStream_close(aaudioStream);
+    AAudioStreamBuilder_delete(aaudioBuilder);
+    printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+    return (result != AAUDIO_OK) ? EXIT_FAILURE : EXIT_SUCCESS;
+}
+
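The peak follower in the read loop above is small enough to factor into a helper. A sketch with the same semantics; with DECAY_FACTOR = 0.999 applied once per frame, the held peak decays to 1/e of its value after roughly 1000 frames, about 21 ms at 48000 Hz:

    // Sketch only: same logic as the loop in input_monitor.cpp, factored out.
    #include <cstdint>

    constexpr float kDecayFactor = 0.999f;

    // Updates the running peak from one buffer of interleaved PCM16 frames,
    // looking only at channel 0, exactly as the example does.
    float followPeak(float peakLevel, const int16_t *frames,
                     int32_t numFrames, int32_t samplesPerFrame) {
        for (int32_t i = 0; i < numFrames; i++) {
            float sample = frames[i * samplesPerFrame] * (1.0f / 32768);
            peakLevel *= kDecayFactor;
            if (sample > peakLevel) {
                peakLevel = sample;
            }
        }
        return peakLevel;
    }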
diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
new file mode 100644
index 0000000..8d40d94
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
@@ -0,0 +1,284 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Record input using AAudio and display the peak amplitudes.
+
+#include <assert.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <math.h>
+#include <time.h>
+#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
+
+#define NUM_SECONDS           10
+#define NANOS_PER_MICROSECOND ((int64_t)1000)
+#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
+#define NANOS_PER_SECOND      (NANOS_PER_MILLISECOND * 1000)
+
+//#define SHARING_MODE  AAUDIO_SHARING_MODE_EXCLUSIVE
+#define SHARING_MODE  AAUDIO_SHARING_MODE_SHARED
+
+/**
+ * Simple wrapper for AAudio that opens a default input stream and then calls
+ * a callback function to process the captured audio data.
+ */
+class SimpleAAudioPlayer {
+public:
+    SimpleAAudioPlayer() {}
+    ~SimpleAAudioPlayer() {
+        close();
+    };
+
+    /**
+     * Call this before calling open().
+     * @param requestedSharingMode
+     */
+    void setSharingMode(aaudio_sharing_mode_t requestedSharingMode) {
+        mRequestedSharingMode = requestedSharingMode;
+    }
+
+    /**
+     * Also known as "sample rate"
+     * Only call this after open() has been called.
+     */
+    int32_t getFramesPerSecond() {
+        if (mStream == nullptr) {
+            return AAUDIO_ERROR_INVALID_STATE;
+        }
+        return AAudioStream_getSampleRate(mStream);
+    }
+
+    /**
+     * Only call this after open() has been called.
+     */
+    int32_t getSamplesPerFrame() {
+        if (mStream == nullptr) {
+            return AAUDIO_ERROR_INVALID_STATE;
+        }
+        return AAudioStream_getSamplesPerFrame(mStream);
+    }
+
+    /**
+     * Open a stream
+     */
+    aaudio_result_t open(AAudioStream_dataCallback proc, void *userContext) {
+        aaudio_result_t result = AAUDIO_OK;
+
+        // Use an AAudioStreamBuilder to contain requested parameters.
+        result = AAudio_createStreamBuilder(&mBuilder);
+        if (result != AAUDIO_OK) return result;
+
+        AAudioStreamBuilder_setDirection(mBuilder, AAUDIO_DIRECTION_INPUT);
+        AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
+        AAudioStreamBuilder_setDataCallback(mBuilder, proc, userContext);
+        AAudioStreamBuilder_setFormat(mBuilder, AAUDIO_FORMAT_PCM_I16);
+
+        // Open an AAudioStream using the Builder.
+        result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
+        if (result != AAUDIO_OK) {
+            fprintf(stderr, "ERROR - AAudioStreamBuilder_openStream() returned %d %s\n",
+                    result, AAudio_convertResultToText(result));
+            goto finish1;
+        }
+
+        printf("AAudioStream_getFramesPerBurst() = %d\n",
+               AAudioStream_getFramesPerBurst(mStream));
+        printf("AAudioStream_getBufferSizeInFrames() = %d\n",
+               AAudioStream_getBufferSizeInFrames(mStream));
+        printf("AAudioStream_getBufferCapacityInFrames() = %d\n",
+               AAudioStream_getBufferCapacityInFrames(mStream));
+        return result;
+
+     finish1:
+        AAudioStreamBuilder_delete(mBuilder);
+        mBuilder = nullptr;
+        return result;
+    }
+
+    aaudio_result_t close() {
+        if (mStream != nullptr) {
+            printf("call AAudioStream_close(%p)\n", mStream);  fflush(stdout);
+            AAudioStream_close(mStream);
+            mStream = nullptr;
+            AAudioStreamBuilder_delete(mBuilder);
+            mBuilder = nullptr;
+        }
+        return AAUDIO_OK;
+    }
+
+    // Write zero data to fill up the buffer and prevent underruns.
+    // Assume format is PCM_I16. TODO use floats.
+    aaudio_result_t prime() {
+        int32_t samplesPerFrame = AAudioStream_getSamplesPerFrame(mStream);
+        const int numFrames = 32; // arbitrary
+        int16_t zeros[numFrames * samplesPerFrame];
+        memset(zeros, 0, sizeof(zeros));
+        aaudio_result_t result = numFrames;
+        while (result == numFrames) {
+            result = AAudioStream_write(mStream, zeros, numFrames, 0);
+        }
+        return result;
+    }
+
+    // Start the stream. AAudio will start calling your callback function.
+    aaudio_result_t start() {
+        aaudio_result_t result = AAudioStream_requestStart(mStream);
+        if (result != AAUDIO_OK) {
+            fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d %s\n",
+                    result, AAudio_convertResultToText(result));
+        }
+        return result;
+    }
+
+    // Stop the stream. AAudio will stop calling your callback function.
+    aaudio_result_t stop() {
+        aaudio_result_t result = AAudioStream_requestStop(mStream);
+        if (result != AAUDIO_OK) {
+            fprintf(stderr, "ERROR - AAudioStream_requestStop() returned %d %s\n",
+                    result, AAudio_convertResultToText(result));
+        }
+        int32_t xRunCount = AAudioStream_getXRunCount(mStream);
+        printf("AAudioStream_getXRunCount %d\n", xRunCount);
+        return result;
+    }
+
+private:
+    AAudioStreamBuilder    *mBuilder = nullptr;
+    AAudioStream           *mStream = nullptr;
+    aaudio_sharing_mode_t   mRequestedSharingMode = SHARING_MODE;
+};
+
+// Application data that gets passed to the callback.
+typedef struct PeakTrackerData {
+    float peakLevel;
+} PeakTrackerData_t;
+
+#define DECAY_FACTOR   0.999
+
+// Callback function that processes the captured audio input.
+aaudio_data_callback_result_t MyDataCallbackProc(
+        AAudioStream *stream,
+        void *userData,
+        void *audioData,
+        int32_t numFrames
+        ) {
+
+    PeakTrackerData_t *data = (PeakTrackerData_t *) userData;
+    // printf("MyCallbackProc(): frameCount = %d\n", numFrames);
+    int32_t samplesPerFrame = AAudioStream_getSamplesPerFrame(stream);
+    float sample;
+    // This code assumes mono or stereo.
+    switch (AAudioStream_getFormat(stream)) {
+        case AAUDIO_FORMAT_PCM_I16: {
+            int16_t *audioBuffer = (int16_t *) audioData;
+            // Peak follower
+            for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
+                sample = audioBuffer[frameIndex * samplesPerFrame] * (1.0/32768);
+                data->peakLevel *= DECAY_FACTOR;
+                if (sample > data->peakLevel) {
+                    data->peakLevel = sample;
+                }
+            }
+        }
+        break;
+        case AAUDIO_FORMAT_PCM_FLOAT: {
+            float *audioBuffer = (float *) audioData;
+            // Peak follower
+            for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
+                sample = audioBuffer[frameIndex * samplesPerFrame];
+                data->peakLevel *= DECAY_FACTOR;
+                if (sample > data->peakLevel) {
+                    data->peakLevel = sample;
+                }
+            }
+        }
+        break;
+        default:
+            return AAUDIO_CALLBACK_RESULT_STOP;
+    }
+
+    return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+void displayPeakLevel(float peakLevel) {
+    printf("%5.3f ", peakLevel);
+    const int maxStars = 50; // arbitrary, fits on one line
+    int numStars = (int) (peakLevel * maxStars);
+    for (int i = 0; i < numStars; i++) {
+        printf("*");
+    }
+    printf("\n");
+}
+
+int main(int argc, char **argv)
+{
+    (void)argc; // unused
+    SimpleAAudioPlayer player;
+    PeakTrackerData_t myData = {0.0};
+    aaudio_result_t result;
+    const int displayRateHz = 20; // arbitrary
+    const int loopsNeeded = NUM_SECONDS * displayRateHz;
+
+    // Make printf print immediately so that debug info is not stuck
+    // in a buffer if we hang or crash.
+    setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+    printf("%s - Display audio input using an AAudio callback\n", argv[0]);
+
+    player.setSharingMode(SHARING_MODE);
+
+    result = player.open(MyDataCallbackProc, &myData);
+    if (result != AAUDIO_OK) {
+        fprintf(stderr, "ERROR -  player.open() returned %d\n", result);
+        goto error;
+    }
+    printf("player.getFramesPerSecond() = %d\n", player.getFramesPerSecond());
+    printf("player.getSamplesPerFrame() = %d\n", player.getSamplesPerFrame());
+
+    result = player.start();
+    if (result != AAUDIO_OK) {
+        fprintf(stderr, "ERROR -  player.start() returned %d\n", result);
+        goto error;
+    }
+
+    printf("Sleep for %d seconds while audio plays in a callback thread.\n", NUM_SECONDS);
+   for (int i = 0; i < loopsNeeded; i++)
+    {
+        const struct timespec request = { .tv_sec = 0,
+                .tv_nsec = NANOS_PER_SECOND / displayRateHz };
+        (void) clock_nanosleep(CLOCK_MONOTONIC, 0 /*flags*/, &request, NULL /*remain*/);
+        displayPeakLevel(myData.peakLevel);
+    }
+    printf("Woke up now.\n");
+
+    result = player.stop();
+    if (result != AAUDIO_OK) {
+        goto error;
+    }
+    result = player.close();
+    if (result != AAUDIO_OK) {
+        goto error;
+    }
+
+    printf("SUCCESS\n");
+    return EXIT_SUCCESS;
+error:
+    player.close();
+    printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+    return EXIT_FAILURE;
+}
+
diff --git a/media/libaaudio/examples/input_monitor/static/Android.mk b/media/libaaudio/examples/input_monitor/static/Android.mk
new file mode 100644
index 0000000..e83f179
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/static/Android.mk
@@ -0,0 +1,35 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/libaaudio/include
+
+# TODO reorganize folders to avoid using ../
+LOCAL_SRC_FILES:= ../src/input_monitor.cpp
+
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
+                          libbinder libcutils libutils \
+                          libaudioclient liblog libtinyalsa
+LOCAL_STATIC_LIBRARIES := libaaudio
+
+LOCAL_MODULE := input_monitor
+include $(BUILD_EXECUTABLE)
+
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/libaaudio/include
+
+LOCAL_SRC_FILES:= ../src/input_monitor_callback.cpp
+
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
+                          libbinder libcutils libutils \
+                          libaudioclient liblog
+LOCAL_STATIC_LIBRARIES := libaaudio
+
+LOCAL_MODULE := input_monitor_callback
+include $(BUILD_EXECUTABLE)
diff --git a/media/libaaudio/examples/input_monitor/static/README.md b/media/libaaudio/examples/input_monitor/static/README.md
new file mode 100644
index 0000000..6e26d7b
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/static/README.md
@@ -0,0 +1,2 @@
+Makefile for building simple command line examples.
+They link with AAudio as a static library.
diff --git a/media/libaaudio/examples/write_sine/Android.mk b/media/libaaudio/examples/write_sine/Android.mk
index b56328b..5053e7d 100644
--- a/media/libaaudio/examples/write_sine/Android.mk
+++ b/media/libaaudio/examples/write_sine/Android.mk
@@ -1,6 +1 @@
-# include $(call all-subdir-makefiles)
-
-# Just include static/ for now.
-LOCAL_PATH := $(call my-dir)
-#include $(LOCAL_PATH)/jni/Android.mk
-include $(LOCAL_PATH)/static/Android.mk
+include $(call all-subdir-makefiles)
diff --git a/media/libaaudio/examples/write_sine/jni/Android.mk b/media/libaaudio/examples/write_sine/jni/Android.mk
index 51a5a85..8cd0f03 100644
--- a/media/libaaudio/examples/write_sine/jni/Android.mk
+++ b/media/libaaudio/examples/write_sine/jni/Android.mk
@@ -4,32 +4,27 @@
 LOCAL_MODULE_TAGS := tests
 LOCAL_C_INCLUDES := \
     $(call include-path-for, audio-utils) \
-    frameworks/av/media/liboboe/include
+    frameworks/av/media/libaaudio/include
 
-LOCAL_SRC_FILES:= frameworks/av/media/liboboe/src/write_sine.cpp
-LOCAL_SHARED_LIBRARIES := libaudioutils libmedia libtinyalsa \
-        libbinder libcutils libutils
-LOCAL_STATIC_LIBRARIES := libsndfile
+# NDK recommends using this kind of relative path instead of an absolute path.
+LOCAL_SRC_FILES:= ../src/write_sine.cpp
+LOCAL_SHARED_LIBRARIES := libaaudio
 LOCAL_MODULE := write_sine_ndk
-LOCAL_SHARED_LIBRARIES += liboboe_prebuilt
 include $(BUILD_EXECUTABLE)
 
 include $(CLEAR_VARS)
 LOCAL_MODULE_TAGS := tests
 LOCAL_C_INCLUDES := \
     $(call include-path-for, audio-utils) \
-    frameworks/av/media/liboboe/include
+    frameworks/av/media/libaaudio/include
 
-LOCAL_SRC_FILES:= frameworks/av/media/liboboe/src/write_sine_threaded.cpp
-LOCAL_SHARED_LIBRARIES := libaudioutils libmedia libtinyalsa \
-        libbinder libcutils libutils
-LOCAL_STATIC_LIBRARIES := libsndfile
+LOCAL_SRC_FILES:= ../src/write_sine_threaded.cpp
+LOCAL_SHARED_LIBRARIES := libaaudio
 LOCAL_MODULE := write_sine_threaded_ndk
-LOCAL_SHARED_LIBRARIES += liboboe_prebuilt
 include $(BUILD_EXECUTABLE)
 
 include $(CLEAR_VARS)
-LOCAL_MODULE := liboboe_prebuilt
-LOCAL_SRC_FILES := liboboe.so
+LOCAL_MODULE := libaaudio_prebuilt
+LOCAL_SRC_FILES := libaaudio.so
 LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
 include $(PREBUILT_SHARED_LIBRARY)
diff --git a/media/libaaudio/examples/write_sine/src/SineGenerator.h b/media/libaaudio/examples/write_sine/src/SineGenerator.h
index ade7527..64b772d 100644
--- a/media/libaaudio/examples/write_sine/src/SineGenerator.h
+++ b/media/libaaudio/examples/write_sine/src/SineGenerator.h
@@ -79,7 +79,7 @@
         }
     }
 
-    double mAmplitude = 0.01;
+    double mAmplitude = 0.05;  // unitless scaler
     double mPhase = 0.0;
     double mPhaseIncrement = 440 * M_PI * 2 / 48000;
     double mFrameRate = 48000;
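SineGenerator's implementation is not part of this patch, but the members shown above (mAmplitude, mPhase, mPhaseIncrement, mFrameRate) imply a render loop along these lines. A sketch only, not the actual class; the phase advances by 2*pi*frequency/frameRate per frame, and mAmplitude is a plain output scale factor, which is why raising it from 0.01 to 0.05 simply makes the tone louder:

    // Sketch only: plausible render loop for a generator with the members above.
    #include <math.h>
    #include <stdint.h>

    void renderSine(int16_t *buffer, int32_t channelStride, int32_t numFrames,
                    double &phase, double phaseIncrement, double amplitude) {
        for (int32_t i = 0; i < numFrames; i++) {
            buffer[i * channelStride] = (int16_t)(sin(phase) * amplitude * 32767);
            phase += phaseIncrement;            // e.g. 440 * M_PI * 2 / 48000
            if (phase >= 2.0 * M_PI) {
                phase -= 2.0 * M_PI;            // keep the phase bounded
            }
        }
    }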
diff --git a/media/libaaudio/examples/write_sine/src/write_sine.cpp b/media/libaaudio/examples/write_sine/src/write_sine.cpp
index 80b6252..d8e5ec1 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine.cpp
@@ -19,7 +19,6 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <math.h>
-#include <aaudio/AAudioDefinitions.h>
 #include <aaudio/AAudio.h>
 #include "SineGenerator.h"
 
@@ -44,6 +43,7 @@
     return modeText;
 }
 
+// TODO move to a common utility library
 static int64_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
     struct timespec time;
     int result = clock_gettime(clockId, &time);
@@ -74,6 +74,8 @@
     AAudioStream *aaudioStream = nullptr;
     aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNINITIALIZED;
     int32_t framesPerBurst = 0;
+    int32_t framesPerWrite = 0;
+    int32_t bufferCapacity = 0;
     int32_t framesToPlay = 0;
     int32_t framesLeft = 0;
     int32_t xRunCount = 0;
@@ -100,7 +102,6 @@
     AAudioStreamBuilder_setFormat(aaudioBuilder, requestedDataFormat);
     AAudioStreamBuilder_setSharingMode(aaudioBuilder, requestedSharingMode);
 
-
     // Create an AAudioStream using the Builder.
     result = AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream);
     if (result != AAUDIO_OK) {
@@ -129,21 +130,25 @@
     // This is the number of frames that are read in one chunk by a DMA controller
     // or a DSP or a mixer.
     framesPerBurst = AAudioStream_getFramesPerBurst(aaudioStream);
-    printf("DataFormat: original framesPerBurst = %d\n",framesPerBurst);
+    printf("DataFormat: framesPerBurst = %d\n",framesPerBurst);
+    bufferCapacity = AAudioStream_getBufferCapacityInFrames(aaudioStream);
+    printf("DataFormat: bufferCapacity = %d, remainder = %d\n",
+           bufferCapacity, bufferCapacity % framesPerBurst);
 
     // Some DMA might use very short bursts of 16 frames. We don't need to write such small
     // buffers. But it helps to use a multiple of the burst size for predictable scheduling.
-    while (framesPerBurst < 48) {
-        framesPerBurst *= 2;
+    framesPerWrite = framesPerBurst;
+    while (framesPerWrite < 48) {
+        framesPerWrite *= 2;
     }
-    printf("DataFormat: final framesPerBurst = %d\n",framesPerBurst);
+    printf("DataFormat: framesPerWrite = %d\n",framesPerWrite);
 
     actualDataFormat = AAudioStream_getFormat(aaudioStream);
     printf("DataFormat: requested = %d, actual = %d\n", requestedDataFormat, actualDataFormat);
     // TODO handle other data formats
 
     // Allocate a buffer for the audio data.
-    data = new int16_t[framesPerBurst * actualSamplesPerFrame];
+    data = new int16_t[framesPerWrite * actualSamplesPerFrame];
     if (data == nullptr) {
         fprintf(stderr, "ERROR - could not allocate data buffer\n");
         result = AAUDIO_ERROR_NO_MEMORY;
@@ -166,14 +171,14 @@
     framesLeft = framesToPlay;
     while (framesLeft > 0) {
         // Render sine waves to left and right channels.
-        sineOsc1.render(&data[0], actualSamplesPerFrame, framesPerBurst);
+        sineOsc1.render(&data[0], actualSamplesPerFrame, framesPerWrite);
         if (actualSamplesPerFrame > 1) {
-            sineOsc2.render(&data[1], actualSamplesPerFrame, framesPerBurst);
+            sineOsc2.render(&data[1], actualSamplesPerFrame, framesPerWrite);
         }
 
         // Write audio data to the stream.
         int64_t timeoutNanos = 100 * NANOS_PER_MILLISECOND;
-        int minFrames = (framesToPlay < framesPerBurst) ? framesToPlay : framesPerBurst;
+        int minFrames = (framesToPlay < framesPerWrite) ? framesToPlay : framesPerWrite;
         int actual = AAudioStream_write(aaudioStream, data, minFrames, timeoutNanos);
         if (actual < 0) {
             fprintf(stderr, "ERROR - AAudioStream_write() returned %zd\n", actual);
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
new file mode 100644
index 0000000..9414236
--- /dev/null
+++ b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
@@ -0,0 +1,320 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Play sine waves using an AAudio callback.
+
+#include <assert.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sched.h>
+#include <stdio.h>
+#include <math.h>
+#include <time.h>
+#include <aaudio/AAudio.h>
+#include "SineGenerator.h"
+
+#define NUM_SECONDS              5
+
+//#define SHARING_MODE  AAUDIO_SHARING_MODE_EXCLUSIVE
+#define SHARING_MODE  AAUDIO_SHARING_MODE_SHARED
+
+#define  CALLBACK_SIZE_FRAMES    128
+
+// TODO refactor common code into a single SimpleAAudio class
+/**
+ * Simple wrapper for AAudio that opens a default stream and then calls
+ * a callback function to fill the output buffers.
+ */
+class SimpleAAudioPlayer {
+public:
+    SimpleAAudioPlayer() {}
+    ~SimpleAAudioPlayer() {
+        close();
+    };
+
+    /**
+     * Call this before calling open().
+     * @param requestedSharingMode
+     */
+    void setSharingMode(aaudio_sharing_mode_t requestedSharingMode) {
+        mRequestedSharingMode = requestedSharingMode;
+    }
+
+    /**
+     * Also known as "sample rate"
+     * Only call this after open() has been called.
+     */
+    int32_t getFramesPerSecond() {
+        if (mStream == nullptr) {
+            return AAUDIO_ERROR_INVALID_STATE;
+        }
+        return AAudioStream_getSampleRate(mStream);
+    }
+
+    /**
+     * Only call this after open() has been called.
+     */
+    int32_t getSamplesPerFrame() {
+        if (mStream == nullptr) {
+            return AAUDIO_ERROR_INVALID_STATE;
+        }
+        return AAudioStream_getSamplesPerFrame(mStream);
+    }
+
+    /**
+     * Open a stream
+     */
+    aaudio_result_t open(AAudioStream_dataCallback dataProc, void *userContext) {
+        aaudio_result_t result = AAUDIO_OK;
+
+        // Use an AAudioStreamBuilder to contain requested parameters.
+        result = AAudio_createStreamBuilder(&mBuilder);
+        if (result != AAUDIO_OK) return result;
+
+        AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
+        AAudioStreamBuilder_setDataCallback(mBuilder, dataProc, userContext);
+        AAudioStreamBuilder_setFormat(mBuilder, AAUDIO_FORMAT_PCM_FLOAT);
+        AAudioStreamBuilder_setFramesPerDataCallback(mBuilder, CALLBACK_SIZE_FRAMES);
+ //       AAudioStreamBuilder_setBufferCapacityInFrames(mBuilder, CALLBACK_SIZE_FRAMES * 4);
+
+        // Open an AAudioStream using the Builder.
+        result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
+        if (result != AAUDIO_OK) goto finish1;
+
+        printf("AAudioStream_getFramesPerBurst() = %d\n",
+               AAudioStream_getFramesPerBurst(mStream));
+        printf("AAudioStream_getBufferSizeInFrames() = %d\n",
+               AAudioStream_getBufferSizeInFrames(mStream));
+        printf("AAudioStream_getBufferCapacityInFrames() = %d\n",
+               AAudioStream_getBufferCapacityInFrames(mStream));
+        return result;
+
+     finish1:
+        AAudioStreamBuilder_delete(mBuilder);
+        mBuilder = nullptr;
+        return result;
+    }
+
+    aaudio_result_t close() {
+        if (mStream != nullptr) {
+            printf("call AAudioStream_close(%p)\n", mStream);  fflush(stdout);
+            AAudioStream_close(mStream);
+            mStream = nullptr;
+            AAudioStreamBuilder_delete(mBuilder);
+            mBuilder = nullptr;
+        }
+        return AAUDIO_OK;
+    }
+
+    // Write zero data to fill up the buffer and prevent underruns.
+    aaudio_result_t prime() {
+        int32_t samplesPerFrame = AAudioStream_getSamplesPerFrame(mStream);
+        const int numFrames = 32;
+        float zeros[numFrames * samplesPerFrame];
+        memset(zeros, 0, sizeof(zeros));
+        aaudio_result_t result = numFrames;
+        while (result == numFrames) {
+            result = AAudioStream_write(mStream, zeros, numFrames, 0);
+        }
+        return result;
+    }
+
+    // Start the stream. AAudio will start calling your callback function.
+    aaudio_result_t start() {
+        aaudio_result_t result = AAudioStream_requestStart(mStream);
+        if (result != AAUDIO_OK) {
+            fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d %s\n",
+                    result, AAudio_convertResultToText(result));
+        }
+        return result;
+    }
+
+    // Stop the stream. AAudio will stop calling your callback function.
+    aaudio_result_t stop() {
+        aaudio_result_t result = AAudioStream_requestStop(mStream);
+        if (result != AAUDIO_OK) {
+            fprintf(stderr, "ERROR - AAudioStream_requestStop() returned %d %s\n",
+                    result, AAudio_convertResultToText(result));
+        }
+        int32_t xRunCount = AAudioStream_getXRunCount(mStream);
+        printf("AAudioStream_getXRunCount %d\n", xRunCount);
+        return result;
+    }
+
+    AAudioStream *getStream() const {
+        return mStream;
+    }
+
+private:
+    AAudioStreamBuilder    *mBuilder = nullptr;
+    AAudioStream           *mStream = nullptr;
+    aaudio_sharing_mode_t   mRequestedSharingMode = SHARING_MODE;
+};
+
+// Application data that gets passed to the callback.
+#define MAX_FRAME_COUNT_RECORDS    256
+typedef struct SineThreadedData_s {
+    SineGenerator  sineOsc1;
+    SineGenerator  sineOsc2;
+    // TODO Remove these variables; they are only used for testing.
+    int32_t        numFrameCounts;
+    int32_t        frameCounts[MAX_FRAME_COUNT_RECORDS];
+    int            scheduler;
+    bool           schedulerChecked;
+} SineThreadedData_t;
+
+// Callback function that fills the audio output buffer.
+aaudio_data_callback_result_t MyDataCallbackProc(
+        AAudioStream *stream,
+        void *userData,
+        void *audioData,
+        int32_t numFrames
+        ) {
+
+    SineThreadedData_t *sineData = (SineThreadedData_t *) userData;
+
+    if (sineData->numFrameCounts < MAX_FRAME_COUNT_RECORDS) {
+        sineData->frameCounts[sineData->numFrameCounts++] = numFrames;
+    }
+
+    if (!sineData->schedulerChecked) {
+        sineData->scheduler = sched_getscheduler(gettid());
+        sineData->schedulerChecked = true;
+    }
+
+    int32_t samplesPerFrame = AAudioStream_getSamplesPerFrame(stream);
+    // This code only plays on the first one or two channels.
+    // TODO Support arbitrary number of channels.
+    switch (AAudioStream_getFormat(stream)) {
+        case AAUDIO_FORMAT_PCM_I16: {
+            int16_t *audioBuffer = (int16_t *) audioData;
+            // Render sine waves as shorts to first channel.
+            sineData->sineOsc1.render(&audioBuffer[0], samplesPerFrame, numFrames);
+            // Render sine waves to second channel if there is one.
+            if (samplesPerFrame > 1) {
+                sineData->sineOsc2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+            }
+        }
+        break;
+        case AAUDIO_FORMAT_PCM_FLOAT: {
+            float *audioBuffer = (float *) audioData;
+            // Render sine waves as floats to first channel.
+            sineData->sineOsc1.render(&audioBuffer[0], samplesPerFrame, numFrames);
+            // Render sine waves to second channel if there is one.
+            if (samplesPerFrame > 1) {
+                sineData->sineOsc2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+            }
+        }
+        break;
+        default:
+            return AAUDIO_CALLBACK_RESULT_STOP;
+    }
+
+    return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+int main(int argc, char **argv)
+{
+    (void)argc; // unused
+    SimpleAAudioPlayer player;
+    SineThreadedData_t myData;
+    aaudio_result_t result;
+
+    // Make printf print immediately so that debug info is not stuck
+    // in a buffer if we hang or crash.
+    setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+    printf("%s - Play a sine sweep using an AAudio callback\n", argv[0]);
+
+    player.setSharingMode(SHARING_MODE);
+
+    myData.numFrameCounts = 0;
+    myData.schedulerChecked = false;
+
+    result = player.open(MyDataCallbackProc, &myData);
+    if (result != AAUDIO_OK) {
+        fprintf(stderr, "ERROR -  player.open() returned %d\n", result);
+        goto error;
+    }
+    printf("player.getFramesPerSecond() = %d\n", player.getFramesPerSecond());
+    printf("player.getSamplesPerFrame() = %d\n", player.getSamplesPerFrame());
+    myData.sineOsc1.setup(440.0, 48000);
+    myData.sineOsc1.setSweep(300.0, 600.0, 5.0);
+    myData.sineOsc2.setup(660.0, 48000);
+    myData.sineOsc2.setSweep(350.0, 900.0, 7.0);
+
+#if 0
+    result = player.prime(); // FIXME crashes AudioTrack.cpp
+    if (result != AAUDIO_OK) {
+        fprintf(stderr, "ERROR - player.prime() returned %d\n", result);
+        goto error;
+    }
+#endif
+
+    result = player.start();
+    if (result != AAUDIO_OK) {
+        fprintf(stderr, "ERROR - player.start() returned %d\n", result);
+        goto error;
+    }
+
+    printf("Sleep for %d seconds while audio plays in a callback thread.\n", NUM_SECONDS);
+    for (int second = 0; second < NUM_SECONDS; second++)
+    {
+        const struct timespec request = { .tv_sec = 1, .tv_nsec = 0 };
+        (void) clock_nanosleep(CLOCK_MONOTONIC, 0 /*flags*/, &request, NULL /*remain*/);
+
+        aaudio_stream_state_t state;
+        result = AAudioStream_waitForStateChange(player.getStream(),
+                                                 AAUDIO_STREAM_STATE_CLOSED,
+                                                 &state,
+                                                 0);
+        if (result != AAUDIO_OK) {
+            fprintf(stderr, "ERROR - AAudioStream_waitForStateChange() returned %d\n", result);
+            goto error;
+        }
+        if (state != AAUDIO_STREAM_STATE_STARTING && state != AAUDIO_STREAM_STATE_STARTED) {
+            printf("Stream state is %d %s!\n", state, AAudio_convertStreamStateToText(state));
+            break;
+        }
+    }
+    printf("Woke up now.\n");
+
+    result = player.stop();
+    if (result != AAUDIO_OK) {
+        goto error;
+    }
+    result = player.close();
+    if (result != AAUDIO_OK) {
+        goto error;
+    }
+
+    // Report data gathered in the callback.
+    for (int i = 0; i < myData.numFrameCounts; i++) {
+        printf("numFrames[%4d] = %4d\n", i, myData.frameCounts[i]);
+    }
+    if (myData.schedulerChecked) {
+        printf("scheduler = 0x%08x, SCHED_FIFO = 0x%08X\n",
+               myData.scheduler,
+               SCHED_FIFO);
+    }
+
+    printf("SUCCESS\n");
+    return EXIT_SUCCESS;
+error:
+    player.close();
+    printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+    return EXIT_FAILURE;
+}
+
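The monitoring loop above calls AAudioStream_waitForStateChange() with a zero timeout and an input state (CLOSED) that a running stream is never in, which amounts to a non-blocking poll of the current stream state. A sketch of that idiom wrapped as a helper, using only the call already present in this file:

    // Sketch only: non-blocking state poll built from the call used above.
    #include <aaudio/AAudio.h>

    // Reports the stream's current state through *state without blocking:
    // the stream is not in CLOSED, so the call returns immediately.
    aaudio_result_t pollStreamState(AAudioStream *stream,
                                    aaudio_stream_state_t *state) {
        return AAudioStream_waitForStateChange(stream,
                                               AAUDIO_STREAM_STATE_CLOSED,
                                               state,
                                               0 /* timeoutNanoseconds */);
    }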
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp b/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp
index 40e5016..9bc5886 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp
@@ -16,24 +16,24 @@
 
 // Play sine waves using an AAudio background thread.
 
-#include <assert.h>
+//#include <assert.h>
+#include <atomic>
 #include <unistd.h>
 #include <stdlib.h>
 #include <stdio.h>
 #include <math.h>
 #include <time.h>
-#include <aaudio/AAudioDefinitions.h>
 #include <aaudio/AAudio.h>
 #include "SineGenerator.h"
 
-#define NUM_SECONDS           10
+#define NUM_SECONDS           5
 #define NANOS_PER_MICROSECOND ((int64_t)1000)
 #define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
 #define MILLIS_PER_SECOND     1000
 #define NANOS_PER_SECOND      (NANOS_PER_MILLISECOND * MILLIS_PER_SECOND)
 
-//#define SHARING_MODE  AAUDIO_SHARING_MODE_EXCLUSIVE
-#define SHARING_MODE  AAUDIO_SHARING_MODE_SHARED
+#define SHARING_MODE  AAUDIO_SHARING_MODE_EXCLUSIVE
+//#define SHARING_MODE  AAUDIO_SHARING_MODE_SHARED
 
 // Prototype for a callback.
 typedef int audio_callback_proc_t(float *outputBuffer,
@@ -42,6 +42,16 @@
 
 static void *SimpleAAudioPlayerThreadProc(void *arg);
 
+// TODO merge into common code
+static int64_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
+    struct timespec time;
+    int result = clock_gettime(clockId, &time);
+    if (result < 0) {
+        return -errno; // TODO standardize return value
+    }
+    return (time.tv_sec * NANOS_PER_SECOND) + time.tv_nsec;
+}
+
 /**
  * Simple wrapper for AAudio that opens a default stream and then calls
  * a callback function to fill the output buffers.
@@ -49,7 +59,7 @@
 class SimpleAAudioPlayer {
 public:
     SimpleAAudioPlayer() {}
-    virtual ~SimpleAAudioPlayer() {
+    ~SimpleAAudioPlayer() {
         close();
     };
 
@@ -80,21 +90,25 @@
         if (result != AAUDIO_OK) return result;
 
         AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
+        AAudioStreamBuilder_setSampleRate(mBuilder, 48000);
 
         // Open an AAudioStream using the Builder.
         result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
-        if (result != AAUDIO_OK) goto finish1;
+        if (result != AAUDIO_OK) goto error;
+
+        printf("Requested sharing mode = %d\n", mRequestedSharingMode);
+        printf("Actual    sharing mode = %d\n", AAudioStream_getSharingMode(mStream));
 
         // Check to see what kind of stream we actually got.
         mFramesPerSecond = AAudioStream_getSampleRate(mStream);
-        printf("open() mFramesPerSecond = %d\n", mFramesPerSecond);
+        printf("Actual    framesPerSecond = %d\n", mFramesPerSecond);
 
         mSamplesPerFrame = AAudioStream_getSamplesPerFrame(mStream);
-        printf("open() mSamplesPerFrame = %d\n", mSamplesPerFrame);
+        printf("Actual    samplesPerFrame = %d\n", mSamplesPerFrame);
 
         {
             int32_t bufferCapacity = AAudioStream_getBufferCapacityInFrames(mStream);
-            printf("open() got bufferCapacity = %d\n", bufferCapacity);
+            printf("Actual    bufferCapacity = %d\n", bufferCapacity);
         }
 
         // This is the number of frames that are read in one chunk by a DMA controller
@@ -105,9 +119,10 @@
         while (mFramesPerBurst < 48) {
             mFramesPerBurst *= 2;
         }
-        printf("DataFormat: final framesPerBurst = %d\n",mFramesPerBurst);
+        printf("Actual    framesPerBurst = %d\n",mFramesPerBurst);
 
         mDataFormat = AAudioStream_getFormat(mStream);
+        printf("Actual    dataFormat = %d\n", mDataFormat);
 
         // Allocate a buffer for the audio data.
         mOutputBuffer = new float[mFramesPerBurst * mSamplesPerFrame];
@@ -118,6 +133,7 @@
 
         // If needed allocate a buffer for converting float to int16_t.
         if (mDataFormat == AAUDIO_FORMAT_PCM_I16) {
+            printf("Allocate data conversion buffer for float=>pcm16\n");
             mConversionBuffer = new int16_t[mFramesPerBurst * mSamplesPerFrame];
             if (mConversionBuffer == nullptr) {
                 fprintf(stderr, "ERROR - could not allocate conversion buffer\n");
@@ -126,7 +142,7 @@
         }
         return result;
 
-     finish1:
+    error:
         AAudioStreamBuilder_delete(mBuilder);
         mBuilder = nullptr;
         return result;
@@ -150,7 +166,7 @@
 
     // Start a thread that will call the callback proc.
     aaudio_result_t start() {
-        mEnabled = true;
+        mEnabled.store(true);
         int64_t nanosPerBurst = mFramesPerBurst * NANOS_PER_SECOND
                                            / mFramesPerSecond;
         return AAudioStream_createThread(mStream, nanosPerBurst,
@@ -160,56 +176,106 @@
 
     // Tell the thread to stop.
     aaudio_result_t stop() {
-        mEnabled = false;
+        mEnabled.store(false);
         return AAudioStream_joinThread(mStream, nullptr, 2 * NANOS_PER_SECOND);
     }
 
-    aaudio_result_t callbackLoop() {
-        int32_t framesWritten = 0;
-        int32_t xRunCount = 0;
-        aaudio_result_t result = AAUDIO_OK;
+    bool isEnabled() const {
+        return mEnabled.load();
+    }
 
-        result = AAudioStream_requestStart(mStream);
-        if (result != AAUDIO_OK) {
-            fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d\n", result);
-            return result;
-        }
+    aaudio_result_t callbackLoop() {
+        aaudio_result_t result = 0;
+        int64_t framesWritten = 0;
+        int32_t xRunCount = 0;
+        bool    started = false;
+        int64_t framesInBuffer =
+                AAudioStream_getFramesWritten(mStream) -
+                AAudioStream_getFramesRead(mStream);
+        int64_t framesAvailable =
+                AAudioStream_getBufferSizeInFrames(mStream) - framesInBuffer;
+
+        int64_t startTime = 0;
+        int64_t startPosition = 0;
+        int32_t loopCount = 0;
 
         // Give up after several burst periods have passed.
         const int burstsPerTimeout = 8;
-        int64_t nanosPerTimeout =
-                        burstsPerTimeout * mFramesPerBurst * NANOS_PER_SECOND
-                        / mFramesPerSecond;
+        int64_t nanosPerTimeout = 0;
+        int64_t runningNanosPerTimeout = 500 * NANOS_PER_MILLISECOND;
 
-        while (mEnabled && result >= 0) {
+        while (isEnabled() && result >= 0) {
             // Call application's callback function to fill the buffer.
             if (mCallbackProc(mOutputBuffer, mFramesPerBurst, mUserContext)) {
-                mEnabled = false;
+                mEnabled.store(false);
             }
+
             // if needed, convert from float to int16_t PCM
+            //printf("app callbackLoop writing %d frames, state = %s\n", mFramesPerBurst,
+            //       AAudio_convertStreamStateToText(AAudioStream_getState(mStream)));
             if (mConversionBuffer != nullptr) {
                 int32_t numSamples = mFramesPerBurst * mSamplesPerFrame;
                 for (int i = 0; i < numSamples; i++) {
                     mConversionBuffer[i] = (int16_t)(32767.0 * mOutputBuffer[i]);
                 }
                 // Write the application data to stream.
-                result = AAudioStream_write(mStream, mConversionBuffer, mFramesPerBurst, nanosPerTimeout);
+                result = AAudioStream_write(mStream, mConversionBuffer,
+                                            mFramesPerBurst, nanosPerTimeout);
             } else {
                 // Write the application data to stream.
-                result = AAudioStream_write(mStream, mOutputBuffer, mFramesPerBurst, nanosPerTimeout);
+                result = AAudioStream_write(mStream, mOutputBuffer,
+                                            mFramesPerBurst, nanosPerTimeout);
             }
-            framesWritten += result;
+
             if (result < 0) {
-                fprintf(stderr, "ERROR - AAudioStream_write() returned %zd\n", result);
+                fprintf(stderr, "ERROR - AAudioStream_write() returned %d %s\n", result,
+                        AAudio_convertResultToText(result));
+                break;
+            } else if (started && result != mFramesPerBurst) {
+                fprintf(stderr, "ERROR - AAudioStream_write() timed out! %d\n", result);
+                break;
+            } else {
+                framesWritten += result;
+            }
+
+            if (startTime > 0 && ((loopCount & 0x01FF) == 0)) {
+                double elapsedFrames = (double)(framesWritten - startPosition);
+                int64_t elapsedTime = getNanoseconds() - startTime;
+                double measuredRate = elapsedFrames * NANOS_PER_SECOND / elapsedTime;
+                printf("app callbackLoop write() measured rate %f\n", measuredRate);
+            }
+            loopCount++;
+
+            if (!started && framesWritten >= framesAvailable) {
+                // Start the stream once the buffer is fully primed.
+                result = AAudioStream_requestStart(mStream);
+                printf("app callbackLoop requestStart returned %d\n", result);
+                if (result != AAUDIO_OK) {
+                    fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d %s\n", result,
+                            AAudio_convertResultToText(result));
+                    mEnabled.store(false);
+                    return result;
+                }
+                started = true;
+                nanosPerTimeout = runningNanosPerTimeout;
+                startPosition = framesWritten;
+                startTime = getNanoseconds();
+            }
+
+            {
+                int32_t tempXRunCount = AAudioStream_getXRunCount(mStream);
+                if (tempXRunCount != xRunCount) {
+                    xRunCount = tempXRunCount;
+                    printf("AAudioStream_getXRunCount returns %d at frame %d\n",
+                           xRunCount, (int) framesWritten);
+                }
             }
         }
 
-        xRunCount = AAudioStream_getXRunCount(mStream);
-        printf("AAudioStream_getXRunCount %d\n", xRunCount);
-
         result = AAudioStream_requestStop(mStream);
         if (result != AAUDIO_OK) {
-            fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d\n", result);
+            fprintf(stderr, "ERROR - AAudioStream_requestStop() returned %d %s\n", result,
+                    AAudio_convertResultToText(result));
             return result;
         }
 
@@ -230,7 +296,7 @@
     int32_t               mFramesPerBurst = 0;
     aaudio_audio_format_t mDataFormat = AAUDIO_FORMAT_PCM_I16;
 
-    volatile bool         mEnabled = false; // used to request that callback exit its loop
+    std::atomic<bool>     mEnabled; // used to request that callback exit its loop
 };
 
 static void *SimpleAAudioPlayerThreadProc(void *arg) {
@@ -289,19 +355,21 @@
     }
 
     printf("Sleep for %d seconds while audio plays in a background thread.\n", NUM_SECONDS);
-    {
+    for (int i = 0; i < NUM_SECONDS && player.isEnabled(); i++) {
         // FIXME sleep is not an NDK API
         // sleep(NUM_SECONDS);
-        const struct timespec request = { .tv_sec = NUM_SECONDS, .tv_nsec = 0 };
+        const struct timespec request = { .tv_sec = 1, .tv_nsec = 0 };
         (void) clock_nanosleep(CLOCK_MONOTONIC, 0 /*flags*/, &request, NULL /*remain*/);
     }
-    printf("Woke up now.\n");
+    printf("Woke up now!\n");
 
     result = player.stop();
     if (result != AAUDIO_OK) {
         fprintf(stderr, "ERROR -  player.stop() returned %d\n", result);
         goto error;
     }
+
+    printf("Player stopped.\n");
     result = player.close();
     if (result != AAUDIO_OK) {
         fprintf(stderr, "ERROR -  player.close() returned %d\n", result);
diff --git a/media/libaaudio/examples/write_sine/static/Android.mk b/media/libaaudio/examples/write_sine/static/Android.mk
index 139b70a..c02b91c 100644
--- a/media/libaaudio/examples/write_sine/static/Android.mk
+++ b/media/libaaudio/examples/write_sine/static/Android.mk
@@ -6,7 +6,7 @@
     $(call include-path-for, audio-utils) \
     frameworks/av/media/libaaudio/include
 
-# TODO reorganize folders to avoid using ../
+# NDK recommends using this kind of relative path instead of an absolute path.
 LOCAL_SRC_FILES:= ../src/write_sine.cpp
 
 LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
@@ -17,6 +17,8 @@
 LOCAL_MODULE := write_sine
 include $(BUILD_EXECUTABLE)
 
+
+
 include $(CLEAR_VARS)
 LOCAL_MODULE_TAGS := tests
 LOCAL_C_INCLUDES := \
@@ -27,8 +29,26 @@
 
 LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
                           libbinder libcutils libutils \
-                          libaudioclient liblog libtinyalsa
+                          libaudioclient liblog
 LOCAL_STATIC_LIBRARIES := libaaudio
 
 LOCAL_MODULE := write_sine_threaded
 include $(BUILD_EXECUTABLE)
+
+
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/libaaudio/include
+
+LOCAL_SRC_FILES:= ../src/write_sine_callback.cpp
+
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
+                          libbinder libcutils libutils \
+                          libaudioclient liblog
+LOCAL_STATIC_LIBRARIES := libaaudio
+
+LOCAL_MODULE := write_sine_callback
+include $(BUILD_EXECUTABLE)
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 921248a..25ad5f8 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -15,7 +15,16 @@
  */
 
 /**
- * This is the 'C' ABI for AAudio.
+ * @addtogroup Audio
+ * @{
+ */
+
+/**
+ * @file AAudio.h
+ */
+
+/**
+ * This is the 'C' API for AAudio.
  */
 #ifndef AAUDIO_AAUDIO_H
 #define AAUDIO_AAUDIO_H
@@ -80,7 +89,8 @@
  * Request an audio device identified by a device ID.
  * On Android, for example, the ID could be obtained from the Java AudioManager.
  *
- * By default, the primary device will be used.
+ * The default, if you do not call this function, is AAUDIO_DEVICE_UNSPECIFIED,
+ * in which case the primary device will be used.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param deviceId device identifier or AAUDIO_DEVICE_UNSPECIFIED
@@ -89,52 +99,71 @@
                                                      int32_t deviceId);
 
 /**
- * Request a sample rate in Hz.
+ * Request a sample rate in Hertz.
+ *
  * The stream may be opened with a different sample rate.
  * So the application should query for the actual rate after the stream is opened.
  *
  * Technically, this should be called the "frame rate" or "frames per second",
  * because it refers to the number of complete frames transferred per second.
- * But it is traditionally called "sample rate". Se we use that term.
+ * But it is traditionally called "sample rate". So we use that term.
  *
- * Default is AAUDIO_UNSPECIFIED.
-
+ * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param sampleRate frames per second. Common rates include 44100 and 48000 Hz.
  */
 AAUDIO_API void AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder* builder,
                                                        int32_t sampleRate);
 
 /**
  * Request a number of samples per frame.
+ *
  * The stream may be opened with a different value.
  * So the application should query for the actual value after the stream is opened.
  *
- * Default is AAUDIO_UNSPECIFIED.
+ * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
  *
  * Note, this quantity is sometimes referred to as "channel count".
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param samplesPerFrame Number of samples in one frame, i.e. the number of channels.
  */
 AAUDIO_API void AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder* builder,
                                                    int32_t samplesPerFrame);
 
 /**
  * Request a sample data format, for example AAUDIO_FORMAT_PCM_I16.
- * The application should query for the actual format after the stream is opened.
+ *
+ * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
+ *
+ * The stream may be opened with a different value.
+ * So the application should query for the actual value after the stream is opened.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param format Most common formats are AAUDIO_FORMAT_PCM_FLOAT and AAUDIO_FORMAT_PCM_I16.
  */
 AAUDIO_API void AAudioStreamBuilder_setFormat(AAudioStreamBuilder* builder,
                                                    aaudio_audio_format_t format);
 
 /**
  * Request a mode for sharing the device.
+ *
+ * The default, if you do not call this function, is AAUDIO_SHARING_MODE_SHARED.
+ *
  * The requested sharing mode may not be available.
- * So the application should query for the actual mode after the stream is opened.
+ * The application can query for the actual mode after the stream is opened.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
- * @param sharingMode AAUDIO_SHARING_MODE_LEGACY or AAUDIO_SHARING_MODE_EXCLUSIVE
+ * @param sharingMode AAUDIO_SHARING_MODE_SHARED or AAUDIO_SHARING_MODE_EXCLUSIVE
  */
 AAUDIO_API void AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder* builder,
                                                         aaudio_sharing_mode_t sharingMode);
 
 /**
- * Request the direction for a stream. The default is AAUDIO_DIRECTION_OUTPUT.
+ * Request the direction for a stream.
+ *
+ * The default, if you do not call this function, is AAUDIO_DIRECTION_OUTPUT.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param direction AAUDIO_DIRECTION_OUTPUT or AAUDIO_DIRECTION_INPUT
@@ -143,16 +172,162 @@
                                                             aaudio_direction_t direction);
 
 /**
- * Set the requested maximum buffer capacity in frames.
+ * Set the requested buffer capacity in frames.
  * The final AAudioStream capacity may differ, but will probably be at least this big.
  *
- * Default is AAUDIO_UNSPECIFIED.
+ * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
- * @param frames the desired buffer capacity in frames or AAUDIO_UNSPECIFIED
+ * @param numFrames the desired buffer capacity in frames or AAUDIO_UNSPECIFIED
  */
 AAUDIO_API void AAudioStreamBuilder_setBufferCapacityInFrames(AAudioStreamBuilder* builder,
-                                                                 int32_t frames);
+                                                                 int32_t numFrames);
+/**
+ * Return one of these values from the data callback function.
+ */
+enum {
+
+    /**
+     * Continue calling the callback.
+     */
+    AAUDIO_CALLBACK_RESULT_CONTINUE = 0,
+
+    /**
+     * Stop calling the callback.
+     *
+     * The application will still need to call AAudioStream_requestPause()
+     * or AAudioStream_requestStop().
+     */
+    AAUDIO_CALLBACK_RESULT_STOP,
+
+};
+typedef int32_t aaudio_data_callback_result_t;
+
+/**
+ * Prototype for the data function that is passed to AAudioStreamBuilder_setDataCallback().
+ *
+ * For an output stream, this function should render and write numFrames of data
+ * in the stream's current data format to the audioData buffer.
+ *
+ * For an input stream, this function should read and process numFrames of data
+ * from the audioData buffer.
+ *
+ * Note that this callback function should be considered a "real-time" function.
+ * It must not do anything that could cause an unbounded delay because that can cause the
+ * audio to glitch or pop.
+ *
+ * These are things the function should NOT do:
+ * <ul>
+ * <li>allocate memory using, for example, malloc() or new</li>
+ * <li>any file operations such as opening, closing, reading or writing</li>
+ * <li>any network operations such as streaming</li>
+ * <li>use any mutexes or other synchronization primitives</li>
+ * <li>sleep</li>
+ * </ul>
+ *
+ * If you need to move data, e.g. MIDI commands, in or out of the callback function then
+ * we recommend the use of non-blocking techniques such as an atomic FIFO.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @param userData the same address that was passed to AAudioStreamBuilder_setDataCallback()
+ * @param audioData a pointer to the audio data
+ * @param numFrames the number of frames to be processed
+ * @return AAUDIO_CALLBACK_RESULT_*
+ */
+typedef aaudio_data_callback_result_t (*AAudioStream_dataCallback)(
+        AAudioStream *stream,
+        void *userData,
+        void *audioData,
+        int32_t numFrames);
+
+/**
+ * Request that AAudio call this function when the stream is running.
+ *
+ * Note that when using this callback, the audio data will be passed in or out
+ * of the function as an argument.
+ * So you cannot call AAudioStream_write() or AAudioStream_read() on the same stream
+ * that has an active data callback.
+ *
+ * The callback function will start being called after AAudioStream_requestStart() is called.
+ * It will stop being called after AAudioStream_requestPause() or
+ * AAudioStream_requestStop() is called.
+ *
+ * This callback function will be called on a real-time thread owned by AAudio. See
+ * {@link AAudioStream_dataCallback} for more information.
+ *
+ * Note that the AAudio callbacks will never be called simultaneously from multiple threads.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param callback pointer to a function that will process audio data.
+ * @param userData pointer to an application data structure that will be passed
+ *          to the callback functions.
+ */
+AAUDIO_API void AAudioStreamBuilder_setDataCallback(AAudioStreamBuilder* builder,
+                                                 AAudioStream_dataCallback callback,
+                                                 void *userData);
+
+/**
+ * Set the requested data callback buffer size in frames.
+ * See {@link AAudioStream_dataCallback}.
+ *
+ * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
+ *
+ * For the lowest possible latency, do not call this function. AAudio will then
+ * call the data callback function with whatever size is optimal.
+ * That size may vary from one callback to another.
+ *
+ * Only use this function if the application requires a specific number of frames for processing.
+ * The application might, for example, be using an FFT that requires
+ * a specific power-of-two sized buffer.
+ *
+ * AAudio may need to add additional buffering in order to adapt between the internal
+ * buffer size and the requested buffer size.
+ *
+ * If you do call this function then the requested size should be less than
+ * half the buffer capacity, to allow double buffering.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param numFrames the desired buffer size in frames or AAUDIO_UNSPECIFIED
+ */
+AAUDIO_API void AAudioStreamBuilder_setFramesPerDataCallback(AAudioStreamBuilder* builder,
+                                                             int32_t numFrames);
+
+/**
+ * Prototype for the callback function that is passed to
+ * AAudioStreamBuilder_setErrorCallback().
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @param userData the same address that was passed to AAudioStreamBuilder_setErrorCallback()
+ * @param error an AAUDIO_ERROR_* value.
+ */
+typedef void (*AAudioStream_errorCallback)(
+        AAudioStream *stream,
+        void *userData,
+        aaudio_result_t error);
+
+/**
+ * Request that AAudio call this function if any error occurs on a callback thread.
+ *
+ * It will be called, for example, if a headset or a USB device is unplugged causing the stream's
+ * device to be unavailable.
+ * In response, this function could signal or launch another thread to reopen a
+ * stream on another device. Do not reopen the stream in this callback.
+ *
+ * This will not be called because of actions by the application, such as stopping
+ * or closing a stream.
+ *
+ * Another possible cause of error would be a timeout or an unanticipated internal error.
+ *
+ * Note that the AAudio callbacks will never be called simultaneously from multiple threads.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param callback pointer to a function that will be called if an error occurs.
+ * @param userData pointer to an application data structure that will be passed
+ *          to the callback functions.
+ */
+AAUDIO_API void AAudioStreamBuilder_setErrorCallback(AAudioStreamBuilder* builder,
+                                                AAudioStream_errorCallback callback,
+                                                void *userData);
 
 /**
  * Open a stream based on the options in the StreamBuilder.
@@ -324,9 +499,14 @@
 // High priority audio threads
 // ============================================================
 
+/**
+ * @deprecated Use AAudioStreamBuilder_setDataCallback()
+ */
 typedef void *(*aaudio_audio_thread_proc_t)(void *);
 
 /**
+ * @deprecated Use AAudioStreamBuilder_setDataCallback()
+ *
  * Create a thread associated with a stream. The thread has special properties for
  * low latency audio performance. This thread can be used to implement a callback API.
  *
@@ -351,6 +531,8 @@
                                      void *arg);
 
 /**
+ * @deprecated Use AAudioStreamBuilder_setDataCallback()
+ *
  * Wait until the thread exits or an error occurs.
  *
  * @param stream A stream created using AAudioStreamBuilder_openStream().
@@ -379,11 +561,11 @@
  * Call AAudioStream_getBufferSizeInFrames() to see what the actual final size is.
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
- * @param requestedFrames requested number of frames that can be filled without blocking
+ * @param numFrames requested number of frames that can be filled without blocking
  * @return actual buffer size in frames or a negative error
  */
 AAUDIO_API aaudio_result_t AAudioStream_setBufferSizeInFrames(AAudioStream* stream,
-                                                      int32_t requestedFrames);
+                                                      int32_t numFrames);
 
 /**
  * Query the maximum number of frames that can be filled without blocking.
@@ -412,11 +594,32 @@
  * Query maximum buffer capacity in frames.
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
- * @return  the buffer capacity in frames
+ * @return  buffer capacity in frames
  */
 AAUDIO_API int32_t AAudioStream_getBufferCapacityInFrames(AAudioStream* stream);
 
 /**
+ * Query the size of the buffer that will be passed to the data callback
+ * in the numFrames parameter.
+ *
+ * This call can be used if the application needs to know the value of numFrames before
+ * the stream is started. This is not normally necessary.
+ *
+ * If a specific size was requested by calling AAudioStreamBuilder_setFramesPerDataCallback()
+ * then this will be the same size.
+ *
+ * If AAudioStreamBuilder_setFramesPerDataCallback() was not called then this will
+ * return the size chosen by AAudio, or AAUDIO_UNSPECIFIED.
+ *
+ * AAUDIO_UNSPECIFIED indicates that the callback buffer size for this stream
+ * may vary from one data callback to the next.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return callback buffer size in frames or AAUDIO_UNSPECIFIED
+ */
+AAUDIO_API int32_t AAudioStream_getFramesPerDataCallback(AAudioStream* stream);
+
+/**
  * An XRun is an Underrun or an Overrun.
  * During playing, an underrun will occur if the stream is not written in time
  * and the system runs out of valid data.
@@ -525,3 +728,5 @@
 #endif
 
 #endif //AAUDIO_AAUDIO_H
+
+/** @} */
diff --git a/media/libaaudio/include/aaudio/AAudioDefinitions.h b/media/libaaudio/include/aaudio/AAudioDefinitions.h
index 846318c..57e3dbd 100644
--- a/media/libaaudio/include/aaudio/AAudioDefinitions.h
+++ b/media/libaaudio/include/aaudio/AAudioDefinitions.h
@@ -14,6 +14,15 @@
  * limitations under the License.
  */
 
+/**
+ * @addtogroup Audio
+ * @{
+ */
+
+/**
+ * @file AAudioDefinitions.h
+ */
+
 #ifndef AAUDIO_AAUDIODEFINITIONS_H
 #define AAUDIO_AAUDIODEFINITIONS_H
 
@@ -30,7 +39,7 @@
  * and would accept whatever it was given.
  */
 #define AAUDIO_UNSPECIFIED           0
-#define AAUDIO_DEVICE_UNSPECIFIED    ((int32_t) -1)
+#define AAUDIO_DEVICE_UNSPECIFIED    0
 
 enum {
     AAUDIO_DIRECTION_OUTPUT,
@@ -73,9 +82,10 @@
     AAUDIO_ERROR_NULL,
     AAUDIO_ERROR_TIMEOUT,
     AAUDIO_ERROR_WOULD_BLOCK,
-    AAUDIO_ERROR_INVALID_ORDER,
+    AAUDIO_ERROR_INVALID_FORMAT,
     AAUDIO_ERROR_OUT_OF_RANGE,
-    AAUDIO_ERROR_NO_SERVICE
+    AAUDIO_ERROR_NO_SERVICE,
+    AAUDIO_ERROR_INVALID_RATE
 };
 typedef int32_t  aaudio_result_t;
 
@@ -94,9 +104,11 @@
     AAUDIO_STREAM_STATE_STOPPED,
     AAUDIO_STREAM_STATE_CLOSING,
     AAUDIO_STREAM_STATE_CLOSED,
+    AAUDIO_STREAM_STATE_DISCONNECTED
 };
 typedef int32_t aaudio_stream_state_t;
 
+
 enum {
     /**
      * This will be the only stream using a particular source or sink.
@@ -117,3 +129,5 @@
 #endif
 
 #endif // AAUDIO_AAUDIODEFINITIONS_H
+
+/** @} */
diff --git a/media/libaaudio/libaaudio.map.txt b/media/libaaudio/libaaudio.map.txt
index a9e9109..f22fdfe 100644
--- a/media/libaaudio/libaaudio.map.txt
+++ b/media/libaaudio/libaaudio.map.txt
@@ -4,6 +4,9 @@
     AAudio_convertStreamStateToText;
     AAudio_createStreamBuilder;
     AAudioStreamBuilder_setDeviceId;
+    AAudioStreamBuilder_setDataCallback;
+    AAudioStreamBuilder_setErrorCallback;
+    AAudioStreamBuilder_setFramesPerDataCallback;
     AAudioStreamBuilder_setSampleRate;
     AAudioStreamBuilder_setSamplesPerFrame;
     AAudioStreamBuilder_setFormat;
@@ -25,6 +28,7 @@
     AAudioStream_joinThread;
     AAudioStream_setBufferSizeInFrames;
     AAudioStream_getBufferSizeInFrames;
+    AAudioStream_getFramesPerDataCallback;
     AAudioStream_getFramesPerBurst;
     AAudioStream_getBufferCapacityInFrames;
     AAudioStream_getXRunCount;
diff --git a/media/libaaudio/src/Android.mk b/media/libaaudio/src/Android.mk
index a016b49..b5bb75f 100644
--- a/media/libaaudio/src/Android.mk
+++ b/media/libaaudio/src/Android.mk
@@ -26,26 +26,32 @@
     $(LOCAL_PATH)/legacy \
     $(LOCAL_PATH)/utility
 
+# If you add a file here then also add it below in the SHARED target
 LOCAL_SRC_FILES = \
     core/AudioStream.cpp \
     core/AudioStreamBuilder.cpp \
     core/AAudioAudio.cpp \
+    legacy/AudioStreamLegacy.cpp \
     legacy/AudioStreamRecord.cpp \
     legacy/AudioStreamTrack.cpp \
     utility/HandleTracker.cpp \
     utility/AAudioUtilities.cpp \
+    utility/FixedBlockAdapter.cpp \
+    utility/FixedBlockReader.cpp \
+    utility/FixedBlockWriter.cpp \
     fifo/FifoBuffer.cpp \
     fifo/FifoControllerBase.cpp \
     client/AudioEndpoint.cpp \
     client/AudioStreamInternal.cpp \
     client/IsochronousClockModel.cpp \
-    binding/SharedMemoryParcelable.cpp \
-    binding/SharedRegionParcelable.cpp \
-    binding/RingBufferParcelable.cpp \
     binding/AudioEndpointParcelable.cpp \
+    binding/AAudioBinderClient.cpp \
     binding/AAudioStreamRequest.cpp \
     binding/AAudioStreamConfiguration.cpp \
-    binding/IAAudioService.cpp
+    binding/IAAudioService.cpp \
+    binding/RingBufferParcelable.cpp \
+    binding/SharedMemoryParcelable.cpp \
+    binding/SharedRegionParcelable.cpp
 
 LOCAL_CFLAGS += -Wno-unused-parameter -Wall -Werror
 
@@ -79,22 +85,27 @@
 LOCAL_SRC_FILES = core/AudioStream.cpp \
     core/AudioStreamBuilder.cpp \
     core/AAudioAudio.cpp \
+    legacy/AudioStreamLegacy.cpp \
     legacy/AudioStreamRecord.cpp \
     legacy/AudioStreamTrack.cpp \
     utility/HandleTracker.cpp \
     utility/AAudioUtilities.cpp \
+    utility/FixedBlockAdapter.cpp \
+    utility/FixedBlockReader.cpp \
+    utility/FixedBlockWriter.cpp \
     fifo/FifoBuffer.cpp \
     fifo/FifoControllerBase.cpp \
     client/AudioEndpoint.cpp \
     client/AudioStreamInternal.cpp \
     client/IsochronousClockModel.cpp \
-    binding/SharedMemoryParcelable.cpp \
-    binding/SharedRegionParcelable.cpp \
-    binding/RingBufferParcelable.cpp \
     binding/AudioEndpointParcelable.cpp \
+    binding/AAudioBinderClient.cpp \
     binding/AAudioStreamRequest.cpp \
     binding/AAudioStreamConfiguration.cpp \
-    binding/IAAudioService.cpp
+    binding/IAAudioService.cpp \
+    binding/RingBufferParcelable.cpp \
+    binding/SharedMemoryParcelable.cpp \
+    binding/SharedRegionParcelable.cpp
 
 LOCAL_CFLAGS += -Wno-unused-parameter -Wall -Werror
 
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.cpp b/media/libaaudio/src/binding/AAudioBinderClient.cpp
new file mode 100644
index 0000000..8315c40
--- /dev/null
+++ b/media/libaaudio/src/binding/AAudioBinderClient.cpp
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <binder/IServiceManager.h>
+#include <utils/Mutex.h>
+#include <utils/RefBase.h>
+
+#include <aaudio/AAudio.h>
+
+#include "AudioEndpointParcelable.h"
+#include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioStreamConfiguration.h"
+#include "binding/IAAudioService.h"
+#include "binding/AAudioServiceMessage.h"
+
+#include "AAudioBinderClient.h"
+#include "AAudioServiceInterface.h"
+
+using android::String16;
+using android::IServiceManager;
+using android::defaultServiceManager;
+using android::interface_cast;
+using android::IAAudioService;
+using android::Mutex;
+using android::sp;
+
+using namespace aaudio;
+
+static android::Mutex gServiceLock;
+static sp<IAAudioService>  gAAudioService;
+
+// TODO Share code with other service clients.
+// Helper function to get access to the "AAudioService" service.
+// This code was modeled after frameworks/av/media/libaudioclient/AudioSystem.cpp
+static const sp<IAAudioService> getAAudioService() {
+    sp<IBinder> binder;
+    Mutex::Autolock _l(gServiceLock);
+    if (gAAudioService == 0) {
+        sp<IServiceManager> sm = defaultServiceManager();
+        // Try several times to get the service.
+        int retries = 4;
+        do {
+            binder = sm->getService(String16(AAUDIO_SERVICE_NAME)); // This will wait a while.
+            if (binder != 0) {
+                break;
+            }
+        } while (retries-- > 0);
+
+        if (binder != 0) {
+            // TODO Add linkToDeath() like in frameworks/av/media/libaudioclient/AudioSystem.cpp
+            // TODO Create a DeathRecipient that disconnects all active streams.
+            gAAudioService = interface_cast<IAAudioService>(binder);
+        } else {
+            ALOGE("AudioStreamInternal could not get %s", AAUDIO_SERVICE_NAME);
+        }
+    }
+    return gAAudioService;
+}
+
+
+AAudioBinderClient::AAudioBinderClient()
+        : AAudioServiceInterface() {}
+
+AAudioBinderClient::~AAudioBinderClient() {}
+
+/**
+* @param request info needed to create the stream
+* @param configuration contains information about the created stream
+* @return handle to the stream or a negative error
+*/
+aaudio_handle_t AAudioBinderClient::openStream(const AAudioStreamRequest &request,
+                                               AAudioStreamConfiguration &configurationOutput) {
+
+    const sp<IAAudioService> &service = getAAudioService();
+    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+    return service->openStream(request, configurationOutput);
+}
+
+aaudio_result_t AAudioBinderClient::closeStream(aaudio_handle_t streamHandle) {
+
+    const sp<IAAudioService> &service = getAAudioService();
+    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+    return service->closeStream(streamHandle);
+}
+
+/* Get an immutable description of the in-memory queues
+* used to communicate with the underlying HAL or Service.
+*/
+aaudio_result_t AAudioBinderClient::getStreamDescription(aaudio_handle_t streamHandle,
+                                                         AudioEndpointParcelable &parcelable) {
+
+    const sp<IAAudioService> &service = getAAudioService();
+    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+    return service->getStreamDescription(streamHandle, parcelable);
+}
+
+/**
+* Start the flow of data.
+*/
+aaudio_result_t AAudioBinderClient::startStream(aaudio_handle_t streamHandle) {
+    const sp<IAAudioService> &service = getAAudioService();
+    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+    return service->startStream(streamHandle);
+}
+
+/**
+* Stop the flow of data such that start() can resume without loss of data.
+*/
+aaudio_result_t AAudioBinderClient::pauseStream(aaudio_handle_t streamHandle) {
+    const sp<IAAudioService> &service = getAAudioService();
+    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+    return service->pauseStream(streamHandle);
+}
+
+/**
+*  Discard any data held by the underlying HAL or Service.
+*/
+aaudio_result_t AAudioBinderClient::flushStream(aaudio_handle_t streamHandle) {
+    const sp<IAAudioService> &service = getAAudioService();
+    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+    return service->flushStream(streamHandle);
+}
+
+/**
+* Manage the specified thread as a low latency audio thread.
+*/
+aaudio_result_t AAudioBinderClient::registerAudioThread(aaudio_handle_t streamHandle,
+                                                        pid_t clientProcessId,
+                                                        pid_t clientThreadId,
+                                                        int64_t periodNanoseconds) {
+    const sp<IAAudioService> &service = getAAudioService();
+    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+    return service->registerAudioThread(streamHandle,
+                                        clientProcessId,
+                                        clientThreadId,
+                                        periodNanoseconds);
+}
+
+aaudio_result_t AAudioBinderClient::unregisterAudioThread(aaudio_handle_t streamHandle,
+                                                          pid_t clientProcessId,
+                                                          pid_t clientThreadId) {
+    const sp<IAAudioService> &service = getAAudioService();
+    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+    return service->unregisterAudioThread(streamHandle,
+                                          clientProcessId,
+                                          clientThreadId);
+}
+
+
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.h b/media/libaaudio/src/binding/AAudioBinderClient.h
new file mode 100644
index 0000000..5613d5b
--- /dev/null
+++ b/media/libaaudio/src/binding/AAudioBinderClient.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_BINDER_CLIENT_H
+#define AAUDIO_AAUDIO_BINDER_CLIENT_H
+
+#include <aaudio/AAudioDefinitions.h>
+#include "AAudioServiceDefinitions.h"
+#include "AAudioServiceInterface.h"
+#include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioStreamConfiguration.h"
+#include "binding/AudioEndpointParcelable.h"
+
+/**
+ * Implements the AAudioServiceInterface by talking to the actual service through Binder.
+ */
+
+namespace aaudio {
+
+class AAudioBinderClient : public AAudioServiceInterface {
+
+public:
+
+    AAudioBinderClient();
+
+    virtual ~AAudioBinderClient();
+
+    /**
+     * @param request info needed to create the stream
+     * @param configuration contains resulting information about the created stream
+     * @return handle to the stream or a negative error
+     */
+    aaudio_handle_t openStream(const AAudioStreamRequest &request,
+                               AAudioStreamConfiguration &configurationOutput) override;
+
+    aaudio_result_t closeStream(aaudio_handle_t streamHandle) override;
+
+    /* Get an immutable description of the in-memory queues
+    * used to communicate with the underlying HAL or Service.
+    */
+    aaudio_result_t getStreamDescription(aaudio_handle_t streamHandle,
+                                                 AudioEndpointParcelable &parcelable) override;
+
+    /**
+     * Start the flow of data.
+     * This is asynchronous. When complete, the service will send a STARTED event.
+     */
+    aaudio_result_t startStream(aaudio_handle_t streamHandle) override;
+
+    /**
+     * Stop the flow of data such that start() can resume without loss of data.
+     * This is asynchronous. When complete, the service will send a PAUSED event.
+     */
+    aaudio_result_t pauseStream(aaudio_handle_t streamHandle) override;
+
+    /**
+     *  Discard any data held by the underlying HAL or Service.
+     * This is asynchronous. When complete, the service will send a FLUSHED event.
+     */
+    aaudio_result_t flushStream(aaudio_handle_t streamHandle) override;
+
+    /**
+     * Manage the specified thread as a low latency audio thread.
+     * TODO Consider passing this information as part of the startStream() call.
+     */
+    aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
+                                                pid_t clientProcessId,
+                                                pid_t clientThreadId,
+                                                int64_t periodNanoseconds) override;
+
+    aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
+                                                  pid_t clientProcessId,
+                                                  pid_t clientThreadId) override;
+};
+
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_AAUDIO_BINDER_CLIENT_H
diff --git a/media/libaaudio/src/binding/AAudioServiceDefinitions.h b/media/libaaudio/src/binding/AAudioServiceDefinitions.h
index b58d170..0d5bae5 100644
--- a/media/libaaudio/src/binding/AAudioServiceDefinitions.h
+++ b/media/libaaudio/src/binding/AAudioServiceDefinitions.h
@@ -48,25 +48,6 @@
 
 #define AAUDIO_HANDLE_INVALID  ((aaudio_handle_t) -1)
 
-enum aaudio_commands_t {
-    OPEN_STREAM = IBinder::FIRST_CALL_TRANSACTION,
-    CLOSE_STREAM,
-    GET_STREAM_DESCRIPTION,
-    START_STREAM,
-    PAUSE_STREAM,
-    FLUSH_STREAM,
-    REGISTER_AUDIO_THREAD,
-    UNREGISTER_AUDIO_THREAD
-};
-
-// TODO Expand this to include all the open parameters.
-typedef struct AAudioServiceStreamInfo_s {
-    int32_t               deviceId;
-    int32_t               samplesPerFrame;  // number of channels
-    int32_t               sampleRate;
-    aaudio_audio_format_t audioFormat;
-} AAudioServiceStreamInfo;
-
 // This must be a fixed width so it can be in shared memory.
 enum RingbufferFlags : uint32_t {
     NONE = 0,
diff --git a/media/libaaudio/src/binding/AAudioServiceInterface.h b/media/libaaudio/src/binding/AAudioServiceInterface.h
new file mode 100644
index 0000000..62fd894
--- /dev/null
+++ b/media/libaaudio/src/binding/AAudioServiceInterface.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_BINDING_AAUDIO_SERVICE_INTERFACE_H
+#define AAUDIO_BINDING_AAUDIO_SERVICE_INTERFACE_H
+
+#include "binding/AAudioServiceDefinitions.h"
+#include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioStreamConfiguration.h"
+#include "binding/AudioEndpointParcelable.h"
+
+/**
+ * This has the same methods as IAAudioService but without the Binder features.
+ *
+ * It allows us to abstract the Binder interface and use an AudioStreamInternal
+ * both in the client and in the service.
+ */
+namespace aaudio {
+
+class AAudioServiceInterface {
+public:
+
+    AAudioServiceInterface() {};
+    virtual ~AAudioServiceInterface() = default;
+
+    /**
+     * @param request info needed to create the stream
+     * @param configuration contains information about the created stream
+     * @return handle to the stream or a negative error
+     */
+    virtual aaudio_handle_t openStream(const AAudioStreamRequest &request,
+                                       AAudioStreamConfiguration &configuration) = 0;
+
+    virtual aaudio_result_t closeStream(aaudio_handle_t streamHandle) = 0;
+
+    /* Get an immutable description of the in-memory queues
+    * used to communicate with the underlying HAL or Service.
+    */
+    virtual aaudio_result_t getStreamDescription(aaudio_handle_t streamHandle,
+                                                 AudioEndpointParcelable &parcelable) = 0;
+
+    /**
+     * Start the flow of data.
+     */
+    virtual aaudio_result_t startStream(aaudio_handle_t streamHandle) = 0;
+
+    /**
+     * Stop the flow of data such that start() can resume without loss of data.
+     */
+    virtual aaudio_result_t pauseStream(aaudio_handle_t streamHandle) = 0;
+
+    /**
+     *  Discard any data held by the underlying HAL or Service.
+     */
+    virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle) = 0;
+
+    /**
+     * Manage the specified thread as a low latency audio thread.
+     */
+    virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
+                                                pid_t clientProcessId,
+                                                pid_t clientThreadId,
+                                                int64_t periodNanoseconds) = 0;
+
+    virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
+                                                  pid_t clientProcessId,
+                                                  pid_t clientThreadId) = 0;
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_BINDING_AAUDIO_SERVICE_INTERFACE_H
diff --git a/media/libaaudio/src/binding/AAudioServiceMessage.h b/media/libaaudio/src/binding/AAudioServiceMessage.h
index cc77d59..b74b6c2 100644
--- a/media/libaaudio/src/binding/AAudioServiceMessage.h
+++ b/media/libaaudio/src/binding/AAudioServiceMessage.h
@@ -25,10 +25,11 @@
 
 // TODO move this to an "include" folder for the service.
 
+// Used to send information about the HAL to the client.
 struct AAudioMessageTimestamp {
-    int64_t    position;
-    int64_t    deviceOffset; // add to client position to get device position
-    int64_t    timestamp;
+    int64_t position;     // number of frames transferred so far
+    int64_t deviceOffset; // add to client position to get device position
+    int64_t timestamp;    // time when that position was reached
 };
 
 typedef enum aaudio_service_event_e : uint32_t {
@@ -36,13 +37,14 @@
     AAUDIO_SERVICE_EVENT_PAUSED,
     AAUDIO_SERVICE_EVENT_FLUSHED,
     AAUDIO_SERVICE_EVENT_CLOSED,
-    AAUDIO_SERVICE_EVENT_DISCONNECTED
+    AAUDIO_SERVICE_EVENT_DISCONNECTED,
+    AAUDIO_SERVICE_EVENT_VOLUME
 } aaudio_service_event_t;
 
 struct AAudioMessageEvent {
     aaudio_service_event_t event;
-    int32_t                data1;
-    int64_t                data2;
+    double                 dataDouble;
+    int64_t                dataLong;
 };
 
 typedef struct AAudioServiceMessage_s {
@@ -54,12 +56,11 @@
 
     code what;
     union {
-        AAudioMessageTimestamp timestamp;
-        AAudioMessageEvent event;
+        AAudioMessageTimestamp timestamp; // what == TIMESTAMP
+        AAudioMessageEvent event;         // what == EVENT
     };
 } AAudioServiceMessage;
 
-
 } /* namespace aaudio */
 
 #endif //AAUDIO_AAUDIO_SERVICE_MESSAGE_H
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
index fe3a59f..ba41a3b 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
@@ -35,26 +35,50 @@
 AAudioStreamConfiguration::~AAudioStreamConfiguration() {}
 
 status_t AAudioStreamConfiguration::writeToParcel(Parcel* parcel) const {
-    parcel->writeInt32(mDeviceId);
-    parcel->writeInt32(mSampleRate);
-    parcel->writeInt32(mSamplesPerFrame);
-    parcel->writeInt32((int32_t) mAudioFormat);
-    parcel->writeInt32(mBufferCapacity);
-    return NO_ERROR; // TODO check for errors above
+    status_t status;
+    status = parcel->writeInt32(mDeviceId);
+    if (status != NO_ERROR) goto error;
+    status = parcel->writeInt32(mSampleRate);
+    if (status != NO_ERROR) goto error;
+    status = parcel->writeInt32(mSamplesPerFrame);
+    if (status != NO_ERROR) goto error;
+    status = parcel->writeInt32((int32_t) mSharingMode);
+    ALOGD("AAudioStreamConfiguration.writeToParcel(): mSharingMode = %d", mSharingMode);
+    if (status != NO_ERROR) goto error;
+    status = parcel->writeInt32((int32_t) mAudioFormat);
+    if (status != NO_ERROR) goto error;
+    status = parcel->writeInt32(mBufferCapacity);
+    if (status != NO_ERROR) goto error;
+    return NO_ERROR;
+error:
+    ALOGE("AAudioStreamConfiguration.writeToParcel(): write failed = %d", status);
+    return status;
 }
 
 status_t AAudioStreamConfiguration::readFromParcel(const Parcel* parcel) {
     int32_t temp;
-    parcel->readInt32(&mDeviceId);
-    parcel->readInt32(&mSampleRate);
-    parcel->readInt32(&mSamplesPerFrame);
-    parcel->readInt32(&temp);
+    status_t status = parcel->readInt32(&mDeviceId);
+    if (status != NO_ERROR) goto error;
+    status = parcel->readInt32(&mSampleRate);
+    if (status != NO_ERROR) goto error;
+    status = parcel->readInt32(&mSamplesPerFrame);
+    if (status != NO_ERROR) goto error;
+    status = parcel->readInt32(&temp);
+    if (status != NO_ERROR) goto error;
+    mSharingMode = (aaudio_sharing_mode_t) temp;
+    ALOGD("AAudioStreamConfiguration.readFromParcel(): mSharingMode = %d", mSharingMode);
+    status = parcel->readInt32(&temp);
+    if (status != NO_ERROR) goto error;
     mAudioFormat = (aaudio_audio_format_t) temp;
-    parcel->readInt32(&mBufferCapacity);
-    return NO_ERROR; // TODO check for errors above
+    status = parcel->readInt32(&mBufferCapacity);
+    if (status != NO_ERROR) goto error;
+    return NO_ERROR;
+error:
+    ALOGE("AAudioStreamConfiguration.readFromParcel(): read failed = %d", status);
+    return status;
 }
 
-aaudio_result_t AAudioStreamConfiguration::validate() {
+aaudio_result_t AAudioStreamConfiguration::validate() const {
     // Validate results of the open.
     if (mSampleRate < 0 || mSampleRate >= 8 * 48000) { // TODO review limits
         ALOGE("AAudioStreamConfiguration.validate(): invalid sampleRate = %d", mSampleRate);
@@ -84,9 +108,11 @@
     return AAUDIO_OK;
 }
 
-void AAudioStreamConfiguration::dump() {
-    ALOGD("AAudioStreamConfiguration mSampleRate      = %d -----", mSampleRate);
+void AAudioStreamConfiguration::dump() const {
+    ALOGD("AAudioStreamConfiguration mDeviceId        = %d", mDeviceId);
+    ALOGD("AAudioStreamConfiguration mSampleRate      = %d", mSampleRate);
     ALOGD("AAudioStreamConfiguration mSamplesPerFrame = %d", mSamplesPerFrame);
+    ALOGD("AAudioStreamConfiguration mSharingMode     = %d", (int)mSharingMode);
     ALOGD("AAudioStreamConfiguration mAudioFormat     = %d", (int)mAudioFormat);
     ALOGD("AAudioStreamConfiguration mBufferCapacity  = %d", mBufferCapacity);
 }
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.h b/media/libaaudio/src/binding/AAudioStreamConfiguration.h
index 57b1c59..b68d8b2 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.h
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.h
@@ -66,6 +66,14 @@
         mAudioFormat = audioFormat;
     }
 
+    aaudio_sharing_mode_t getSharingMode() const {
+        return mSharingMode;
+    }
+
+    void setSharingMode(aaudio_sharing_mode_t sharingMode) {
+        mSharingMode = sharingMode;
+    }
+
     int32_t getBufferCapacity() const {
         return mBufferCapacity;
     }
@@ -78,14 +86,15 @@
 
     virtual status_t readFromParcel(const Parcel* parcel) override;
 
-    aaudio_result_t validate();
+    aaudio_result_t validate() const;
 
-    void dump();
+    void dump() const;
 
-protected:
+private:
     int32_t               mDeviceId        = AAUDIO_DEVICE_UNSPECIFIED;
     int32_t               mSampleRate      = AAUDIO_UNSPECIFIED;
     int32_t               mSamplesPerFrame = AAUDIO_UNSPECIFIED;
+    aaudio_sharing_mode_t mSharingMode     = AAUDIO_SHARING_MODE_SHARED;
     aaudio_audio_format_t mAudioFormat     = AAUDIO_FORMAT_UNSPECIFIED;
     int32_t               mBufferCapacity  = AAUDIO_UNSPECIFIED;
 };
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.cpp b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
index 5202b73..b8a0429 100644
--- a/media/libaaudio/src/binding/AAudioStreamRequest.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
@@ -14,6 +14,10 @@
  * limitations under the License.
  */
 
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
 #include <stdint.h>
 
 #include <sys/mman.h>
@@ -39,28 +43,48 @@
 AAudioStreamRequest::~AAudioStreamRequest() {}
 
 status_t AAudioStreamRequest::writeToParcel(Parcel* parcel) const {
-    parcel->writeInt32((int32_t) mUserId);
-    parcel->writeInt32((int32_t) mProcessId);
-    mConfiguration.writeToParcel(parcel);
-    return NO_ERROR; // TODO check for errors above
+    status_t status = parcel->writeInt32((int32_t) mUserId);
+    if (status != NO_ERROR) goto error;
+    status = parcel->writeInt32((int32_t) mProcessId);
+    if (status != NO_ERROR) goto error;
+    status = parcel->writeInt32((int32_t) mDirection);
+    if (status != NO_ERROR) goto error;
+    status = mConfiguration.writeToParcel(parcel);
+    if (status != NO_ERROR) goto error;
+    return NO_ERROR;
+
+error:
+    ALOGE("AAudioStreamRequest.writeToParcel(): write failed = %d", status);
+    return status;
 }
 
 status_t AAudioStreamRequest::readFromParcel(const Parcel* parcel) {
     int32_t temp;
-    parcel->readInt32(&temp);
+    status_t status = parcel->readInt32(&temp);
+    if (status != NO_ERROR) goto error;
     mUserId = (uid_t) temp;
-    parcel->readInt32(&temp);
+    status = parcel->readInt32(&temp);
+    if (status != NO_ERROR) goto error;
     mProcessId = (pid_t) temp;
-    mConfiguration.readFromParcel(parcel);
-    return NO_ERROR; // TODO check for errors above
+    status = parcel->readInt32(&temp);
+    if (status != NO_ERROR) goto error;
+    mDirection = (aaudio_direction_t) temp;
+    status = mConfiguration.readFromParcel(parcel);
+    if (status != NO_ERROR) goto error;
+    return NO_ERROR;
+
+error:
+    ALOGE("AAudioStreamRequest.readFromParcel(): read failed = %d", status);
+    return status;
 }
 
-aaudio_result_t AAudioStreamRequest::validate() {
+aaudio_result_t AAudioStreamRequest::validate() const {
     return mConfiguration.validate();
 }
 
-void AAudioStreamRequest::dump() {
-    ALOGD("AAudioStreamRequest mUserId = %d -----", mUserId);
+void AAudioStreamRequest::dump() const {
+    ALOGD("AAudioStreamRequest mUserId    = %d", mUserId);
     ALOGD("AAudioStreamRequest mProcessId = %d", mProcessId);
+    ALOGD("AAudioStreamRequest mDirection = %d", mDirection);
     mConfiguration.dump();
 }
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.h b/media/libaaudio/src/binding/AAudioStreamRequest.h
index 0fd28ba..6546562 100644
--- a/media/libaaudio/src/binding/AAudioStreamRequest.h
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.h
@@ -52,6 +52,18 @@
         mProcessId = processId;
     }
 
+    aaudio_direction_t getDirection() const {
+        return mDirection;
+    }
+
+    void setDirection(aaudio_direction_t direction) {
+        mDirection = direction;
+    }
+
+    const AAudioStreamConfiguration &getConstantConfiguration() const {
+        return mConfiguration;
+    }
+
     AAudioStreamConfiguration &getConfiguration() {
         return mConfiguration;
     }
@@ -60,14 +72,15 @@
 
     virtual status_t readFromParcel(const Parcel* parcel) override;
 
-    aaudio_result_t validate();
+    aaudio_result_t validate() const;
 
-    void dump();
+    void dump() const;
 
 protected:
     AAudioStreamConfiguration  mConfiguration;
-    uid_t    mUserId;
-    pid_t    mProcessId;
+    uid_t                      mUserId;
+    pid_t                      mProcessId;
+    aaudio_direction_t         mDirection;
 };
 
 } /* namespace aaudio */
diff --git a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
index f40ee02..ee92ee3 100644
--- a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
@@ -14,11 +14,15 @@
  * limitations under the License.
  */
 
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
 #include <stdint.h>
 
-#include <sys/mman.h>
 #include <binder/Parcel.h>
 #include <binder/Parcelable.h>
+#include <utility/AAudioUtilities.h>
 
 #include "binding/AAudioServiceDefinitions.h"
 #include "binding/RingBufferParcelable.h"
@@ -82,13 +86,27 @@
 }
 
 aaudio_result_t AudioEndpointParcelable::resolve(EndpointDescriptor *descriptor) {
-    // TODO error check
-    mUpMessageQueueParcelable.resolve(mSharedMemories, &descriptor->upMessageQueueDescriptor);
-    mDownMessageQueueParcelable.resolve(mSharedMemories,
+    aaudio_result_t result = mUpMessageQueueParcelable.resolve(mSharedMemories,
+                                                           &descriptor->upMessageQueueDescriptor);
+    if (result != AAUDIO_OK) return result;
+    result = mDownMessageQueueParcelable.resolve(mSharedMemories,
                                         &descriptor->downMessageQueueDescriptor);
-    mUpDataQueueParcelable.resolve(mSharedMemories, &descriptor->upDataQueueDescriptor);
-    mDownDataQueueParcelable.resolve(mSharedMemories, &descriptor->downDataQueueDescriptor);
-    return AAUDIO_OK;
+    if (result != AAUDIO_OK) return result;
+
+    result = mUpDataQueueParcelable.resolve(mSharedMemories, &descriptor->upDataQueueDescriptor);
+    if (result != AAUDIO_OK) return result;
+    result = mDownDataQueueParcelable.resolve(mSharedMemories,
+                                              &descriptor->downDataQueueDescriptor);
+    return result;
+}
+
+aaudio_result_t AudioEndpointParcelable::close() {
+    int err = 0;
+    for (int i = 0; i < mNumSharedMemories; i++) {
+        int lastErr = mSharedMemories[i].close();
+        if (lastErr < 0) err = lastErr;
+    }
+    return AAudioConvert_androidToAAudioResult(err);
 }
 
 aaudio_result_t AudioEndpointParcelable::validate() {
@@ -100,6 +118,7 @@
     for (int i = 0; i < mNumSharedMemories; i++) {
         result = mSharedMemories[i].validate();
         if (result != AAUDIO_OK) {
+            ALOGE("AudioEndpointParcelable invalid mSharedMemories[%d] = %d", i, result);
             return result;
         }
     }
diff --git a/media/libaaudio/src/binding/AudioEndpointParcelable.h b/media/libaaudio/src/binding/AudioEndpointParcelable.h
index d4646d0..4a1cb72 100644
--- a/media/libaaudio/src/binding/AudioEndpointParcelable.h
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.h
@@ -57,6 +57,8 @@
 
     aaudio_result_t validate();
 
+    aaudio_result_t close();
+
     void dump();
 
 public: // TODO add getters
diff --git a/media/libaaudio/src/binding/IAAudioService.cpp b/media/libaaudio/src/binding/IAAudioService.cpp
index c21033e..20cbbc8 100644
--- a/media/libaaudio/src/binding/IAAudioService.cpp
+++ b/media/libaaudio/src/binding/IAAudioService.cpp
@@ -40,11 +40,13 @@
     {
     }
 
-    virtual aaudio_handle_t openStream(aaudio::AAudioStreamRequest &request,
-                                     aaudio::AAudioStreamConfiguration &configuration) override {
+    virtual aaudio_handle_t openStream(const aaudio::AAudioStreamRequest &request,
+                                     aaudio::AAudioStreamConfiguration &configurationOutput) override {
         Parcel data, reply;
         // send command
         data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+        ALOGE("BpAAudioService::client openStream request dump --------------------");
+        request.dump();
         request.writeToParcel(&data);
         status_t err = remote()->transact(OPEN_STREAM, data, &reply);
         if (err != NO_ERROR) {
@@ -53,7 +55,12 @@
         // parse reply
         aaudio_handle_t stream;
         reply.readInt32(&stream);
-        configuration.readFromParcel(&reply);
+        err = configurationOutput.readFromParcel(&reply);
+        if (err != NO_ERROR) {
+            ALOGE("BpAAudioService::client openStream readFromParcel failed %d", err);
+            closeStream(stream);
+            return AAudioConvert_androidToAAudioResult(err);
+        }
         return stream;
     }
 
@@ -80,16 +87,30 @@
         data.writeInt32(streamHandle);
         status_t err = remote()->transact(GET_STREAM_DESCRIPTION, data, &reply);
         if (err != NO_ERROR) {
+            ALOGE("BpAAudioService::client transact(GET_STREAM_DESCRIPTION) returns %d", err);
             return AAudioConvert_androidToAAudioResult(err);
         }
         // parse reply
-        parcelable.readFromParcel(&reply);
-        parcelable.dump();
-        aaudio_result_t result = parcelable.validate();
-        if (result != AAUDIO_OK) {
+        aaudio_result_t result;
+        err = reply.readInt32(&result);
+        if (err != NO_ERROR) {
+            ALOGE("BpAAudioService::client transact(GET_STREAM_DESCRIPTION) readInt %d", err);
+            return AAudioConvert_androidToAAudioResult(err);
+        } else if (result != AAUDIO_OK) {
+            ALOGE("BpAAudioService::client GET_STREAM_DESCRIPTION passed result %d", result);
             return result;
         }
-        reply.readInt32(&result);
+        err = parcelable.readFromParcel(&reply);
+        if (err != NO_ERROR) {
+            ALOGE("BpAAudioService::client transact(GET_STREAM_DESCRIPTION) read endpoint %d", err);
+            return AAudioConvert_androidToAAudioResult(err);
+        }
+        //parcelable.dump();
+        result = parcelable.validate();
+        if (result != AAUDIO_OK) {
+            ALOGE("BpAAudioService::client GET_STREAM_DESCRIPTION validation fails %d", result);
+            return result;
+        }
         return result;
     }
 
@@ -139,13 +160,16 @@
         return res;
     }
 
-    virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle, pid_t clientThreadId,
+    virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
+                                              pid_t clientProcessId,
+                                              pid_t clientThreadId,
                                               int64_t periodNanoseconds)
     override {
         Parcel data, reply;
         // send command
         data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
         data.writeInt32(streamHandle);
+        data.writeInt32((int32_t) clientProcessId);
         data.writeInt32((int32_t) clientThreadId);
         data.writeInt64(periodNanoseconds);
         status_t err = remote()->transact(REGISTER_AUDIO_THREAD, data, &reply);
@@ -158,12 +182,15 @@
         return res;
     }
 
-    virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle, pid_t clientThreadId)
+    virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
+                                                  pid_t clientProcessId,
+                                                  pid_t clientThreadId)
     override {
         Parcel data, reply;
         // send command
         data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
         data.writeInt32(streamHandle);
+        data.writeInt32((int32_t) clientProcessId);
         data.writeInt32((int32_t) clientThreadId);
         status_t err = remote()->transact(UNREGISTER_AUDIO_THREAD, data, &reply);
         if (err != NO_ERROR) {
@@ -178,7 +205,7 @@
 };
 
 // Implement an interface to the service.
-// This is here so that you don't have to link with liboboe static library.
+// This is here so that you don't have to link with libaaudio static library.
 IMPLEMENT_META_INTERFACE(AAudioService, "IAAudioService");
 
 // The order of parameters in the Parcels must match with code in BpAAudioService
@@ -189,6 +216,7 @@
     aaudio::AAudioStreamRequest request;
     aaudio::AAudioStreamConfiguration configuration;
     pid_t pid;
+    pid_t tid;
     int64_t nanoseconds;
     aaudio_result_t result;
     ALOGV("BnAAudioService::onTransact(%i) %i", code, flags);
@@ -197,8 +225,12 @@
     switch(code) {
         case OPEN_STREAM: {
             request.readFromParcel(&data);
+
+            ALOGD("BnAAudioService::client openStream request dump --------------------");
+            request.dump();
+
             stream = openStream(request, configuration);
-            ALOGD("BnAAudioService::onTransact OPEN_STREAM server handle = 0x%08X", stream);
+            ALOGV("BnAAudioService::onTransact OPEN_STREAM server handle = 0x%08X", stream);
             reply->writeInt32(stream);
             configuration.writeToParcel(reply);
             return NO_ERROR;
@@ -206,7 +238,7 @@
 
         case CLOSE_STREAM: {
             data.readInt32(&stream);
-            ALOGD("BnAAudioService::onTransact CLOSE_STREAM 0x%08X", stream);
+            ALOGV("BnAAudioService::onTransact CLOSE_STREAM 0x%08X", stream);
             result = closeStream(stream);
             reply->writeInt32(result);
             return NO_ERROR;
@@ -214,26 +246,28 @@
 
         case GET_STREAM_DESCRIPTION: {
             data.readInt32(&stream);
-            ALOGD("BnAAudioService::onTransact GET_STREAM_DESCRIPTION 0x%08X", stream);
+            ALOGI("BnAAudioService::onTransact GET_STREAM_DESCRIPTION 0x%08X", stream);
             aaudio::AudioEndpointParcelable parcelable;
             result = getStreamDescription(stream, parcelable);
+            ALOGI("BnAAudioService::onTransact getStreamDescription() returns %d", result);
             if (result != AAUDIO_OK) {
                 return AAudioConvert_aaudioToAndroidStatus(result);
             }
-            parcelable.dump();
             result = parcelable.validate();
             if (result != AAUDIO_OK) {
+                ALOGE("BnAAudioService::onTransact getStreamDescription() returns %d", result);
+                parcelable.dump();
                 return AAudioConvert_aaudioToAndroidStatus(result);
             }
-            parcelable.writeToParcel(reply);
             reply->writeInt32(result);
+            parcelable.writeToParcel(reply);
             return NO_ERROR;
         } break;
 
         case START_STREAM: {
             data.readInt32(&stream);
             result = startStream(stream);
-            ALOGD("BnAAudioService::onTransact START_STREAM 0x%08X, result = %d",
+            ALOGV("BnAAudioService::onTransact START_STREAM 0x%08X, result = %d",
                     stream, result);
             reply->writeInt32(result);
             return NO_ERROR;
@@ -242,7 +276,7 @@
         case PAUSE_STREAM: {
             data.readInt32(&stream);
             result = pauseStream(stream);
-            ALOGD("BnAAudioService::onTransact PAUSE_STREAM 0x%08X, result = %d",
+            ALOGV("BnAAudioService::onTransact PAUSE_STREAM 0x%08X, result = %d",
                     stream, result);
             reply->writeInt32(result);
             return NO_ERROR;
@@ -251,7 +285,7 @@
         case FLUSH_STREAM: {
             data.readInt32(&stream);
             result = flushStream(stream);
-            ALOGD("BnAAudioService::onTransact FLUSH_STREAM 0x%08X, result = %d",
+            ALOGV("BnAAudioService::onTransact FLUSH_STREAM 0x%08X, result = %d",
                     stream, result);
             reply->writeInt32(result);
             return NO_ERROR;
@@ -260,9 +294,10 @@
         case REGISTER_AUDIO_THREAD: {
             data.readInt32(&stream);
             data.readInt32(&pid);
+            data.readInt32(&tid);
             data.readInt64(&nanoseconds);
-            result = registerAudioThread(stream, pid, nanoseconds);
-            ALOGD("BnAAudioService::onTransact REGISTER_AUDIO_THREAD 0x%08X, result = %d",
+            result = registerAudioThread(stream, pid, tid, nanoseconds);
+            ALOGV("BnAAudioService::onTransact REGISTER_AUDIO_THREAD 0x%08X, result = %d",
                     stream, result);
             reply->writeInt32(result);
             return NO_ERROR;
@@ -271,8 +306,9 @@
         case UNREGISTER_AUDIO_THREAD: {
             data.readInt32(&stream);
             data.readInt32(&pid);
-            result = unregisterAudioThread(stream, pid);
-            ALOGD("BnAAudioService::onTransact UNREGISTER_AUDIO_THREAD 0x%08X, result = %d",
+            data.readInt32(&tid);
+            result = unregisterAudioThread(stream, pid, tid);
+            ALOGV("BnAAudioService::onTransact UNREGISTER_AUDIO_THREAD 0x%08X, result = %d",
                     stream, result);
             reply->writeInt32(result);
             return NO_ERROR;
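
For reference (not part of the patch): the REGISTER_AUDIO_THREAD case above now reads stream, pid, tid, then nanoseconds, so the proxy side has to marshal its arguments in exactly that order. A minimal sketch of how the hypothetical BpAAudioService::registerAudioThread() counterpart might look, assuming the REGISTER_AUDIO_THREAD enum and the AAudioConvert_androidToAAudioResult() helper used elsewhere in this patch:

    // Hypothetical proxy-side counterpart; the write order must mirror the
    // readInt32(stream) / readInt32(pid) / readInt32(tid) / readInt64(nanoseconds)
    // sequence in BnAAudioService::onTransact() above.
    aaudio_result_t BpAAudioService::registerAudioThread(aaudio_handle_t streamHandle,
                                                         pid_t clientProcessId,
                                                         pid_t clientThreadId,
                                                         int64_t periodNanoseconds) {
        Parcel data, reply;
        // Conventional Binder preamble; assumes the Bn side does CHECK_INTERFACE().
        data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
        data.writeInt32(streamHandle);
        data.writeInt32((int32_t) clientProcessId);
        data.writeInt32((int32_t) clientThreadId);
        data.writeInt64(periodNanoseconds);
        status_t err = remote()->transact(REGISTER_AUDIO_THREAD, data, &reply);
        if (err != NO_ERROR) {
            return AAudioConvert_androidToAAudioResult(err);
        }
        int32_t result;
        reply.readInt32(&result);
        return result;
    }
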
diff --git a/media/libaaudio/src/binding/IAAudioService.h b/media/libaaudio/src/binding/IAAudioService.h
index 53c3b45..ab7fd1b 100644
--- a/media/libaaudio/src/binding/IAAudioService.h
+++ b/media/libaaudio/src/binding/IAAudioService.h
@@ -28,9 +28,12 @@
 #include "binding/AudioEndpointParcelable.h"
 #include "binding/AAudioStreamRequest.h"
 #include "binding/AAudioStreamConfiguration.h"
+#include "utility/HandleTracker.h"
 
 namespace android {
 
+#define AAUDIO_SERVICE_NAME  "media.aaudio"
+
 // Interface (our AIDL) - Shared by server and client
 class IAAudioService : public IInterface {
 public:
@@ -42,8 +45,8 @@
      * @param configuration contains information about the created stream
      * @return handle to the stream or a negative error
      */
-    virtual aaudio::aaudio_handle_t openStream(aaudio::AAudioStreamRequest &request,
-                                     aaudio::AAudioStreamConfiguration &configuration) = 0;
+    virtual aaudio_handle_t openStream(const aaudio::AAudioStreamRequest &request,
+                                     aaudio::AAudioStreamConfiguration &configurationOutput) = 0;
 
     virtual aaudio_result_t closeStream(aaudio::aaudio_handle_t streamHandle) = 0;
 
@@ -55,26 +58,33 @@
 
     /**
      * Start the flow of data.
+     * This is asynchronous. When complete, the service will send a STARTED event.
      */
     virtual aaudio_result_t startStream(aaudio::aaudio_handle_t streamHandle) = 0;
 
     /**
      * Stop the flow of data such that start() can resume without loss of data.
+     * This is asynchronous. When complete, the service will send a PAUSED event.
      */
     virtual aaudio_result_t pauseStream(aaudio::aaudio_handle_t streamHandle) = 0;
 
     /**
      *  Discard any data held by the underlying HAL or Service.
+     * This is asynchronous. When complete, the service will send a FLUSHED event.
      */
     virtual aaudio_result_t flushStream(aaudio::aaudio_handle_t streamHandle) = 0;
 
     /**
      * Manage the specified thread as a low latency audio thread.
+     * TODO Consider passing this information as part of the startStream() call.
      */
-    virtual aaudio_result_t registerAudioThread(aaudio::aaudio_handle_t streamHandle, pid_t clientThreadId,
+    virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
+                                              pid_t clientProcessId,
+                                              pid_t clientThreadId,
                                               int64_t periodNanoseconds) = 0;
 
-    virtual aaudio_result_t unregisterAudioThread(aaudio::aaudio_handle_t streamHandle,
+    virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
+                                                pid_t clientProcessId,
                                                 pid_t clientThreadId) = 0;
 };
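
With AAUDIO_SERVICE_NAME now published in this header, a client can look the service up by name. A minimal sketch, modeled on the getAAudioService() helper that this patch removes from AudioStreamInternal.cpp; the retry loop and linkToDeath() handling from that helper are omitted, and the function name here is illustrative:

    #include <binder/IServiceManager.h>
    #include <binder/IInterface.h>
    #include <utils/String16.h>
    #include "binding/IAAudioService.h"

    // Illustrative lookup of the "media.aaudio" service.
    android::sp<android::IAAudioService> lookUpAAudioService() {
        android::sp<android::IServiceManager> sm = android::defaultServiceManager();
        android::sp<android::IBinder> binder =
                sm->getService(android::String16(AAUDIO_SERVICE_NAME));
        if (binder == nullptr) {
            return nullptr;
        }
        return android::interface_cast<android::IAAudioService>(binder);
    }
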
 
diff --git a/media/libaaudio/src/binding/RingBufferParcelable.cpp b/media/libaaudio/src/binding/RingBufferParcelable.cpp
index 3a92929..05451f9 100644
--- a/media/libaaudio/src/binding/RingBufferParcelable.cpp
+++ b/media/libaaudio/src/binding/RingBufferParcelable.cpp
@@ -14,6 +14,10 @@
  * limitations under the License.
  */
 
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
 #include <stdint.h>
 
 #include <binder/Parcelable.h>
diff --git a/media/libaaudio/src/binding/RingBufferParcelable.h b/media/libaaudio/src/binding/RingBufferParcelable.h
index 3f82c79..5fc5d00 100644
--- a/media/libaaudio/src/binding/RingBufferParcelable.h
+++ b/media/libaaudio/src/binding/RingBufferParcelable.h
@@ -55,6 +55,8 @@
 
     void setCapacityInFrames(int32_t capacityInFrames);
 
+    bool isFileDescriptorSafe(SharedMemoryParcelable *memoryParcels);
+
     /**
      * The read and write must be symmetric.
      */
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
index 1102dec..cfb820f 100644
--- a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
@@ -14,12 +14,18 @@
  * limitations under the License.
  */
 
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
 #include <stdint.h>
+#include <stdio.h>
 
 #include <sys/mman.h>
 #include <aaudio/AAudioDefinitions.h>
 
 #include <binder/Parcelable.h>
+#include <utility/AAudioUtilities.h>
 
 #include "binding/SharedMemoryParcelable.h"
 
@@ -36,28 +42,55 @@
 void SharedMemoryParcelable::setup(int fd, int32_t sizeInBytes) {
     mFd = fd;
     mSizeInBytes = sizeInBytes;
+
 }
 
 status_t SharedMemoryParcelable::writeToParcel(Parcel* parcel) const {
-    parcel->writeInt32(mSizeInBytes);
+    status_t status = parcel->writeInt32(mSizeInBytes);
+    if (status != NO_ERROR) return status;
     if (mSizeInBytes > 0) {
-        parcel->writeDupFileDescriptor(mFd);
+        status = parcel->writeDupFileDescriptor(mFd);
+        ALOGE_IF(status != NO_ERROR, "SharedMemoryParcelable writeDupFileDescriptor failed : %d", status);
     }
-    return NO_ERROR; // TODO check for errors above
+    return status;
 }
 
 status_t SharedMemoryParcelable::readFromParcel(const Parcel* parcel) {
-    parcel->readInt32(&mSizeInBytes);
-    if (mSizeInBytes > 0) {
-        mFd = dup(parcel->readFileDescriptor());
+    status_t status = parcel->readInt32(&mSizeInBytes);
+    if (status != NO_ERROR) {
+        return status;
     }
-    return NO_ERROR; // TODO check for errors above
+    if (mSizeInBytes > 0) {
+// FIXME        mFd = dup(parcel->readFileDescriptor());
+        // Why is the ALSA resource not getting freed?!
+        mFd = fcntl(parcel->readFileDescriptor(), F_DUPFD_CLOEXEC, 0);
+        if (mFd == -1) {
+            status = -errno;
+            ALOGE("SharedMemoryParcelable readFileDescriptor fcntl() failed : %d", status);
+        }
+    }
+    return status;
 }
 
-// TODO Add code to unmmap()
+aaudio_result_t SharedMemoryParcelable::close() {
+    if (mResolvedAddress != nullptr) {
+        int err = munmap(mResolvedAddress, mSizeInBytes);
+        if (err < 0) {
+            ALOGE("SharedMemoryParcelable::close() munmap() failed %d", err);
+            return AAudioConvert_androidToAAudioResult(err);
+        }
+        mResolvedAddress = nullptr;
+    }
+    if (mFd != -1) {
+        ::close(mFd);
+        mFd = -1;
+    }
+    return AAUDIO_OK;
+}
 
 aaudio_result_t SharedMemoryParcelable::resolve(int32_t offsetInBytes, int32_t sizeInBytes,
                                               void **regionAddressPtr) {
+
     if (offsetInBytes < 0) {
         ALOGE("SharedMemoryParcelable illegal offsetInBytes = %d", offsetInBytes);
         return AAUDIO_ERROR_OUT_OF_RANGE;
@@ -68,6 +101,11 @@
         return AAUDIO_ERROR_OUT_OF_RANGE;
     }
     if (mResolvedAddress == nullptr) {
+        /* TODO remove
+        int fd = fcntl(mFd, F_DUPFD_CLOEXEC, 0);
+        ALOGE_IF(fd==-1, "cannot dup fd=%d, size=%zd, (%s)",
+                    mFd, mSizeInBytes, strerror(errno));
+        */
         mResolvedAddress = (uint8_t *) mmap(0, mSizeInBytes, PROT_READ|PROT_WRITE,
                                           MAP_SHARED, mFd, 0);
         if (mResolvedAddress == nullptr) {
@@ -76,8 +114,8 @@
         }
     }
     *regionAddressPtr = mResolvedAddress + offsetInBytes;
-    ALOGD("SharedMemoryParcelable mResolvedAddress = %p", mResolvedAddress);
-    ALOGD("SharedMemoryParcelable offset by %d, *regionAddressPtr = %p",
+    ALOGV("SharedMemoryParcelable mResolvedAddress = %p", mResolvedAddress);
+    ALOGV("SharedMemoryParcelable offset by %d, *regionAddressPtr = %p",
           offsetInBytes, *regionAddressPtr);
     return AAUDIO_OK;
 }
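
The fd and mapping lifecycle above (dup the parceled fd with close-on-exec, mmap() it lazily in resolve(), munmap() and close() in close()) can be exercised in isolation. A standalone sketch using only POSIX calls; the temp-file stand-in for the parceled fd is illustrative:

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdint>
    #include <cstdio>

    int main() {
        // Stand-in for the fd received via readFileDescriptor(); any shareable fd works.
        int rawFd = open("/data/local/tmp/shm_demo", O_CREAT | O_RDWR, 0600);
        if (rawFd < 0) { perror("open"); return 1; }
        const int32_t sizeInBytes = 4096;
        if (ftruncate(rawFd, sizeInBytes) != 0) { perror("ftruncate"); close(rawFd); return 1; }

        // readFromParcel(): take ownership of a close-on-exec duplicate.
        int fd = fcntl(rawFd, F_DUPFD_CLOEXEC, 0);
        close(rawFd);
        if (fd < 0) { perror("fcntl"); return 1; }

        // resolve(): map once, then hand out offsets into the region.
        // Note that mmap() reports failure with MAP_FAILED rather than nullptr.
        uint8_t *base = (uint8_t *) mmap(nullptr, sizeInBytes, PROT_READ | PROT_WRITE,
                                         MAP_SHARED, fd, 0);
        if (base == MAP_FAILED) { perror("mmap"); close(fd); return 1; }
        base[0] = 42;   // read/write through the shared mapping

        // close(): unmap before releasing the descriptor.
        munmap(base, sizeInBytes);
        close(fd);
        return 0;
    }
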
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.h b/media/libaaudio/src/binding/SharedMemoryParcelable.h
index 7e0bf1a..22e16f0 100644
--- a/media/libaaudio/src/binding/SharedMemoryParcelable.h
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.h
@@ -49,8 +49,14 @@
 
     virtual status_t readFromParcel(const Parcel* parcel) override;
 
+    // mmap() shared memory
     aaudio_result_t resolve(int32_t offsetInBytes, int32_t sizeInBytes, void **regionAddressPtr);
 
+    // munmap() any mapped memory
+    aaudio_result_t close();
+
+    bool isFileDescriptorSafe();
+
     int32_t getSizeInBytes();
 
     aaudio_result_t validate();
diff --git a/media/libaaudio/src/binding/SharedRegionParcelable.cpp b/media/libaaudio/src/binding/SharedRegionParcelable.cpp
index 8ca0023..8e57832 100644
--- a/media/libaaudio/src/binding/SharedRegionParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedRegionParcelable.cpp
@@ -14,6 +14,10 @@
  * limitations under the License.
  */
 
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
 #include <stdint.h>
 
 #include <sys/mman.h>
diff --git a/media/libaaudio/src/binding/SharedRegionParcelable.h b/media/libaaudio/src/binding/SharedRegionParcelable.h
index d6c2281..5fb2a4c 100644
--- a/media/libaaudio/src/binding/SharedRegionParcelable.h
+++ b/media/libaaudio/src/binding/SharedRegionParcelable.h
@@ -45,6 +45,8 @@
 
     aaudio_result_t resolve(SharedMemoryParcelable *memoryParcels, void **regionAddressPtr);
 
+    bool isFileDescriptorSafe(SharedMemoryParcelable *memoryParcels);
+
     aaudio_result_t validate();
 
     void dump();
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index 47c4774..fe049b2 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -19,7 +19,7 @@
 #include <utils/Log.h>
 
 #include <cassert>
-#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
 
 #include "AudioEndpointParcelable.h"
 #include "AudioEndpoint.h"
@@ -39,11 +39,26 @@
 {
 }
 
-static void AudioEndpoint_validateQueueDescriptor(const char *type,
+static aaudio_result_t AudioEndpoint_validateQueueDescriptor(const char *type,
                                                   const RingBufferDescriptor *descriptor) {
-    assert(descriptor->capacityInFrames > 0);
-    assert(descriptor->bytesPerFrame > 1);
-    assert(descriptor->dataAddress != nullptr);
+    if (descriptor == nullptr) {
+        ALOGE("AudioEndpoint_validateQueueDescriptor() NULL descriptor");
+        return AAUDIO_ERROR_NULL;
+    }
+    if (descriptor->capacityInFrames <= 0) {
+        ALOGE("AudioEndpoint_validateQueueDescriptor() bad capacityInFrames = %d",
+              descriptor->capacityInFrames);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+    if (descriptor->bytesPerFrame <= 1) {
+        ALOGE("AudioEndpoint_validateQueueDescriptor() bad bytesPerFrame = %d",
+              descriptor->bytesPerFrame);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+    if (descriptor->dataAddress == nullptr) {
+        ALOGE("AudioEndpoint_validateQueueDescriptor() NULL dataAddress");
+        return AAUDIO_ERROR_NULL;
+    }
     ALOGD("AudioEndpoint_validateQueueDescriptor %s, dataAddress at %p ====================",
           type,
           descriptor->dataAddress);
@@ -52,11 +67,12 @@
           descriptor->writeCounterAddress);
 
     // Try to READ from the data area.
+    // This code will crash if the mmap failed.
     uint8_t value = descriptor->dataAddress[0];
     ALOGD("AudioEndpoint_validateQueueDescriptor() dataAddress[0] = %d, then try to write",
         (int) value);
     // Try to WRITE to the data area.
-    descriptor->dataAddress[0] = value;
+    descriptor->dataAddress[0] = value * 3;
     ALOGD("AudioEndpoint_validateQueueDescriptor() wrote successfully");
 
     if (descriptor->readCounterAddress) {
@@ -73,17 +89,28 @@
         *descriptor->writeCounterAddress = counter;
         ALOGD("AudioEndpoint_validateQueueDescriptor() wrote writeCounterAddress successfully");
     }
+    return AAUDIO_OK;
 }
 
-void AudioEndpoint_validateDescriptor(const EndpointDescriptor *pEndpointDescriptor) {
-    AudioEndpoint_validateQueueDescriptor("msg", &pEndpointDescriptor->upMessageQueueDescriptor);
-    AudioEndpoint_validateQueueDescriptor("data", &pEndpointDescriptor->downDataQueueDescriptor);
+aaudio_result_t AudioEndpoint_validateDescriptor(const EndpointDescriptor *pEndpointDescriptor) {
+    aaudio_result_t result = AudioEndpoint_validateQueueDescriptor("messages",
+                                    &pEndpointDescriptor->upMessageQueueDescriptor);
+    if (result == AAUDIO_OK) {
+        result = AudioEndpoint_validateQueueDescriptor("data",
+                                                &pEndpointDescriptor->downDataQueueDescriptor);
+    }
+    return result;
 }
 
 aaudio_result_t AudioEndpoint::configure(const EndpointDescriptor *pEndpointDescriptor)
 {
-    aaudio_result_t result = AAUDIO_OK;
-    AudioEndpoint_validateDescriptor(pEndpointDescriptor); // FIXME remove after debugging
+    // TODO maybe remove after debugging
+    aaudio_result_t result = AudioEndpoint_validateDescriptor(pEndpointDescriptor);
+    if (result != AAUDIO_OK) {
+        ALOGD("AudioEndpoint_validateQueueDescriptor returned %d %s",
+              result, AAudio_convertResultToText(result));
+        return result;
+    }
 
     const RingBufferDescriptor *descriptor = &pEndpointDescriptor->upMessageQueueDescriptor;
     assert(descriptor->bytesPerFrame == sizeof(AAudioServiceMessage));
@@ -125,6 +152,7 @@
     int64_t *writeCounterAddress = (descriptor->writeCounterAddress == nullptr)
                                   ? &mDataWriteCounter
                                   : descriptor->writeCounterAddress;
+
     mDownDataQueue = new FifoBuffer(
             descriptor->bytesPerFrame,
             descriptor->capacityInFrames,
@@ -144,9 +172,19 @@
 
 aaudio_result_t AudioEndpoint::writeDataNow(const void *buffer, int32_t numFrames)
 {
+    // TODO Make it easier for the AAudioStreamInternal to scale floats and write shorts
+    // TODO Similar to block adapter write through technique. Add a DataConverter.
     return mDownDataQueue->write(buffer, numFrames);
 }
 
+void AudioEndpoint::getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer) {
+    mDownDataQueue->getEmptyRoomAvailable(wrappingBuffer);
+}
+
+void AudioEndpoint::advanceWriteIndex(int32_t deltaFrames) {
+    mDownDataQueue->getFifoControllerBase()->advanceWriteIndex(deltaFrames);
+}
+
 void AudioEndpoint::setDownDataReadCounter(fifo_counter_t framesRead)
 {
     mDownDataQueue->setReadCounter(framesRead);
diff --git a/media/libaaudio/src/client/AudioEndpoint.h b/media/libaaudio/src/client/AudioEndpoint.h
index caee488..a24a705 100644
--- a/media/libaaudio/src/client/AudioEndpoint.h
+++ b/media/libaaudio/src/client/AudioEndpoint.h
@@ -19,13 +19,13 @@
 
 #include <aaudio/AAudio.h>
 
-#include "AAudioServiceMessage.h"
-#include "AudioEndpointParcelable.h"
+#include "binding/AAudioServiceMessage.h"
+#include "binding/AudioEndpointParcelable.h"
 #include "fifo/FifoBuffer.h"
 
 namespace aaudio {
 
-#define ENDPOINT_DATA_QUEUE_SIZE_MIN   64
+#define ENDPOINT_DATA_QUEUE_SIZE_MIN   48
 
 /**
  * A sink for audio.
@@ -54,15 +54,19 @@
      */
     aaudio_result_t writeDataNow(const void *buffer, int32_t numFrames);
 
+    void getEmptyRoomAvailable(android::WrappingBuffer *wrappingBuffer);
+
+    void advanceWriteIndex(int32_t deltaFrames);
+
     /**
      * Set the read index in the downData queue.
      * This is needed if the reader is not updating the index itself.
      */
-    void setDownDataReadCounter(fifo_counter_t framesRead);
-    fifo_counter_t getDownDataReadCounter();
+    void setDownDataReadCounter(android::fifo_counter_t framesRead);
+    android::fifo_counter_t getDownDataReadCounter();
 
-    void setDownDataWriteCounter(fifo_counter_t framesWritten);
-    fifo_counter_t getDownDataWriteCounter();
+    void setDownDataWriteCounter(android::fifo_counter_t framesWritten);
+    android::fifo_counter_t getDownDataWriteCounter();
 
     /**
      * The result is not valid until after configure() is called.
@@ -80,11 +84,11 @@
     int32_t getFullFramesAvailable();
 
 private:
-    FifoBuffer   * mUpCommandQueue;
-    FifoBuffer   * mDownDataQueue;
-    bool           mOutputFreeRunning;
-    fifo_counter_t mDataReadCounter; // only used if free-running
-    fifo_counter_t mDataWriteCounter; // only used if free-running
+    android::FifoBuffer    *mUpCommandQueue;
+    android::FifoBuffer    *mDownDataQueue;
+    bool                    mOutputFreeRunning;
+    android::fifo_counter_t mDataReadCounter; // only used if free-running
+    android::fifo_counter_t mDataWriteCounter; // only used if free-running
 };
 
 } // namespace aaudio
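
The new getEmptyRoomAvailable()/advanceWriteIndex() pair supports the wrap-aware write pattern used by writeNowWithConversion() further down: the empty room comes back as up to two regions, each is filled in turn, and the write index is advanced once by the total. A minimal caller-side sketch, assuming the WrappingBuffer layout (data[SIZE], numFrames[SIZE]) from FifoBuffer.h:

    #include <cstring>                    // memcpy
    #include "client/AudioEndpoint.h"     // aaudio::AudioEndpoint, android::WrappingBuffer

    // Illustrative only: write numFrames frames into the ring buffer, which may wrap.
    int32_t writeTwoPart(aaudio::AudioEndpoint &endpoint, const uint8_t *source,
                         int32_t numFrames, int32_t bytesPerFrame) {
        android::WrappingBuffer wrappingBuffer;
        endpoint.getEmptyRoomAvailable(&wrappingBuffer);   // empty room, possibly in two parts

        int32_t framesLeft = numFrames;
        for (int part = 0; part < android::WrappingBuffer::SIZE && framesLeft > 0; part++) {
            int32_t framesAvailable = wrappingBuffer.numFrames[part];
            if (framesAvailable <= 0) continue;
            int32_t framesToWrite = (framesLeft > framesAvailable) ? framesAvailable : framesLeft;
            memcpy(wrappingBuffer.data[part], source, framesToWrite * bytesPerFrame);
            source     += framesToWrite * bytesPerFrame;
            framesLeft -= framesToWrite;
        }
        int32_t framesWritten = numFrames - framesLeft;
        endpoint.advanceWriteIndex(framesWritten);          // one advance covers both parts
        return framesWritten;
    }
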
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 54f4870..662cb9e 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -22,9 +22,9 @@
 #include <assert.h>
 
 #include <binder/IServiceManager.h>
-#include <utils/Mutex.h>
 
 #include <aaudio/AAudio.h>
+#include <utils/String16.h>
 
 #include "AudioClock.h"
 #include "AudioEndpointParcelable.h"
@@ -32,6 +32,7 @@
 #include "binding/AAudioStreamConfiguration.h"
 #include "binding/IAAudioService.h"
 #include "binding/AAudioServiceMessage.h"
+#include "fifo/FifoBuffer.h"
 
 #include "core/AudioStreamBuilder.h"
 #include "AudioStreamInternal.h"
@@ -43,47 +44,25 @@
 using android::defaultServiceManager;
 using android::interface_cast;
 using android::Mutex;
+using android::WrappingBuffer;
 
 using namespace aaudio;
 
-static android::Mutex gServiceLock;
-static sp<IAAudioService>  gAAudioService;
+#define MIN_TIMEOUT_NANOS        (1000 * AAUDIO_NANOS_PER_MILLISECOND)
 
-#define AAUDIO_SERVICE_NAME   "AAudioService"
+// Wait at least this many times longer than the operation should take.
+#define MIN_TIMEOUT_OPERATIONS    4
 
-// Helper function to get access to the "AAudioService" service.
-// This code was modeled after frameworks/av/media/libaudioclient/AudioSystem.cpp
-static const sp<IAAudioService> getAAudioService() {
-    sp<IBinder> binder;
-    Mutex::Autolock _l(gServiceLock);
-    if (gAAudioService == 0) {
-        sp<IServiceManager> sm = defaultServiceManager();
-        // Try several times to get the service.
-        int retries = 4;
-        do {
-            binder = sm->getService(String16(AAUDIO_SERVICE_NAME)); // This will wait a while.
-            if (binder != 0) {
-                break;
-            }
-        } while (retries-- > 0);
+#define ALOG_CONDITION   (mInService == false)
 
-        if (binder != 0) {
-            // TODO Add linkToDeath() like in frameworks/av/media/libaudioclient/AudioSystem.cpp
-            // TODO Create a DeathRecipient that disconnects all active streams.
-            gAAudioService = interface_cast<IAAudioService>(binder);
-        } else {
-            ALOGE("AudioStreamInternal could not get %s", AAUDIO_SERVICE_NAME);
-        }
-    }
-    return gAAudioService;
-}
-
-AudioStreamInternal::AudioStreamInternal()
+AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface  &serviceInterface, bool inService)
         : AudioStream()
         , mClockModel()
         , mAudioEndpoint()
         , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
         , mFramesPerBurst(16)
+        , mServiceInterface(serviceInterface)
+        , mInService(inService)
 {
 }
 
@@ -92,9 +71,6 @@
 
 aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
 
-    const sp<IAAudioService>& service = getAAudioService();
-    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
-
     aaudio_result_t result = AAUDIO_OK;
     AAudioStreamRequest request;
     AAudioStreamConfiguration configuration;
@@ -104,22 +80,31 @@
         return result;
     }
 
+    // We have to do volume scaling. So we prefer FLOAT format.
+    if (getFormat() == AAUDIO_UNSPECIFIED) {
+        setFormat(AAUDIO_FORMAT_PCM_FLOAT);
+    }
+
     // Build the request to send to the server.
     request.setUserId(getuid());
     request.setProcessId(getpid());
+    request.setDirection(getDirection());
+
     request.getConfiguration().setDeviceId(getDeviceId());
     request.getConfiguration().setSampleRate(getSampleRate());
     request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
     request.getConfiguration().setAudioFormat(getFormat());
+    aaudio_sharing_mode_t sharingMode = getSharingMode();
+    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.open(): sharingMode %d", sharingMode);
+    request.getConfiguration().setSharingMode(sharingMode);
     request.getConfiguration().setBufferCapacity(builder.getBufferCapacity());
-    request.dump();
 
-    mServiceStreamHandle = service->openStream(request, configuration);
-    ALOGD("AudioStreamInternal.open(): openStream returned mServiceStreamHandle = 0x%08X",
+    mServiceStreamHandle = mServiceInterface.openStream(request, configuration);
+    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.open(): openStream returned mServiceStreamHandle = 0x%08X",
          (unsigned int)mServiceStreamHandle);
     if (mServiceStreamHandle < 0) {
         result = mServiceStreamHandle;
-        ALOGE("AudioStreamInternal.open(): acquireRealtimeStream aaudio_result_t = 0x%08X", result);
+        ALOGE("AudioStreamInternal.open(): openStream() returned %d", result);
     } else {
         result = configuration.validate();
         if (result != AAUDIO_OK) {
@@ -129,17 +114,27 @@
         // Save results of the open.
         setSampleRate(configuration.getSampleRate());
         setSamplesPerFrame(configuration.getSamplesPerFrame());
-        setFormat(configuration.getAudioFormat());
+        setDeviceId(configuration.getDeviceId());
 
-        aaudio::AudioEndpointParcelable parcelable;
-        result = service->getStreamDescription(mServiceStreamHandle, parcelable);
+        // Save device format so we can do format conversion and volume scaling together.
+        mDeviceFormat = configuration.getAudioFormat();
+
+        result = mServiceInterface.getStreamDescription(mServiceStreamHandle, mEndPointParcelable);
+        ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.open(): getStreamDescriptor(0x%08X) returns %d",
+              mServiceStreamHandle, result);
         if (result != AAUDIO_OK) {
             ALOGE("AudioStreamInternal.open(): getStreamDescriptor returns %d", result);
-            service->closeStream(mServiceStreamHandle);
+            mServiceInterface.closeStream(mServiceStreamHandle);
             return result;
         }
+
         // resolve parcelable into a descriptor
-        parcelable.resolve(&mEndpointDescriptor);
+        result = mEndPointParcelable.resolve(&mEndpointDescriptor);
+        if (result != AAUDIO_OK) {
+            ALOGE("AudioStreamInternal.open(): resolve() returns %d", result);
+            mServiceInterface.closeStream(mServiceStreamHandle);
+            return result;
+        }
 
         // Configure endpoint based on descriptor.
         mAudioEndpoint.configure(&mEndpointDescriptor);
@@ -151,67 +146,185 @@
         mClockModel.setSampleRate(getSampleRate());
         mClockModel.setFramesPerBurst(mFramesPerBurst);
 
+        if (getDataCallbackProc()) {
+            mCallbackFrames = builder.getFramesPerDataCallback();
+            if (mCallbackFrames > getBufferCapacity() / 2) {
+                ALOGE("AudioStreamInternal.open(): framesPerCallback too large");
+                mServiceInterface.closeStream(mServiceStreamHandle);
+                return AAUDIO_ERROR_OUT_OF_RANGE;
+
+            } else if (mCallbackFrames < 0) {
+                ALOGE("AudioStreamInternal.open(): framesPerCallback negative");
+                mServiceInterface.closeStream(mServiceStreamHandle);
+                return AAUDIO_ERROR_OUT_OF_RANGE;
+
+            }
+            if (mCallbackFrames == AAUDIO_UNSPECIFIED) {
+                mCallbackFrames = mFramesPerBurst;
+            }
+
+            int32_t bytesPerFrame = getSamplesPerFrame()
+                                    * AAudioConvert_formatToSizeInBytes(getFormat());
+            int32_t callbackBufferSize = mCallbackFrames * bytesPerFrame;
+            mCallbackBuffer = new uint8_t[callbackBufferSize];
+        }
+
         setState(AAUDIO_STREAM_STATE_OPEN);
     }
     return result;
 }
 
 aaudio_result_t AudioStreamInternal::close() {
-    ALOGD("AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X", mServiceStreamHandle);
+    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X", mServiceStreamHandle);
     if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
         aaudio_handle_t serviceStreamHandle = mServiceStreamHandle;
         mServiceStreamHandle = AAUDIO_HANDLE_INVALID;
-        const sp<IAAudioService>& aaudioService = getAAudioService();
-        if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
-        aaudioService->closeStream(serviceStreamHandle);
-        return AAUDIO_OK;
+
+        mServiceInterface.closeStream(serviceStreamHandle);
+        delete[] mCallbackBuffer;
+        return mEndPointParcelable.close();
     } else {
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
 }
 
+// Render audio in the application callback and then write the data to the stream.
+void *AudioStreamInternal::callbackLoop() {
+    aaudio_result_t result = AAUDIO_OK;
+    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
+    AAudioStream_dataCallback appCallback = getDataCallbackProc();
+    if (appCallback == nullptr) return NULL;
+
+    // result might be a frame count
+    while (mCallbackEnabled.load() && isPlaying() && (result >= 0)) {
+        // Call application using the AAudio callback interface.
+        callbackResult = (*appCallback)(
+                (AAudioStream *) this,
+                getDataCallbackUserData(),
+                mCallbackBuffer,
+                mCallbackFrames);
+
+        if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
+            // Write audio data to stream.
+            int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
+
+            // This is a BLOCKING WRITE!
+            result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
+            if ((result != mCallbackFrames)) {
+                ALOGE("AudioStreamInternal(): callbackLoop: write() returned %d", result);
+                if (result >= 0) {
+                    // Only wrote some of the frames requested. Must have timed out.
+                    result = AAUDIO_ERROR_TIMEOUT;
+                }
+                if (getErrorCallbackProc() != nullptr) {
+                    (*getErrorCallbackProc())(
+                            (AAudioStream *) this,
+                            getErrorCallbackUserData(),
+                            result);
+                }
+                break;
+            }
+        } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
+            ALOGD("AudioStreamInternal(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
+            break;
+        }
+    }
+
+    ALOGD("AudioStreamInternal(): callbackLoop() exiting, result = %d, isPlaying() = %d",
+          result, (int) isPlaying());
+    return NULL; // TODO review
+}
+
+static void *aaudio_callback_thread_proc(void *context)
+{
+    AudioStreamInternal *stream = (AudioStreamInternal *)context;
+    //ALOGD("AudioStreamInternal(): aaudio_callback_thread_proc, stream = %p", stream);
+    if (stream != NULL) {
+        return stream->callbackLoop();
+    } else {
+        return NULL;
+    }
+}
+
 aaudio_result_t AudioStreamInternal::requestStart()
 {
     int64_t startTime;
-    ALOGD("AudioStreamInternal(): start()");
+    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): start()");
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    const sp<IAAudioService>& aaudioService = getAAudioService();
-    if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
+
     startTime = AudioClock::getNanoseconds();
     mClockModel.start(startTime);
     processTimestamp(0, startTime);
     setState(AAUDIO_STREAM_STATE_STARTING);
-    return aaudioService->startStream(mServiceStreamHandle);
+    aaudio_result_t result = mServiceInterface.startStream(mServiceStreamHandle);
+
+    if (result == AAUDIO_OK && getDataCallbackProc() != nullptr) {
+        // Launch the callback loop thread.
+        int64_t periodNanos = mCallbackFrames
+                              * AAUDIO_NANOS_PER_SECOND
+                              / getSampleRate();
+        mCallbackEnabled.store(true);
+        result = createThread(periodNanos, aaudio_callback_thread_proc, this);
+    }
+    return result;
 }
 
-aaudio_result_t AudioStreamInternal::requestPause()
+int64_t AudioStreamInternal::calculateReasonableTimeout(int32_t framesPerOperation) {
+
+    // Wait for at least a second or some number of callbacks to join the thread.
+    int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS * framesPerOperation * AAUDIO_NANOS_PER_SECOND)
+                         / getSampleRate();
+    if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) { // arbitrary number of seconds
+        timeoutNanoseconds = MIN_TIMEOUT_NANOS;
+    }
+    return timeoutNanoseconds;
+}
+
+aaudio_result_t AudioStreamInternal::stopCallback()
+{
+    if (isDataCallbackActive()) {
+        mCallbackEnabled.store(false);
+        return joinThread(NULL, calculateReasonableTimeout(mCallbackFrames));
+    } else {
+        return AAUDIO_OK;
+    }
+}
+
+aaudio_result_t AudioStreamInternal::requestPauseInternal()
 {
     ALOGD("AudioStreamInternal(): pause()");
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    const sp<IAAudioService>& aaudioService = getAAudioService();
-    if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
+
     mClockModel.stop(AudioClock::getNanoseconds());
     setState(AAUDIO_STREAM_STATE_PAUSING);
-    return aaudioService->pauseStream(mServiceStreamHandle);
+    return mServiceInterface.pauseStream(mServiceStreamHandle);
+}
+
+aaudio_result_t AudioStreamInternal::requestPause()
+{
+    aaudio_result_t result = stopCallback();
+    if (result != AAUDIO_OK) {
+        return result;
+    }
+    return requestPauseInternal();
 }
 
 aaudio_result_t AudioStreamInternal::requestFlush() {
-    ALOGD("AudioStreamInternal(): flush()");
+    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): flush()");
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    const sp<IAAudioService>& aaudioService = getAAudioService();
-    if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
-setState(AAUDIO_STREAM_STATE_FLUSHING);
-    return aaudioService->flushStream(mServiceStreamHandle);
+
+    setState(AAUDIO_STREAM_STATE_FLUSHING);
+    return mServiceInterface.flushStream(mServiceStreamHandle);
 }
 
 void AudioStreamInternal::onFlushFromServer() {
-    ALOGD("AudioStreamInternal(): onFlushFromServer()");
+    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): onFlushFromServer()");
     int64_t readCounter = mAudioEndpoint.getDownDataReadCounter();
     int64_t writeCounter = mAudioEndpoint.getDownDataWriteCounter();
     // Bump offset so caller does not see the retrograde motion in getFramesRead().
@@ -239,39 +352,38 @@
 }
 
 aaudio_result_t AudioStreamInternal::registerThread() {
-    ALOGD("AudioStreamInternal(): registerThread()");
+    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): registerThread()");
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    const sp<IAAudioService>& aaudioService = getAAudioService();
-    if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
-    return aaudioService->registerAudioThread(mServiceStreamHandle,
-                                         gettid(),
-                                         getPeriodNanoseconds());
+    return mServiceInterface.registerAudioThread(mServiceStreamHandle,
+                                              getpid(),
+                                              gettid(),
+                                              getPeriodNanoseconds());
 }
 
 aaudio_result_t AudioStreamInternal::unregisterThread() {
-    ALOGD("AudioStreamInternal(): unregisterThread()");
+    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): unregisterThread()");
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    const sp<IAAudioService>& aaudioService = getAAudioService();
-    if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
-    return aaudioService->unregisterAudioThread(mServiceStreamHandle, gettid());
+    return mServiceInterface.unregisterAudioThread(mServiceStreamHandle, getpid(), gettid());
 }
 
-// TODO use aaudio_clockid_t all the way down to AudioClock
 aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
                            int64_t *framePosition,
                            int64_t *timeNanoseconds) {
-// TODO implement using real HAL
+    // TODO implement using real HAL
     int64_t time = AudioClock::getNanoseconds();
     *framePosition = mClockModel.convertTimeToPosition(time);
     *timeNanoseconds = time + (10 * AAUDIO_NANOS_PER_MILLISECOND); // Fake hardware delay
     return AAUDIO_OK;
 }
 
-aaudio_result_t AudioStreamInternal::updateState() {
+aaudio_result_t AudioStreamInternal::updateStateWhileWaiting() {
+    if (isDataCallbackActive()) {
+        return AAUDIO_OK; // state is getting updated by the callback thread read/write call
+    }
     return processCommands();
 }
 
@@ -281,16 +393,16 @@
     static int64_t oldTime = 0;
     int64_t framePosition = command.timestamp.position;
     int64_t nanoTime = command.timestamp.timestamp;
-    ALOGD("AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu",
+    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu",
          (long long) framePosition,
          (long long) nanoTime);
     int64_t nanosDelta = nanoTime - oldTime;
     if (nanosDelta > 0 && oldTime > 0) {
         int64_t framesDelta = framePosition - oldPosition;
         int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
-        ALOGD("AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
-        ALOGD("AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
-        ALOGD("AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate);
+        ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
+        ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
+        ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate);
     }
     oldPosition = framePosition;
     oldTime = nanoTime;
@@ -309,29 +421,34 @@
 
 aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) {
     aaudio_result_t result = AAUDIO_OK;
-    ALOGD("processCommands() got event %d", message->event.event);
+    ALOGD_IF(ALOG_CONDITION, "processCommands() got event %d", message->event.event);
     switch (message->event.event) {
         case AAUDIO_SERVICE_EVENT_STARTED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
+            ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
             setState(AAUDIO_STREAM_STATE_STARTED);
             break;
         case AAUDIO_SERVICE_EVENT_PAUSED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
+            ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
             setState(AAUDIO_STREAM_STATE_PAUSED);
             break;
         case AAUDIO_SERVICE_EVENT_FLUSHED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
+            ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
             setState(AAUDIO_STREAM_STATE_FLUSHED);
             onFlushFromServer();
             break;
         case AAUDIO_SERVICE_EVENT_CLOSED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
+            ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
             setState(AAUDIO_STREAM_STATE_CLOSED);
             break;
         case AAUDIO_SERVICE_EVENT_DISCONNECTED:
             result = AAUDIO_ERROR_DISCONNECTED;
+            setState(AAUDIO_STREAM_STATE_DISCONNECTED);
             ALOGW("WARNING - processCommands() AAUDIO_SERVICE_EVENT_DISCONNECTED");
             break;
+        case AAUDIO_SERVICE_EVENT_VOLUME:
+            mVolume = message->event.dataDouble;
+            ALOGD_IF(ALOG_CONDITION, "processCommands() AAUDIO_SERVICE_EVENT_VOLUME %f", mVolume);
+            break;
         default:
             ALOGW("WARNING - processCommands() Unrecognized event = %d",
                  (int) message->event.event);
@@ -345,6 +462,7 @@
     aaudio_result_t result = AAUDIO_OK;
 
     while (result == AAUDIO_OK) {
+        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::processCommands() - looping, %d", result);
         AAudioServiceMessage message;
         if (mAudioEndpoint.readUpCommand(&message) != 1) {
             break; // no command this time, no problem
@@ -373,21 +491,26 @@
                                          int64_t timeoutNanoseconds)
 {
     aaudio_result_t result = AAUDIO_OK;
+    int32_t loopCount = 0;
     uint8_t* source = (uint8_t*)buffer;
     int64_t currentTimeNanos = AudioClock::getNanoseconds();
     int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
     int32_t framesLeft = numFrames;
-//    ALOGD("AudioStreamInternal::write(%p, %d) at time %08llu , mState = %d ------------------",
-//         buffer, numFrames, (unsigned long long) currentTimeNanos, mState);
+    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write(%p, %d) at time %08llu , mState = %s",
+    //      buffer, numFrames, (unsigned long long) currentTimeNanos,
+    //      AAudio_convertStreamStateToText(getState()));
 
     // Write until all the data has been written or until a timeout occurs.
     while (framesLeft > 0) {
+        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write() loop: framesLeft = %d, loopCount = %d  =====",
+        //      framesLeft, loopCount++);
         // The call to writeNow() will not block. It will just write as much as it can.
         int64_t wakeTimeNanos = 0;
         aaudio_result_t framesWritten = writeNow(source, framesLeft,
                                                currentTimeNanos, &wakeTimeNanos);
-//        ALOGD("AudioStreamInternal::write() writeNow() framesLeft = %d --> framesWritten = %d", framesLeft, framesWritten);
+        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write() loop: framesWritten = %d", framesWritten);
         if (framesWritten < 0) {
+            ALOGE("AudioStreamInternal::write() loop: writeNow returned %d", framesWritten);
             result = framesWritten;
             break;
         }
@@ -398,18 +521,19 @@
         if (timeoutNanoseconds == 0) {
             break; // don't block
         } else if (framesLeft > 0) {
-            //ALOGD("AudioStreamInternal:: original wakeTimeNanos %lld", (long long) wakeTimeNanos);
+            //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal:: original wakeTimeNanos %lld", (long long) wakeTimeNanos);
             // clip the wake time to something reasonable
             if (wakeTimeNanos < currentTimeNanos) {
                 wakeTimeNanos = currentTimeNanos;
             }
             if (wakeTimeNanos > deadlineNanos) {
                 // If we time out, just return the framesWritten so far.
-                ALOGE("AudioStreamInternal::write(): timed out after %lld nanos", (long long) timeoutNanoseconds);
+                ALOGE("AudioStreamInternal::write(): timed out after %lld nanos",
+                      (long long) timeoutNanoseconds);
                 break;
             }
 
-            //ALOGD("AudioStreamInternal:: sleep until %lld, dur = %lld", (long long) wakeTimeNanos,
+            //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal:: sleep until %lld, dur = %lld", (long long) wakeTimeNanos,
             //        (long long) (wakeTimeNanos - currentTimeNanos));
             AudioClock::sleepForNanos(wakeTimeNanos - currentTimeNanos);
             currentTimeNanos = AudioClock::getNanoseconds();
@@ -417,43 +541,52 @@
     }
 
     // return error or framesWritten
+    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write() result = %d, framesLeft = %d, #%d",
+    //      result, framesLeft, loopCount);
+    (void) loopCount;
     return (result < 0) ? result : numFrames - framesLeft;
 }
 
 // Write as much data as we can without blocking.
 aaudio_result_t AudioStreamInternal::writeNow(const void *buffer, int32_t numFrames,
                                          int64_t currentNanoTime, int64_t *wakeTimePtr) {
+
+    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow(%p) - enter", buffer);
     {
         aaudio_result_t result = processCommands();
+        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - processCommands() returned %d", result);
         if (result != AAUDIO_OK) {
             return result;
         }
     }
 
     if (mAudioEndpoint.isOutputFreeRunning()) {
+        ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - update read counter");
         // Update data queue based on the timing model.
         int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
         mAudioEndpoint.setDownDataReadCounter(estimatedReadCounter);
-        // If the read index passed the write index then consider it an underrun.
-        if (mAudioEndpoint.getFullFramesAvailable() < 0) {
-            mXRunCount++;
-        }
     }
     // TODO else query from endpoint cuz set by actual reader, maybe
 
-    // Write some data to the buffer.
-    int32_t framesWritten = mAudioEndpoint.writeDataNow(buffer, numFrames);
-    if (framesWritten > 0) {
-        incrementFramesWritten(framesWritten);
+    // If the read index passed the write index then consider it an underrun.
+    if (mAudioEndpoint.getFullFramesAvailable() < 0) {
+        mXRunCount++;
     }
-    //ALOGD("AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
+
+    // Write some data to the buffer.
+    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - writeNowWithConversion(%d)", numFrames);
+    int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
+    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
     //    numFrames, framesWritten);
 
     // Calculate an ideal time to wake up.
     if (wakeTimePtr != nullptr && framesWritten >= 0) {
         // By default wake up a few milliseconds from now.  // TODO review
-        int64_t wakeTime = currentNanoTime + (2 * AAUDIO_NANOS_PER_MILLISECOND);
-        switch (getState()) {
+        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
+        aaudio_stream_state_t state = getState();
+        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - wakeTime based on %s",
+        //      AAudio_convertStreamStateToText(state));
+        switch (state) {
             case AAUDIO_STREAM_STATE_OPEN:
             case AAUDIO_STREAM_STATE_STARTING:
                 if (framesWritten != 0) {
@@ -478,50 +611,68 @@
         *wakeTimePtr = wakeTime;
 
     }
-//    ALOGD("AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
+//    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
 //         (unsigned long long)currentNanoTime,
 //         (unsigned long long)mAudioEndpoint.getDownDataReadCounter(),
 //         (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
     return framesWritten;
 }
 
-aaudio_result_t AudioStreamInternal::waitForStateChange(aaudio_stream_state_t currentState,
-                                                      aaudio_stream_state_t *nextState,
-                                                      int64_t timeoutNanoseconds)
 
-{
-    aaudio_result_t result = processCommands();
-//    ALOGD("AudioStreamInternal::waitForStateChange() - processCommands() returned %d", result);
-    if (result != AAUDIO_OK) {
-        return result;
-    }
-    // TODO replace this polling with a timed sleep on a futex on the message queue
-    int32_t durationNanos = 5 * AAUDIO_NANOS_PER_MILLISECOND;
-    aaudio_stream_state_t state = getState();
-//    ALOGD("AudioStreamInternal::waitForStateChange() - state = %d", state);
-    while (state == currentState && timeoutNanoseconds > 0) {
-        // TODO use futex from service message queue
-        if (durationNanos > timeoutNanoseconds) {
-            durationNanos = timeoutNanoseconds;
+// TODO this function needs a major cleanup.
+aaudio_result_t AudioStreamInternal::writeNowWithConversion(const void *buffer,
+                                       int32_t numFrames) {
+    // ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNowWithConversion(%p, %d)", buffer, numFrames);
+    WrappingBuffer wrappingBuffer;
+    uint8_t *source = (uint8_t *) buffer;
+    int32_t framesLeft = numFrames;
+
+    mAudioEndpoint.getEmptyRoomAvailable(&wrappingBuffer);
+
+    // Read data in one or two parts.
+    int partIndex = 0;
+    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
+        int32_t framesToWrite = framesLeft;
+        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
+        if (framesAvailable > 0) {
+            if (framesToWrite > framesAvailable) {
+                framesToWrite = framesAvailable;
+            }
+            int32_t numBytes = framesToWrite * getBytesPerFrame();
+            // TODO handle volume scaling
+            if (getFormat() == mDeviceFormat) {
+                // Copy straight through.
+                memcpy(wrappingBuffer.data[partIndex], source, numBytes);
+            } else if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT
+                    && mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
+                // Data conversion.
+                AAudioConvert_floatToPcm16(
+                        (const float *) source,
+                        framesToWrite * getSamplesPerFrame(),
+                        (int16_t *) wrappingBuffer.data[partIndex]);
+            } else {
+                // TODO handle more conversions
+                ALOGE("AudioStreamInternal::writeNowWithConversion() unsupported formats: %d, %d",
+                      getFormat(), mDeviceFormat);
+                return AAUDIO_ERROR_UNEXPECTED_VALUE;
+            }
+
+            source += numBytes;
+            framesLeft -= framesToWrite;
         }
-        AudioClock::sleepForNanos(durationNanos);
-        timeoutNanoseconds -= durationNanos;
-
-        result = processCommands();
-        if (result != AAUDIO_OK) {
-            return result;
-        }
-
-        state = getState();
-//        ALOGD("AudioStreamInternal::waitForStateChange() - state = %d", state);
+        partIndex++;
     }
-    if (nextState != nullptr) {
-        *nextState = state;
+    int32_t framesWritten = numFrames - framesLeft;
+    mAudioEndpoint.advanceWriteIndex(framesWritten);
+
+    if (framesWritten > 0) {
+        incrementFramesWritten(framesWritten);
     }
-    return (state == currentState) ? AAUDIO_ERROR_TIMEOUT : AAUDIO_OK;
+    // ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
+    return framesWritten;
 }
 
-
 void AudioStreamInternal::processTimestamp(uint64_t position, int64_t time) {
     mClockModel.processTimestamp( position, time);
 }
@@ -562,7 +713,7 @@
     } else {
         mLastFramesRead = framesRead;
     }
-    ALOGD("AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
+    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
     return framesRead;
 }
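
calculateReasonableTimeout() above scales the timeout with the amount of audio moved per operation and clamps it to a one-second floor. As a worked example (values assumed for illustration): with 192-frame callbacks at 48 kHz, 4 * 192 * 1e9 / 48000 = 16 ms, which is under the floor, so the blocking write in callbackLoop() waits up to MIN_TIMEOUT_NANOS (one second). A standalone sketch of the same arithmetic:

    #include <cstdint>
    #include <cstdio>

    // Mirrors the constants defined near the top of AudioStreamInternal.cpp.
    constexpr int64_t kNanosPerSecond       = 1000000000LL;
    constexpr int64_t kMinTimeoutNanos      = 1000LL * 1000000LL;  // MIN_TIMEOUT_NANOS: 1 second
    constexpr int64_t kMinTimeoutOperations = 4;                   // MIN_TIMEOUT_OPERATIONS

    int64_t reasonableTimeoutNanos(int32_t framesPerOperation, int32_t sampleRate) {
        int64_t timeout = (kMinTimeoutOperations * framesPerOperation * kNanosPerSecond)
                          / sampleRate;
        return (timeout < kMinTimeoutNanos) ? kMinTimeoutNanos : timeout;
    }

    int main() {
        // 192-frame callbacks at 48 kHz -> 16 ms raw, clamped to the 1 s floor.
        printf("timeout = %lld ns\n", (long long) reasonableTimeoutNanos(192, 48000));
        return 0;
    }
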
 
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 6f3a7ac..1aa3b0f 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -26,6 +26,8 @@
 #include "client/AudioEndpoint.h"
 #include "core/AudioStream.h"
 
+#include "binding/AAudioServiceInterface.h"
+
 using android::sp;
 using android::IAAudioService;
 
@@ -35,61 +37,66 @@
 class AudioStreamInternal : public AudioStream {
 
 public:
-    AudioStreamInternal();
+    AudioStreamInternal(AAudioServiceInterface  &serviceInterface, bool inService = false);
     virtual ~AudioStreamInternal();
 
     // =========== Begin ABSTRACT methods ===========================
-    virtual aaudio_result_t requestStart() override;
+    aaudio_result_t requestStart() override;
 
-    virtual aaudio_result_t requestPause() override;
+    aaudio_result_t requestPause() override;
 
-    virtual aaudio_result_t requestFlush() override;
+    aaudio_result_t requestFlush() override;
 
-    virtual aaudio_result_t requestStop() override;
+    aaudio_result_t requestStop() override;
 
     // TODO use aaudio_clockid_t all the way down to AudioClock
-    virtual aaudio_result_t getTimestamp(clockid_t clockId,
+    aaudio_result_t getTimestamp(clockid_t clockId,
                                        int64_t *framePosition,
                                        int64_t *timeNanoseconds) override;
 
 
-    virtual aaudio_result_t updateState() override;
+
+    virtual aaudio_result_t updateStateWhileWaiting() override;
+
     // =========== End ABSTRACT methods ===========================
 
-    virtual aaudio_result_t open(const AudioStreamBuilder &builder) override;
+    aaudio_result_t open(const AudioStreamBuilder &builder) override;
 
-    virtual aaudio_result_t close() override;
+    aaudio_result_t close() override;
 
-    virtual aaudio_result_t write(const void *buffer,
+    aaudio_result_t write(const void *buffer,
                              int32_t numFrames,
                              int64_t timeoutNanoseconds) override;
 
-    virtual aaudio_result_t waitForStateChange(aaudio_stream_state_t currentState,
-                                          aaudio_stream_state_t *nextState,
-                                          int64_t timeoutNanoseconds) override;
+    aaudio_result_t setBufferSize(int32_t requestedFrames) override;
 
-    virtual aaudio_result_t setBufferSize(int32_t requestedFrames) override;
+    int32_t getBufferSize() const override;
 
-    virtual int32_t getBufferSize() const override;
+    int32_t getBufferCapacity() const override;
 
-    virtual int32_t getBufferCapacity() const override;
+    int32_t getFramesPerBurst() const override;
 
-    virtual int32_t getFramesPerBurst() const override;
+    int64_t getFramesRead() override;
 
-    virtual int64_t getFramesRead() override;
-
-    virtual int32_t getXRunCount() const override {
+    int32_t getXRunCount() const override {
         return mXRunCount;
     }
 
-    virtual aaudio_result_t registerThread() override;
+    aaudio_result_t registerThread() override;
 
-    virtual aaudio_result_t unregisterThread() override;
+    aaudio_result_t unregisterThread() override;
+
+    // Called internally from 'C'
+    void *callbackLoop();
 
 protected:
 
     aaudio_result_t processCommands();
 
+    aaudio_result_t requestPauseInternal();
+
+    aaudio_result_t stopCallback();
+
 /**
  * Low level write that will not block. It will just write as much as it can.
  *
@@ -97,10 +104,10 @@
  *
  * @return the number of frames written or a negative error code.
  */
-    virtual aaudio_result_t writeNow(const void *buffer,
-                                int32_t numFrames,
-                                int64_t currentTimeNanos,
-                                int64_t *wakeTimePtr);
+    aaudio_result_t writeNow(const void *buffer,
+                                     int32_t numFrames,
+                                     int64_t currentTimeNanos,
+                                     int64_t *wakeTimePtr);
 
     void onFlushFromServer();
 
@@ -108,18 +115,45 @@
 
     aaudio_result_t onTimestampFromServer(AAudioServiceMessage *message);
 
-private:
-    IsochronousClockModel    mClockModel;
-    AudioEndpoint            mAudioEndpoint;
-    aaudio_handle_t          mServiceStreamHandle;
-    EndpointDescriptor       mEndpointDescriptor;
-    // Offset from underlying frame position.
-    int64_t                  mFramesOffsetFromService = 0;
-    int64_t                  mLastFramesRead = 0;
-    int32_t                  mFramesPerBurst;
-    int32_t                  mXRunCount = 0;
+    // Calculate timeout for an operation involving framesPerOperation.
+    int64_t calculateReasonableTimeout(int32_t framesPerOperation);
 
+private:
+    /*
+     * Non-blocking write with data conversion.
+     * @param buffer
+     * @param numFrames
+     * @return frames written or a negative error
+     */
+    aaudio_result_t writeNowWithConversion(const void *buffer,
+                                     int32_t numFrames);
     void processTimestamp(uint64_t position, int64_t time);
+
+    // Adjust timing model based on timestamp from service.
+
+    IsochronousClockModel    mClockModel;      // timing model for chasing the HAL
+    AudioEndpoint            mAudioEndpoint;   // sink for writes
+    aaudio_handle_t          mServiceStreamHandle; // opaque handle returned from service
+
+    AudioEndpointParcelable  mEndPointParcelable; // description of the buffers filled by service
+    EndpointDescriptor       mEndpointDescriptor; // buffer description with resolved addresses
+
+    aaudio_audio_format_t    mDeviceFormat = AAUDIO_FORMAT_UNSPECIFIED;
+
+    uint8_t                 *mCallbackBuffer = nullptr;
+    int32_t                  mCallbackFrames = 0;
+
+    // Offset from underlying frame position.
+    int64_t                  mFramesOffsetFromService = 0; // offset for timestamps
+    int64_t                  mLastFramesRead = 0; // used to prevent retrograde motion
+    int32_t                  mFramesPerBurst;     // frames per HAL transfer
+    int32_t                  mXRunCount = 0;      // how many underrun events?
+    float                    mVolume = 1.0;       // volume that the server told us to use
+
+    AAudioServiceInterface  &mServiceInterface;   // abstract interface to the service
+
+    // The service uses this for SHARED mode.
+    bool                     mInService = false;  // Are we running in the client or the service?
 };
 
 } /* namespace aaudio */
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index 4c8aabc..c278c8b 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -19,7 +19,6 @@
 #include <utils/Log.h>
 
 #include <stdint.h>
-#include <aaudio/AAudioDefinitions.h>
 
 #include "utility/AudioClock.h"
 #include "IsochronousClockModel.h"
diff --git a/media/libaaudio/src/client/IsochronousClockModel.h b/media/libaaudio/src/client/IsochronousClockModel.h
index 524c286..205c341 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.h
+++ b/media/libaaudio/src/client/IsochronousClockModel.h
@@ -14,11 +14,10 @@
  * limitations under the License.
  */
 
-#ifndef AAUDIO_ISOCHRONOUSCLOCKMODEL_H
-#define AAUDIO_ISOCHRONOUSCLOCKMODEL_H
+#ifndef AAUDIO_ISOCHRONOUS_CLOCK_MODEL_H
+#define AAUDIO_ISOCHRONOUS_CLOCK_MODEL_H
 
 #include <stdint.h>
-#include <aaudio/AAudio.h>
 
 namespace aaudio {
 
@@ -107,4 +106,4 @@
 
 } /* namespace aaudio */
 
-#endif //AAUDIO_ISOCHRONOUSCLOCKMODEL_H
+#endif //AAUDIO_ISOCHRONOUS_CLOCK_MODEL_H
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 52bad70..d91d0e4 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -49,10 +49,13 @@
 AAUDIO_API const char * AAudio_convertResultToText(aaudio_result_t returnCode) {
     switch (returnCode) {
         AAUDIO_CASE_ENUM(AAUDIO_OK);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_DISCONNECTED);
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_ILLEGAL_ARGUMENT);
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_INCOMPATIBLE);
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_INTERNAL);
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_STATE);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_UNEXPECTED_STATE);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_UNEXPECTED_VALUE);
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_HANDLE);
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_QUERY);
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_UNIMPLEMENTED);
@@ -62,9 +65,10 @@
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_NULL);
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_TIMEOUT);
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_WOULD_BLOCK);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_ORDER);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_FORMAT);
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_OUT_OF_RANGE);
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_SERVICE);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_RATE);
     }
     return "Unrecognized AAudio error.";
 }
@@ -82,6 +86,7 @@
         AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_FLUSHED);
         AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STOPPING);
         AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STOPPED);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_DISCONNECTED);
         AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_CLOSING);
         AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_CLOSED);
     }
@@ -102,7 +107,6 @@
 
 AAUDIO_API aaudio_result_t AAudio_createStreamBuilder(AAudioStreamBuilder** builder)
 {
-    ALOGD("AAudio_createStreamBuilder(): check sHandleTracker.isInitialized ()");
     AudioStreamBuilder *audioStreamBuilder =  new AudioStreamBuilder();
     if (audioStreamBuilder == nullptr) {
         return AAUDIO_ERROR_NO_MEMORY;
@@ -114,53 +118,79 @@
 AAUDIO_API void AAudioStreamBuilder_setDeviceId(AAudioStreamBuilder* builder,
                                                      int32_t deviceId)
 {
-    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);;
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setDeviceId(deviceId);
 }
 
 AAUDIO_API void AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder* builder,
                                               int32_t sampleRate)
 {
-    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);;
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setSampleRate(sampleRate);
 }
 
 AAUDIO_API void AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder* builder,
                                                    int32_t samplesPerFrame)
 {
-    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);;
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setSamplesPerFrame(samplesPerFrame);
 }
 
 AAUDIO_API void AAudioStreamBuilder_setDirection(AAudioStreamBuilder* builder,
                                              aaudio_direction_t direction)
 {
-    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);;
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setDirection(direction);
 }
 
-
 AAUDIO_API void AAudioStreamBuilder_setFormat(AAudioStreamBuilder* builder,
                                                    aaudio_audio_format_t format)
 {
-    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);;
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setFormat(format);
 }
 
 AAUDIO_API void AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder* builder,
                                                         aaudio_sharing_mode_t sharingMode)
 {
-    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);;
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setSharingMode(sharingMode);
 }
 
 AAUDIO_API void AAudioStreamBuilder_setBufferCapacityInFrames(AAudioStreamBuilder* builder,
                                                         int32_t frames)
 {
-    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);;
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setBufferCapacity(frames);
 }
 
+AAUDIO_API void AAudioStreamBuilder_setDataCallback(AAudioStreamBuilder* builder,
+                                                    AAudioStream_dataCallback callback,
+                                                    void *userData)
+{
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+    ALOGD("AAudioStreamBuilder_setCallback(): userData = %p", userData);
+    streamBuilder->setDataCallbackProc(callback);
+    streamBuilder->setDataCallbackUserData(userData);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setErrorCallback(AAudioStreamBuilder* builder,
+                                                 AAudioStream_errorCallback callback,
+                                                 void *userData)
+{
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+    ALOGD("AAudioStreamBuilder_setCallback(): userData = %p", userData);
+    streamBuilder->setErrorCallbackProc(callback);
+    streamBuilder->setErrorCallbackUserData(userData);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setFramesPerDataCallback(AAudioStreamBuilder* builder,
+                                                int32_t frames)
+{
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+    ALOGD("%s: frames = %d", __func__, frames);
+    streamBuilder->setFramesPerDataCallback(frames);
+}
+
 static aaudio_result_t  AAudioInternal_openStream(AudioStreamBuilder *streamBuilder,
                                               AAudioStream** streamPtr)
 {
@@ -276,6 +306,13 @@
     if (buffer == nullptr) {
         return AAUDIO_ERROR_NULL;
     }
+
+    // Don't allow writes when playing with a callback.
+    if (audioStream->getDataCallbackProc() != nullptr && audioStream->isPlaying()) {
+        ALOGE("Cannot write to a callback stream when running.");
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+
     if (numFrames < 0) {
         return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
     } else if (numFrames == 0) {
@@ -297,6 +334,9 @@
                                      aaudio_audio_thread_proc_t threadProc, void *arg)
 {
     AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    if (audioStream->getDataCallbackProc() != nullptr) {
+        return AAUDIO_ERROR_INCOMPATIBLE;
+    }
     return audioStream->createThread(periodNanoseconds, threadProc, arg);
 }
 
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index b054d94..7c0b5ae 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -28,7 +28,9 @@
 
 using namespace aaudio;
 
-AudioStream::AudioStream() {
+AudioStream::AudioStream()
+        : mCallbackEnabled(false)
+{
     // mThread is a pthread_t of unknown size so we need memset.
     memset(&mThread, 0, sizeof(mThread));
     setPeriodNanoseconds(0);
@@ -36,13 +38,31 @@
 
 aaudio_result_t AudioStream::open(const AudioStreamBuilder& builder)
 {
-    // TODO validate parameters.
+
     // Copy parameters from the Builder because the Builder may be deleted after this call.
     mSamplesPerFrame = builder.getSamplesPerFrame();
     mSampleRate = builder.getSampleRate();
     mDeviceId = builder.getDeviceId();
     mFormat = builder.getFormat();
+    mDirection = builder.getDirection();
     mSharingMode = builder.getSharingMode();
+
+    // callbacks
+    mFramesPerDataCallback = builder.getFramesPerDataCallback();
+    mDataCallbackProc = builder.getDataCallbackProc();
+    mErrorCallbackProc = builder.getErrorCallbackProc();
+    mDataCallbackUserData = builder.getDataCallbackUserData();
+
+    // TODO validate more parameters.
+    if (mErrorCallbackProc != nullptr && mDataCallbackProc == nullptr) {
+        ALOGE("AudioStream::open(): disconnect callback cannot be used without a data callback.");
+        return AAUDIO_ERROR_UNEXPECTED_VALUE;
+    }
+    if (mDirection != AAUDIO_DIRECTION_INPUT && mDirection != AAUDIO_DIRECTION_OUTPUT) {
+        ALOGE("AudioStream::open(): illegal direction %d", mDirection);
+        return AAUDIO_ERROR_UNEXPECTED_VALUE;
+    }
+
     return AAUDIO_OK;
 }
 
@@ -75,8 +95,13 @@
                                                 aaudio_stream_state_t *nextState,
                                                 int64_t timeoutNanoseconds)
 {
+    aaudio_result_t result = updateStateWhileWaiting();
+    if (result != AAUDIO_OK) {
+        return result;
+    }
+
     // TODO replace this when similar functionality added to AudioTrack.cpp
-    int64_t durationNanos = 20 * AAUDIO_NANOS_PER_MILLISECOND;
+    int64_t durationNanos = 20 * AAUDIO_NANOS_PER_MILLISECOND; // arbitrary
     aaudio_stream_state_t state = getState();
     while (state == currentState && timeoutNanoseconds > 0) {
         if (durationNanos > timeoutNanoseconds) {
@@ -85,7 +110,7 @@
         AudioClock::sleepForNanos(durationNanos);
         timeoutNanoseconds -= durationNanos;
 
-        aaudio_result_t result = updateState();
+        aaudio_result_t result = updateStateWhileWaiting();
         if (result != AAUDIO_OK) {
             return result;
         }
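
The polling loop above backs the public state-change wait. A short usage sketch from the application side, assuming the NDK calls AAudioStream_requestPause() and AAudioStream_waitForStateChange() with a timeout in nanoseconds:

// Pause the stream and wait up to one second for it to actually report PAUSED. Sketch only.
static aaudio_result_t pauseAndWait(AAudioStream *stream) {
    aaudio_result_t result = AAudioStream_requestPause(stream);
    if (result != AAUDIO_OK) return result;

    aaudio_stream_state_t nextState = AAUDIO_STREAM_STATE_UNINITIALIZED;
    const int64_t kTimeoutNanos = 1000LL * 1000 * 1000; // 1 second
    result = AAudioStream_waitForStateChange(stream,
                                             AAUDIO_STREAM_STATE_PAUSING, // state we expect to leave
                                             &nextState,
                                             kTimeoutNanos);
    // On success, nextState should be AAUDIO_STREAM_STATE_PAUSED.
    return result;
}
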
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 6ac8554..da71906 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -18,12 +18,12 @@
 #define AAUDIO_AUDIOSTREAM_H
 
 #include <atomic>
+#include <mutex>
 #include <stdint.h>
-#include <aaudio/AAudioDefinitions.h>
 #include <aaudio/AAudio.h>
 
-#include "AAudioUtilities.h"
-#include "MonotonicCounter.h"
+#include "utility/AAudioUtilities.h"
+#include "utility/MonotonicCounter.h"
 
 namespace aaudio {
 
@@ -55,14 +55,18 @@
                                        int64_t *timeNanoseconds) = 0;
 
 
-    virtual aaudio_result_t updateState() = 0;
+    /**
+     * Update the state while in the middle of waitForStateChange().
+     * @return AAUDIO_OK or a negative error code
+     */
+    virtual aaudio_result_t updateStateWhileWaiting() = 0;
 
 
     // =========== End ABSTRACT methods ===========================
 
     virtual aaudio_result_t waitForStateChange(aaudio_stream_state_t currentState,
-                                          aaudio_stream_state_t *nextState,
-                                          int64_t timeoutNanoseconds);
+                                               aaudio_stream_state_t *nextState,
+                                               int64_t timeoutNanoseconds);
 
     /**
      * Open the stream using the parameters in the builder.
@@ -152,10 +156,16 @@
         return mDirection;
     }
 
+    /**
+     * This is only valid after setSamplesPerFrame() and setFormat() have been called.
+     */
     int32_t getBytesPerFrame() const {
         return mSamplesPerFrame * getBytesPerSample();
     }
 
+    /**
+     * This is only valid after setFormat() has been called.
+     */
     int32_t getBytesPerSample() const {
         return AAudioConvert_formatToSizeInBytes(mFormat);
     }
@@ -168,6 +178,27 @@
         return mFramesRead.get();
     }
 
+    AAudioStream_dataCallback getDataCallbackProc() const {
+        return mDataCallbackProc;
+    }
+    AAudioStream_errorCallback getErrorCallbackProc() const {
+        return mErrorCallbackProc;
+    }
+
+    void *getDataCallbackUserData() const {
+        return mDataCallbackUserData;
+    }
+    void *getErrorCallbackUserData() const {
+        return mErrorCallbackUserData;
+    }
+
+    int32_t getFramesPerDataCallback() const {
+        return mFramesPerDataCallback;
+    }
+
+    bool isDataCallbackActive() {
+        return (mDataCallbackProc != nullptr) && isPlaying();
+    }
 
     // ============== I/O ===========================
     // A Stream will only implement read() or write() depending on its direction.
@@ -235,7 +266,13 @@
         mState = state;
     }
 
+    void setDeviceId(int32_t deviceId) {
+        mDeviceId = deviceId;
+    }
 
+    std::mutex           mStreamMutex;
+
+    std::atomic<bool>    mCallbackEnabled;
 
 protected:
     MonotonicCounter     mFramesWritten;
@@ -259,6 +296,15 @@
     aaudio_direction_t     mDirection = AAUDIO_DIRECTION_OUTPUT;
     aaudio_stream_state_t  mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
 
+    // callback ----------------------------------
+
+    AAudioStream_dataCallback   mDataCallbackProc = nullptr;  // external callback functions
+    void                       *mDataCallbackUserData = nullptr;
+    int32_t                     mFramesPerDataCallback = AAUDIO_UNSPECIFIED; // frames
+
+    AAudioStream_errorCallback  mErrorCallbackProc = nullptr;
+    void                       *mErrorCallbackUserData = nullptr;
+
     // background thread ----------------------------------
     bool                   mHasThread = false;
     pthread_t              mThread; // initialized in constructor
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index 5a54e62..c0b59bb 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -24,12 +24,18 @@
 #include <aaudio/AAudioDefinitions.h>
 #include <aaudio/AAudio.h>
 
+#include "binding/AAudioBinderClient.h"
 #include "client/AudioStreamInternal.h"
 #include "core/AudioStream.h"
 #include "core/AudioStreamBuilder.h"
 #include "legacy/AudioStreamRecord.h"
 #include "legacy/AudioStreamTrack.h"
 
+// Enable a mixer in the AAudio service that will mix streams into an ALSA MMAP buffer.
+#define MMAP_SHARED_ENABLED      0
+// Enable AAUDIO_SHARING_MODE_EXCLUSIVE that uses an ALSA MMAP buffer.
+#define MMAP_EXCLUSIVE_ENABLED   1
+
 using namespace aaudio;
 
 /*
@@ -43,8 +49,11 @@
 
 aaudio_result_t AudioStreamBuilder::build(AudioStream** streamPtr) {
     AudioStream* audioStream = nullptr;
+    AAudioBinderClient *aaudioClient = nullptr;
     const aaudio_sharing_mode_t sharingMode = getSharingMode();
+    ALOGD("AudioStreamBuilder.build() sharingMode = %d", sharingMode);
     switch (getDirection()) {
+
     case AAUDIO_DIRECTION_INPUT:
         switch (sharingMode) {
             case AAUDIO_SHARING_MODE_SHARED:
@@ -56,26 +65,37 @@
                 break;
         }
         break;
+
     case AAUDIO_DIRECTION_OUTPUT:
         switch (sharingMode) {
             case AAUDIO_SHARING_MODE_SHARED:
+#if MMAP_SHARED_ENABLED
+                aaudioClient = new AAudioBinderClient();
+                audioStream = new(std::nothrow) AudioStreamInternal(*aaudioClient, false);
+#else
                 audioStream = new(std::nothrow) AudioStreamTrack();
+#endif
                 break;
+#if MMAP_EXCLUSIVE_ENABLED
             case AAUDIO_SHARING_MODE_EXCLUSIVE:
-                audioStream = new(std::nothrow) AudioStreamInternal();
+                aaudioClient = new AAudioBinderClient();
+                audioStream = new(std::nothrow) AudioStreamInternal(*aaudioClient, false);
                 break;
+#endif
             default:
                 ALOGE("AudioStreamBuilder(): bad sharing mode = %d", sharingMode);
                 return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
                 break;
         }
         break;
+
     default:
         ALOGE("AudioStreamBuilder(): bad direction = %d", getDirection());
         return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
         break;
     }
     if (audioStream == nullptr) {
+        delete aaudioClient;
         return AAUDIO_ERROR_NO_MEMORY;
     }
     ALOGD("AudioStreamBuilder(): created audioStream = %p", audioStream);
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.h b/media/libaaudio/src/core/AudioStreamBuilder.h
index 7b5f35c..93ca7f5 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.h
+++ b/media/libaaudio/src/core/AudioStreamBuilder.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef AAUDIO_AUDIOSTREAMBUILDER_H
-#define AAUDIO_AUDIOSTREAMBUILDER_H
+#ifndef AAUDIO_AUDIO_STREAM_BUILDER_H
+#define AAUDIO_AUDIO_STREAM_BUILDER_H
 
 #include <stdint.h>
 
@@ -101,6 +101,52 @@
         return this;
     }
 
+    AAudioStream_dataCallback getDataCallbackProc() const {
+        return mDataCallbackProc;
+    }
+
+    AudioStreamBuilder* setDataCallbackProc(AAudioStream_dataCallback proc) {
+        mDataCallbackProc = proc;
+        return this;
+    }
+
+
+    void *getDataCallbackUserData() const {
+        return mDataCallbackUserData;
+    }
+
+    AudioStreamBuilder* setDataCallbackUserData(void *userData) {
+        mDataCallbackUserData = userData;
+        return this;
+    }
+
+    AAudioStream_errorCallback getErrorCallbackProc() const {
+        return mErrorCallbackProc;
+    }
+
+    AudioStreamBuilder* setErrorCallbackProc(AAudioStream_errorCallback proc) {
+        mErrorCallbackProc = proc;
+        return this;
+    }
+
+    AudioStreamBuilder* setErrorCallbackUserData(void *userData) {
+        mErrorCallbackUserData = userData;
+        return this;
+    }
+
+    void *getErrorCallbackUserData() const {
+        return mErrorCallbackUserData;
+    }
+
+    int32_t getFramesPerDataCallback() const {
+        return mFramesPerDataCallback;
+    }
+
+    AudioStreamBuilder* setFramesPerDataCallback(int32_t sizeInFrames) {
+        mFramesPerDataCallback = sizeInFrames;
+        return this;
+    }
+
     aaudio_result_t build(AudioStream **streamPtr);
 
 private:
@@ -111,8 +157,15 @@
     aaudio_audio_format_t  mFormat = AAUDIO_FORMAT_UNSPECIFIED;
     aaudio_direction_t     mDirection = AAUDIO_DIRECTION_OUTPUT;
     int32_t                mBufferCapacity = AAUDIO_UNSPECIFIED;
+
+    AAudioStream_dataCallback  mDataCallbackProc = nullptr;  // external callback functions
+    void                      *mDataCallbackUserData = nullptr;
+    int32_t                    mFramesPerDataCallback = AAUDIO_UNSPECIFIED; // frames
+
+    AAudioStream_errorCallback mErrorCallbackProc = nullptr;
+    void                      *mErrorCallbackUserData = nullptr;
 };
 
 } /* namespace aaudio */
 
-#endif /* AAUDIO_AUDIOSTREAMBUILDER_H */
+#endif //AAUDIO_AUDIO_STREAM_BUILDER_H
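
Because each setter above returns `this`, framework-side code can configure the internal builder fluently before calling build(). A brief sketch (include paths and values are illustrative):

#include "core/AudioStreamBuilder.h"

using namespace aaudio;

// Configure a data callback delivered in fixed 64-frame blocks, then build
// the platform-appropriate AudioStream. Sketch only; error handling trimmed.
static aaudio_result_t buildCallbackStream(AAudioStream_dataCallback proc, void *userData,
                                           AudioStream **streamOut) {
    AudioStreamBuilder builder;
    builder.setDataCallbackProc(proc)
           ->setDataCallbackUserData(userData)
           ->setFramesPerDataCallback(64);
    return builder.build(streamOut);
}
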
diff --git a/media/libaaudio/src/fifo/FifoBuffer.cpp b/media/libaaudio/src/fifo/FifoBuffer.cpp
index c5489f1..857780c 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.cpp
+++ b/media/libaaudio/src/fifo/FifoBuffer.cpp
@@ -17,6 +17,7 @@
 #include <cstring>
 #include <unistd.h>
 
+
 #define LOG_TAG "FifoBuffer"
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
@@ -26,6 +27,8 @@
 #include "FifoControllerIndirect.h"
 #include "FifoBuffer.h"
 
+using namespace android; // TODO just import names needed
+
 FifoBuffer::FifoBuffer(int32_t bytesPerFrame, fifo_frames_t capacityInFrames)
         : mFrameCapacity(capacityInFrames)
         , mBytesPerFrame(bytesPerFrame)
@@ -79,80 +82,102 @@
     return frames * mBytesPerFrame;
 }
 
-fifo_frames_t FifoBuffer::read(void *buffer, fifo_frames_t numFrames) {
-    size_t numBytes;
-    fifo_frames_t framesAvailable = mFifo->getFullFramesAvailable();
-    fifo_frames_t framesToRead = numFrames;
-    // Is there enough data in the FIFO
-    if (framesToRead > framesAvailable) {
-        framesToRead = framesAvailable;
-    }
-    if (framesToRead == 0) {
-        return 0;
-    }
+void FifoBuffer::fillWrappingBuffer(WrappingBuffer *wrappingBuffer,
+                                    int32_t framesAvailable,
+                                    int32_t startIndex) {
+    wrappingBuffer->data[1] = nullptr;
+    wrappingBuffer->numFrames[1] = 0;
+    if (framesAvailable > 0) {
 
-    fifo_frames_t readIndex = mFifo->getReadIndex();
-    uint8_t *destination = (uint8_t *) buffer;
-    uint8_t *source = &mStorage[convertFramesToBytes(readIndex)];
-    if ((readIndex + framesToRead) > mFrameCapacity) {
-        // read in two parts, first part here
-        fifo_frames_t frames1 = mFrameCapacity - readIndex;
-        int32_t numBytes = convertFramesToBytes(frames1);
-        memcpy(destination, source, numBytes);
-        destination += numBytes;
-        // read second part
-        source = &mStorage[0];
-        fifo_frames_t frames2 = framesToRead - frames1;
-        numBytes = convertFramesToBytes(frames2);
-        memcpy(destination, source, numBytes);
+        uint8_t *source = &mStorage[convertFramesToBytes(startIndex)];
+        // Does the available data cross the end of the FIFO?
+        if ((startIndex + framesAvailable) > mFrameCapacity) {
+            wrappingBuffer->data[0] = source;
+            wrappingBuffer->numFrames[0] = mFrameCapacity - startIndex;
+            wrappingBuffer->data[1] = &mStorage[0];
+            // The second part holds the frames that wrapped around to the start of storage.
+            wrappingBuffer->numFrames[1] = framesAvailable - wrappingBuffer->numFrames[0];
+
+        } else {
+            wrappingBuffer->data[0] = source;
+            wrappingBuffer->numFrames[0] = framesAvailable;
+        }
     } else {
-        // just read in one shot
-        numBytes = convertFramesToBytes(framesToRead);
-        memcpy(destination, source, numBytes);
+        wrappingBuffer->data[0] = nullptr;
+        wrappingBuffer->numFrames[0] = 0;
     }
-    mFifo->advanceReadIndex(framesToRead);
 
-    return framesToRead;
 }
 
-fifo_frames_t FifoBuffer::write(const void *buffer, fifo_frames_t framesToWrite) {
+void FifoBuffer::getFullDataAvailable(WrappingBuffer *wrappingBuffer) {
+    fifo_frames_t framesAvailable = mFifo->getFullFramesAvailable();
+    fifo_frames_t startIndex = mFifo->getReadIndex();
+    fillWrappingBuffer(wrappingBuffer, framesAvailable, startIndex);
+}
+
+void FifoBuffer::getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer) {
     fifo_frames_t framesAvailable = mFifo->getEmptyFramesAvailable();
-//    ALOGD("FifoBuffer::write() framesToWrite = %d, framesAvailable = %d",
-//         framesToWrite, framesAvailable);
-    if (framesToWrite > framesAvailable) {
-        framesToWrite = framesAvailable;
-    }
-    if (framesToWrite <= 0) {
-        return 0;
-    }
+    fifo_frames_t startIndex = mFifo->getWriteIndex();
+    fillWrappingBuffer(wrappingBuffer, framesAvailable, startIndex);
+}
 
-    size_t numBytes;
-    fifo_frames_t writeIndex = mFifo->getWriteIndex();
-    int byteIndex = convertFramesToBytes(writeIndex);
-    const uint8_t *source = (const uint8_t *) buffer;
-    uint8_t *destination = &mStorage[byteIndex];
-    if ((writeIndex + framesToWrite) > mFrameCapacity) {
-        // write in two parts, first part here
-        fifo_frames_t frames1 = mFrameCapacity - writeIndex;
-        numBytes = convertFramesToBytes(frames1);
-        memcpy(destination, source, numBytes);
-//        ALOGD("FifoBuffer::write(%p to %p, numBytes = %d", source, destination, numBytes);
-        // read second part
-        source += convertFramesToBytes(frames1);
-        destination = &mStorage[0];
-        fifo_frames_t framesLeft = framesToWrite - frames1;
-        numBytes = convertFramesToBytes(framesLeft);
-//        ALOGD("FifoBuffer::write(%p to %p, numBytes = %d", source, destination, numBytes);
-        memcpy(destination, source, numBytes);
-    } else {
-        // just write in one shot
-        numBytes = convertFramesToBytes(framesToWrite);
-//        ALOGD("FifoBuffer::write(%p to %p, numBytes = %d", source, destination, numBytes);
-        memcpy(destination, source, numBytes);
-    }
-    mFifo->advanceWriteIndex(framesToWrite);
+fifo_frames_t FifoBuffer::read(void *buffer, fifo_frames_t numFrames) {
+    WrappingBuffer wrappingBuffer;
+    uint8_t *destination = (uint8_t *) buffer;
+    fifo_frames_t framesLeft = numFrames;
 
-    return framesToWrite;
+    getFullDataAvailable(&wrappingBuffer);
+
+    // Read data in one or two parts.
+    int partIndex = 0;
+    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
+        fifo_frames_t framesToRead = framesLeft;
+        fifo_frames_t framesAvailable = wrappingBuffer.numFrames[partIndex];
+        //ALOGD("FifoProcessor::read() framesAvailable = %d, partIndex = %d",
+        //      framesAvailable, partIndex);
+        if (framesAvailable > 0) {
+            if (framesToRead > framesAvailable) {
+                framesToRead = framesAvailable;
+            }
+            int32_t numBytes = convertFramesToBytes(framesToRead);
+            memcpy(destination, wrappingBuffer.data[partIndex], numBytes);
+
+            destination += numBytes;
+            framesLeft -= framesToRead;
+        }
+        partIndex++;
+    }
+    fifo_frames_t framesRead = numFrames - framesLeft;
+    mFifo->advanceReadIndex(framesRead);
+    return framesRead;
+}
+
+fifo_frames_t FifoBuffer::write(const void *buffer, fifo_frames_t numFrames) {
+    WrappingBuffer wrappingBuffer;
+    const uint8_t *source = (const uint8_t *) buffer;
+    fifo_frames_t framesLeft = numFrames;
+
+    getEmptyRoomAvailable(&wrappingBuffer);
+
+    // Write data in one or two parts.
+    int partIndex = 0;
+    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
+        fifo_frames_t framesToWrite = framesLeft;
+        fifo_frames_t framesAvailable = wrappingBuffer.numFrames[partIndex];
+        if (framesAvailable > 0) {
+            if (framesToWrite > framesAvailable) {
+                framesToWrite = framesAvailable;
+            }
+            int32_t numBytes = convertFramesToBytes(framesToWrite);
+            memcpy(wrappingBuffer.data[partIndex], source, numBytes);
+
+            source += numBytes;
+            framesLeft -= framesToWrite;
+        }
+        partIndex++;
+    }
+    fifo_frames_t framesWritten = numFrames - framesLeft;
+    mFifo->advanceWriteIndex(framesWritten);
+    return framesWritten;
 }
 
 fifo_frames_t FifoBuffer::readNow(void *buffer, fifo_frames_t numFrames) {
diff --git a/media/libaaudio/src/fifo/FifoBuffer.h b/media/libaaudio/src/fifo/FifoBuffer.h
index faa9ae2..2b262a1 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.h
+++ b/media/libaaudio/src/fifo/FifoBuffer.h
@@ -21,15 +21,29 @@
 
 #include "FifoControllerBase.h"
 
+namespace android {
+
+/**
+ * Structure that represents a region in a circular buffer that might be at the
+ * end of the array and split in two.
+ */
+struct WrappingBuffer {
+    enum {
+        SIZE = 2
+    };
+    void *data[SIZE];
+    int32_t numFrames[SIZE];
+};
+
 class FifoBuffer {
 public:
     FifoBuffer(int32_t bytesPerFrame, fifo_frames_t capacityInFrames);
 
-    FifoBuffer(int32_t   bytesPerFrame,
-               fifo_frames_t   capacityInFrames,
-               fifo_counter_t * readCounterAddress,
-               fifo_counter_t * writeCounterAddress,
-               void * dataStorageAddress);
+    FifoBuffer(int32_t bytesPerFrame,
+               fifo_frames_t capacityInFrames,
+               fifo_counter_t *readCounterAddress,
+               fifo_counter_t *writeCounterAddress,
+               void *dataStorageAddress);
 
     ~FifoBuffer();
 
@@ -40,10 +54,33 @@
     fifo_frames_t write(const void *source, fifo_frames_t framesToWrite);
 
     fifo_frames_t getThreshold();
+
     void setThreshold(fifo_frames_t threshold);
 
     fifo_frames_t getBufferCapacityInFrames();
 
+    /**
+     * Return a pointer to the available full frames in data[0] and set the size in numFrames[0].
+     * If the data is split across the end of the FIFO then also set data[1] and numFrames[1].
+     * Otherwise set them to null and zero.
+     * @param wrappingBuffer structure to fill with pointers to the readable regions
+     */
+    void getFullDataAvailable(WrappingBuffer *wrappingBuffer);
+
+    /**
+     * Return a pointer to the available empty frames in data[0] and set the size in numFrames[0].
+     * If the room is split across the end of the FIFO then also set data[1] and numFrames[1].
+     * Otherwise set them to null and zero.
+     * @param wrappingBuffer structure to fill with pointers to the writable regions
+     */
+    void getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer);
+
+    /**
+     * Copy data from the FIFO into the buffer.
+     * @param buffer destination for the copied frames
+     * @param numFrames number of frames requested
+     * @return
+     */
     fifo_frames_t readNow(void *buffer, fifo_frames_t numFrames);
 
     int64_t getNextReadTime(int32_t frameRate);
@@ -73,15 +110,21 @@
     }
 
 private:
+
+    void fillWrappingBuffer(WrappingBuffer *wrappingBuffer,
+                            int32_t framesAvailable, int32_t startIndex);
+
     const fifo_frames_t mFrameCapacity;
-    const int32_t       mBytesPerFrame;
-    uint8_t *           mStorage;
-    bool                mStorageOwned; // did this object allocate the storage?
+    const int32_t mBytesPerFrame;
+    uint8_t *mStorage;
+    bool mStorageOwned; // did this object allocate the storage?
     FifoControllerBase *mFifo;
-    fifo_counter_t      mFramesReadCount;
-    fifo_counter_t      mFramesUnderrunCount;
-    int32_t             mUnderrunCount; // need? just use frames
-    int32_t             mLastReadSize;
+    fifo_counter_t mFramesReadCount;
+    fifo_counter_t mFramesUnderrunCount;
+    int32_t mUnderrunCount; // need? just use frames
+    int32_t mLastReadSize;
 };
 
+}  // android
+
 #endif //FIFO_FIFO_BUFFER_H
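
A WrappingBuffer exposes the readable (or writable) region of the circular buffer as at most two contiguous parts, so a caller can work on the data in place instead of copying through a temporary. A hedged sketch of the reading side; note that a real caller must also advance the read index afterwards (done through the FIFO controller in this codebase, which is not part of the interface shown here):

using android::FifoBuffer;
using android::WrappingBuffer;

// Visit all currently readable frames in place, part by part. Sketch only.
static int32_t visitReadableFrames(FifoBuffer &fifo,
                                   void (*processFrames)(void *data, int32_t numFrames)) {
    WrappingBuffer wrapper;
    fifo.getFullDataAvailable(&wrapper);
    int32_t framesVisited = 0;
    for (int part = 0; part < WrappingBuffer::SIZE; part++) {
        if (wrapper.numFrames[part] > 0) {
            processFrames(wrapper.data[part], wrapper.numFrames[part]);
            framesVisited += wrapper.numFrames[part];
        }
    }
    // Caller still needs to consume (advance past) framesVisited frames.
    return framesVisited;
}
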
diff --git a/media/libaaudio/src/fifo/FifoController.h b/media/libaaudio/src/fifo/FifoController.h
index 7434634..79d98a1 100644
--- a/media/libaaudio/src/fifo/FifoController.h
+++ b/media/libaaudio/src/fifo/FifoController.h
@@ -22,6 +22,8 @@
 
 #include "FifoControllerBase.h"
 
+namespace android {
+
 /**
  * A FIFO with counters contained in the class.
  */
@@ -55,5 +57,6 @@
     std::atomic<fifo_counter_t> mWriteCounter;
 };
 
+}  // android
 
 #endif //FIFO_FIFO_CONTROLLER_H
diff --git a/media/libaaudio/src/fifo/FifoControllerBase.cpp b/media/libaaudio/src/fifo/FifoControllerBase.cpp
index 33a253e..14a2be1 100644
--- a/media/libaaudio/src/fifo/FifoControllerBase.cpp
+++ b/media/libaaudio/src/fifo/FifoControllerBase.cpp
@@ -21,6 +21,8 @@
 #include <stdint.h>
 #include "FifoControllerBase.h"
 
+using namespace android;  // TODO just import names needed
+
 FifoControllerBase::FifoControllerBase(fifo_frames_t capacity, fifo_frames_t threshold)
         : mCapacity(capacity)
         , mThreshold(threshold)
diff --git a/media/libaaudio/src/fifo/FifoControllerBase.h b/media/libaaudio/src/fifo/FifoControllerBase.h
index c543519..64af777 100644
--- a/media/libaaudio/src/fifo/FifoControllerBase.h
+++ b/media/libaaudio/src/fifo/FifoControllerBase.h
@@ -19,6 +19,8 @@
 
 #include <stdint.h>
 
+namespace android {
+
 typedef int64_t fifo_counter_t;
 typedef int32_t fifo_frames_t;
 
@@ -118,4 +120,6 @@
     fifo_frames_t mThreshold;
 };
 
+}  // android
+
 #endif // FIFO_FIFO_CONTROLLER_BASE_H
diff --git a/media/libaaudio/src/fifo/FifoControllerIndirect.h b/media/libaaudio/src/fifo/FifoControllerIndirect.h
index 1aaf9ea..5832d9c 100644
--- a/media/libaaudio/src/fifo/FifoControllerIndirect.h
+++ b/media/libaaudio/src/fifo/FifoControllerIndirect.h
@@ -22,6 +22,8 @@
 
 #include "FifoControllerBase.h"
 
+namespace android {
+
 /**
  * A FifoControllerBase with counters external to the class.
  *
@@ -66,4 +68,6 @@
     std::atomic<fifo_counter_t> * mWriteCounterAddress;
 };
 
+}  // android
+
 #endif //FIFO_FIFO_CONTROLLER_INDIRECT_H
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
new file mode 100644
index 0000000..baa24c9
--- /dev/null
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioStreamLegacy"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <utils/String16.h>
+#include <media/AudioTrack.h>
+#include <aaudio/AAudio.h>
+
+#include "core/AudioStream.h"
+#include "legacy/AudioStreamLegacy.h"
+
+using namespace android;
+using namespace aaudio;
+
+AudioStreamLegacy::AudioStreamLegacy()
+        : AudioStream() {
+}
+
+AudioStreamLegacy::~AudioStreamLegacy() {
+}
+
+// Called from AudioTrack.cpp or AudioRecord.cpp
+static void AudioStreamLegacy_callback(int event, void* userData, void *info) {
+    AudioStreamLegacy *streamLegacy = (AudioStreamLegacy *) userData;
+    streamLegacy->processCallback(event, info);
+}
+
+aaudio_legacy_callback_t AudioStreamLegacy::getLegacyCallback() {
+    return AudioStreamLegacy_callback;
+}
+
+// Implement FixedBlockProcessor
+int32_t AudioStreamLegacy::onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) {
+    int32_t frameCount = numBytes / getBytesPerFrame();
+    // Call using the AAudio callback interface.
+    AAudioStream_dataCallback appCallback = getDataCallbackProc();
+    return (*appCallback)(
+            (AAudioStream *) this,
+            getDataCallbackUserData(),
+            buffer,
+            frameCount);
+}
+
+void AudioStreamLegacy::processCallbackCommon(aaudio_callback_operation_t opcode, void *info) {
+    aaudio_data_callback_result_t callbackResult;
+    switch (opcode) {
+        case AAUDIO_CALLBACK_OPERATION_PROCESS_DATA: {
+            // Note that this code assumes an AudioTrack::Buffer is the same as AudioRecord::Buffer
+            // TODO define our own AudioBuffer and pass it from the subclasses.
+            AudioTrack::Buffer *audioBuffer = static_cast<AudioTrack::Buffer *>(info);
+            if (audioBuffer->frameCount == 0) return;
+
+            // If the caller specified an exact size then use a block size adapter.
+            if (mBlockAdapter != nullptr) {
+                int32_t byteCount = audioBuffer->frameCount * getBytesPerFrame();
+                callbackResult = mBlockAdapter->processVariableBlock((uint8_t *) audioBuffer->raw,
+                                                                     byteCount);
+            } else {
+                // Call using the AAudio callback interface.
+                callbackResult = (*getDataCallbackProc())(
+                        (AAudioStream *) this,
+                        getDataCallbackUserData(),
+                        audioBuffer->raw,
+                        audioBuffer->frameCount
+                        );
+            }
+            if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
+                audioBuffer->size = audioBuffer->frameCount * getBytesPerFrame();
+            } else {
+                audioBuffer->size = 0;
+            }
+        }
+            break;
+
+            // Stream got rerouted so we disconnect.
+        case AAUDIO_CALLBACK_OPERATION_DISCONNECTED: {
+            ALOGD("AudioStreamAAudio(): callbackLoop() stream disconnected");
+            if (getErrorCallbackProc() != nullptr) {
+                (*getErrorCallbackProc())(
+                        (AAudioStream *) this,
+                        getErrorCallbackUserData(),
+                        AAUDIO_OK
+                        );
+            }
+            mCallbackEnabled.store(false);
+        }
+            break;
+
+        default:
+            break;
+    }
+}
+
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.h b/media/libaaudio/src/legacy/AudioStreamLegacy.h
new file mode 100644
index 0000000..c109ee7
--- /dev/null
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LEGACY_AUDIO_STREAM_LEGACY_H
+#define LEGACY_AUDIO_STREAM_LEGACY_H
+
+
+#include <aaudio/AAudio.h>
+
+#include "AudioStream.h"
+#include "AAudioLegacy.h"
+#include "utility/FixedBlockAdapter.h"
+
+namespace aaudio {
+
+
+typedef void (*aaudio_legacy_callback_t)(int event, void* user, void *info);
+
+enum {
+    /**
+     * Request that the app's data callback fill the data buffer of an output stream,
+     * or process the data of an input stream.
+     * The info parameter passed with this operation points to an AudioTrack::Buffer
+     * or AudioRecord::Buffer holding the data address and the number of frames.
+     * For an input stream, the data is read-only.
+     * The data callback should return AAUDIO_CALLBACK_RESULT_CONTINUE
+     * or AAUDIO_CALLBACK_RESULT_STOP.
+     */
+    AAUDIO_CALLBACK_OPERATION_PROCESS_DATA,
+
+    /**
+     * Inform the callback machinery that the stream was disconnected,
+     * for example because the output was rerouted to a new device.
+     * The info parameter is not used for this operation.
+     * The app's error callback, if one was registered, is invoked.
+     */
+    AAUDIO_CALLBACK_OPERATION_DISCONNECTED,
+};
+typedef int32_t aaudio_callback_operation_t;
+
+
+class AudioStreamLegacy : public AudioStream, public FixedBlockProcessor {
+public:
+    AudioStreamLegacy();
+
+    virtual ~AudioStreamLegacy();
+
+    aaudio_legacy_callback_t getLegacyCallback();
+
+    // This is public so it can be called from the C callback function.
+    // This is called from the AudioTrack/AudioRecord client.
+    virtual void processCallback(int event, void *info) = 0;
+
+    void processCallbackCommon(aaudio_callback_operation_t opcode, void *info);
+
+    // Implement FixedBlockProcessor
+    int32_t onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) override;
+
+protected:
+    FixedBlockAdapter         *mBlockAdapter = nullptr;
+    aaudio_wrapping_frames_t   mPositionWhenStarting = 0;
+    int32_t                    mCallbackBufferSize = 0;
+};
+
+} /* namespace aaudio */
+
+#endif //LEGACY_AUDIO_STREAM_LEGACY_H
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index 78c68ae..f0a6ceb 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -24,14 +24,16 @@
 #include <aaudio/AAudio.h>
 
 #include "AudioClock.h"
-#include "AudioStreamRecord.h"
-#include "utility/AAudioUtilities.h"
+#include "legacy/AudioStreamLegacy.h"
+#include "legacy/AudioStreamRecord.h"
+#include "utility/FixedBlockWriter.h"
 
 using namespace android;
 using namespace aaudio;
 
 AudioStreamRecord::AudioStreamRecord()
-    : AudioStream()
+    : AudioStreamLegacy()
+    , mFixedBlockWriter(*this)
 {
 }
 
@@ -58,7 +60,6 @@
                               ? 2 : getSamplesPerFrame();
     audio_channel_mask_t channelMask = audio_channel_in_mask_from_count(samplesPerFrame);
 
-    AudioRecord::callback_t callback = nullptr;
     audio_input_flags_t flags = (audio_input_flags_t) AUDIO_INPUT_FLAG_NONE;
 
     size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
@@ -68,6 +69,17 @@
             ? AUDIO_FORMAT_PCM_FLOAT
             : AAudioConvert_aaudioToAndroidDataFormat(getFormat());
 
+    // Setup the callback if there is one.
+    AudioRecord::callback_t callback = nullptr;
+    void *callbackData = nullptr;
+    AudioRecord::transfer_type streamTransferType = AudioRecord::transfer_type::TRANSFER_SYNC;
+    if (builder.getDataCallbackProc() != nullptr) {
+        streamTransferType = AudioRecord::transfer_type::TRANSFER_CALLBACK;
+        callback = getLegacyCallback();
+        callbackData = this;
+    }
+    mCallbackBufferSize = builder.getFramesPerDataCallback();
+
     mAudioRecord = new AudioRecord(
             AUDIO_SOURCE_DEFAULT,
             getSampleRate(),
@@ -76,10 +88,10 @@
             mOpPackageName, // const String16& opPackageName TODO does not compile
             frameCount,
             callback,
-            nullptr, //    void* user = nullptr,
+            callbackData,
             0,    //    uint32_t notificationFrames = 0,
             AUDIO_SESSION_ALLOCATE,
-            AudioRecord::TRANSFER_DEFAULT,
+            streamTransferType,
             flags
             //   int uid = -1,
             //   pid_t pid = -1,
@@ -99,6 +111,15 @@
     setSamplesPerFrame(mAudioRecord->channelCount());
     setFormat(AAudioConvert_androidToAAudioDataFormat(mAudioRecord->format()));
 
+    // We may need to pass the data through a block size adapter to guarantee constant size.
+    if (mCallbackBufferSize != AAUDIO_UNSPECIFIED) {
+        int callbackSizeBytes = getBytesPerFrame() * mCallbackBufferSize;
+        mFixedBlockWriter.open(callbackSizeBytes);
+        mBlockAdapter = &mFixedBlockWriter;
+    } else {
+        mBlockAdapter = nullptr;
+    }
+
     setState(AAUDIO_STREAM_STATE_OPEN);
 
     return AAUDIO_OK;
@@ -111,9 +132,29 @@
         mAudioRecord.clear();
         setState(AAUDIO_STREAM_STATE_CLOSED);
     }
+    mFixedBlockWriter.close();
     return AAUDIO_OK;
 }
 
+void AudioStreamRecord::processCallback(int event, void *info) {
+
+    ALOGD("AudioStreamRecord::processCallback(), event %d", event);
+    switch (event) {
+        case AudioRecord::EVENT_MORE_DATA:
+            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
+            break;
+
+            // Stream got rerouted so we disconnect.
+        case AudioRecord::EVENT_NEW_IAUDIORECORD:
+            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
+            break;
+
+        default:
+            break;
+    }
+    return;
+}
+
 aaudio_result_t AudioStreamRecord::requestStart()
 {
     if (mAudioRecord.get() == nullptr) {
@@ -124,6 +165,7 @@
     if (err != OK) {
         return AAudioConvert_androidToAAudioResult(err);
     }
+
     err = mAudioRecord->start();
     if (err != OK) {
         return AAudioConvert_androidToAAudioResult(err);
@@ -151,7 +193,7 @@
     return AAUDIO_OK;
 }
 
-aaudio_result_t AudioStreamRecord::updateState()
+aaudio_result_t AudioStreamRecord::updateStateWhileWaiting()
 {
     aaudio_result_t result = AAUDIO_OK;
     aaudio_wrapping_frames_t position;
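
The FixedBlockWriter wired up in open() above exists because AudioRecord hands the callback buffers of whatever size it chooses, while the app may have requested a constant number of frames per data callback. The actual FixedBlockWriter is not part of this excerpt; the following standalone sketch only illustrates the accumulate-and-flush idea behind it:

#include <cstdint>
#include <cstring>

// Gather arbitrary-sized input chunks into a staging buffer and emit exactly
// blockSize bytes at a time. Illustration only, not the real FixedBlockWriter.
class FixedBlockSketch {
public:
    FixedBlockSketch(uint8_t *staging, int32_t blockSize,
                     void (*onFixedBlock)(uint8_t *data, int32_t numBytes))
        : mStaging(staging), mBlockSize(blockSize), mOnFixedBlock(onFixedBlock) {}

    void processVariableBlock(const uint8_t *data, int32_t numBytes) {
        while (numBytes > 0) {
            int32_t room = mBlockSize - mPosition;
            int32_t toCopy = (numBytes < room) ? numBytes : room;
            memcpy(&mStaging[mPosition], data, toCopy);
            mPosition += toCopy;
            data += toCopy;
            numBytes -= toCopy;
            if (mPosition == mBlockSize) {      // a full fixed-size block is ready
                mOnFixedBlock(mStaging, mBlockSize);
                mPosition = 0;
            }
        }
    }

private:
    uint8_t  *mStaging;
    int32_t   mBlockSize;
    int32_t   mPosition = 0;
    void    (*mOnFixedBlock)(uint8_t *data, int32_t numBytes);
};
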
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.h b/media/libaaudio/src/legacy/AudioStreamRecord.h
index 4667f05..897a5b3 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.h
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.h
@@ -23,51 +23,58 @@
 #include "AudioStreamBuilder.h"
 #include "AudioStream.h"
 #include "AAudioLegacy.h"
+#include "legacy/AudioStreamLegacy.h"
+#include "utility/FixedBlockWriter.h"
 
 namespace aaudio {
 
 /**
 * Internal stream that uses the legacy AudioRecord path.
  */
-class AudioStreamRecord : public AudioStream {
+class AudioStreamRecord : public AudioStreamLegacy {
 public:
     AudioStreamRecord();
 
     virtual ~AudioStreamRecord();
 
-    virtual aaudio_result_t open(const AudioStreamBuilder & builder) override;
-    virtual aaudio_result_t close() override;
+    aaudio_result_t open(const AudioStreamBuilder & builder) override;
+    aaudio_result_t close() override;
 
-    virtual aaudio_result_t requestStart() override;
-    virtual aaudio_result_t requestPause() override;
-    virtual aaudio_result_t requestFlush() override;
-    virtual aaudio_result_t requestStop() override;
+    aaudio_result_t requestStart() override;
+    aaudio_result_t requestPause() override;
+    aaudio_result_t requestFlush() override;
+    aaudio_result_t requestStop() override;
 
     virtual aaudio_result_t getTimestamp(clockid_t clockId,
                                          int64_t *framePosition,
                                          int64_t *timeNanoseconds) override;
 
-    virtual aaudio_result_t read(void *buffer,
+    aaudio_result_t read(void *buffer,
                              int32_t numFrames,
                              int64_t timeoutNanoseconds) override;
 
-    virtual aaudio_result_t setBufferSize(int32_t requestedFrames) override;
+    aaudio_result_t setBufferSize(int32_t requestedFrames) override;
 
-    virtual int32_t getBufferSize() const override;
+    int32_t getBufferSize() const override;
 
-    virtual int32_t getBufferCapacity() const override;
+    int32_t getBufferCapacity() const override;
 
-    virtual int32_t getXRunCount() const override;
+    int32_t getXRunCount() const override;
 
-    virtual int32_t getFramesPerBurst() const override;
+    int32_t getFramesPerBurst() const override;
 
-    virtual aaudio_result_t updateState() override;
+    aaudio_result_t updateStateWhileWaiting() override;
+
+    // This is public so it can be called from the C callback function.
+    void processCallback(int event, void *info) override;
 
 private:
     android::sp<android::AudioRecord> mAudioRecord;
+    // adapts between variable sized blocks and fixed size blocks
+    FixedBlockWriter                 mFixedBlockWriter;
+
     // TODO add 64-bit position reporting to AudioRecord and use it.
-    aaudio_wrapping_frames_t   mPositionWhenStarting = 0;
-    android::String16          mOpPackageName;
+    android::String16                mOpPackageName;
 };
 
 } /* namespace aaudio */
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index a7c0677..1bb9e53 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -20,20 +20,25 @@
 
 #include <stdint.h>
 #include <media/AudioTrack.h>
-#include <aaudio/AAudio.h>
 
+#include <aaudio/AAudio.h>
 #include "utility/AudioClock.h"
-#include "AudioStreamTrack.h"
-#include "utility/AAudioUtilities.h"
+#include "legacy/AudioStreamLegacy.h"
+#include "legacy/AudioStreamTrack.h"
+#include "utility/FixedBlockReader.h"
 
 using namespace android;
 using namespace aaudio;
 
+// Arbitrary and somewhat generous number of bursts.
+#define DEFAULT_BURSTS_PER_BUFFER_CAPACITY     8
+
 /*
  * Create a stream that uses the AudioTrack.
  */
 AudioStreamTrack::AudioStreamTrack()
-    : AudioStream()
+    : AudioStreamLegacy()
+    , mFixedBlockReader(*this)
 {
 }
 
@@ -53,6 +58,8 @@
         return result;
     }
 
+    ALOGD("AudioStreamTrack::open = %p", this);
+
     // Try to create an AudioTrack
     // TODO Support UNSPECIFIED in AudioTrack. For now, use stereo if unspecified.
     int32_t samplesPerFrame = (getSamplesPerFrame() == AAUDIO_UNSPECIFIED)
@@ -61,16 +68,40 @@
     ALOGD("AudioStreamTrack::open(), samplesPerFrame = %d, channelMask = 0x%08x",
             samplesPerFrame, channelMask);
 
-    AudioTrack::callback_t callback = nullptr;
     // TODO add more performance options
     audio_output_flags_t flags = (audio_output_flags_t) AUDIO_OUTPUT_FLAG_FAST;
-    size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
-                        : builder.getBufferCapacity();
+
+    int32_t frameCount = builder.getBufferCapacity();
+    ALOGD("AudioStreamTrack::open(), requested buffer capacity %d", frameCount);
+
+    int32_t notificationFrames = 0;
+
     // TODO implement an unspecified AudioTrack format then use that.
-    audio_format_t format = (getFormat() == AAUDIO_UNSPECIFIED)
+    audio_format_t format = (getFormat() == AAUDIO_FORMAT_UNSPECIFIED)
             ? AUDIO_FORMAT_PCM_FLOAT
             : AAudioConvert_aaudioToAndroidDataFormat(getFormat());
 
+    // Setup the callback if there is one.
+    AudioTrack::callback_t callback = nullptr;
+    void *callbackData = nullptr;
+    // Note that TRANSFER_SYNC does not allow FAST track
+    AudioTrack::transfer_type streamTransferType = AudioTrack::transfer_type::TRANSFER_SYNC;
+    if (builder.getDataCallbackProc() != nullptr) {
+        streamTransferType = AudioTrack::transfer_type::TRANSFER_CALLBACK;
+        callback = getLegacyCallback();
+        callbackData = this;
+
+        notificationFrames = builder.getFramesPerDataCallback();
+        // If the total buffer size is unspecified then base the size on the burst size.
+        if (frameCount == AAUDIO_UNSPECIFIED) {
+            // Take advantage of a special trick that allows us to create a buffer
+            // that is some multiple of the burst size.
+            notificationFrames = 0 - DEFAULT_BURSTS_PER_BUFFER_CAPACITY;
+        }
+    }
+    mCallbackBufferSize = builder.getFramesPerDataCallback();
+
+    ALOGD("AudioStreamTrack::open(), notificationFrames = %d", notificationFrames);
     mAudioTrack = new AudioTrack(
             (audio_stream_type_t) AUDIO_STREAM_MUSIC,
             getSampleRate(),
@@ -79,10 +110,10 @@
             frameCount,
             flags,
             callback,
-            nullptr,    // user callback data
-            0,          // notificationFrames
+            callbackData,
+            notificationFrames,
             AUDIO_SESSION_ALLOCATE,
-            AudioTrack::transfer_type::TRANSFER_SYNC // TODO - this does not allow FAST
+            streamTransferType
             );
 
     // Did we get a valid track?
@@ -97,9 +128,21 @@
     // Get the actual values from the AudioTrack.
     setSamplesPerFrame(mAudioTrack->channelCount());
     setSampleRate(mAudioTrack->getSampleRate());
-    setFormat(AAudioConvert_androidToAAudioDataFormat(mAudioTrack->format()));
+    aaudio_audio_format_t aaudioFormat =
+            AAudioConvert_androidToAAudioDataFormat(mAudioTrack->format());
+    setFormat(aaudioFormat);
+
+    // We may need to pass the data through a block size adapter to guarantee constant size.
+    if (mCallbackBufferSize != AAUDIO_UNSPECIFIED) {
+        int callbackSizeBytes = getBytesPerFrame() * mCallbackBufferSize;
+        mFixedBlockReader.open(callbackSizeBytes);
+        mBlockAdapter = &mFixedBlockReader;
+    } else {
+        mBlockAdapter = nullptr;
+    }
 
     setState(AAUDIO_STREAM_STATE_OPEN);
+    setDeviceId(mAudioTrack->getRoutedDeviceId());
 
     return AAUDIO_OK;
 }
@@ -111,11 +154,32 @@
         mAudioTrack.clear(); // TODO is this right?
         setState(AAUDIO_STREAM_STATE_CLOSED);
     }
+    mFixedBlockReader.close();
     return AAUDIO_OK;
 }
 
+void AudioStreamTrack::processCallback(int event, void *info) {
+
+    switch (event) {
+        case AudioTrack::EVENT_MORE_DATA:
+            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
+            break;
+
+            // Stream got rerouted so we disconnect.
+        case AudioTrack::EVENT_NEW_IAUDIOTRACK:
+            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
+            break;
+
+        default:
+            break;
+    }
+    return;
+}
+
 aaudio_result_t AudioStreamTrack::requestStart()
 {
+    std::lock_guard<std::mutex> lock(mStreamMutex);
+
     if (mAudioTrack.get() == nullptr) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
@@ -124,6 +188,7 @@
     if (err != OK) {
         return AAudioConvert_androidToAAudioResult(err);
     }
+
     err = mAudioTrack->start();
     if (err != OK) {
         return AAudioConvert_androidToAAudioResult(err);
@@ -135,11 +200,14 @@
 
 aaudio_result_t AudioStreamTrack::requestPause()
 {
+    std::lock_guard<std::mutex> lock(mStreamMutex);
+
     if (mAudioTrack.get() == nullptr) {
         return AAUDIO_ERROR_INVALID_STATE;
     } else if (getState() != AAUDIO_STREAM_STATE_STARTING
             && getState() != AAUDIO_STREAM_STATE_STARTED) {
-        ALOGE("requestPause(), called when state is %s", AAudio_convertStreamStateToText(getState()));
+        ALOGE("requestPause(), called when state is %s",
+              AAudio_convertStreamStateToText(getState()));
         return AAUDIO_ERROR_INVALID_STATE;
     }
     setState(AAUDIO_STREAM_STATE_PAUSING);
@@ -152,6 +220,8 @@
 }
 
 aaudio_result_t AudioStreamTrack::requestFlush() {
+    std::lock_guard<std::mutex> lock(mStreamMutex);
+
     if (mAudioTrack.get() == nullptr) {
         return AAUDIO_ERROR_INVALID_STATE;
     } else if (getState() != AAUDIO_STREAM_STATE_PAUSED) {
@@ -165,6 +235,8 @@
 }
 
 aaudio_result_t AudioStreamTrack::requestStop() {
+    std::lock_guard<std::mutex> lock(mStreamMutex);
+
     if (mAudioTrack.get() == nullptr) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
@@ -175,7 +247,7 @@
     return AAUDIO_OK;
 }
 
-aaudio_result_t AudioStreamTrack::updateState()
+aaudio_result_t AudioStreamTrack::updateStateWhileWaiting()
 {
     status_t err;
     aaudio_wrapping_frames_t position;
@@ -303,7 +375,7 @@
     }
     // TODO Merge common code into AudioStreamLegacy after rebasing.
     int timebase;
-    switch(clockId) {
+    switch (clockId) {
         case CLOCK_BOOTTIME:
             timebase = ExtendedTimestamp::TIMEBASE_BOOTTIME;
             break;
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.h b/media/libaaudio/src/legacy/AudioStreamTrack.h
index 7a53022..29f5d15 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.h
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.h
@@ -17,54 +17,63 @@
 #ifndef LEGACY_AUDIO_STREAM_TRACK_H
 #define LEGACY_AUDIO_STREAM_TRACK_H
 
+#include <math.h>
 #include <media/AudioTrack.h>
 #include <aaudio/AAudio.h>
 
 #include "AudioStreamBuilder.h"
 #include "AudioStream.h"
-#include "AAudioLegacy.h"
+#include "legacy/AAudioLegacy.h"
+#include "legacy/AudioStreamLegacy.h"
+#include "utility/FixedBlockReader.h"
 
 namespace aaudio {
 
-
 /**
  * Internal stream that uses the legacy AudioTrack path.
  */
-class AudioStreamTrack : public AudioStream {
+class AudioStreamTrack : public AudioStreamLegacy {
 public:
     AudioStreamTrack();
 
     virtual ~AudioStreamTrack();
 
 
-    virtual aaudio_result_t open(const AudioStreamBuilder & builder) override;
-    virtual aaudio_result_t close() override;
+    aaudio_result_t open(const AudioStreamBuilder & builder) override;
+    aaudio_result_t close() override;
 
-    virtual aaudio_result_t requestStart() override;
-    virtual aaudio_result_t requestPause() override;
-    virtual aaudio_result_t requestFlush() override;
-    virtual aaudio_result_t requestStop() override;
+    aaudio_result_t requestStart() override;
+    aaudio_result_t requestPause() override;
+    aaudio_result_t requestFlush() override;
+    aaudio_result_t requestStop() override;
 
-    virtual aaudio_result_t getTimestamp(clockid_t clockId,
+    aaudio_result_t getTimestamp(clockid_t clockId,
                                        int64_t *framePosition,
                                        int64_t *timeNanoseconds) override;
 
-    virtual aaudio_result_t write(const void *buffer,
+    aaudio_result_t write(const void *buffer,
                              int32_t numFrames,
                              int64_t timeoutNanoseconds) override;
 
-    virtual aaudio_result_t setBufferSize(int32_t requestedFrames) override;
-    virtual int32_t getBufferSize() const override;
-    virtual int32_t getBufferCapacity() const override;
-    virtual int32_t getFramesPerBurst()const  override;
-    virtual int32_t getXRunCount() const override;
+    aaudio_result_t setBufferSize(int32_t requestedFrames) override;
+    int32_t getBufferSize() const override;
+    int32_t getBufferCapacity() const override;
+    int32_t getFramesPerBurst() const override;
+    int32_t getXRunCount() const override;
 
-    virtual int64_t getFramesRead() override;
+    int64_t getFramesRead() override;
 
-    virtual aaudio_result_t updateState() override;
+    aaudio_result_t updateStateWhileWaiting() override;
+
+    // This is public so it can be called from the C callback function.
+    void processCallback(int event, void *info) override;
 
 private:
+
     android::sp<android::AudioTrack> mAudioTrack;
+    // adapts between variable sized blocks and fixed size blocks
+    FixedBlockReader                 mFixedBlockReader;
+
     // TODO add 64-bit position reporting to AudioRecord and use it.
     aaudio_wrapping_frames_t         mPositionWhenStarting = 0;
     aaudio_wrapping_frames_t         mPositionWhenPausing = 0;
diff --git a/media/libaaudio/src/utility/FixedBlockAdapter.cpp b/media/libaaudio/src/utility/FixedBlockAdapter.cpp
new file mode 100644
index 0000000..f4666af
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockAdapter.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include "FixedBlockAdapter.h"
+
+FixedBlockAdapter::~FixedBlockAdapter() {
+    close();
+}
+
+int32_t FixedBlockAdapter::open(int32_t bytesPerFixedBlock)
+{
+    mSize = bytesPerFixedBlock;
+    mStorage = new uint8_t[bytesPerFixedBlock]; // TODO use std::nothrow
+    mPosition = 0;
+    return 0;
+}
+
+int32_t FixedBlockAdapter::close()
+{
+    delete[] mStorage;
+    mStorage = nullptr;
+    mSize = 0;
+    mPosition = 0;
+    return 0;
+}
diff --git a/media/libaaudio/src/utility/FixedBlockAdapter.h b/media/libaaudio/src/utility/FixedBlockAdapter.h
new file mode 100644
index 0000000..7008b25
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockAdapter.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_FIXED_BLOCK_ADAPTER_H
+#define AAUDIO_FIXED_BLOCK_ADAPTER_H
+
+#include <stdio.h>
+
+/**
+ * Interface for a class that needs fixed-size blocks.
+ */
+class FixedBlockProcessor {
+public:
+    virtual ~FixedBlockProcessor() = default;
+    virtual int32_t onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) = 0;
+};
+
+/**
+ * Base class for a variable-to-fixed-size block adapter.
+ */
+class FixedBlockAdapter
+{
+public:
+    FixedBlockAdapter(FixedBlockProcessor &fixedBlockProcessor)
+    : mFixedBlockProcessor(fixedBlockProcessor) {}
+
+    virtual ~FixedBlockAdapter();
+
+    /**
+     * Allocate internal resources needed for buffering data.
+     */
+    virtual int32_t open(int32_t bytesPerFixedBlock);
+
+    /**
+     * Note that if the fixed-size blocks must be aligned, then the variable-sized blocks
+     * must have the same alignment.
+     * For example, if the fixed-size blocks must be a multiple of 8, then the variable-sized
+     * blocks must also be a multiple of 8.
+     *
+     * @param buffer variable-sized block of data to process
+     * @param numBytes number of valid bytes in the buffer
+     * @return zero if OK or a non-zero code
+     */
+    virtual int32_t processVariableBlock(uint8_t *buffer, int32_t numBytes) = 0;
+
+    /**
+     * Free internal resources.
+     */
+    int32_t close();
+
+protected:
+    FixedBlockProcessor  &mFixedBlockProcessor;
+    uint8_t              *mStorage = nullptr;    // Store data here while assembling buffers.
+    int32_t               mSize = 0;             // Size in bytes of the fixed size buffer.
+    int32_t               mPosition = 0;         // Offset of the last byte read or written.
+};
+
+#endif /* AAUDIO_FIXED_BLOCK_ADAPTER_H */
diff --git a/media/libaaudio/src/utility/FixedBlockReader.cpp b/media/libaaudio/src/utility/FixedBlockReader.cpp
new file mode 100644
index 0000000..21ea70e
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockReader.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <memory.h>
+
+#include "FixedBlockAdapter.h"
+
+#include "FixedBlockReader.h"
+
+
+FixedBlockReader::FixedBlockReader(FixedBlockProcessor &fixedBlockProcessor)
+    : FixedBlockAdapter(fixedBlockProcessor) {
+    mPosition = mSize;
+}
+
+int32_t FixedBlockReader::open(int32_t bytesPerFixedBlock) {
+    int32_t result = FixedBlockAdapter::open(bytesPerFixedBlock);
+    mPosition = mSize; // Indicate no data in storage.
+    return result;
+}
+
+int32_t FixedBlockReader::readFromStorage(uint8_t *buffer, int32_t numBytes) {
+    int32_t bytesToRead = numBytes;
+    int32_t dataAvailable = mSize - mPosition;
+    if (bytesToRead > dataAvailable) {
+        bytesToRead = dataAvailable;
+    }
+    memcpy(buffer, mStorage + mPosition, bytesToRead);
+    mPosition += bytesToRead;
+    return bytesToRead;
+}
+
+int32_t FixedBlockReader::processVariableBlock(uint8_t *buffer, int32_t numBytes) {
+    int32_t result = 0;
+    int32_t bytesLeft = numBytes;
+    while(bytesLeft > 0 && result == 0) {
+        if (mPosition < mSize) {
+            // Use up bytes currently in storage.
+            int32_t bytesRead = readFromStorage(buffer, bytesLeft);
+            buffer += bytesRead;
+            bytesLeft -= bytesRead;
+        } else if (bytesLeft >= mSize) {
+            // Read through if enough for a complete block.
+            result = mFixedBlockProcessor.onProcessFixedBlock(buffer, mSize);
+            buffer += mSize;
+            bytesLeft -= mSize;
+        } else {
+            // Just need a partial block so we have to use storage.
+            result = mFixedBlockProcessor.onProcessFixedBlock(mStorage, mSize);
+            mPosition = 0;
+        }
+    }
+    return result;
+}
+
diff --git a/media/libaaudio/src/utility/FixedBlockReader.h b/media/libaaudio/src/utility/FixedBlockReader.h
new file mode 100644
index 0000000..128dd52
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockReader.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_FIXED_BLOCK_READER_H
+#define AAUDIO_FIXED_BLOCK_READER_H
+
+#include <stdint.h>
+
+#include "FixedBlockAdapter.h"
+
+/**
+ * Read from a fixed-size block to a variable sized block.
+ *
+ * This can be used to convert a pull data flow from fixed sized buffers to variable sized buffers.
+ * An example would be an audio output callback that reads from the app.
+ */
+class FixedBlockReader : public FixedBlockAdapter
+{
+public:
+    FixedBlockReader(FixedBlockProcessor &fixedBlockProcessor);
+
+    virtual ~FixedBlockReader() = default;
+
+    int32_t open(int32_t bytesPerFixedBlock) override;
+
+    int32_t readFromStorage(uint8_t *buffer, int32_t numBytes);
+
+    /**
+     * Read into a variable sized block.
+     */
+    int32_t processVariableBlock(uint8_t *buffer, int32_t numBytes) override;
+};
+
+
+#endif /* AAUDIO_FIXED_BLOCK_READER_H */
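For reference, a minimal trace of the reader's buffering behaviour. This sketch is not part of the change; it only exercises the FixedBlockReader added above with an 8-byte fixed block and a processor that counts how often a block is pulled (include paths follow the test setup later in this patch):

    #include <cassert>
    #include <cstdint>

    #include "utility/FixedBlockAdapter.h"
    #include "utility/FixedBlockReader.h"

    // Counts how many fixed blocks the reader pulls from the processor.
    struct CountingSource : public FixedBlockProcessor {
        int callbacks = 0;
        int32_t onProcessFixedBlock(uint8_t *, int32_t) override { ++callbacks; return 0; }
    };

    int main() {
        CountingSource source;
        FixedBlockReader reader(source);
        reader.open(8);                          // 8-byte fixed blocks
        uint8_t out[16];
        reader.processVariableBlock(out, 5);     // pulls one block, 3 bytes stay in storage
        assert(source.callbacks == 1);
        reader.processVariableBlock(out, 7);     // drains storage, then pulls another block
        assert(source.callbacks == 2);
        reader.processVariableBlock(out, 12);    // 4 from storage + one full block read through
        assert(source.callbacks == 3);
        return 0;
    }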
diff --git a/media/libaaudio/src/utility/FixedBlockWriter.cpp b/media/libaaudio/src/utility/FixedBlockWriter.cpp
new file mode 100644
index 0000000..2ce8046
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockWriter.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <memory.h>
+
+#include "FixedBlockAdapter.h"
+#include "FixedBlockWriter.h"
+
+FixedBlockWriter::FixedBlockWriter(FixedBlockProcessor &fixedBlockProcessor)
+        : FixedBlockAdapter(fixedBlockProcessor) {}
+
+
+int32_t FixedBlockWriter::writeToStorage(uint8_t *buffer, int32_t numBytes) {
+    int32_t bytesToStore = numBytes;
+    int32_t roomAvailable = mSize - mPosition;
+    if (bytesToStore > roomAvailable) {
+        bytesToStore = roomAvailable;
+    }
+    memcpy(mStorage + mPosition, buffer, bytesToStore);
+    mPosition += bytesToStore;
+    return bytesToStore;
+}
+
+int32_t FixedBlockWriter::processVariableBlock(uint8_t *buffer, int32_t numBytes) {
+    int32_t result = 0;
+    int32_t bytesLeft = numBytes;
+
+    // If we already have data in storage then add to it.
+    if (mPosition > 0) {
+        int32_t bytesWritten = writeToStorage(buffer, bytesLeft);
+        buffer += bytesWritten;
+        bytesLeft -= bytesWritten;
+        // If storage full then flush it out
+        if (mPosition == mSize) {
+            result = mFixedBlockProcessor.onProcessFixedBlock(mStorage, mSize);
+            mPosition = 0;
+        }
+    }
+
+    // Write through if enough for a complete block.
+    while (bytesLeft > mSize && result == 0) {
+        result = mFixedBlockProcessor.onProcessFixedBlock(buffer, mSize);
+        buffer += mSize;
+        bytesLeft -= mSize;
+    }
+
+    // Save any remaining partial block for next time.
+    if (bytesLeft > 0) {
+        writeToStorage(buffer, bytesLeft);
+    }
+
+    return result;
+}
diff --git a/media/libaaudio/src/utility/FixedBlockWriter.h b/media/libaaudio/src/utility/FixedBlockWriter.h
new file mode 100644
index 0000000..f1d917c
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockWriter.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_FIXED_BLOCK_WRITER_H
+#define AAUDIO_FIXED_BLOCK_WRITER_H
+
+#include <stdint.h>
+
+#include "FixedBlockAdapter.h"
+
+/**
+ * This can be used to convert a push data flow from variable sized buffers to fixed sized buffers.
+ * An example would be an audio input callback.
+ */
+class FixedBlockWriter : public FixedBlockAdapter
+{
+public:
+    FixedBlockWriter(FixedBlockProcessor &fixedBlockProcessor);
+
+    virtual ~FixedBlockWriter() = default;
+
+    int32_t writeToStorage(uint8_t *buffer, int32_t numBytes);
+
+    /**
+     * Write from a variable sized block.
+     */
+    int32_t processVariableBlock(uint8_t *buffer, int32_t numBytes) override;
+};
+
+#endif /* AAUDIO_FIXED_BLOCK_WRITER_H */
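Similarly, a small push-side trace (again a sketch, not part of the change) shows when the FixedBlockWriter above flushes a complete fixed block versus buffering a partial one:

    #include <cassert>
    #include <cstdint>

    #include "utility/FixedBlockAdapter.h"
    #include "utility/FixedBlockWriter.h"

    // Counts how many complete fixed blocks the writer delivers.
    struct CountingSink : public FixedBlockProcessor {
        int callbacks = 0;
        int32_t onProcessFixedBlock(uint8_t *, int32_t) override { ++callbacks; return 0; }
    };

    int main() {
        CountingSink sink;
        FixedBlockWriter writer(sink);
        writer.open(8);                          // 8-byte fixed blocks
        uint8_t in[32] = {0};
        writer.processVariableBlock(in, 5);      // partial block, buffered only
        assert(sink.callbacks == 0);
        writer.processVariableBlock(in, 7);      // storage fills to 8 -> one flush, 4 bytes buffered
        assert(sink.callbacks == 1);
        writer.processVariableBlock(in, 20);     // flush buffered block + one write-through; 8 bytes stay buffered
        assert(sink.callbacks == 3);
        return 0;
    }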
diff --git a/media/libaaudio/tests/Android.mk b/media/libaaudio/tests/Android.mk
index 7899cf5..06c9364 100644
--- a/media/libaaudio/tests/Android.mk
+++ b/media/libaaudio/tests/Android.mk
@@ -4,8 +4,7 @@
 LOCAL_C_INCLUDES := \
     $(call include-path-for, audio-utils) \
     frameworks/av/media/libaaudio/include \
-    frameworks/av/media/libaaudio/src/core \
-    frameworks/av/media/libaaudio/src/utility
+    frameworks/av/media/libaaudio/src
 LOCAL_SRC_FILES:= test_handle_tracker.cpp
 LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
                           libcutils liblog libmedia libutils
@@ -17,13 +16,22 @@
 LOCAL_C_INCLUDES := \
     $(call include-path-for, audio-utils) \
     frameworks/av/media/libaaudio/include \
-    frameworks/av/media/libaaudio/src \
-    frameworks/av/media/libaaudio/src/core \
-    frameworks/av/media/libaaudio/src/fifo \
-    frameworks/av/media/libaaudio/src/utility
+    frameworks/av/media/libaaudio/src
 LOCAL_SRC_FILES:= test_marshalling.cpp
 LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
                           libcutils liblog libmedia libutils
 LOCAL_STATIC_LIBRARIES := libaaudio
-LOCAL_MODULE := test_marshalling
+LOCAL_MODULE := test_aaudio_marshalling
+include $(BUILD_NATIVE_TEST)
+
+include $(CLEAR_VARS)
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/libaaudio/include \
+    frameworks/av/media/libaaudio/src
+LOCAL_SRC_FILES:= test_block_adapter.cpp
+LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
+                          libcutils liblog libmedia libutils
+LOCAL_STATIC_LIBRARIES := libaaudio
+LOCAL_MODULE := test_block_adapter
 include $(BUILD_NATIVE_TEST)
diff --git a/media/libaaudio/tests/test_block_adapter.cpp b/media/libaaudio/tests/test_block_adapter.cpp
new file mode 100644
index 0000000..a22abb9
--- /dev/null
+++ b/media/libaaudio/tests/test_block_adapter.cpp
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+
+#include <gtest/gtest.h>
+
+#include "utility/FixedBlockAdapter.h"
+#include "utility/FixedBlockWriter.h"
+#include "utility/FixedBlockReader.h"
+
+#define FIXED_BLOCK_SIZE   47
+#define TEST_BUFFER_SIZE   103
+
+// Pass varying sized blocks.
+// Frames contain a sequential index, which are easily checked.
+class TestBlockAdapter {
+public:
+    TestBlockAdapter()
+            : mTestIndex(0), mLastIndex(0) {
+    }
+
+    ~TestBlockAdapter() = default;
+
+    void fillSequence(int32_t *indexBuffer, int32_t frameCount) {
+        ASSERT_LE(frameCount, TEST_BUFFER_SIZE);
+        for (int i = 0; i < frameCount; i++) {
+            indexBuffer[i] = mLastIndex++;
+        }
+    }
+
+    int checkSequence(const int32_t *indexBuffer, int32_t frameCount) {
+        // This is equivalent to calling an output callback.
+        for (int i = 0; i < frameCount; i++) {
+            int32_t expected = mTestIndex++;
+            int32_t actual = indexBuffer[i];
+            EXPECT_EQ(expected, actual);
+            if (actual != expected) {
+                return -1;
+            }
+        }
+        return 0;
+    }
+
+    int32_t            mTestBuffer[TEST_BUFFER_SIZE];
+    int32_t            mTestIndex;
+    int32_t            mLastIndex;
+};
+
+class TestBlockWriter : public TestBlockAdapter, FixedBlockProcessor {
+public:
+    TestBlockWriter()
+            : mFixedBlockWriter(*this) {
+        mFixedBlockWriter.open(sizeof(int32_t) * FIXED_BLOCK_SIZE);
+    }
+
+    ~TestBlockWriter() {
+        mFixedBlockWriter.close();
+    }
+
+    int32_t onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) override {
+        int32_t frameCount = numBytes / sizeof(int32_t);
+        return checkSequence((int32_t *) buffer, frameCount);
+    }
+
+    // Simulate audio input from a variable sized callback.
+    int32_t testInputWrite(int32_t variableCount) {
+        fillSequence(mTestBuffer, variableCount);
+        int32_t sizeBytes = variableCount * sizeof(int32_t);
+        return mFixedBlockWriter.processVariableBlock((uint8_t *) mTestBuffer, sizeBytes);
+    }
+
+private:
+    FixedBlockWriter mFixedBlockWriter;
+};
+
+class TestBlockReader : public TestBlockAdapter, FixedBlockProcessor {
+public:
+    TestBlockReader()
+            : mFixedBlockReader(*this) {
+        mFixedBlockReader.open(sizeof(int32_t) * FIXED_BLOCK_SIZE);
+    }
+
+    ~TestBlockReader() {
+        mFixedBlockReader.close();
+    }
+
+    int32_t onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) override {
+        int32_t frameCount = numBytes / sizeof(int32_t);
+        fillSequence((int32_t *) buffer, frameCount);
+        return 0;
+    }
+
+    // Simulate audio output from a variable sized callback.
+    int32_t testOutputRead(int32_t variableCount) {
+        int32_t sizeBytes = variableCount * sizeof(int32_t);
+        int32_t result = mFixedBlockReader.processVariableBlock((uint8_t *) mTestBuffer, sizeBytes);
+        if (result >= 0) {
+            result = checkSequence((int32_t *)mTestBuffer, variableCount);
+        }
+        return result;
+    }
+
+private:
+    FixedBlockReader   mFixedBlockReader;
+};
+
+
+TEST(test_block_adapter, block_adapter_write) {
+    TestBlockWriter tester;
+    int result = 0;
+    const int numLoops = 1000;
+
+    for (int i = 0; i < numLoops && result == 0; i++) {
+        long r = random();
+        int32_t size = (r % TEST_BUFFER_SIZE);
+        ASSERT_LE(size, TEST_BUFFER_SIZE);
+        ASSERT_GE(size, 0);
+        result = tester.testInputWrite(size);
+    }
+    ASSERT_EQ(0, result);
+}
+
+TEST(test_block_adapter, block_adapter_read) {
+    TestBlockReader tester;
+    int result = 0;
+    const int numLoops = 1000;
+
+    for (int i = 0; i < numLoops && result == 0; i++) {
+        long r = random();
+        int32_t size = (r % TEST_BUFFER_SIZE);
+        ASSERT_LE(size, TEST_BUFFER_SIZE);
+        ASSERT_GE(size, 0);
+        result = tester.testOutputRead(size);
+    }
+    ASSERT_EQ(0, result);
+}
+
diff --git a/media/libaaudio/tests/test_handle_tracker.cpp b/media/libaaudio/tests/test_handle_tracker.cpp
index e51c39c..e1cb676 100644
--- a/media/libaaudio/tests/test_handle_tracker.cpp
+++ b/media/libaaudio/tests/test_handle_tracker.cpp
@@ -22,7 +22,7 @@
 #include <gtest/gtest.h>
 
 #include <aaudio/AAudioDefinitions.h>
-#include "HandleTracker.h"
+#include "utility/HandleTracker.h"
 
 // Test adding one address.
 TEST(test_handle_tracker, aaudio_handle_tracker) {
diff --git a/media/libeffects/factory/EffectsFactory.c b/media/libeffects/factory/EffectsFactory.c
index 554c14d..523b6e1 100644
--- a/media/libeffects/factory/EffectsFactory.c
+++ b/media/libeffects/factory/EffectsFactory.c
@@ -48,6 +48,7 @@
 static int gCanQueryEffect; // indicates that call to EffectQueryEffect() is valid, i.e. that the list of effects
                           // was not modified since last call to EffectQueryNumberEffects()
 
+static list_elem_t *gLibraryFailedList;  // list of lib_failed_entry_t: libraries that failed to load
 
 /////////////////////////////////////////////////
 //      Local functions prototypes
@@ -584,6 +585,17 @@
     if (hdl != NULL) {
         dlclose(hdl);
     }
+    // Add an entry for the failed library to gLibraryFailedList.
+    lib_failed_entry_t *fl = malloc(sizeof(lib_failed_entry_t));
+    fl->name = strndup(name, PATH_MAX);
+    fl->path = strndup(path, PATH_MAX);
+
+    list_elem_t *fe = malloc(sizeof(list_elem_t));
+    fe->object = fl;
+    fe->next = gLibraryFailedList;
+    gLibraryFailedList = fe;
+    ALOGV("getLibrary() linked error in library %p for path %s", fl, path);
+
     return -EINVAL;
 }
 
@@ -986,16 +998,31 @@
 
 int EffectDumpEffects(int fd) {
     char s[512];
+
+    list_elem_t *fe = gLibraryFailedList;
+    lib_failed_entry_t *fl = NULL;
+
+    dprintf(fd, "Libraries NOT loaded:\n");
+
+    while (fe) {
+        fl = (lib_failed_entry_t *)fe->object;
+        dprintf(fd, " Library %s\n", fl->name);
+        dprintf(fd, "  path: %s\n", fl->path);
+        fe = fe->next;
+    }
+
     list_elem_t *e = gLibraryList;
     lib_entry_t *l = NULL;
     effect_descriptor_t *d = NULL;
     int found = 0;
     int ret = 0;
 
+    dprintf(fd, "Libraries loaded:\n");
     while (e) {
         l = (lib_entry_t *)e->object;
         list_elem_t *efx = l->effects;
-        dprintf(fd, "Library %s\n", l->name);
+        dprintf(fd, " Library %s\n", l->name);
+        dprintf(fd, "  path: %s\n", l->path);
         if (!efx) {
             dprintf(fd, "  (no effects)\n");
         }
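Based on the dprintf calls above, the dump written by EffectDumpEffects() now begins with the libraries that failed to load, followed by the loaded ones; roughly like this (library names and paths are illustrative, not from the patch):

    Libraries NOT loaded:
     Library libbadeffect.so
      path: /vendor/lib/soundfx/libbadeffect.so
    Libraries loaded:
     Library Bundle
      path: /vendor/lib/soundfx/libbundlewrapper.so
      (no effects)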
diff --git a/media/libeffects/factory/EffectsFactory.h b/media/libeffects/factory/EffectsFactory.h
index b7936e0..72e0931 100644
--- a/media/libeffects/factory/EffectsFactory.h
+++ b/media/libeffects/factory/EffectsFactory.h
@@ -58,6 +58,11 @@
     lib_entry_t *lib;
 } effect_entry_t;
 
+typedef struct lib_failed_entry_s {
+    char *name;
+    char *path;
+} lib_failed_entry_t;
+
 // Structure used to store the lib entry
 // and the descriptor of the sub effects.
 // The library entry is to be stored in case of
@@ -69,6 +74,7 @@
 } sub_effect_entry_t;
 
 
+
 ////////////////////////////////////////////////////////////////////////////////
 //
 //    Function:       EffectGetSubEffects
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 8a1ce22..b0bd22e 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -26,7 +26,6 @@
     IMediaPlayer.cpp \
     IMediaRecorder.cpp \
     IMediaSource.cpp \
-    IMediaAnalyticsService.cpp \
     IRemoteDisplay.cpp \
     IRemoteDisplayClient.cpp \
     IResourceManagerClient.cpp \
@@ -35,7 +34,6 @@
     MediaCodecBuffer.cpp \
     MediaCodecInfo.cpp \
     MediaDefs.cpp \
-    MediaAnalyticsItem.cpp \
     MediaUtils.cpp \
     Metadata.cpp \
     mediarecorder.cpp \
@@ -66,6 +64,7 @@
         libcamera_client libstagefright_foundation \
         libgui libdl libaudioutils libaudioclient \
         libmedia_helper libmediadrm \
+        libmediametrics \
         libbase \
         libhidlbase \
         libhidltransport \
diff --git a/media/libmedia/include/mediaplayer.h b/media/libmedia/include/mediaplayer.h
index 18d69a7..623c374 100644
--- a/media/libmedia/include/mediaplayer.h
+++ b/media/libmedia/include/mediaplayer.h
@@ -133,6 +133,10 @@
     MEDIA_INFO_NOT_SEEKABLE = 801,
     // New media metadata is available.
     MEDIA_INFO_METADATA_UPDATE = 802,
+    // Audio can not be played.
+    MEDIA_INFO_PLAY_AUDIO_ERROR = 804,
+    // Video can not be played.
+    MEDIA_INFO_PLAY_VIDEO_ERROR = 805,
 
     //9xx
     MEDIA_INFO_TIMED_TEXT_ERROR = 900,
diff --git a/media/libmedia/omx/1.0/WOmxNode.cpp b/media/libmedia/omx/1.0/WOmxNode.cpp
index b5186b5..6c92b52 100644
--- a/media/libmedia/omx/1.0/WOmxNode.cpp
+++ b/media/libmedia/omx/1.0/WOmxNode.cpp
@@ -411,7 +411,7 @@
         getExtensionIndex_cb _hidl_cb) {
     OMX_INDEXTYPE index;
     Status status = toStatus(mBase->getExtensionIndex(
-            parameterName, &index));
+            parameterName.c_str(), &index));
     _hidl_cb(status, toRawIndexType(index));
     return Void();
 }
diff --git a/media/libmediametrics/Android.mk b/media/libmediametrics/Android.mk
new file mode 100644
index 0000000..f8c4bb3
--- /dev/null
+++ b/media/libmediametrics/Android.mk
@@ -0,0 +1,34 @@
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES += \
+    IMediaAnalyticsService.cpp \
+    MediaAnalyticsItem.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+        liblog libcutils libutils libbinder \
+        libstagefright_foundation \
+        libbase \
+
+LOCAL_MODULE:= libmediametrics
+
+LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+
+LOCAL_C_INCLUDES := \
+    $(TOP)/system/libhidl/base/include \
+    $(TOP)/frameworks/native/include/media/openmax \
+    $(TOP)/frameworks/av/include/media/ \
+    $(TOP)/frameworks/av/media/libmedia/aidl \
+    $(TOP)/frameworks/av/include \
+    $(TOP)/frameworks/native/include \
+    $(call include-path-for, audio-utils)
+
+LOCAL_EXPORT_C_INCLUDE_DIRS := \
+    frameworks/av/include/media \
+
+LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
+LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libmedia/IMediaAnalyticsService.cpp b/media/libmediametrics/IMediaAnalyticsService.cpp
similarity index 94%
rename from media/libmedia/IMediaAnalyticsService.cpp
rename to media/libmediametrics/IMediaAnalyticsService.cpp
index 340cf19..68bafe1 100644
--- a/media/libmedia/IMediaAnalyticsService.cpp
+++ b/media/libmediametrics/IMediaAnalyticsService.cpp
@@ -23,15 +23,6 @@
 #include <binder/Parcel.h>
 #include <binder/IMemory.h>
 #include <binder/IPCThreadState.h>
-#include <media/IHDCP.h>
-#include <media/IMediaCodecList.h>
-#include <media/IMediaHTTPService.h>
-#include <media/IMediaPlayerService.h>
-#include <media/IMediaRecorder.h>
-#include <media/IOMX.h>
-#include <media/IRemoteDisplay.h>
-#include <media/IRemoteDisplayClient.h>
-#include <media/IStreamSource.h>
 
 #include <utils/Errors.h>  // for status_t
 #include <utils/List.h>
diff --git a/media/libmedia/MediaAnalyticsItem.cpp b/media/libmediametrics/MediaAnalyticsItem.cpp
similarity index 100%
rename from media/libmedia/MediaAnalyticsItem.cpp
rename to media/libmediametrics/MediaAnalyticsItem.cpp
diff --git a/media/libmedia/include/IMediaAnalyticsService.h b/media/libmediametrics/include/IMediaAnalyticsService.h
similarity index 100%
rename from media/libmedia/include/IMediaAnalyticsService.h
rename to media/libmediametrics/include/IMediaAnalyticsService.h
diff --git a/media/libmedia/include/MediaAnalyticsItem.h b/media/libmediametrics/include/MediaAnalyticsItem.h
similarity index 99%
rename from media/libmedia/include/MediaAnalyticsItem.h
rename to media/libmediametrics/include/MediaAnalyticsItem.h
index f050e7f..dc501b2 100644
--- a/media/libmedia/include/MediaAnalyticsItem.h
+++ b/media/libmediametrics/include/MediaAnalyticsItem.h
@@ -41,6 +41,7 @@
     friend class MediaAnalyticsService;
     friend class IMediaAnalyticsService;
     friend class MediaMetricsJNI;
+    friend class MetricsSummarizer;
 
     public:
 
@@ -231,7 +232,6 @@
         size_t mPropCount;
         size_t mPropSize;
         Prop *mProps;
-
 };
 
 } // namespace android
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index f7e1ff5..7af7031 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -27,6 +27,7 @@
     libgui                      \
     libaudioclient              \
     libmedia                    \
+    libmediametrics             \
     libmediadrm                 \
     libmediautils               \
     libmemunreachable           \
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 316f824..1e4fdfe 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -197,6 +197,8 @@
       mPrepared(false),
       mResetting(false),
       mSourceStarted(false),
+      mAudioDecoderError(false),
+      mVideoDecoderError(false),
       mPaused(false),
       mPausedByClient(true),
       mPausedForBuffering(false),
@@ -1094,12 +1096,14 @@
                 ALOGV("%s shutdown completed", audio ? "audio" : "video");
                 if (audio) {
                     mAudioDecoder.clear();
+                    mAudioDecoderError = false;
                     ++mAudioDecoderGeneration;
 
                     CHECK_EQ((int)mFlushingAudio, (int)SHUTTING_DOWN_DECODER);
                     mFlushingAudio = SHUT_DOWN;
                 } else {
                     mVideoDecoder.clear();
+                    mVideoDecoderError = false;
                     ++mVideoDecoderGeneration;
 
                     CHECK_EQ((int)mFlushingVideo, (int)SHUTTING_DOWN_DECODER);
@@ -1154,7 +1158,29 @@
                         finishFlushIfPossible();  // Should not occur.
                         break;                    // Finish anyways.
                 }
-                notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
+                if (mSource != nullptr) {
+                    if (audio) {
+                        if (mVideoDecoderError || mSource->getFormat(false /* audio */) == NULL) {
+                            // When both audio and video have errors, or the stream has only
+                            // audio and it has an error, notify the client of an error.
+                            notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
+                        } else {
+                            // Only audio track has error. Video track could be still good to play.
+                            notifyListener(MEDIA_INFO, MEDIA_INFO_PLAY_AUDIO_ERROR, err);
+                        }
+                        mAudioDecoderError = true;
+                    } else {
+                        if (mAudioDecoderError || mSource->getFormat(true /* audio */) == NULL) {
+                            // When both audio and video have errors, or the stream has only
+                            // video and it has an error, notify the client of an error.
+                            notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
+                        } else {
+                            // Only the video track has an error; the audio track may still play.
+                            notifyListener(MEDIA_INFO, MEDIA_INFO_PLAY_VIDEO_ERROR, err);
+                        }
+                        mVideoDecoderError = true;
+                    }
+                }
             } else {
                 ALOGV("Unhandled decoder notification %d '%c%c%c%c'.",
                       what,
@@ -1618,7 +1644,8 @@
     // is possible; otherwise the decoders call the renderer openAudioSink directly.
 
     status_t err = mRenderer->openAudioSink(
-            format, true /* offloadOnly */, hasVideo, AUDIO_OUTPUT_FLAG_NONE, &mOffloadAudio);
+            format, true /* offloadOnly */, hasVideo,
+            AUDIO_OUTPUT_FLAG_NONE, &mOffloadAudio, mSource->isStreaming());
     if (err != OK) {
         // Any failure we turn off mOffloadAudio.
         mOffloadAudio = false;
@@ -1636,6 +1663,7 @@
     if (mAudioDecoder != NULL) {
         mAudioDecoder->pause();
         mAudioDecoder.clear();
+        mAudioDecoderError = false;
         ++mAudioDecoderGeneration;
     }
     if (mFlushingAudio == FLUSHING_DECODER) {
@@ -1773,6 +1801,7 @@
             *decoder = new Decoder(notify, mSource, mPID, mUID, mRenderer);
             ALOGV("instantiateDecoder audio Decoder");
         }
+        mAudioDecoderError = false;
     } else {
         sp<AMessage> notify = new AMessage(kWhatVideoNotify, this);
         ++mVideoDecoderGeneration;
@@ -1780,6 +1809,7 @@
 
         *decoder = new Decoder(
                 notify, mSource, mPID, mUID, mRenderer, mSurface, mCCDecoder);
+        mVideoDecoderError = false;
 
         // enable FRC if high-quality AV sync is requested, even if not
         // directly queuing to display, as this will even improve textureview
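To summarize the routing introduced above: a decoder failure is only fatal when the other track is absent or already failing, otherwise the client just receives an informational code and playback continues. A condensed sketch of that decision (standalone C++, not NuPlayer code; the Outcome enum is invented for illustration):

    #include <cstdio>

    enum class Outcome { FatalError, AudioInfo, VideoInfo };

    // Mirrors the branch added to NuPlayer's decoder-error handling.
    Outcome routeDecoderError(bool audioDecoderFailed,
                              bool otherDecoderAlreadyFailed,
                              bool otherTrackPresent) {
        if (otherDecoderAlreadyFailed || !otherTrackPresent) {
            return Outcome::FatalError;                      // MEDIA_ERROR, MEDIA_ERROR_UNKNOWN
        }
        return audioDecoderFailed ? Outcome::AudioInfo       // MEDIA_INFO_PLAY_AUDIO_ERROR
                                  : Outcome::VideoInfo;      // MEDIA_INFO_PLAY_VIDEO_ERROR
    }

    int main() {
        // Audio decoder fails while a healthy video track exists: playback continues
        // and the client only receives MEDIA_INFO_PLAY_AUDIO_ERROR.
        bool continues = routeDecoderError(true, false, true) == Outcome::AudioInfo;
        std::printf("audio-only failure is non-fatal: %s\n", continues ? "yes" : "no");
        return 0;
    }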
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index d3cb7c1..d542749 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -218,6 +218,8 @@
     bool mPrepared;
     bool mResetting;
     bool mSourceStarted;
+    bool mAudioDecoderError;
+    bool mVideoDecoderError;
 
     // Actual pause state, either as requested by client or due to buffering.
     bool mPaused;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 9a2224e..9e579f9 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -762,8 +762,7 @@
         int64_t durationUs;
         bool hasVideo = (mSource->getFormat(false /* audio */) != NULL);
         if (getAudioDeepBufferSetting() // override regardless of source duration
-                || (!hasVideo
-                        && mSource->getDuration(&durationUs) == OK
+                || (mSource->getDuration(&durationUs) == OK
                         && durationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US)) {
             flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
         } else {
@@ -773,7 +772,8 @@
         sp<AMessage> reply = new AMessage(kWhatAudioOutputFormatChanged, this);
         reply->setInt32("generation", mBufferGeneration);
         mRenderer->changeAudioFormat(
-                format, false /* offloadOnly */, hasVideo, flags, reply);
+                format, false /* offloadOnly */, hasVideo,
+                flags, mSource->isStreaming(), reply);
     }
 }
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
index 9a4bc8c..6b05b53 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
@@ -76,7 +76,7 @@
     // format is different.
     status_t err = mRenderer->openAudioSink(
             format, true /* offloadOnly */, hasVideo,
-            AUDIO_OUTPUT_FLAG_NONE /* flags */, NULL /* isOffloaded */);
+            AUDIO_OUTPUT_FLAG_NONE /* flags */, NULL /* isOffloaded */, mSource->isStreaming());
     if (err != OK) {
         handleError(err);
     }
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 9350440..9fe61703 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -397,12 +397,14 @@
         bool offloadOnly,
         bool hasVideo,
         uint32_t flags,
-        bool *isOffloaded) {
+        bool *isOffloaded,
+        bool isStreaming) {
     sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
     msg->setMessage("format", format);
     msg->setInt32("offload-only", offloadOnly);
     msg->setInt32("has-video", hasVideo);
     msg->setInt32("flags", flags);
+    msg->setInt32("isStreaming", isStreaming);
 
     sp<AMessage> response;
     status_t postStatus = msg->postAndAwaitResponse(&response);
@@ -430,12 +432,14 @@
         bool offloadOnly,
         bool hasVideo,
         uint32_t flags,
+        bool isStreaming,
         const sp<AMessage> &notify) {
     sp<AMessage> meta = new AMessage;
     meta->setMessage("format", format);
     meta->setInt32("offload-only", offloadOnly);
     meta->setInt32("has-video", hasVideo);
     meta->setInt32("flags", flags);
+    meta->setInt32("isStreaming", isStreaming);
 
     sp<AMessage> msg = new AMessage(kWhatChangeAudioFormat, this);
     msg->setInt32("queueGeneration", getQueueGeneration(true /* audio */));
@@ -460,7 +464,10 @@
             uint32_t flags;
             CHECK(msg->findInt32("flags", (int32_t *)&flags));
 
-            status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags);
+            uint32_t isStreaming;
+            CHECK(msg->findInt32("isStreaming", (int32_t *)&isStreaming));
+
+            status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
 
             sp<AMessage> response = new AMessage;
             response->setInt32("err", err);
@@ -1838,7 +1845,8 @@
         const sp<AMessage> &format,
         bool offloadOnly,
         bool hasVideo,
-        uint32_t flags) {
+        uint32_t flags,
+        bool isStreaming) {
     ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
             offloadOnly, offloadingAudio());
     bool audioSinkChanged = false;
@@ -1891,7 +1899,7 @@
             offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
             offloadInfo.bit_rate = avgBitRate;
             offloadInfo.has_video = hasVideo;
-            offloadInfo.is_streaming = true;
+            offloadInfo.is_streaming = isStreaming;
 
             if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
                 ALOGV("openAudioSink: no change in offload mode");
@@ -2043,7 +2051,10 @@
     uint32_t flags;
     CHECK(meta->findInt32("flags", (int32_t *)&flags));
 
-    status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags);
+    uint32_t isStreaming;
+    CHECK(meta->findInt32("isStreaming", (int32_t *)&isStreaming));
+
+    status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
 
     if (err != OK) {
         notify->setInt32("err", err);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index 385bb06..e6850b5 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -76,7 +76,8 @@
             bool offloadOnly,
             bool hasVideo,
             uint32_t flags,
-            bool *isOffloaded);
+            bool *isOffloaded,
+            bool isStreaming);
     void closeAudioSink();
 
     // re-open audio sink after all pending audio buffers played.
@@ -85,6 +86,7 @@
             bool offloadOnly,
             bool hasVideo,
             uint32_t flags,
+            bool isStreaming,
             const sp<AMessage> &notify);
 
     enum {
@@ -267,7 +269,8 @@
             const sp<AMessage> &format,
             bool offloadOnly,
             bool hasVideo,
-            uint32_t flags);
+            uint32_t flags,
+            bool isStreaming);
     void onCloseAudioSink();
     void onChangeAudioFormat(const sp<AMessage> &meta, const sp<AMessage> &notify);
 
diff --git a/media/libnbaio/NBLog.cpp b/media/libnbaio/NBLog.cpp
index 1d1d61b..adbbb74 100644
--- a/media/libnbaio/NBLog.cpp
+++ b/media/libnbaio/NBLog.cpp
@@ -30,7 +30,6 @@
 #include <utils/Log.h>
 #include <utils/String8.h>
 
-#include <map>
 #include <queue>
 #include <utility>
 
@@ -848,8 +847,6 @@
     }
     bool deferredTimestamp = false;
 #endif
-    std::map<std::pair<log_hash_t, int>, std::vector<int>> hists;
-    std::map<std::pair<log_hash_t, int>, int64_t*> lastTSs;
 
     for (auto entry = snapshot.begin(); entry != snapshot.end();) {
         switch (entry->type) {
@@ -928,31 +925,38 @@
             // There's probably a more efficient way to do it
             log_hash_t hash;
             memcpy(&hash, &(data->hash), sizeof(hash));
+            int64_t ts;
+            memcpy(&ts, &data->ts, sizeof(ts));
             const std::pair<log_hash_t, int> key(hash, data->author);
-            if (lastTSs[key] != nullptr) {
-                int64_t ts1;
-                memcpy(&ts1, lastTSs[key], sizeof(ts1));
-                int64_t ts2;
-                memcpy(&ts2, &data->ts, sizeof(ts2));
-                // TODO might want to filter excessively high outliers, which are usually caused
-                // by the thread being inactive.
-                hists[key].push_back(deltaMs(ts1, ts2));
-            }
-            lastTSs[key] = &(data->ts);
+            // TODO might want to filter excessively high outliers, which are usually caused
+            // by the thread being inactive.
+            mHists[key].push_back(ts);
             ++entry;
             break;
         }
-        case EVENT_HISTOGRAM_FLUSH:
-            body.appendFormat("Histograms:\n");
-            for (auto const &hist : hists) {
-                body.appendFormat("Histogram %X - ", (int)hist.first.first);
-                handleAuthor(HistogramEntry(entry), &body);
-                drawHistogram(&body, hist.second);
+        case EVENT_HISTOGRAM_FLUSH: {
+            HistogramEntry histEntry(entry);
+            // Log timestamp
+            int64_t ts = histEntry.timestamp();
+            timestamp.clear();
+            timestamp.appendFormat("[%d.%03d]", (int) (ts / (1000 * 1000 * 1000)),
+                            (int) ((ts / (1000 * 1000)) % 1000));
+            // Log histograms
+            body.appendFormat("Histogram flush - ");
+            handleAuthor(histEntry, &body);
+            body.appendFormat("\n");
+            for (auto hist = mHists.begin(); hist != mHists.end();) {
+                if (hist->first.second == histEntry.author()) {
+                    body.appendFormat("Histogram %X", (int)hist->first.first);
+                    drawHistogram(&body, hist->second, true/*logScale*/, indent + timestamp.size());
+                    hist = mHists.erase(hist);
+                } else {
+                    ++hist;
+                }
             }
-            hists.clear();
-            lastTSs.clear();
             ++entry;
             break;
+        }
         case EVENT_END_FMT:
             body.appendFormat("warning: got to end format event");
             ++entry;
@@ -1137,25 +1141,53 @@
     return width;
 }
 
-static std::map<int, int> buildBuckets(const std::vector<int> &samples) {
+static std::map<int, int> buildBuckets(const std::vector<int64_t> &samples) {
     // TODO allow buckets of variable resolution
     std::map<int, int> buckets;
-    for (int x : samples) {
-        ++buckets[x];
+    for (size_t i = 1; i < samples.size(); ++i) {
+        ++buckets[deltaMs(samples[i - 1], samples[i])];
     }
     return buckets;
 }
 
+static inline uint32_t log2(uint32_t x) {
+    // This works for x > 0
+    return 31 - __builtin_clz(x);
+}
+
 // TODO put this function in separate file. Make it return a std::string instead of modifying body
-void NBLog::Reader::drawHistogram(String8 *body, const std::vector<int> &samples, int maxHeight) {
+/*
+Example output:
+[54.234] Histogram flush - AudioOut_D:
+Histogram 33640BF1
+            [ 1][ 1][ 1][ 3][54][69][ 1][ 2][ 1]
+        64|                      []
+        32|                  []  []
+        16|                  []  []
+         8|                  []  []
+         4|                  []  []
+         2|______________[]__[]__[]______[]____
+              4   5   6   8   9  10  11  13  15
+Notice that all values that fall in the same row have the same height (65 and 127 are displayed
+identically). That's why exact counts are added at the top.
+*/
+void NBLog::Reader::drawHistogram(String8 *body,
+                                  const std::vector<int64_t> &samples,
+                                  bool logScale,
+                                  int indent,
+                                  int maxHeight) {
+    if (samples.size() <= 1) {
+        return;
+    }
     std::map<int, int> buckets = buildBuckets(samples);
-    // TODO add option for log scale
+    // TODO consider changing all ints to uint32_t or uint64_t
     static const char *underscores = "________________";
     static const char *spaces = "                ";
 
     auto it = buckets.begin();
     int maxLabel = it->first;
     int maxVal = it->second;
+    // Compute maximum values
     while (++it != buckets.end()) {
         if (it->first > maxLabel) {
             maxLabel = it->first;
@@ -1164,31 +1196,36 @@
             maxVal = it->second;
         }
     }
-    int height = maxVal;
+    int height = (logScale) ? log2(maxVal) + 1 : maxVal; // maxVal > 0, safe to call log2
     int leftPadding = widthOf(maxVal);
     int colWidth = std::max(std::max(widthOf(maxLabel) + 1, 3), leftPadding + 2);
     int scalingFactor = 1;
+    // scale data if it exceeds maximum height
     if (height > maxHeight) {
         scalingFactor = (height + maxHeight) / maxHeight;
         height /= scalingFactor;
     }
-    body->appendFormat("\n");
+    // write header line with bucket values
+    body->appendFormat("\n%*s", indent, " ");
     body->appendFormat("%*s", leftPadding + 2, " ");
     for (auto const &x : buckets)
     {
         body->appendFormat("[%*d]", colWidth - 2, x.second);
     }
-    body->appendFormat("\n");
+    // write histogram ascii art
+    body->appendFormat("\n%*s", indent, " ");
     for (int row = height * scalingFactor; row > 0; row -= scalingFactor)
     {
-        body->appendFormat("%*d|", leftPadding, row);
+        int value = ((logScale) ? (1 << row) : row);
+        body->appendFormat("%*u|", leftPadding, value);
         for (auto const &x : buckets) {
             body->appendFormat("%.*s%s", colWidth - 2,
-                   (row == scalingFactor) ? underscores : spaces,
-                   x.second < row ? ((row == scalingFactor) ? "__" : "  ") : "[]");
+                   (row <= scalingFactor) ? underscores : spaces,
+                   x.second < value ? ((row <= scalingFactor) ? "__" : "  ") : "[]");
         }
-        body->appendFormat("\n");
+        body->appendFormat("\n%*s", indent, " ");
     }
+    // write footer with bucket labels
     body->appendFormat("%*s", leftPadding + 1, " ");
     for (auto const &x : buckets)
     {
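The key change to the histogram path is that mHists now stores raw per-author timestamps and buildBuckets() derives the millisecond intervals itself from consecutive samples. A standalone sketch of that bucketing (deltaMs below is a stand-in for NBLog's helper, assumed to convert a nanosecond difference to milliseconds):

    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <vector>

    // Stand-in for NBLog's deltaMs(): difference between two ns timestamps, in ms.
    static int deltaMs(int64_t ns1, int64_t ns2) {
        return static_cast<int>((ns2 - ns1) / 1000000);
    }

    int main() {
        // Raw wakeup timestamps in ns, spaced 4/4/5/4 ms apart.
        std::vector<int64_t> samples = {0, 4000000, 8000000, 13000000, 17000000};
        std::map<int, int> buckets;                      // interval (ms) -> count
        for (size_t i = 1; i < samples.size(); ++i) {
            ++buckets[deltaMs(samples[i - 1], samples[i])];
        }
        for (auto const &b : buckets) {
            std::printf("%d ms -> %d\n", b.first, b.second);   // prints "4 ms -> 3" and "5 ms -> 1"
        }
        return 0;
    }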
diff --git a/media/libnbaio/include/NBLog.h b/media/libnbaio/include/NBLog.h
index 403f692..2893dc9 100644
--- a/media/libnbaio/include/NBLog.h
+++ b/media/libnbaio/include/NBLog.h
@@ -24,6 +24,7 @@
 #include <utils/Mutex.h>
 #include <utils/threads.h>
 
+#include <map>
 #include <set>
 #include <vector>
 
@@ -454,6 +455,8 @@
     audio_utils_fifo_reader * const mFifoReader;    // used to read from FIFO,
                                                     // non-NULL unless constructor fails
 
+    std::map<std::pair<log_hash_t, int>, std::vector<int64_t>> mHists;
+
     void    dumpLine(const String8& timestamp, String8& body);
 
     EntryIterator   handleFormat(const FormatEntry &fmtEntry,
@@ -462,7 +465,8 @@
     // dummy method for handling absent author entry
     virtual void handleAuthor(const AbstractEntry &fmtEntry, String8 *body) {}
 
-    static void drawHistogram(String8 *body, const std::vector<int> &samples, int maxHeight = 10);
+    static void drawHistogram(String8 *body, const std::vector<int64_t> &samples,
+                              bool logScale, int indent = 0, int maxHeight = 10);
 
     // Searches for the last entry of type <type> in the range [front, back)
     // back has to be entry-aligned. Returns nullptr if none enconuntered.
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 0d4dd04..72645ab 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -1212,7 +1212,7 @@
             break;
         }
 
-        sp<GraphicBuffer> graphicBuffer(new GraphicBuffer(buf, false));
+        sp<GraphicBuffer> graphicBuffer(GraphicBuffer::from(buf));
         BufferInfo info;
         info.mStatus = BufferInfo::OWNED_BY_US;
         info.mFenceFd = fenceFd;
@@ -1516,7 +1516,7 @@
     CHECK(storingMetadataInDecodedBuffers());
 
     // discard buffer in LRU info and replace with new buffer
-    oldest->mGraphicBuffer = new GraphicBuffer(buf, false);
+    oldest->mGraphicBuffer = GraphicBuffer::from(buf);
     oldest->mNewGraphicBuffer = true;
     oldest->mStatus = BufferInfo::OWNED_BY_US;
     oldest->setWriteFence(fenceFd, "dequeueBufferFromNativeWindow for oldest");
@@ -5782,8 +5782,7 @@
                 case IOMX::kPortModeDynamicANWBuffer:
                     if (info->mCodecData->size() >= sizeof(VideoNativeMetadata)) {
                         VideoNativeMetadata *vnmd = (VideoNativeMetadata*)info->mCodecData->base();
-                        sp<GraphicBuffer> graphicBuffer = new GraphicBuffer(
-                                vnmd->pBuffer, false /* keepOwnership */);
+                        sp<GraphicBuffer> graphicBuffer = GraphicBuffer::from(vnmd->pBuffer);
                         err2 = mCodec->mOMXNode->emptyBuffer(
                             bufferID, graphicBuffer, flags, timeUs, info->mFenceFd);
                     }
diff --git a/media/libstagefright/ACodecBufferChannel.cpp b/media/libstagefright/ACodecBufferChannel.cpp
index 40ac986..0d9696f 100644
--- a/media/libstagefright/ACodecBufferChannel.cpp
+++ b/media/libstagefright/ACodecBufferChannel.cpp
@@ -300,8 +300,10 @@
                 });
         size_t destinationBufferSize = maxSize;
         size_t heapSize = totalSize + destinationBufferSize;
-        mDealer = makeMemoryDealer(heapSize);
-        mDecryptDestination = mDealer->allocate(destinationBufferSize);
+        if (heapSize > 0) {
+            mDealer = makeMemoryDealer(heapSize);
+            mDecryptDestination = mDealer->allocate(destinationBufferSize);
+        }
     }
     std::vector<const BufferInfo> inputBuffers;
     for (const BufferAndId &elem : array) {
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 18cfc0e..bdc37a5 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -86,6 +86,7 @@
         liblog \
         libmedia \
         libaudioclient \
+        libmediametrics \
         libmediautils \
         libnetd_client \
         libsonivox \
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 7bb950e..61a2b5f 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -58,6 +58,10 @@
 
     virtual void postRecordingFrameHandleTimestamp(nsecs_t timestamp, native_handle_t* handle);
 
+    virtual void postRecordingFrameHandleTimestampBatch(
+                const std::vector<nsecs_t>& timestamps,
+                const std::vector<native_handle_t*>& handles);
+
 protected:
     virtual ~CameraSourceListener();
 
@@ -110,6 +114,20 @@
     }
 }
 
+void CameraSourceListener::postRecordingFrameHandleTimestampBatch(
+        const std::vector<nsecs_t>& timestamps,
+        const std::vector<native_handle_t*>& handles) {
+    sp<CameraSource> source = mSource.promote();
+    if (source.get() != nullptr) {
+        int n = timestamps.size();
+        std::vector<nsecs_t> modifiedTimestamps(n);
+        for (int i = 0; i < n; i++) {
+            modifiedTimestamps[i] = timestamps[i] / 1000;
+        }
+        source->recordingFrameHandleCallbackTimestampBatch(modifiedTimestamps, handles);
+    }
+}
+
 static int32_t getColorFormat(const char* colorFormat) {
     if (!colorFormat) {
         ALOGE("Invalid color format");
@@ -954,10 +972,35 @@
         }
 
         if (handle != nullptr) {
-            // Frame contains a VideoNativeHandleMetadata. Send the handle back to camera.
-            releaseRecordingFrameHandle(handle);
-            mMemoryBases.push_back(frame);
-            mMemoryBaseAvailableCond.signal();
+            uint32_t batchSize = 0;
+            {
+                Mutex::Autolock autoLock(mBatchLock);
+                if (mInflightBatchSizes.size() > 0) {
+                    batchSize = mInflightBatchSizes[0];
+                }
+            }
+            if (batchSize == 0) { // return buffers one by one
+                // Frame contains a VideoNativeHandleMetadata. Send the handle back to camera.
+                releaseRecordingFrameHandle(handle);
+                mMemoryBases.push_back(frame);
+                mMemoryBaseAvailableCond.signal();
+            } else { // Group buffers into a batch before returning them
+                Mutex::Autolock autoLock(mBatchLock);
+                mInflightReturnedHandles.push_back(handle);
+                mInflightReturnedMemorys.push_back(frame);
+                if (mInflightReturnedHandles.size() == batchSize) {
+                    releaseRecordingFrameHandleBatch(mInflightReturnedHandles);
+
+                    mInflightBatchSizes.pop_front();
+                    mInflightReturnedHandles.clear();
+                    for (const auto& mem : mInflightReturnedMemorys) {
+                        mMemoryBases.push_back(mem);
+                        mMemoryBaseAvailableCond.signal();
+                    }
+                    mInflightReturnedMemorys.clear();
+                }
+            }
+
         } else if (mCameraRecordingProxy != nullptr) {
             // mCamera is created by application. Return the frame back to camera via camera
             // recording proxy.
@@ -1149,6 +1192,21 @@
     }
 }
 
+void CameraSource::releaseRecordingFrameHandleBatch(const std::vector<native_handle_t*>& handles) {
+    if (mCameraRecordingProxy != nullptr) {
+        mCameraRecordingProxy->releaseRecordingFrameHandleBatch(handles);
+    } else if (mCamera != nullptr) {
+        int64_t token = IPCThreadState::self()->clearCallingIdentity();
+        mCamera->releaseRecordingFrameHandleBatch(handles);
+        IPCThreadState::self()->restoreCallingIdentity(token);
+    } else {
+        for (auto& handle : handles) {
+            native_handle_close(handle);
+            native_handle_delete(handle);
+        }
+    }
+}
+
 void CameraSource::recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
                 native_handle_t* handle) {
     ALOGV("%s: timestamp %lld us", __FUNCTION__, (long long)timestampUs);
@@ -1186,6 +1244,62 @@
     mFrameAvailableCondition.signal();
 }
 
+void CameraSource::recordingFrameHandleCallbackTimestampBatch(
+        const std::vector<int64_t>& timestampsUs,
+        const std::vector<native_handle_t*>& handles) {
+    size_t n = timestampsUs.size();
+    if (n != handles.size()) {
+        ALOGE("%s: timestampsUs(%zu) and handles(%zu) size mismatch!",
+                __FUNCTION__, timestampsUs.size(), handles.size());
+    }
+
+    Mutex::Autolock autoLock(mLock);
+    int batchSize = 0;
+    for (size_t i = 0; i < n; i++) {
+        int64_t timestampUs = timestampsUs[i];
+        native_handle_t* handle = handles[i];
+
+        ALOGV("%s: timestamp %lld us", __FUNCTION__, (long long)timestampUs);
+        if (handle == nullptr) continue;
+
+        if (shouldSkipFrameLocked(timestampUs)) {
+            releaseRecordingFrameHandle(handle);
+            continue;
+        }
+
+        while (mMemoryBases.empty()) {
+            if (mMemoryBaseAvailableCond.waitRelative(mLock, kMemoryBaseAvailableTimeoutNs) ==
+                    TIMED_OUT) {
+                ALOGW("Waiting on an available memory base timed out. Dropping a recording frame.");
+                releaseRecordingFrameHandle(handle);
+                continue;
+            }
+        }
+        ++batchSize;
+        ++mNumFramesReceived;
+        sp<IMemory> data = *mMemoryBases.begin();
+        mMemoryBases.erase(mMemoryBases.begin());
+
+        // Wrap native handle in sp<IMemory> so it can be pushed to mFramesReceived.
+        VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(data->pointer());
+        metadata->eType = kMetadataBufferTypeNativeHandleSource;
+        metadata->pHandle = handle;
+
+        mFramesReceived.push_back(data);
+        int64_t timeUs = mStartTimeUs + (timestampUs - mFirstFrameTimeUs);
+        mFrameTimes.push_back(timeUs);
+        ALOGV("initial delay: %" PRId64 ", current time stamp: %" PRId64, mStartTimeUs, timeUs);
+
+    }
+    if (batchSize > 0) {
+        Mutex::Autolock autoLock(mBatchLock);
+        mInflightBatchSizes.push_back(batchSize);
+    }
+    for (int i = 0; i < batchSize; i++) {
+        mFrameAvailableCondition.signal();
+    }
+}
+
 CameraSource::BufferQueueListener::BufferQueueListener(const sp<BufferItemConsumer>& consumer,
         const sp<CameraSource>& cameraSource) {
     mConsumer = consumer;
@@ -1302,6 +1416,17 @@
     mSource->recordingFrameHandleCallbackTimestamp(timestamp / 1000, handle);
 }
 
+void CameraSource::ProxyListener::recordingFrameHandleCallbackTimestampBatch(
+        const std::vector<int64_t>& timestampsUs,
+        const std::vector<native_handle_t*>& handles) {
+    int n = timestampsUs.size();
+    std::vector<nsecs_t> modifiedTimestamps(n);
+    for (int i = 0; i < n; i++) {
+        modifiedTimestamps[i] = timestampsUs[i] / 1000;
+    }
+    mSource->recordingFrameHandleCallbackTimestampBatch(modifiedTimestamps, handles);
+}
+
 void CameraSource::DeathNotifier::binderDied(const wp<IBinder>& who __unused) {
     ALOGI("Camera recording proxy died");
 }
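The batching bookkeeping above can be summarized independently of the Android types: each accepted batch pushes its size onto mInflightBatchSizes, and the return path collects handles until one full batch is gathered, then releases them with a single call. A simplified sketch (the class, its method names, and the use of int in place of native_handle_t* are invented for illustration):

    #include <cstddef>
    #include <cstdio>
    #include <deque>
    #include <vector>

    // Simplified model of the CameraSource batching above.
    struct BatchReturner {
        std::deque<size_t> inflightBatchSizes;   // one entry per accepted batch
        std::vector<int>   returnedHandles;      // handles collected for the current batch

        // Stands in for releaseRecordingFrameHandleBatch() on the proxy or ICamera.
        void releaseBatch(const std::vector<int> &handles) {
            std::printf("releasing %zu handle(s) in one call\n", handles.size());
        }

        // Called when a batch of frames is accepted from the camera.
        void onBatchAccepted(size_t frames) {
            if (frames > 0) inflightBatchSizes.push_back(frames);
        }

        // Mirrors the handle-return branch shown above.
        void onFrameReturned(int handle) {
            if (inflightBatchSizes.empty()) {    // not batching: release one by one
                releaseBatch({handle});
                return;
            }
            returnedHandles.push_back(handle);
            if (returnedHandles.size() == inflightBatchSizes.front()) {
                releaseBatch(returnedHandles);   // one binder round trip per batch
                inflightBatchSizes.pop_front();
                returnedHandles.clear();
            }
        }
    };

    int main() {
        BatchReturner r;
        r.onBatchAccepted(3);                    // camera delivered a batch of 3 frames
        r.onFrameReturned(1);
        r.onFrameReturned(2);
        r.onFrameReturned(3);                    // third return triggers the batched release
        return 0;
    }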
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
index 390c556..970526a 100644
--- a/media/libstagefright/CameraSourceTimeLapse.cpp
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -315,6 +315,17 @@
     CameraSource::recordingFrameHandleCallbackTimestamp(timestampUs, handle);
 }
 
+void CameraSourceTimeLapse::recordingFrameHandleCallbackTimestampBatch(
+        const std::vector<int64_t>& timestampsUs,
+        const std::vector<native_handle_t*>& handles) {
+    ALOGV("recordingFrameHandleCallbackTimestampBatch");
+    int n = timestampsUs.size();
+    for (int i = 0; i < n; i++) {
+        // Don't do batching for CameraSourceTimeLapse for now
+        recordingFrameHandleCallbackTimestamp(timestampsUs[i], handles[i]);
+    }
+}
+
 void CameraSourceTimeLapse::processBufferQueueFrame(BufferItem& buffer) {
     ALOGV("processBufferQueueFrame");
     int64_t timestampUs = buffer.mTimestamp / 1000;
diff --git a/media/libstagefright/SurfaceUtils.cpp b/media/libstagefright/SurfaceUtils.cpp
index 82e959e..b6b315d 100644
--- a/media/libstagefright/SurfaceUtils.cpp
+++ b/media/libstagefright/SurfaceUtils.cpp
@@ -176,7 +176,7 @@
             break;
         }
 
-        sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));
+        sp<GraphicBuffer> buf(GraphicBuffer::from(anb));
 
         // Fill the buffer with a 1x1 checkerboard pattern ;)
         uint32_t *img = NULL;
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index 536d40d..a07787a 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -30,10 +30,6 @@
 
 namespace android {
 
-static bool runningInEmulator() {
-    char prop[PROPERTY_VALUE_MAX];
-    return (property_get("ro.kernel.qemu", prop, NULL) > 0);
-}
 
 static int ALIGN(int x, int y) {
     // y must be a power of 2.
@@ -108,7 +104,7 @@
     size_t bufHeight = mCropHeight;
 
     // hardware has YUV12 and RGBA8888 support, so convert known formats
-    if (!runningInEmulator()) {
+    {
         switch (mColorFormat) {
             case OMX_COLOR_FormatYUV420Planar:
             case OMX_COLOR_FormatYUV420SemiPlanar:
@@ -205,7 +201,7 @@
 }
 
 std::list<FrameRenderTracker::Info> SoftwareRenderer::render(
-        const void *data, size_t size, int64_t mediaTimeUs, nsecs_t renderTimeNs,
+        const void *data, size_t /* size */, int64_t mediaTimeUs, nsecs_t renderTimeNs,
         void* /*platformPrivate*/, const sp<AMessage>& format) {
     resetFormatIfChanged(format);
     FrameRenderTracker::Info *info = NULL;
@@ -244,14 +240,15 @@
                 buf->stride, buf->height,
                 0, 0, mCropWidth - 1, mCropHeight - 1);
     } else if (mColorFormat == OMX_COLOR_FormatYUV420Planar) {
-        if ((size_t)mWidth * mHeight * 3 / 2 > size) {
-            goto skip_copying;
-        }
         const uint8_t *src_y = (const uint8_t *)data;
         const uint8_t *src_u =
                 (const uint8_t *)data + mWidth * mHeight;
         const uint8_t *src_v = src_u + (mWidth / 2 * mHeight / 2);
 
+        src_y += mCropLeft + mCropTop * mWidth;
+        src_u += (mCropLeft + mCropTop * mWidth / 2) / 2;
+        src_v += (mCropLeft + mCropTop * mWidth / 2) / 2;
+
         uint8_t *dst_y = (uint8_t *)dst;
         size_t dst_y_size = buf->stride * buf->height;
         size_t dst_c_stride = ALIGN(buf->stride / 2, 16);
@@ -259,6 +256,10 @@
         uint8_t *dst_v = dst_y + dst_y_size;
         uint8_t *dst_u = dst_v + dst_c_size;
 
+        dst_y += mCropTop * buf->stride + mCropLeft;
+        dst_v += (mCropTop/2) * dst_c_stride + mCropLeft/2;
+        dst_u += (mCropTop/2) * dst_c_stride + mCropLeft/2;
+
         for (int y = 0; y < mCropHeight; ++y) {
             memcpy(dst_y, src_y, mCropWidth);
 
@@ -277,12 +278,12 @@
         }
     } else if (mColorFormat == OMX_TI_COLOR_FormatYUV420PackedSemiPlanar
             || mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
-        if ((size_t)mWidth * mHeight * 3 / 2 > size) {
-            goto skip_copying;
-        }
         const uint8_t *src_y = (const uint8_t *)data;
         const uint8_t *src_uv = (const uint8_t *)data
-                + mWidth * (mHeight - mCropTop / 2);
+                + mWidth * mHeight;
+
+        src_y += mCropLeft + mCropTop * mWidth;
+        src_uv += (mCropLeft + mCropTop * mWidth) / 2;
 
         uint8_t *dst_y = (uint8_t *)dst;
 
@@ -292,6 +293,10 @@
         uint8_t *dst_v = dst_y + dst_y_size;
         uint8_t *dst_u = dst_v + dst_c_size;
 
+        dst_y += mCropTop * buf->stride + mCropLeft;
+        dst_v += (mCropTop/2) * dst_c_stride + mCropLeft/2;
+        dst_u += (mCropTop/2) * dst_c_stride + mCropLeft/2;
+
         for (int y = 0; y < mCropHeight; ++y) {
             memcpy(dst_y, src_y, mCropWidth);
 
@@ -311,11 +316,8 @@
             dst_v += dst_c_stride;
         }
     } else if (mColorFormat == OMX_COLOR_Format24bitRGB888) {
-        if ((size_t)mWidth * mHeight * 3 > size) {
-            goto skip_copying;
-        }
-        uint8_t* srcPtr = (uint8_t*)data;
-        uint8_t* dstPtr = (uint8_t*)dst;
+        uint8_t* srcPtr = (uint8_t*)data + mWidth * mCropTop * 3 + mCropLeft * 3;
+        uint8_t* dstPtr = (uint8_t*)dst + buf->stride * mCropTop * 3 + mCropLeft * 3;
 
         for (size_t y = 0; y < (size_t)mCropHeight; ++y) {
             memcpy(dstPtr, srcPtr, mCropWidth * 3);
@@ -323,14 +325,11 @@
             dstPtr += buf->stride * 3;
         }
     } else if (mColorFormat == OMX_COLOR_Format32bitARGB8888) {
-        if ((size_t)mWidth * mHeight * 4 > size) {
-            goto skip_copying;
-        }
         uint8_t *srcPtr, *dstPtr;
 
         for (size_t y = 0; y < (size_t)mCropHeight; ++y) {
-            srcPtr = (uint8_t*)data + mWidth * 4 * y;
-            dstPtr = (uint8_t*)dst + buf->stride * 4 * y;
+            srcPtr = (uint8_t*)data + mWidth * 4 * (y + mCropTop) + mCropLeft * 4;
+            dstPtr = (uint8_t*)dst + buf->stride * 4 * (y + mCropTop) + mCropLeft * 4;
             for (size_t x = 0; x < (size_t)mCropWidth; ++x) {
                 uint8_t a = *srcPtr++;
                 for (size_t i = 0; i < 3; ++i) {   // copy RGB
@@ -340,11 +339,8 @@
             }
         }
     } else if (mColorFormat == OMX_COLOR_Format32BitRGBA8888) {
-        if ((size_t)mWidth * mHeight * 4 > size) {
-            goto skip_copying;
-        }
-        uint8_t* srcPtr = (uint8_t*)data;
-        uint8_t* dstPtr = (uint8_t*)dst;
+        uint8_t* srcPtr = (uint8_t*)data + mWidth * mCropTop * 4 + mCropLeft * 4;
+        uint8_t* dstPtr = (uint8_t*)dst + buf->stride * mCropTop * 4 + mCropLeft * 4;
 
         for (size_t y = 0; y < (size_t)mCropHeight; ++y) {
             memcpy(dstPtr, srcPtr, mCropWidth * 4);
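
With the size checks gone, the renderer now relies purely on crop-based pointer arithmetic: for a YUV420 planar frame of width W, the luma plane is advanced by top * W + left and each half-resolution chroma plane by (top / 2) * (W / 2) + left / 2, which is what the (mCropLeft + mCropTop * mWidth / 2) / 2 expressions above evaluate to when the crop offsets and the width are even, as 4:2:0 subsampling requires. An illustrative standalone helper performing the same offset computation (not part of this change):

    // Illustrative sketch: source-plane byte offsets for cropping a YUV420
    // planar buffer of width x height, matching the arithmetic used above.
    struct Yuv420Offsets { size_t y, u, v; };
    static Yuv420Offsets cropOffsets(int width, int height, int cropLeft, int cropTop) {
        const size_t ySize = (size_t)width * height;
        const size_t cSize = ySize / 4;   // each chroma plane is (W/2) x (H/2)
        const size_t yOff  = (size_t)cropTop * width + cropLeft;
        const size_t cOff  = (size_t)(cropTop / 2) * (width / 2) + cropLeft / 2;
        return { yOff, ySize + cOff, ySize + cSize + cOff };  // Y, then U, then V
    }
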
diff --git a/media/libstagefright/include/CameraSource.h b/media/libstagefright/include/CameraSource.h
index 2cc8eda..2aaa884 100644
--- a/media/libstagefright/include/CameraSource.h
+++ b/media/libstagefright/include/CameraSource.h
@@ -18,6 +18,7 @@
 
 #define CAMERA_SOURCE_H_
 
+#include <deque>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaSource.h>
 #include <camera/android/hardware/ICamera.h>
@@ -142,6 +143,9 @@
                 const sp<IMemory> &data);
         virtual void recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
                 native_handle_t* handle);
+        virtual void recordingFrameHandleCallbackTimestampBatch(
+                const std::vector<int64_t>& timestampsUs,
+                const std::vector<native_handle_t*>& handles);
 
     private:
         sp<CameraSource> mSource;
@@ -214,6 +218,8 @@
     virtual status_t startCameraRecording();
     virtual void releaseRecordingFrame(const sp<IMemory>& frame);
     virtual void releaseRecordingFrameHandle(native_handle_t* handle);
+    // The stagefright recorder does not use this for now.
+    virtual void releaseRecordingFrameHandleBatch(const std::vector<native_handle_t*>& handles);
 
     // Returns true if need to skip the current frame.
     // Called from dataCallbackTimestamp.
@@ -228,6 +234,10 @@
     virtual void recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
             native_handle_t* handle);
 
+    virtual void recordingFrameHandleCallbackTimestampBatch(
+            const std::vector<int64_t>& timestampsUs,
+            const std::vector<native_handle_t*>& handles);
+
     // Process a buffer item received in BufferQueueListener.
     virtual void processBufferQueueFrame(BufferItem& buffer);
 
@@ -273,6 +283,13 @@
     KeyedVector<ANativeWindowBuffer*, BufferItem> mReceivedBufferItemMap;
     sp<BufferQueueListener> mBufferQueueListener;
 
+    Mutex mBatchLock; // protecting access to mInflightXXXXX members below
+    // Start of members protected by mBatchLock
+    std::deque<uint32_t> mInflightBatchSizes;
+    std::vector<native_handle_t*> mInflightReturnedHandles;
+    std::vector<const sp<IMemory>> mInflightReturnedMemorys;
+    // End of members protected by mBatchLock
+
     void releaseQueuedFrames();
     void releaseOneRecordingFrame(const sp<IMemory>& frame);
     void createVideoBufferMemoryHeap(size_t size, uint32_t bufferCount);
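
The three members guarded by mBatchLock give the buffer-return path enough state to hand frames back in batches: the callback above pushes one entry onto mInflightBatchSizes per delivered batch, and returned handles can then be accumulated until a complete batch is available before a single release call is issued. A rough sketch of that pairing; the function name and the exact return-path plumbing are assumptions, not code from this change:

    // Illustrative only: pair mInflightBatchSizes with returned handles so a
    // whole batch is released in one IPC instead of one call per frame.
    void onRecordingFrameReturned(native_handle_t* handle) {
        std::vector<native_handle_t*> batchToRelease;
        {
            Mutex::Autolock autoLock(mBatchLock);
            mInflightReturnedHandles.push_back(handle);
            if (!mInflightBatchSizes.empty() &&
                    mInflightReturnedHandles.size() >= mInflightBatchSizes.front()) {
                batchToRelease.swap(mInflightReturnedHandles);
                mInflightBatchSizes.pop_front();
            }
        }
        if (!batchToRelease.empty()) {
            releaseRecordingFrameHandleBatch(batchToRelease);
        }
    }
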
diff --git a/media/libstagefright/include/CameraSourceTimeLapse.h b/media/libstagefright/include/CameraSourceTimeLapse.h
index 871c1d9..b066f9a 100644
--- a/media/libstagefright/include/CameraSourceTimeLapse.h
+++ b/media/libstagefright/include/CameraSourceTimeLapse.h
@@ -147,12 +147,23 @@
 
     // In the video camera case calls skipFrameAndModifyTimeStamp() to modify
     // timestamp and set mSkipCurrentFrame.
-    // Then it calls the base CameraSource::recordingFrameHandleCallbackTimestamp()
+    // Then it calls the base CameraSource::recordingFrameHandleCallbackTimestamp() or
+    // CameraSource::recordingFrameHandleCallbackTimestampBatch()
     // This will be called in VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA mode when
     // the metadata is VideoNativeHandleMetadata.
     virtual void recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
             native_handle_t* handle);
 
+    // In the video camera case calls skipFrameAndModifyTimeStamp() to modify
+    // timestamp and set mSkipCurrentFrame.
+    // Then it calls the base CameraSource::recordingFrameHandleCallbackTimestamp() or
+    // CameraSource::recordingFrameHandleCallbackTimestampBatch()
+    // This will be called in VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA mode when
+    // the metadata is VideoNativeHandleMetadata.
+    virtual void recordingFrameHandleCallbackTimestampBatch(
+            const std::vector<int64_t>& timestampsUs,
+            const std::vector<native_handle_t*>& handles);
+
     // Process a buffer item received in CameraSource::BufferQueueListener.
     // This will be called in VIDEO_BUFFER_MODE_BUFFER_QUEUE mode.
     virtual void processBufferQueueFrame(BufferItem& buffer);
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index b933002..6bac1db 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -577,7 +577,7 @@
     // fill in CryptoInfo fields for AnotherPacketSource::read()
     // MediaCas doesn't use cryptoMode, but set to non-zero value here.
     scrambledAccessUnit->meta()->setInt32(
-            "cryptoMode", CryptoPlugin::kMode_AES_CBC);
+            "cryptoMode", CryptoPlugin::kMode_AES_CTR);
     scrambledAccessUnit->meta()->setInt32("cryptoKey", keyId);
     scrambledAccessUnit->meta()->setBuffer("clearBytes", clearSizes);
     scrambledAccessUnit->meta()->setBuffer("encBytes", encSizes);
diff --git a/media/libstagefright/omx/1.0/Omx.cpp b/media/libstagefright/omx/1.0/Omx.cpp
index 134c661..e5b89da 100644
--- a/media/libstagefright/omx/1.0/Omx.cpp
+++ b/media/libstagefright/omx/1.0/Omx.cpp
@@ -95,11 +95,11 @@
     }
 
     sp<OMXNodeInstance> instance = new OMXNodeInstance(
-            this, new LWOmxObserver(observer), name);
+            this, new LWOmxObserver(observer), name.c_str());
 
     OMX_COMPONENTTYPE *handle;
     OMX_ERRORTYPE err = mMaster->makeComponentInstance(
-            name, &OMXNodeInstance::kCallbacks,
+            name.c_str(), &OMXNodeInstance::kCallbacks,
             instance.get(), &handle);
 
     if (err != OMX_ErrorNone) {
diff --git a/media/libstagefright/omx/1.0/WOmxNode.cpp b/media/libstagefright/omx/1.0/WOmxNode.cpp
index dc5c8e1..ea9fb35 100644
--- a/media/libstagefright/omx/1.0/WOmxNode.cpp
+++ b/media/libstagefright/omx/1.0/WOmxNode.cpp
@@ -414,7 +414,7 @@
         getExtensionIndex_cb _hidl_cb) {
     OMX_INDEXTYPE index;
     Status status = toStatus(mBase->getExtensionIndex(
-            parameterName, &index));
+            parameterName.c_str(), &index));
     _hidl_cb(status, toRawIndexType(index));
     return Void();
 }
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 39ed759..7132f9b 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -902,6 +902,9 @@
                         ? kMetadataBufferTypeGrallocSource : requestedType;
             err = OMX_SetParameter(mHandle, index, &params);
         }
+        if (err == OMX_ErrorBadParameter) {
+            err = OMX_ErrorUnsupportedIndex;
+        }
     }
 
     // don't log loud error if component does not support metadata mode on the output
@@ -1030,6 +1033,11 @@
     }
 
     Mutex::Autolock autoLock(mLock);
+    if (!mSailed) {
+        ALOGE("b/35467458");
+        android_errorWriteLog(0x534e4554, "35467458");
+        return BAD_VALUE;
+    }
 
     switch (omxBuffer.mBufferType) {
         case OMXBuffer::kBufferTypePreset:
@@ -1467,6 +1475,11 @@
 
     Mutex::Autolock autoLock(mLock);
 
+    if (!mSailed) {
+        ALOGE("b/35467458");
+        android_errorWriteLog(0x534e4554, "35467458");
+        return BAD_VALUE;
+    }
     BufferMeta *buffer_meta = new BufferMeta(portIndex);
 
     OMX_BUFFERHEADERTYPE *header;
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index 5505aa4..c6c0245 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -33,6 +33,7 @@
 #include "ASessionDescription.h"
 
 #include <ctype.h>
+#include <cutils/properties.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -135,7 +136,7 @@
           mCheckPending(false),
           mCheckGeneration(0),
           mCheckTimeoutGeneration(0),
-          mTryTCPInterleaving(false),
+          mTryTCPInterleaving(property_get_bool("rtp.transport.TCP", false)),
           mTryFakeRTCP(false),
           mReceivedFirstRTCPPacket(false),
           mReceivedFirstRTPPacket(false),
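
mTryTCPInterleaving is now seeded from the rtp.transport.TCP system property instead of always starting out false; property_get_bool() comes from the newly included <cutils/properties.h> and falls back to the supplied default when the property is unset. The pattern in isolation (illustrative only):

    // Illustrative sketch: read a boolean system property with a default value,
    // as done above for "rtp.transport.TCP".
    static bool startWithTcpInterleaving() {
        return property_get_bool("rtp.transport.TCP", false) != 0;
    }
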
diff --git a/media/libstagefright/tests/SurfaceMediaSource_test.cpp b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
index ea58343..7c464ff 100644
--- a/media/libstagefright/tests/SurfaceMediaSource_test.cpp
+++ b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
@@ -510,7 +510,7 @@
 
     // Fill the buffer with a checkerboard pattern
     uint8_t* img = NULL;
-    sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));
+    sp<GraphicBuffer> buf(GraphicBuffer::from(anb));
     buf->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)(&img));
     SurfaceMediaSourceTest::fillYV12Buffer(img, width, height, buf->getStride());
     buf->unlock();
@@ -527,7 +527,7 @@
     ASSERT_TRUE(anb != NULL);
 
     // We do not fill the buffer in. Just queue it back.
-    sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));
+    sp<GraphicBuffer> buf(GraphicBuffer::from(anb));
     ASSERT_EQ(NO_ERROR, mANW->queueBuffer(mANW.get(), buf->getNativeBuffer(),
             -1));
 }
diff --git a/media/mtp/AsyncIO.cpp b/media/mtp/AsyncIO.cpp
index e77ad38..bfb07dc 100644
--- a/media/mtp/AsyncIO.cpp
+++ b/media/mtp/AsyncIO.cpp
@@ -96,6 +96,10 @@
 
 } // end anonymous namespace
 
+aiocb::~aiocb() {
+    CHECK(!thread.joinable());
+}
+
 void aio_pool_init(void(f)(int)) {
     CHECK(done == 1);
     done = 0;
diff --git a/media/mtp/AsyncIO.h b/media/mtp/AsyncIO.h
index f7515a2..ed80828 100644
--- a/media/mtp/AsyncIO.h
+++ b/media/mtp/AsyncIO.h
@@ -48,6 +48,8 @@
     std::thread thread;
     ssize_t ret;
     int error;
+
+    ~aiocb();
 };
 
 // Submit a request for IO to be completed
@@ -58,9 +60,13 @@
 
 // Suspend current thread until given IO is complete, at which point
 // its return value and any errors can be accessed
+// All submitted requests must have a corresponding suspend.
+// aiocb->aio_buf must refer to valid memory until after the suspend call
 int aio_suspend(struct aiocb *[], int, const struct timespec *);
 int aio_error(const struct aiocb *);
 ssize_t aio_return(struct aiocb *);
+
+// (Currently unimplemented)
 int aio_cancel(int, struct aiocb *);
 
 // Initialize a threadpool to perform IO. Only one pool can be
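
The new comments pin down this shim's contract: every submitted request must be waited on with aio_suspend(), the buffer behind aio_buf has to stay valid until that wait returns, and aio_cancel() is currently unimplemented. A hedged usage sketch under those rules; the aio_write() submit call and the aio_nbytes field are assumed to exist in this header (only aio_fildes and aio_buf appear in this diff), and aio_error() is assumed to follow the POSIX convention of returning 0 on success:

    // Illustrative only: submit one write, then wait for it as the contract
    // above requires, keeping `data` alive until aio_suspend() returns.
    static ssize_t writeAndWait(int fd, void* data, size_t length) {
        struct aiocb cb = {};
        cb.aio_fildes = fd;
        cb.aio_buf    = data;
        cb.aio_nbytes = length;            // assumed field name
        aio_write(&cb);                    // assumed submit function

        struct aiocb* list[] = { &cb };
        aio_suspend(list, 1, nullptr);     // mandatory matching suspend
        return (aio_error(&cb) == 0) ? aio_return(&cb) : -1;
    }
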
diff --git a/media/mtp/MtpFfsHandle.cpp b/media/mtp/MtpFfsHandle.cpp
index 35dd10f..565a2fe 100644
--- a/media/mtp/MtpFfsHandle.cpp
+++ b/media/mtp/MtpFfsHandle.cpp
@@ -490,7 +490,11 @@
 
 int MtpFfsHandle::configure(bool usePtp) {
     // Wait till previous server invocation has closed
-    std::lock_guard<std::mutex> lk(mLock);
+    if (!mLock.try_lock_for(std::chrono::milliseconds(1000))) {
+        LOG(ERROR) << "MtpServer was unable to get configure lock";
+        return -1;
+    }
+    int ret = 0;
 
     // If ptp is changed, the configuration must be rewritten
     if (mPtp != usePtp) {
@@ -500,10 +504,10 @@
     mPtp = usePtp;
 
     if (!initFunctionfs()) {
-        return -1;
+        ret = -1;
     }
-
-    return 0;
+    mLock.unlock();
+    return ret;
 }
 
 void MtpFfsHandle::close() {
@@ -537,14 +541,12 @@
         if (file_length > 0) {
             length = std::min(static_cast<uint32_t>(MAX_FILE_CHUNK_SIZE), file_length);
 
-            // Read data from USB
-            if ((ret = readHandle(mBulkOut, data, length)) == -1) {
-                return -1;
-            }
+            // Read data from USB, handle errors after waiting for write thread.
+            ret = readHandle(mBulkOut, data, length);
 
             if (file_length != MAX_MTP_FILE_SIZE && ret < static_cast<int>(length)) {
+                ret = -1;
                 errno = EIO;
-                return -1;
             }
             read = true;
         }
@@ -565,6 +567,11 @@
             write = false;
         }
 
+        // If there was an error reading above
+        if (ret == -1) {
+            return -1;
+        }
+
         if (read) {
             // Enqueue a new write request
             aio.aio_buf = data;
@@ -622,6 +629,7 @@
     aio.aio_fildes = mfr.fd;
     struct aiocb *aiol[] = {&aio};
     int ret, length;
+    int error = 0;
     bool read = false;
     bool write = false;
 
@@ -665,6 +673,10 @@
             write = true;
         }
 
+        if (error == -1) {
+            return -1;
+        }
+
         if (file_length > 0) {
             length = std::min(static_cast<uint64_t>(MAX_FILE_CHUNK_SIZE), file_length);
             // Queue up another read
@@ -676,8 +688,9 @@
         }
 
         if (write) {
-            if (writeHandle(mBulkIn, data2, ret) == -1)
-                return -1;
+            if (writeHandle(mBulkIn, data2, ret) == -1) {
+                error = -1;
+            }
             write = false;
         }
     }
diff --git a/media/mtp/MtpFfsHandle.h b/media/mtp/MtpFfsHandle.h
index b4d5a97..7491a1b 100644
--- a/media/mtp/MtpFfsHandle.h
+++ b/media/mtp/MtpFfsHandle.h
@@ -36,7 +36,7 @@
 
     bool mPtp;
 
-    std::mutex mLock;
+    std::timed_mutex mLock;
 
     android::base::unique_fd mControl;
     // "in" from the host's perspective => sink for mtp server
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index e4e3d8f..824872f 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -17,7 +17,7 @@
 // frameworks/av/include.
 
 ndk_library {
-    name: "libmediandk.ndk",
+    name: "libmediandk",
     symbol_file: "libmediandk.map.txt",
     first_version: "21",
     unversioned_until: "current",
diff --git a/media/ndk/Android.mk b/media/ndk/Android.mk
index 0984ca4..2c070af 100644
--- a/media/ndk/Android.mk
+++ b/media/ndk/Android.mk
@@ -47,6 +47,9 @@
 
 LOCAL_CFLAGS += -Werror -Wall
 
+LOCAL_STATIC_LIBRARIES := \
+    libgrallocusage \
+
 LOCAL_SHARED_LIBRARIES := \
     libbinder \
     libmedia \
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index c0aee90..c449611 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -27,6 +27,7 @@
 #include <android_media_Utils.h>
 #include <android_runtime/android_view_Surface.h>
 #include <android_runtime/android_hardware_HardwareBuffer.h>
+#include <grallocusage/GrallocUsageConversion.h>
 
 using namespace android;
 
@@ -260,7 +261,8 @@
     uint64_t consumerUsage;
     android_hardware_HardwareBuffer_convertToGrallocUsageBits(
             &producerUsage, &consumerUsage, mUsage0, mUsage1);
-    mHalUsage = consumerUsage;
+    // Strip out producerUsage here.
+    mHalUsage = android_convertGralloc1To0Usage(0, consumerUsage);
 
     sp<IGraphicBufferProducer> gbProducer;
     sp<IGraphicBufferConsumer> gbConsumer;
@@ -411,11 +413,9 @@
         }
 
         // Check if the producer buffer configurations match what ImageReader configured.
-        if ((bufferFmt != HAL_PIXEL_FORMAT_BLOB) && (readerFmt != HAL_PIXEL_FORMAT_BLOB) &&
-                (readerWidth != bufferWidth || readerHeight != bufferHeight)) {
-            ALOGW("%s: Buffer size: %dx%d, doesn't match AImageReader configured size: %dx%d",
-                    __FUNCTION__, bufferWidth, bufferHeight, readerWidth, readerHeight);
-        }
+        ALOGV_IF(readerWidth != bufferWidth || readerHeight != bufferHeight,
+                "%s: Buffer size: %dx%d, doesn't match AImageReader configured size: %dx%d",
+                __FUNCTION__, bufferWidth, bufferHeight, readerWidth, readerHeight);
 
         // Check if the buffer usage is a super set of reader's usage bits, aka all usage bits that
         // ImageReader requested has been supported from the producer side.
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
new file mode 100644
index 0000000..72917dd
--- /dev/null
+++ b/media/utils/Android.bp
@@ -0,0 +1,41 @@
+// Copyright 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+cc_library_shared {
+    name: "libmediautils",
+
+    srcs: [
+        "BatteryNotifier.cpp",
+        "ISchedulingPolicyService.cpp",
+        "MemoryLeakTrackUtil.cpp",
+        "ProcessInfo.cpp",
+        "SchedulingPolicyService.cpp",
+    ],
+    shared_libs: [
+        "libbinder",
+        "libcutils",
+        "liblog",
+        "libutils",
+        "libmemunreachable",
+    ],
+
+    cflags: [
+        "-Wall",
+        "-Wextra",
+        "-Werror",
+    ],
+
+    local_include_dirs: ["include"],
+    export_include_dirs: ["include"],
+}
diff --git a/media/utils/Android.mk b/media/utils/Android.mk
deleted file mode 100644
index 21d1b5b..0000000
--- a/media/utils/Android.mk
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
-  BatteryNotifier.cpp \
-  ISchedulingPolicyService.cpp \
-  MemoryLeakTrackUtil.cpp \
-  ProcessInfo.cpp \
-  SchedulingPolicyService.cpp
-
-LOCAL_SHARED_LIBRARIES := \
-  libbinder \
-  libcutils \
-  liblog \
-  libutils \
-  libmemunreachable \
-
-LOCAL_C_INCLUDES := $(LOCAL_PATH)/include
-
-LOCAL_CFLAGS += \
-  -Wall \
-  -Wextra \
-  -Werror \
-
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
-
-LOCAL_MODULE := libmediautils
-
-include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 4b2e643..42e9c6b 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -117,6 +117,22 @@
 Mutex gLock;
 wp<AudioFlinger> gAudioFlinger;
 
+// Keep a strong reference to media.log service around forever.
+// The service is within our parent process so it can never die in a way that we could observe.
+// These two variables are const after initialization.
+static sp<IBinder> sMediaLogServiceAsBinder;
+static sp<IMediaLogService> sMediaLogService;
+
+static pthread_once_t sMediaLogOnce = PTHREAD_ONCE_INIT;
+
+static void sMediaLogInit()
+{
+    sMediaLogServiceAsBinder = defaultServiceManager()->getService(String16("media.log"));
+    if (sMediaLogServiceAsBinder != 0) {
+        sMediaLogService = interface_cast<IMediaLogService>(sMediaLogServiceAsBinder);
+    }
+}
+
 // ----------------------------------------------------------------------------
 
 std::string formatToString(audio_format_t format) {
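
sMediaLogInit() runs exactly once via pthread_once(), so the two media.log globals are written a single time and are effectively const afterwards; code that runs after the constructor (the destructor, dump(), newWriter_l(), the MediaLogNotifier thread further down) sees either the deliberately null value or the fully initialized service. The same once-only initialization pattern in isolation, with a hypothetical service name:

    // Illustrative sketch of the pthread_once() pattern used above.
    static sp<IBinder> sSomeServiceBinder;                 // written once, read-only afterwards
    static pthread_once_t sSomeServiceOnce = PTHREAD_ONCE_INIT;

    static void someServiceInit() {
        sSomeServiceBinder = defaultServiceManager()->getService(String16("some.service"));
    }

    static void ensureSomeService() {
        (void) pthread_once(&sSomeServiceOnce, someServiceInit);  // thread-safe, runs the init once
    }
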
@@ -154,6 +170,7 @@
     if (doLog) {
         mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters",
                 MemoryHeapBase::READ_ONLY);
+        (void) pthread_once(&sMediaLogOnce, sMediaLogInit);
     }
 
     // reset battery stats.
@@ -230,15 +247,11 @@
     }
 
     // Tell media.log service about any old writers that still need to be unregistered
-    if (mLogMemoryDealer != 0) {
-        sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log"));
-        if (binder != 0) {
-            sp<IMediaLogService> mediaLogService(interface_cast<IMediaLogService>(binder));
-            for (size_t count = mUnregisteredWriters.size(); count > 0; count--) {
-                sp<IMemory> iMemory(mUnregisteredWriters.top()->getIMemory());
-                mUnregisteredWriters.pop();
-                mediaLogService->unregisterWriter(iMemory);
-            }
+    if (sMediaLogService != 0) {
+        for (size_t count = mUnregisteredWriters.size(); count > 0; count--) {
+            sp<IMemory> iMemory(mUnregisteredWriters.top()->getIMemory());
+            mUnregisteredWriters.pop();
+            sMediaLogService->unregisterWriter(iMemory);
         }
     }
 }
@@ -519,13 +532,10 @@
 
         // append a copy of media.log here by forwarding fd to it, but don't attempt
         // to lookup the service if it's not running, as it will block for a second
-        if (mLogMemoryDealer != 0) {
-            sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log"));
-            if (binder != 0) {
-                dprintf(fd, "\nmedia.log:\n");
-                Vector<String16> args;
-                binder->dump(fd, args);
-            }
+        if (sMediaLogServiceAsBinder != 0) {
+            dprintf(fd, "\nmedia.log:\n");
+            Vector<String16> args;
+            sMediaLogServiceAsBinder->dump(fd, args);
         }
 
         // check for optional arguments
@@ -570,16 +580,11 @@
 
 sp<NBLog::Writer> AudioFlinger::newWriter_l(size_t size, const char *name)
 {
-    // If there is no memory allocated for logs, return a dummy writer that does nothing
-    if (mLogMemoryDealer == 0) {
+    // If there is no memory allocated for logs, return a dummy writer that does nothing.
+    // Similarly if we can't contact the media.log service, also return a dummy writer.
+    if (mLogMemoryDealer == 0 || sMediaLogService == 0) {
         return new NBLog::Writer();
     }
-    sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log"));
-    // Similarly if we can't contact the media.log service, also return a dummy writer
-    if (binder == 0) {
-        return new NBLog::Writer();
-    }
-    sp<IMediaLogService> mediaLogService(interface_cast<IMediaLogService>(binder));
     sp<IMemory> shared = mLogMemoryDealer->allocate(NBLog::Timeline::sharedSize(size));
     // If allocation fails, consult the vector of previously unregistered writers
     // and garbage-collect one or more of them until an allocation succeeds
@@ -590,7 +595,7 @@
                 // Pick the oldest stale writer to garbage-collect
                 sp<IMemory> iMemory(mUnregisteredWriters[0]->getIMemory());
                 mUnregisteredWriters.removeAt(0);
-                mediaLogService->unregisterWriter(iMemory);
+                sMediaLogService->unregisterWriter(iMemory);
                 // Now the media.log remote reference to IMemory is gone.  When our last local
                 // reference to IMemory also drops to zero at end of this block,
                 // the IMemory destructor will deallocate the region from mLogMemoryDealer.
@@ -609,7 +614,7 @@
     NBLog::Shared *sharedRawPtr = (NBLog::Shared *) shared->pointer();
     new((void *) sharedRawPtr) NBLog::Shared(); // placement new here, but the corresponding
                                                 // explicit destructor not needed since it is POD
-    mediaLogService->registerWriter(shared, size, name);
+    sMediaLogService->registerWriter(shared, size, name);
     return new NBLog::Writer(shared, size);
 }
 
@@ -1544,6 +1549,10 @@
 }
 
 bool AudioFlinger::MediaLogNotifier::threadLoop() {
+    // Should already have been checked, but just in case
+    if (sMediaLogService == 0) {
+        return false;
+    }
     // Wait until there are pending requests
     {
         AutoMutex _l(mMutex);
@@ -1555,11 +1564,7 @@
         mPendingRequests = false;
     }
     // Execute the actual MediaLogService binder call and ignore extra requests for a while
-    sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log"));
-    if (binder != 0) {
-        sp<IMediaLogService> mediaLogService(interface_cast<IMediaLogService>(binder));
-        mediaLogService->requestMergeWakeup();
-    }
+    sMediaLogService->requestMergeWakeup();
     usleep(kPostTriggerSleepPeriod);
     return true;
 }
@@ -2133,7 +2138,7 @@
                 return BAD_VALUE;
             }
             mMmapThreads.removeItem(output);
-            ALOGV("closing mmapThread %p", mmapThread.get());
+            ALOGD("closing mmapThread %p", mmapThread.get());
         }
         const sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
         ioDesc->mIoHandle = output;
@@ -2148,7 +2153,7 @@
             closeOutputFinish(playbackThread);
         }
     } else if (mmapThread != 0) {
-        ALOGV("mmapThread exit()");
+        ALOGD("mmapThread exit()");
         mmapThread->exit();
         AudioStreamOut *out = mmapThread->clearOutput();
         ALOG_ASSERT(out != NULL, "out shouldn't be NULL");
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 7f7e353..255e795 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -6802,7 +6802,7 @@
 bool AudioFlinger::RecordThread::stop(RecordThread::RecordTrack* recordTrack) {
     ALOGV("RecordThread::stop");
     AutoMutex _l(mLock);
-    if (mActiveTracks.indexOf(recordTrack) != 0 || recordTrack->mState == TrackBase::PAUSING) {
+    if (mActiveTracks.indexOf(recordTrack) < 0 || recordTrack->mState == TrackBase::PAUSING) {
         return false;
     }
     // note that threadLoop may still be processing the track at this point [without lock]
@@ -6816,7 +6816,7 @@
     // FIXME incorrect usage of wait: no explicit predicate or loop
     mStartStopCond.wait(mLock);
     // if we have been restarted, recordTrack is in mActiveTracks here
-    if (exitPending() || mActiveTracks.indexOf(recordTrack) != 0) {
+    if (exitPending() || mActiveTracks.indexOf(recordTrack) < 0) {
         ALOGV("Record stopped OK");
         return true;
     }
@@ -7594,7 +7594,11 @@
 
     if (mActiveTracks.size() == 0) {
         // for the first track, reuse portId and session allocated when the stream was opened
-        mHalStream->start();
+        ret = mHalStream->start();
+        if (ret != NO_ERROR) {
+            ALOGE("%s: error mHalStream->start() = %d for first track", __FUNCTION__, ret);
+            return ret;
+        }
         portId = mPortId;
         sessionId = mSessionId;
         mStandby = false;
@@ -7649,6 +7653,7 @@
 
     // abort if start is rejected by audio policy manager
     if (ret != NO_ERROR) {
+        ALOGE("%s: error start rejected by AudioPolicyManager = %d", __FUNCTION__, ret);
         if (mActiveTracks.size() != 0) {
             if (isOutput()) {
                 AudioSystem::releaseOutput(mId, streamType(), sessionId);
@@ -7944,15 +7949,17 @@
     if (isOutput() && mPrevOutDevice != mOutDevice) {
         mPrevOutDevice = type;
         sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
-        if (mCallback != 0) {
-            mCallback->onRoutingChanged(deviceId);
+        sp<MmapStreamCallback> callback = mCallback.promote();
+        if (callback != 0) {
+            callback->onRoutingChanged(deviceId);
         }
     }
     if (!isOutput() && mPrevInDevice != mInDevice) {
         mPrevInDevice = type;
         sendIoConfigEvent_l(AUDIO_INPUT_CONFIG_CHANGED);
-        if (mCallback != 0) {
-            mCallback->onRoutingChanged(deviceId);
+        sp<MmapStreamCallback> callback = mCallback.promote();
+        if (callback != 0) {
+            callback->onRoutingChanged(deviceId);
         }
     }
     return status;
@@ -8067,8 +8074,9 @@
 
 void AudioFlinger::MmapThread::threadLoop_exit()
 {
-    if (mCallback != 0) {
-        mCallback->onTearDown();
+    sp<MmapStreamCallback> callback = mCallback.promote();
+    if (callback != 0) {
+        callback->onTearDown();
     }
 }
 
@@ -8116,8 +8124,9 @@
 {
     for (const sp<MmapTrack> &track : mActiveTracks) {
         if (track->isInvalid()) {
-            if (mCallback != 0) {
-                mCallback->onTearDown();
+            sp<MmapStreamCallback> callback = mCallback.promote();
+            if (callback != 0) {
+                callback->onTearDown();
             }
             break;
         }
@@ -8292,7 +8301,8 @@
 
         mOutput->stream->setVolume(volume, volume);
 
-        if (mCallback != 0) {
+        sp<MmapStreamCallback> callback = mCallback.promote();
+        if (callback != 0) {
             int channelCount;
             if (isOutput()) {
                 channelCount = audio_channel_count_from_out_mask(mChannelMask);
@@ -8303,7 +8313,7 @@
             for (int i = 0; i < channelCount; i++) {
                 values.add(volume);
             }
-            mCallback->onVolumeChanged(mChannelMask, values);
+            callback->onVolumeChanged(mChannelMask, values);
         }
     }
 }
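
Storing the callback as wp<MmapStreamCallback> and promote()-ing it at every call site means MmapThread no longer keeps the callback object alive, and each use is guarded against the owner having already released it. The idiom in isolation (illustrative; the holder class is made up):

    // Illustrative sketch of the wp<> / promote() pattern applied above.
    class CallbackHolder {
      public:
        void setCallback(const sp<MmapStreamCallback>& cb) { mCallback = cb; }  // keeps only a weak ref
        void notifyTearDown() {
            sp<MmapStreamCallback> callback = mCallback.promote();  // null if already destroyed
            if (callback != 0) {
                callback->onTearDown();
            }
            // The temporary strong reference is dropped here, so the holder never
            // extends the callback's lifetime beyond a single call.
        }
      private:
        wp<MmapStreamCallback> mCallback;
    };
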
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index cc66cad..7469710 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1528,7 +1528,7 @@
                 audio_session_t         mSessionId;
                 audio_port_handle_t     mPortId;
 
-                sp<MmapStreamCallback>  mCallback;
+                wp<MmapStreamCallback>  mCallback;
                 sp<StreamHalInterface>  mHalStream;
                 sp<DeviceHalInterface>  mHalDevice;
                 AudioHwDevice* const    mAudioHwDev;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
index dbdcca7..bea9f4f 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
@@ -91,8 +91,10 @@
                 AUDIO_CONFIG_BASE_INITIALIZER;
         const audio_patch_handle_t patchHandle = (provider != NULL) ? provider->getPatchHandle() :
                 AUDIO_PATCH_HANDLE_NONE;
-        mClientInterface->onRecordingConfigurationUpdate(event, mSession, mInputSource,
-                &mConfig, &deviceConfig, patchHandle);
+        if (patchHandle != AUDIO_PATCH_HANDLE_NONE) {
+            mClientInterface->onRecordingConfigurationUpdate(event, mSession, mInputSource,
+                    &mConfig, &deviceConfig, patchHandle);
+        }
     }
 
     return mActiveCount;
@@ -126,9 +128,11 @@
                 AUDIO_CONFIG_BASE_INITIALIZER;
         const audio_patch_handle_t patchHandle = (provider != NULL) ? provider->getPatchHandle() :
                 AUDIO_PATCH_HANDLE_NONE;
-        mClientInterface->onRecordingConfigurationUpdate(RECORD_CONFIG_EVENT_START,
-                mSession, mInputSource,
-                &mConfig, &deviceConfig, patchHandle);
+        if (patchHandle != AUDIO_PATCH_HANDLE_NONE) {
+            mClientInterface->onRecordingConfigurationUpdate(RECORD_CONFIG_EVENT_START,
+                    mSession, mInputSource,
+                    &mConfig, &deviceConfig, patchHandle);
+        }
     }
 }
 
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/PolicyConfigurableDomains.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/PolicyConfigurableDomains.xml
index aa2af0f..b43f83b 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/PolicyConfigurableDomains.xml
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/PolicyConfigurableDomains.xml
@@ -3062,7 +3062,7 @@
         <CompoundRule Type="All">
           <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
           <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
-          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="Is" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dp"/>
         </CompoundRule>
       </Configuration>
@@ -3070,7 +3070,7 @@
         <CompoundRule Type="All">
           <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
           <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
-          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="Is" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpHeadphones"/>
         </CompoundRule>
       </Configuration>
@@ -3078,7 +3078,7 @@
         <CompoundRule Type="All">
           <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
           <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
-          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="Is" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpSpeaker"/>
         </CompoundRule>
       </Configuration>
@@ -6472,7 +6472,7 @@
               <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
               <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
               <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="UsbAccessory"/>
-              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceSpeaker"/>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
             </CompoundRule>
           </CompoundRule>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbDevice"/>
@@ -8416,6 +8416,7 @@
       <ConfigurableElement Path="/Policy/policy/usages/assistance_navigation_guidance/applicable_strategy/strategy"/>
       <ConfigurableElement Path="/Policy/policy/usages/assistance_sonification/applicable_strategy/strategy"/>
       <ConfigurableElement Path="/Policy/policy/usages/game/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/usages/assistant/applicable_strategy/strategy"/>
     </ConfigurableElements>
     <Settings>
       <Configuration Name="Calibration">
@@ -8461,6 +8462,9 @@
         <ConfigurableElement Path="/Policy/policy/usages/game/applicable_strategy/strategy">
           <EnumParameter Name="strategy">media</EnumParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/usages/assistant/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">media</EnumParameter>
+        </ConfigurableElement>
       </Configuration>
     </Settings>
   </ConfigurableDomain>
@@ -8738,6 +8742,7 @@
       <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/loopback"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/ip"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/bus"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/stub"/>
     </ConfigurableElements>
     <Settings>
       <Configuration Name="Calibration">
@@ -9428,6 +9433,9 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/bus">
           <BitParameter Name="bus">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/stub">
+          <BitParameter Name="stub">0</BitParameter>
+        </ConfigurableElement>
       </Configuration>
     </Settings>
   </ConfigurableDomain>
@@ -9758,7 +9766,7 @@
       </Configuration>
     </Settings>
   </ConfigurableDomain>
-  <ConfigurableDomain Name="DeviceForInputSource.VoiceRecognitionAndHotword" SequenceAware="false">
+  <ConfigurableDomain Name="DeviceForInputSource.VoiceRecognitionAndUnprocessedAndHotword" SequenceAware="false">
     <Configurations>
       <Configuration Name="ScoHeadset">
         <CompoundRule Type="All">
@@ -9790,6 +9798,10 @@
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/wired_headset"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/usb_device"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/wired_headset"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/usb_device"/>
@@ -9809,6 +9821,18 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
           <BitParameter Name="builtin_mic">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
           <BitParameter Name="bluetooth_sco_headset">1</BitParameter>
         </ConfigurableElement>
@@ -9835,6 +9859,18 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
           <BitParameter Name="builtin_mic">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
           <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
         </ConfigurableElement>
@@ -9861,6 +9897,18 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
           <BitParameter Name="builtin_mic">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
           <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
         </ConfigurableElement>
@@ -9887,6 +9935,18 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
           <BitParameter Name="builtin_mic">1</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">1</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
           <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
         </ConfigurableElement>
@@ -9913,6 +9973,18 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
           <BitParameter Name="builtin_mic">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
           <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
         </ConfigurableElement>
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
index ecd56b0..eb11980 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
@@ -375,7 +375,7 @@
 						TelephonyMode IsNot InCall
 						TelephonyMode IsNot InCommunication
 						AvailableOutputDevices Excludes UsbAccessory
-						ForceUseForCommunication Is ForceSpeaker
+						ForceUseForMedia IsNot ForceSpeaker
 				AvailableOutputDevices Includes UsbDevice
 
 				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw
index b30aa4c..cee7cd1 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw
@@ -78,7 +78,7 @@
 				#
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
-				ForceUseForMedia Is ForceNoBtA2dp
+				ForceUseForMedia IsNot ForceNoBtA2dp
 				AvailableOutputDevices Includes BluetoothA2dp
 
 				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
@@ -105,7 +105,7 @@
 				#
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
-				ForceUseForMedia Is ForceNoBtA2dp
+				ForceUseForMedia IsNot ForceNoBtA2dp
 				AvailableOutputDevices Includes BluetoothA2dpHeadphones
 
 				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
@@ -132,7 +132,7 @@
 				#
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
-				ForceUseForMedia Is ForceNoBtA2dp
+				ForceUseForMedia IsNot ForceNoBtA2dp
 				AvailableOutputDevices Includes BluetoothA2dpSpeaker
 
 				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_usage.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_usage.pfw
index 3f5da13..b3115e7 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_usage.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_usage.pfw
@@ -16,6 +16,7 @@
 			/Policy/policy/usages/assistance_navigation_guidance/applicable_strategy/strategy = media
 			/Policy/policy/usages/assistance_sonification/applicable_strategy/strategy = media
 			/Policy/policy/usages/game/applicable_strategy/strategy = media
+			/Policy/policy/usages/assistant/applicable_strategy/strategy = media
 
 	domain: AssistanceAccessibility
 		conf: Sonification
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml
index 71b2b62..ad9c356 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml
@@ -68,7 +68,7 @@
 
         <!--#################### USAGE BEGIN ####################-->
 
-        <ComponentType Name="Usages" Description="associated to audio_stream_type_t definition,
+        <ComponentType Name="Usages" Description="associated to audio_usage_t definition,
                              identifier mapping must match the value of the enum">
             <Component Name="unknown" Type="Usage" Mapping="Amend1:Unknown,Identifier:0"/>
             <Component Name="media" Type="Usage" Mapping="Amend1:Media,Identifier:1"/>
@@ -97,6 +97,7 @@
             <Component Name="game" Type="Usage" Mapping="Amend1:BluetoothSco,Identifier:14"/>
             <Component Name="virtual_source" Type="Usage"
                                              Mapping="Amend1:VirtualSource,Identifier:15"/>
+            <Component Name="assistant" Type="Usage" Mapping="Amend1:Assistant,Identifier:16"/>
         </ComponentType>
 
         <!--#################### USAGE END ####################-->
diff --git a/services/camera/libcameraservice/CameraFlashlight.cpp b/services/camera/libcameraservice/CameraFlashlight.cpp
index 0ff9314..4537ae6 100644
--- a/services/camera/libcameraservice/CameraFlashlight.cpp
+++ b/services/camera/libcameraservice/CameraFlashlight.cpp
@@ -944,7 +944,7 @@
     }
 
     // need to set __get_memory in set_callbacks().
-    device->setCallbacks(NULL, NULL, NULL, NULL);
+    device->setCallbacks(NULL, NULL, NULL, NULL, NULL);
 
     mParameters = device->getParameters();
 
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 4318a11..39351e7 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -661,6 +661,22 @@
     return Status::ok();
 }
 
+Status CameraService::getCameraVendorTagCache(
+        /*out*/ hardware::camera2::params::VendorTagDescriptorCache* cache) {
+    ATRACE_CALL();
+    if (!mInitialized) {
+        ALOGE("%s: Camera HAL couldn't be initialized", __FUNCTION__);
+        return STATUS_ERROR(ERROR_DISCONNECTED,
+                "Camera subsystem not available");
+    }
+    sp<VendorTagDescriptorCache> globalCache =
+            VendorTagDescriptorCache::getGlobalVendorTagCache();
+    if (globalCache != nullptr) {
+        *cache = *(globalCache.get());
+    }
+    return Status::ok();
+}
+
 int CameraService::getDeviceVersion(const String8& cameraId, int* facing) {
     ATRACE_CALL();
 
@@ -2859,7 +2875,13 @@
 
     sp<VendorTagDescriptor> desc = VendorTagDescriptor::getGlobalVendorTagDescriptor();
     if (desc == NULL) {
-        dprintf(fd, "No vendor tags.\n");
+        sp<VendorTagDescriptorCache> cache =
+                VendorTagDescriptorCache::getGlobalVendorTagCache();
+        if (cache == NULL) {
+            dprintf(fd, "No vendor tags.\n");
+        } else {
+            cache->dump(fd, /*verbosity*/2, /*indentation*/2);
+        }
     } else {
         desc->dump(fd, /*verbosity*/2, /*indentation*/2);
     }
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index c7acdc9..e49fe62 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -112,6 +112,9 @@
     virtual binder::Status     getCameraVendorTagDescriptor(
             /*out*/
             hardware::camera2::params::VendorTagDescriptor* desc);
+    virtual binder::Status     getCameraVendorTagCache(
+            /*out*/
+            hardware::camera2::params::VendorTagDescriptorCache* cache);
 
     virtual binder::Status     connect(const sp<hardware::ICameraClient>& cameraClient,
             int32_t cameraId, const String16& clientPackageName,
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 3aec562..335e999 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -1261,6 +1261,13 @@
     ALOGW("%s: Not supported in buffer queue mode.", __FUNCTION__);
 }
 
+void Camera2Client::releaseRecordingFrameHandleBatch(
+        const std::vector<native_handle_t*>& handles) {
+    (void)handles;
+    ATRACE_CALL();
+    ALOGW("%s: Not supported in buffer queue mode.", __FUNCTION__);
+}
+
 status_t Camera2Client::autoFocus() {
     ATRACE_CALL();
     Mutex::Autolock icl(mBinderSerializationLock);
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index 87c91a0..9738aca 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -72,6 +72,8 @@
     virtual bool            recordingEnabled();
     virtual void            releaseRecordingFrame(const sp<IMemory>& mem);
     virtual void            releaseRecordingFrameHandle(native_handle_t *handle);
+    virtual void            releaseRecordingFrameHandleBatch(
+                                    const std::vector<native_handle_t*>& handles);
     virtual status_t        autoFocus();
     virtual status_t        cancelAutoFocus();
     virtual status_t        takePicture(int msgType);
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index ffb657e..df8726e 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -98,6 +98,7 @@
     mHardware->setCallbacks(notifyCallback,
             dataCallback,
             dataCallbackTimestamp,
+            handleCallbackTimestampBatch,
             (void *)(uintptr_t)mCameraId);
 
     // Enable zoom, error, focus, and metadata messages by default
@@ -533,6 +534,50 @@
     mHardware->releaseRecordingFrame(dataPtr);
 }
 
+void CameraClient::releaseRecordingFrameHandleBatch(const std::vector<native_handle_t*>& handles) {
+    size_t n = handles.size();
+    std::vector<sp<IMemory>> frames;
+    frames.reserve(n);
+    bool error = false;
+    for (auto& handle : handles) {
+        sp<IMemory> dataPtr;
+        {
+            Mutex::Autolock l(mAvailableCallbackBuffersLock);
+            if (!mAvailableCallbackBuffers.empty()) {
+                dataPtr = mAvailableCallbackBuffers.back();
+                mAvailableCallbackBuffers.pop_back();
+            }
+        }
+
+        if (dataPtr == nullptr) {
+            ALOGE("%s: %d: No callback buffer available. Dropping frames.", __FUNCTION__,
+                    __LINE__);
+            error = true;
+            break;
+        } else if (dataPtr->size() != sizeof(VideoNativeHandleMetadata)) {
+            ALOGE("%s: %d: Callback buffer must be VideoNativeHandleMetadata", __FUNCTION__,
+                    __LINE__);
+            error = true;
+            break;
+        }
+
+        VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(dataPtr->pointer());
+        metadata->eType = kMetadataBufferTypeNativeHandleSource;
+        metadata->pHandle = handle;
+        frames.push_back(dataPtr);
+    }
+
+    if (error) {
+        for (auto& handle : handles) {
+            native_handle_close(handle);
+            native_handle_delete(handle);
+        }
+    } else {
+        mHardware->releaseRecordingFrameBatch(frames);
+    }
+    return;
+}
+
 status_t CameraClient::setVideoBufferMode(int32_t videoBufferMode) {
     LOG1("setVideoBufferMode: %d", videoBufferMode);
     bool enableMetadataInBuffers = false;
@@ -855,6 +900,49 @@
     client->handleGenericDataTimestamp(timestamp, msgType, dataPtr);
 }
 
+void CameraClient::handleCallbackTimestampBatch(
+        int32_t msgType, const std::vector<HandleTimestampMessage>& msgs, void* user) {
+    LOG2("dataCallbackTimestampBatch");
+    sp<CameraClient> client = getClientFromCookie(user);
+    if (client.get() == nullptr) return;
+    if (!client->lockIfMessageWanted(msgType)) return;
+
+    sp<hardware::ICameraClient> c = client->mRemoteCallback;
+    client->mLock.unlock();
+    if (c != 0 && msgs.size() > 0) {
+        size_t n = msgs.size();
+        std::vector<nsecs_t> timestamps;
+        std::vector<native_handle_t*> handles;
+        timestamps.reserve(n);
+        handles.reserve(n);
+        for (auto& msg : msgs) {
+            native_handle_t* handle = nullptr;
+            if (msg.dataPtr->size() != sizeof(VideoNativeHandleMetadata)) {
+                ALOGE("%s: dataPtr does not contain VideoNativeHandleMetadata!", __FUNCTION__);
+                return;
+            }
+            VideoNativeHandleMetadata *metadata =
+                (VideoNativeHandleMetadata*)(msg.dataPtr->pointer());
+            if (metadata->eType == kMetadataBufferTypeNativeHandleSource) {
+                handle = metadata->pHandle;
+            }
+
+            if (handle == nullptr) {
+                ALOGE("%s: VideoNativeHandleMetadata type mismatch or null handle passed!",
+                        __FUNCTION__);
+                return;
+            }
+            {
+                Mutex::Autolock l(client->mAvailableCallbackBuffersLock);
+                client->mAvailableCallbackBuffers.push_back(msg.dataPtr);
+            }
+            timestamps.push_back(msg.timestamp);
+            handles.push_back(handle);
+        }
+        c->recordingFrameHandleCallbackTimestampBatch(timestamps, handles);
+    }
+}
+
 // snapshot taken callback
 void CameraClient::handleShutter(void) {
     if (mPlayShutterSound) {
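The two additions above form the round trip for batched metadata-mode recording frames: handleCallbackTimestampBatch repackages each HAL message into a (timestamp, native handle) pair for the app-side callback, and releaseRecordingFrameHandleBatch later wraps each returned handle back into a VideoNativeHandleMetadata callback buffer before a single releaseRecordingFrameBatch call. A minimal sketch of that per-handle packing step (hypothetical helper; assumes the AOSP types used in the hunk, namely IMemory, VideoNativeHandleMetadata and kMetadataBufferTypeNativeHandleSource):

    // Wrap one returned native handle into a VideoNativeHandleMetadata
    // callback buffer. The caller batches the successfully packed buffers
    // into one releaseRecordingFrameBatch call, or closes every handle if
    // any buffer is missing or has the wrong size.
    static bool packRecordingHandle(const sp<IMemory>& dataPtr,
            native_handle_t* handle) {
        if (dataPtr == nullptr ||
                dataPtr->size() != sizeof(VideoNativeHandleMetadata)) {
            return false;
        }
        VideoNativeHandleMetadata* metadata =
                static_cast<VideoNativeHandleMetadata*>(dataPtr->pointer());
        metadata->eType = kMetadataBufferTypeNativeHandleSource;
        metadata->pHandle = handle;
        return true;
    }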
diff --git a/services/camera/libcameraservice/api1/CameraClient.h b/services/camera/libcameraservice/api1/CameraClient.h
index 91f00e3..1073384 100644
--- a/services/camera/libcameraservice/api1/CameraClient.h
+++ b/services/camera/libcameraservice/api1/CameraClient.h
@@ -50,6 +50,8 @@
     virtual bool            recordingEnabled();
     virtual void            releaseRecordingFrame(const sp<IMemory>& mem);
     virtual void            releaseRecordingFrameHandle(native_handle_t *handle);
+    virtual void            releaseRecordingFrameHandleBatch(
+                                    const std::vector<native_handle_t*>& handles);
     virtual status_t        autoFocus();
     virtual status_t        cancelAutoFocus();
     virtual status_t        takePicture(int msgType);
@@ -109,6 +111,8 @@
     static void             dataCallback(int32_t msgType, const sp<IMemory>& dataPtr,
             camera_frame_metadata_t *metadata, void* user);
     static void             dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr, void* user);
+    static void             handleCallbackTimestampBatch(
+                                    int32_t msgType, const std::vector<HandleTimestampMessage>&, void* user);
     // handlers for messages
     void                    handleShutter(void);
     void                    handlePreviewData(int32_t msgType, const sp<IMemory>& mem,
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index 394eb4c..733a78e 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -367,9 +367,12 @@
 
     entry = result.find(tag);
     if (entry.count == 0) {
+        const camera_metadata *metaBuffer = result.getAndLock();
         ALOGV("%s: Camera %d: No %s provided by HAL for frame %d in this result!",
                 __FUNCTION__, cameraId,
-                get_camera_metadata_tag_name(tag), frameNumber);
+                get_local_camera_metadata_tag_name(tag, metaBuffer),
+                frameNumber);
+        result.unlock(metaBuffer);
         return false;
     } else {
         switch(sizeof(Src)){
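The FrameProcessor change above shows the pattern this patch applies wherever tag names are printed: get_local_camera_metadata_tag_name() needs the raw buffer (which now carries the vendor id), so the CameraMetadata object is locked for the lookup and unlocked immediately afterwards. A short sketch, assuming a CameraMetadata instance named result as in the hunk:

    // Resolve a possibly vendor-specific tag name against the buffer that
    // owns it; hold the lock only for the duration of the lookup.
    const camera_metadata_t* metaBuffer = result.getAndLock();
    const char* tagName = get_local_camera_metadata_tag_name(tag, metaBuffer);
    ALOGV("Tag 0x%x -> %s", tag, tagName != nullptr ? tagName : "<unknown>");
    result.unlock(metaBuffer);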
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 83c84af..b2686bf 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -798,16 +798,38 @@
             exposureCompensationStep.data.r[0].denominator);
 
     autoExposureLock = false;
-    params.set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK,
-            CameraParameters::FALSE);
-    params.set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK_SUPPORTED,
-            CameraParameters::TRUE);
+    autoExposureLockAvailable = false;
+    camera_metadata_ro_entry_t exposureLockAvailable =
+        staticInfo(ANDROID_CONTROL_AE_LOCK_AVAILABLE, 1, 1);
+    if ((0 < exposureLockAvailable.count) &&
+            (ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE ==
+                    exposureLockAvailable.data.u8[0])) {
+        params.set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK,
+                CameraParameters::FALSE);
+        params.set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK_SUPPORTED,
+                   CameraParameters::TRUE);
+        autoExposureLockAvailable = true;
+    } else {
+        params.set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK_SUPPORTED,
+                   CameraParameters::FALSE);
+    }
 
     autoWhiteBalanceLock = false;
-    params.set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK,
-            CameraParameters::FALSE);
-    params.set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK_SUPPORTED,
-            CameraParameters::TRUE);
+    autoWhiteBalanceLockAvailable = false;
+    camera_metadata_ro_entry_t whitebalanceLockAvailable =
+        staticInfo(ANDROID_CONTROL_AWB_LOCK_AVAILABLE, 1, 1);
+    if ((0 < whitebalanceLockAvailable.count) &&
+            (ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE ==
+                    whitebalanceLockAvailable.data.u8[0])) {
+        params.set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK,
+                CameraParameters::FALSE);
+        params.set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK_SUPPORTED,
+                CameraParameters::TRUE);
+        autoWhiteBalanceLockAvailable = true;
+    } else {
+        params.set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK_SUPPORTED,
+                CameraParameters::FALSE);
+    }
 
     meteringAreas.add(Parameters::Area(0, 0, 0, 0, 0));
     params.set(CameraParameters::KEY_MAX_NUM_METERING_AREAS,
@@ -816,30 +838,37 @@
             "(0,0,0,0,0)");
 
     zoom = 0;
-    params.set(CameraParameters::KEY_ZOOM, zoom);
-    params.set(CameraParameters::KEY_MAX_ZOOM, NUM_ZOOM_STEPS - 1);
-
+    zoomAvailable = false;
     camera_metadata_ro_entry_t maxDigitalZoom =
         staticInfo(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM, /*minCount*/1, /*maxCount*/1);
     if (!maxDigitalZoom.count) return NO_INIT;
 
-    {
-        String8 zoomRatios;
-        float zoom = 1.f;
-        float zoomIncrement = (maxDigitalZoom.data.f[0] - zoom) /
-                (NUM_ZOOM_STEPS-1);
-        bool addComma = false;
-        for (size_t i=0; i < NUM_ZOOM_STEPS; i++) {
-            if (addComma) zoomRatios += ",";
-            addComma = true;
-            zoomRatios += String8::format("%d", static_cast<int>(zoom * 100));
-            zoom += zoomIncrement;
-        }
-        params.set(CameraParameters::KEY_ZOOM_RATIOS, zoomRatios);
-    }
+    if (fabs(maxDigitalZoom.data.f[0] - 1.f) > 0.00001f) {
+        params.set(CameraParameters::KEY_ZOOM, zoom);
+        params.set(CameraParameters::KEY_MAX_ZOOM, NUM_ZOOM_STEPS - 1);
 
-    params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
-            CameraParameters::TRUE);
+        {
+            String8 zoomRatios;
+            float zoom = 1.f;
+            float zoomIncrement = (maxDigitalZoom.data.f[0] - zoom) /
+                    (NUM_ZOOM_STEPS-1);
+            bool addComma = false;
+            for (size_t i=0; i < NUM_ZOOM_STEPS; i++) {
+                if (addComma) zoomRatios += ",";
+                addComma = true;
+                zoomRatios += String8::format("%d", static_cast<int>(zoom * 100));
+                zoom += zoomIncrement;
+            }
+            params.set(CameraParameters::KEY_ZOOM_RATIOS, zoomRatios);
+        }
+
+        params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
+                CameraParameters::TRUE);
+        zoomAvailable = true;
+    } else {
+        params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
+                CameraParameters::FALSE);
+    }
     params.set(CameraParameters::KEY_SMOOTH_ZOOM_SUPPORTED,
             CameraParameters::FALSE);
 
@@ -1198,11 +1227,14 @@
 camera_metadata_ro_entry_t Parameters::staticInfo(uint32_t tag,
         size_t minCount, size_t maxCount, bool required) const {
     camera_metadata_ro_entry_t entry = info->find(tag);
+    const camera_metadata_t *metaBuffer = info->getAndLock();
 
     if (CC_UNLIKELY( entry.count == 0 ) && required) {
-        const char* tagSection = get_camera_metadata_section_name(tag);
+        const char* tagSection = get_local_camera_metadata_section_name(tag,
+                metaBuffer);
         if (tagSection == NULL) tagSection = "<unknown>";
-        const char* tagName = get_camera_metadata_tag_name(tag);
+        const char* tagName = get_local_camera_metadata_tag_name(tag,
+                metaBuffer);
         if (tagName == NULL) tagName = "<unknown>";
 
         ALOGE("Error finding static metadata entry '%s.%s' (%x)",
@@ -1210,14 +1242,17 @@
     } else if (CC_UNLIKELY(
             (minCount != 0 && entry.count < minCount) ||
             (maxCount != 0 && entry.count > maxCount) ) ) {
-        const char* tagSection = get_camera_metadata_section_name(tag);
+        const char* tagSection = get_local_camera_metadata_section_name(tag,
+                metaBuffer);
         if (tagSection == NULL) tagSection = "<unknown>";
-        const char* tagName = get_camera_metadata_tag_name(tag);
+        const char* tagName = get_local_camera_metadata_tag_name(tag,
+                metaBuffer);
         if (tagName == NULL) tagName = "<unknown>";
         ALOGE("Malformed static metadata entry '%s.%s' (%x):"
                 "Expected between %zu and %zu values, but got %zu values",
                 tagSection, tagName, tag, minCount, maxCount, entry.count);
     }
+    info->unlock(metaBuffer);
 
     return entry;
 }
@@ -1830,13 +1865,25 @@
         return BAD_VALUE;
     }
 
-    // AUTO_EXPOSURE_LOCK (always supported)
-    validatedParams.autoExposureLock = boolFromString(
-        newParams.get(CameraParameters::KEY_AUTO_EXPOSURE_LOCK));
+    if (autoExposureLockAvailable) {
+        validatedParams.autoExposureLock = boolFromString(
+            newParams.get(CameraParameters::KEY_AUTO_EXPOSURE_LOCK));
+    } else if (nullptr !=
+            newParams.get(CameraParameters::KEY_AUTO_EXPOSURE_LOCK)){
+        ALOGE("%s: Requested auto exposure lock is not supported",
+              __FUNCTION__);
+        return BAD_VALUE;
+    }
 
-    // AUTO_WHITEBALANCE_LOCK (always supported)
-    validatedParams.autoWhiteBalanceLock = boolFromString(
-        newParams.get(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK));
+    if (autoWhiteBalanceLockAvailable) {
+        validatedParams.autoWhiteBalanceLock = boolFromString(
+                newParams.get(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK));
+    } else if (nullptr !=
+           newParams.get(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK)) {
+        ALOGE("%s: Requested auto whitebalance lock is not supported",
+              __FUNCTION__);
+        return BAD_VALUE;
+    }
 
     // METERING_AREAS
     size_t maxAeRegions = (size_t)staticInfo(ANDROID_CONTROL_MAX_REGIONS,
@@ -1856,12 +1903,14 @@
     }
 
     // ZOOM
-    validatedParams.zoom = newParams.getInt(CameraParameters::KEY_ZOOM);
-    if (validatedParams.zoom < 0
-                || validatedParams.zoom >= (int)NUM_ZOOM_STEPS) {
-        ALOGE("%s: Requested zoom level %d is not supported",
-                __FUNCTION__, validatedParams.zoom);
-        return BAD_VALUE;
+    if (zoomAvailable) {
+        validatedParams.zoom = newParams.getInt(CameraParameters::KEY_ZOOM);
+        if (validatedParams.zoom < 0
+                    || validatedParams.zoom >= (int)NUM_ZOOM_STEPS) {
+            ALOGE("%s: Requested zoom level %d is not supported",
+                    __FUNCTION__, validatedParams.zoom);
+            return BAD_VALUE;
+        }
     }
 
     // VIDEO_SIZE
@@ -1982,10 +2031,12 @@
     }
     if (res != OK) return res;
 
-    uint8_t reqWbLock = autoWhiteBalanceLock ?
-            ANDROID_CONTROL_AWB_LOCK_ON : ANDROID_CONTROL_AWB_LOCK_OFF;
-    res = request->update(ANDROID_CONTROL_AWB_LOCK,
-            &reqWbLock, 1);
+    if (autoWhiteBalanceLockAvailable) {
+        uint8_t reqWbLock = autoWhiteBalanceLock ?
+                ANDROID_CONTROL_AWB_LOCK_ON : ANDROID_CONTROL_AWB_LOCK_OFF;
+        res = request->update(ANDROID_CONTROL_AWB_LOCK,
+                &reqWbLock, 1);
+    }
 
     res = request->update(ANDROID_CONTROL_EFFECT_MODE,
             &effectMode, 1);
@@ -2043,11 +2094,13 @@
             &reqAeMode, 1);
     if (res != OK) return res;
 
-    uint8_t reqAeLock = autoExposureLock ?
-            ANDROID_CONTROL_AE_LOCK_ON : ANDROID_CONTROL_AE_LOCK_OFF;
-    res = request->update(ANDROID_CONTROL_AE_LOCK,
-            &reqAeLock, 1);
-    if (res != OK) return res;
+    if (autoExposureLockAvailable) {
+        uint8_t reqAeLock = autoExposureLock ?
+                ANDROID_CONTROL_AE_LOCK_ON : ANDROID_CONTROL_AE_LOCK_OFF;
+        res = request->update(ANDROID_CONTROL_AE_LOCK,
+                &reqAeLock, 1);
+        if (res != OK) return res;
+    }
 
     res = request->update(ANDROID_CONTROL_AWB_MODE,
             &wbMode, 1);
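For a concrete reading of the zoom block above (values hypothetical; the real NUM_ZOOM_STEPS is larger): with maxDigitalZoom = 4.0 and NUM_ZOOM_STEPS = 4, zoomIncrement = (4.0 - 1.0) / 3 = 1.0 and the published KEY_ZOOM_RATIOS string becomes "100,200,300,400". When maxDigitalZoom is within 0.00001 of 1.0, the whole block is skipped, KEY_ZOOM_SUPPORTED is advertised as FALSE, and the later validation code simply never reads KEY_ZOOM for such devices.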
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index c8ecbba..507de75 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -122,7 +122,9 @@
 
     int32_t exposureCompensation;
     bool autoExposureLock;
+    bool autoExposureLockAvailable;
     bool autoWhiteBalanceLock;
+    bool autoWhiteBalanceLockAvailable;
 
     // 3A region types, for use with ANDROID_CONTROL_MAX_REGIONS
     enum region_t {
@@ -135,6 +137,7 @@
     Vector<Area> meteringAreas;
 
     int zoom;
+    bool zoomAvailable;
 
     int videoWidth, videoHeight, videoFormat;
     android_dataspace videoDataSpace;
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index f9b062a..4f788ae 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -145,12 +145,6 @@
             int32_t format, /*out*/ int32_t *id) = 0;
 
     /**
-     * Create an input reprocess stream that uses buffers from an existing
-     * output stream.
-     */
-    virtual status_t createReprocessStreamFromStream(int outputId, int *id) = 0;
-
-    /**
      * Get information about a given stream.
      */
     virtual status_t getStreamInfo(int id,
@@ -169,12 +163,6 @@
     virtual status_t deleteStream(int id) = 0;
 
     /**
-     * Delete reprocess stream. Must not be called if there are requests in
-     * flight which reference that stream.
-     */
-    virtual status_t deleteReprocessStream(int id) = 0;
-
-    /**
      * Take the currently-defined set of streams and configure the HAL to use
      * them. This is a long-running operation (may be several hundred ms).
      *
@@ -289,21 +277,6 @@
     virtual status_t triggerPrecaptureMetering(uint32_t id) = 0;
 
     /**
-     * Abstract interface for clients that want to listen to reprocess buffer
-     * release events
-     */
-    struct BufferReleasedListener : public virtual RefBase {
-        virtual void onBufferReleased(buffer_handle_t *handle) = 0;
-    };
-
-    /**
-     * Push a buffer to be reprocessed into a reprocessing stream, and
-     * provide a listener to call once the buffer is returned by the HAL
-     */
-    virtual status_t pushReprocessBuffer(int reprocessStreamId,
-            buffer_handle_t *buffer, wp<BufferReleasedListener> listener) = 0;
-
-    /**
      * Flush all pending and in-flight requests. Blocks until flush is
      * complete.
      * Output lastFrameNumber is the last frame number of the previous streaming request.
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index bbeeca6..56ba5b6 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -23,6 +23,8 @@
 #include <chrono>
 #include <inttypes.h>
 #include <hidl/ServiceManagement.h>
+#include <functional>
+#include <camera_metadata_hidden.h>
 
 namespace android {
 
@@ -221,7 +223,9 @@
 }
 
 status_t CameraProviderManager::setUpVendorTags() {
-    // TODO (b/34275821): support aggregating vendor tags for more than one provider
+    sp<VendorTagDescriptorCache> tagCache = new VendorTagDescriptorCache();
+
+    VendorTagDescriptorCache::clearGlobalVendorTagCache();
     for (auto& provider : mProviders) {
         hardware::hidl_vec<VendorTagSection> vts;
         Status status;
@@ -242,8 +246,6 @@
             return mapToStatusT(status);
         }
 
-        VendorTagDescriptor::clearGlobalVendorTagDescriptor();
-
         // Read all vendor tag definitions into a descriptor
         sp<VendorTagDescriptor> desc;
         status_t res;
@@ -255,9 +257,11 @@
             return res;
         }
 
-        // Set the global descriptor to use with camera metadata
-        VendorTagDescriptor::setAsGlobalVendorTagDescriptor(desc);
+        tagCache->addVendorDescriptor(provider->mProviderTagid, desc);
     }
+
+    VendorTagDescriptorCache::setAsGlobalVendorTagCache(tagCache);
+
     return OK;
 }
 
@@ -350,6 +354,24 @@
     return nullptr;
 }
 
+metadata_vendor_id_t CameraProviderManager::getProviderTagIdLocked(
+        const std::string& id, hardware::hidl_version minVersion,
+        hardware::hidl_version maxVersion) const {
+    metadata_vendor_id_t ret = CAMERA_METADATA_INVALID_VENDOR_ID;
+
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+    for (auto& provider : mProviders) {
+        for (auto& deviceInfo : provider->mDevices) {
+            if (deviceInfo->mId == id &&
+                    minVersion <= deviceInfo->mVersion &&
+                    maxVersion >= deviceInfo->mVersion) {
+                return provider->mProviderTagid;
+            }
+        }
+    }
+
+    return ret;
+}
 
 status_t CameraProviderManager::addProviderLocked(const std::string& newProvider, bool expected) {
     for (const auto& providerInfo : mProviders) {
@@ -430,6 +452,7 @@
         CameraProviderManager *manager) :
         mProviderName(providerName),
         mInterface(interface),
+        mProviderTagid(generateVendorTagId(providerName)),
         mManager(manager) {
     (void) mManager;
 }
@@ -542,10 +565,12 @@
     std::unique_ptr<DeviceInfo> deviceInfo;
     switch (major) {
         case 1:
-            deviceInfo = initializeDeviceInfo<DeviceInfo1>(name, id, minor);
+            deviceInfo = initializeDeviceInfo<DeviceInfo1>(name, mProviderTagid,
+                    id, minor);
             break;
         case 3:
-            deviceInfo = initializeDeviceInfo<DeviceInfo3>(name, id, minor);
+            deviceInfo = initializeDeviceInfo<DeviceInfo3>(name, mProviderTagid,
+                    id, minor);
             break;
         default:
             ALOGE("%s: Device %s: Unknown HIDL device HAL major version %d:", __FUNCTION__,
@@ -691,7 +716,7 @@
 template<class DeviceInfoT>
 std::unique_ptr<CameraProviderManager::ProviderInfo::DeviceInfo>
     CameraProviderManager::ProviderInfo::initializeDeviceInfo(
-        const std::string &name,
+        const std::string &name, const metadata_vendor_id_t tagId,
         const std::string &id, uint16_t minorVersion) const {
     Status status;
 
@@ -711,7 +736,8 @@
         return nullptr;
     }
     return std::unique_ptr<DeviceInfo>(
-        new DeviceInfoT(name, id, minorVersion, resourceCost, cameraInterface));
+        new DeviceInfoT(name, tagId, id, minorVersion, resourceCost,
+                cameraInterface));
 }
 
 template<class InterfaceT>
@@ -782,11 +808,12 @@
 }
 
 CameraProviderManager::ProviderInfo::DeviceInfo1::DeviceInfo1(const std::string& name,
-        const std::string &id,
+        const metadata_vendor_id_t tagId, const std::string &id,
         uint16_t minorVersion,
         const CameraResourceCost& resourceCost,
         sp<InterfaceT> interface) :
-        DeviceInfo(name, id, hardware::hidl_version{1, minorVersion}, resourceCost),
+        DeviceInfo(name, tagId, id, hardware::hidl_version{1, minorVersion},
+                   resourceCost),
         mInterface(interface) {
     // Get default parameters and initialize flash unit availability
     // Requires powering on the camera device
@@ -869,11 +896,12 @@
 }
 
 CameraProviderManager::ProviderInfo::DeviceInfo3::DeviceInfo3(const std::string& name,
-        const std::string &id,
+        const metadata_vendor_id_t tagId, const std::string &id,
         uint16_t minorVersion,
         const CameraResourceCost& resourceCost,
         sp<InterfaceT> interface) :
-        DeviceInfo(name, id, hardware::hidl_version{3, minorVersion}, resourceCost),
+        DeviceInfo(name, tagId, id, hardware::hidl_version{3, minorVersion},
+                   resourceCost),
         mInterface(interface) {
     // Get camera characteristics and initialize flash unit availability
     Status status;
@@ -884,6 +912,7 @@
                 if (s == Status::OK) {
                     camera_metadata_t *buffer =
                             reinterpret_cast<camera_metadata_t*>(metadata.data());
+                    set_camera_metadata_vendor_id(buffer, mProviderTagid);
                     mCameraCharacteristics = buffer;
                 }
             });
@@ -1004,6 +1033,17 @@
     return OK;
 }
 
+metadata_vendor_id_t CameraProviderManager::ProviderInfo::generateVendorTagId(
+        const std::string &name) {
+    metadata_vendor_id_t ret = std::hash<std::string> {} (name);
+    // CAMERA_METADATA_INVALID_VENDOR_ID is not a valid hash value
+    if (CAMERA_METADATA_INVALID_VENDOR_ID == ret) {
+        ret = 0;
+    }
+
+    return ret;
+}
+
 status_t CameraProviderManager::ProviderInfo::parseDeviceName(const std::string& name,
         uint16_t *major, uint16_t *minor, std::string *type, std::string *id) {
 
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index a388db5..2df4fd5 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -224,6 +224,13 @@
     static status_t mapToStatusT(const hardware::camera::common::V1_0::Status& s);
     static const char* statusToString(const hardware::camera::common::V1_0::Status& s);
 
+    /*
+     * Return the vendor tag id of the provider that owns a specific device.
+     */
+    metadata_vendor_id_t getProviderTagIdLocked(const std::string& id,
+            hardware::hidl_version minVersion = hardware::hidl_version{0,0},
+            hardware::hidl_version maxVersion = hardware::hidl_version{1000,0}) const;
+
 private:
     // All private members, unless otherwise noted, expect mInterfaceMutex to be locked before use
     mutable std::mutex mInterfaceMutex;
@@ -241,6 +248,7 @@
     {
         const std::string mProviderName;
         const sp<hardware::camera::provider::V2_4::ICameraProvider> mInterface;
+        const metadata_vendor_id_t mProviderTagid;
 
         ProviderInfo(const std::string &providerName,
                 sp<hardware::camera::provider::V2_4::ICameraProvider>& interface,
@@ -274,6 +282,7 @@
             const std::string mName;  // Full instance name
             const std::string mId;    // ID section of full name
             const hardware::hidl_version mVersion;
+            const metadata_vendor_id_t mProviderTagid;
 
             const hardware::camera::common::V1_0::CameraResourceCost mResourceCost;
 
@@ -287,10 +296,11 @@
                 return INVALID_OPERATION;
             }
 
-            DeviceInfo(const std::string& name, const std::string &id,
-                    const hardware::hidl_version& version,
+            DeviceInfo(const std::string& name, const metadata_vendor_id_t tagId,
+                    const std::string &id, const hardware::hidl_version& version,
                     const hardware::camera::common::V1_0::CameraResourceCost& resourceCost) :
-                    mName(name), mId(id), mVersion(version), mResourceCost(resourceCost),
+                    mName(name), mId(id), mVersion(version), mProviderTagid(tagId),
+                    mResourceCost(resourceCost),
                     mStatus(hardware::camera::common::V1_0::CameraDeviceStatus::PRESENT),
                     mHasFlashUnit(false) {}
             virtual ~DeviceInfo();
@@ -312,8 +322,8 @@
             virtual status_t setTorchMode(bool enabled) override;
             virtual status_t getCameraInfo(hardware::CameraInfo *info) const override;
 
-            DeviceInfo1(const std::string& name, const std::string &id,
-                    uint16_t minorVersion,
+            DeviceInfo1(const std::string& name, const metadata_vendor_id_t tagId,
+                    const std::string &id, uint16_t minorVersion,
                     const hardware::camera::common::V1_0::CameraResourceCost& resourceCost,
                     sp<InterfaceT> interface);
             virtual ~DeviceInfo1();
@@ -331,8 +341,8 @@
             virtual status_t getCameraCharacteristics(
                     CameraMetadata *characteristics) const override;
 
-            DeviceInfo3(const std::string& name, const std::string &id,
-                    uint16_t minorVersion,
+            DeviceInfo3(const std::string& name, const metadata_vendor_id_t tagId,
+                    const std::string &id, uint16_t minorVersion,
                     const hardware::camera::common::V1_0::CameraResourceCost& resourceCost,
                     sp<InterfaceT> interface);
             virtual ~DeviceInfo3();
@@ -352,7 +362,8 @@
         // right CameraProvider getCameraDeviceInterface_* method.
         template<class DeviceInfoT>
         std::unique_ptr<DeviceInfo> initializeDeviceInfo(const std::string &name,
-                const std::string &id, uint16_t minorVersion) const;
+                const metadata_vendor_id_t tagId, const std::string &id,
+                uint16_t minorVersion) const;
 
         // Helper for initializeDeviceInfo to use the right CameraProvider get method.
         template<class InterfaceT>
@@ -365,6 +376,9 @@
         // Parse device instance name for device version, type, and id.
         static status_t parseDeviceName(const std::string& name,
                 uint16_t *major, uint16_t *minor, std::string *type, std::string *id);
+
+        // Generate vendor tag id
+        static metadata_vendor_id_t generateVendorTagId(const std::string &name);
     };
 
     // Utility to find a DeviceInfo by ID; pointer is only valid while mInterfaceMutex is held
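A short sketch of how the per-provider vendor tag id introduced above is derived and consumed (the provider name "legacy/0" is a hypothetical example; the hashing and the sentinel remap mirror generateVendorTagId):

    // Derive a stable vendor tag id from the provider instance name.
    metadata_vendor_id_t tagId = std::hash<std::string>{}("legacy/0");
    if (tagId == CAMERA_METADATA_INVALID_VENDOR_ID) {
        tagId = 0;  // the sentinel value is reserved, remap it
    }
    // The id is stamped onto metadata buffers coming from that provider via
    // set_camera_metadata_vendor_id() and used as the key when the provider's
    // VendorTagDescriptor is added to the global VendorTagDescriptorCache.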
diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp b/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp
index b52c0d8..0fe09d9 100644
--- a/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp
+++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp
@@ -193,6 +193,36 @@
     return hardware::Void();
 }
 
+hardware::Return<void> CameraHardwareInterface::handleCallbackTimestampBatch(
+        DataCallbackMsg msgType,
+        const hardware::hidl_vec<hardware::camera::device::V1_0::HandleTimestampMessage>& messages) {
+    std::vector<android::HandleTimestampMessage> msgs;
+    msgs.reserve(messages.size());
+
+    for (const auto& hidl_msg : messages) {
+        if (mHidlMemPoolMap.count(hidl_msg.data) == 0) {
+            ALOGE("%s: memory pool ID %d not found", __FUNCTION__, hidl_msg.data);
+            return hardware::Void();
+        }
+        sp<CameraHeapMemory> mem(
+                static_cast<CameraHeapMemory *>(mHidlMemPoolMap.at(hidl_msg.data)->handle));
+
+        if (hidl_msg.bufferIndex >= mem->mNumBufs) {
+            ALOGE("%s: invalid buffer index %d, max allowed is %d", __FUNCTION__,
+                 hidl_msg.bufferIndex, mem->mNumBufs);
+            return hardware::Void();
+        }
+        VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*)
+                mem->mBuffers[hidl_msg.bufferIndex]->pointer();
+        md->pHandle = const_cast<native_handle_t*>(hidl_msg.frameData.getNativeHandle());
+
+        msgs.push_back({hidl_msg.timestamp, mem->mBuffers[hidl_msg.bufferIndex]});
+    }
+
+    mDataCbTimestampBatch((int32_t) msgType, msgs, mCbUser);
+    return hardware::Void();
+}
+
 std::pair<bool, uint64_t> CameraHardwareInterface::getBufferId(
         ANativeWindowBuffer* anb) {
     std::lock_guard<std::mutex> lock(mBufferIdMapLock);
@@ -365,7 +395,7 @@
 }
 
 hardware::Return<Status>
-CameraHardwareInterface::setUsage(hardware::graphics::allocator::V2_0::ProducerUsage usage) {
+CameraHardwareInterface::setUsage(hardware::graphics::common::V1_0::BufferUsage usage) {
     Status s = Status::INTERNAL_ERROR;
     ANativeWindow *a = mPreviewWindow.get();
     if (a == nullptr) {
@@ -468,11 +498,13 @@
 void CameraHardwareInterface::setCallbacks(notify_callback notify_cb,
         data_callback data_cb,
         data_callback_timestamp data_cb_timestamp,
+        data_callback_timestamp_batch data_cb_timestamp_batch,
         void* user)
 {
     mNotifyCb = notify_cb;
     mDataCb = data_cb;
     mDataCbTimestamp = data_cb_timestamp;
+    mDataCbTimestampBatch = data_cb_timestamp_batch;
     mCbUser = user;
 
     ALOGV("%s(%s)", __FUNCTION__, mName.string());
@@ -628,6 +660,44 @@
     }
 }
 
+void CameraHardwareInterface::releaseRecordingFrameBatch(const std::vector<sp<IMemory>>& frames)
+{
+    ALOGV("%s(%s)", __FUNCTION__, mName.string());
+    size_t n = frames.size();
+    std::vector<VideoFrameMessage> msgs;
+    msgs.reserve(n);
+    for (auto& mem : frames) {
+        if (CC_LIKELY(mHidlDevice != nullptr)) {
+            ssize_t offset;
+            size_t size;
+            sp<IMemoryHeap> heap = mem->getMemory(&offset, &size);
+            if (size == sizeof(VideoNativeHandleMetadata)) {
+                uint32_t heapId = heap->getHeapID();
+                uint32_t bufferIndex = offset / size;
+                VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*) mem->pointer();
+                // Caching the handle here because md->pHandle will be subject to HAL's edit
+                native_handle_t* nh = md->pHandle;
+                VideoFrameMessage msg;
+                msgs.push_back({nh, heapId, bufferIndex});
+            } else {
+                ALOGE("%s only supports VideoNativeHandleMetadata mode", __FUNCTION__);
+                return;
+            }
+        } else {
+            ALOGE("Non HIDL mode do not support %s", __FUNCTION__);
+            return;
+        }
+    }
+
+    mHidlDevice->releaseRecordingFrameHandleBatch(msgs);
+
+    for (auto& msg : msgs) {
+        native_handle_t* nh = const_cast<native_handle_t*>(msg.frameData.getNativeHandle());
+        native_handle_close(nh);
+        native_handle_delete(nh);
+    }
+}
+
 status_t CameraHardwareInterface::autoFocus()
 {
     ALOGV("%s(%s)", __FUNCTION__, mName.string());
diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.h b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
index 88ab2e9..4bd879f 100644
--- a/services/camera/libcameraservice/device1/CameraHardwareInterface.h
+++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
@@ -48,6 +48,15 @@
                             const sp<IMemory> &dataPtr,
                             void *user);
 
+struct HandleTimestampMessage {
+    nsecs_t timestamp;
+    const sp<IMemory> dataPtr;
+};
+
+typedef void (*data_callback_timestamp_batch)(
+        int32_t msgType,
+        const std::vector<HandleTimestampMessage>&, void* user);
+
 /**
  * CameraHardwareInterface.h defines the interface to the
  * camera hardware abstraction layer, used for setting and getting
@@ -112,6 +121,7 @@
     void setCallbacks(notify_callback notify_cb,
                       data_callback data_cb,
                       data_callback_timestamp data_cb_timestamp,
+                      data_callback_timestamp_batch data_cb_timestamp_batch,
                       void* user);
 
     /**
@@ -227,6 +237,20 @@
     void releaseRecordingFrame(const sp<IMemory>& mem);
 
     /**
+     * Release a batch of recording frames previously returned by
+     * CAMERA_MSG_VIDEO_FRAME. This method only supports frames that are
+     * stored as VideoNativeHandleMetadata.
+     *
+     * It is camera hal client's responsibility to release video recording
+     * frames sent out by the camera hal before the camera hal receives
+     * a call to disableMsgType(CAMERA_MSG_VIDEO_FRAME). After it receives
+     * the call to disableMsgType(CAMERA_MSG_VIDEO_FRAME), it is camera hal's
+     * responsibility to manage the life-cycle of the video recording
+     * frames.
+     */
+    void releaseRecordingFrameBatch(const std::vector<sp<IMemory>>& frames);
+
+    /**
      * Start auto focus, the notification callback routine is called
      * with CAMERA_MSG_FOCUS once when focusing is complete. autoFocus()
      * will be called again if another auto focus is needed.
@@ -416,6 +440,10 @@
             hardware::camera::device::V1_0::DataCallbackMsg msgType,
             const hardware::hidl_handle& frameData, uint32_t data,
             uint32_t bufferIndex, int64_t timestamp) override;
+    hardware::Return<void> handleCallbackTimestampBatch(
+            hardware::camera::device::V1_0::DataCallbackMsg msgType,
+            const hardware::hidl_vec<
+                    hardware::camera::device::V1_0::HandleTimestampMessage>&) override;
 
     /**
      * Implementation of android::hardware::camera::device::V1_0::ICameraDevicePreviewCallback
@@ -433,7 +461,7 @@
     hardware::Return<hardware::camera::common::V1_0::Status>
             setCrop(int32_t left, int32_t top, int32_t right, int32_t bottom) override;
     hardware::Return<hardware::camera::common::V1_0::Status>
-            setUsage(hardware::graphics::allocator::V2_0::ProducerUsage usage) override;
+            setUsage(hardware::graphics::common::V1_0::BufferUsage usage) override;
     hardware::Return<hardware::camera::common::V1_0::Status>
             setSwapInterval(int32_t interval) override;
     hardware::Return<void> getMinUndequeuedBufferCount(
@@ -450,9 +478,10 @@
 
     struct camera_preview_window mHalPreviewWindow;
 
-    notify_callback         mNotifyCb;
-    data_callback           mDataCb;
-    data_callback_timestamp mDataCbTimestamp;
+    notify_callback               mNotifyCb;
+    data_callback                 mDataCb;
+    data_callback_timestamp       mDataCbTimestamp;
+    data_callback_timestamp_batch mDataCbTimestampBatch;
     void *mCbUser;
 
     // Cached values for preview stream parameters
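A minimal sketch of a handler matching the new data_callback_timestamp_batch type declared above (the name exampleBatchHandler is hypothetical; the real consumer is CameraClient::handleCallbackTimestampBatch, wired in through setCallbacks):

    // Each HandleTimestampMessage pairs a capture timestamp with the IMemory
    // slice whose VideoNativeHandleMetadata now points at the frame's handle.
    static void exampleBatchHandler(int32_t msgType,
            const std::vector<HandleTimestampMessage>& msgs, void* user) {
        (void) msgType; (void) user;
        for (const auto& msg : msgs) {
            ALOGV("batched frame: timestamp %lld ns, %zu-byte metadata buffer",
                    (long long) msg.timestamp, msg.dataPtr->size());
        }
    }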
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 1de2edc..7d8d61e 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -74,7 +74,8 @@
         mNextReprocessResultFrameNumber(0),
         mNextShutterFrameNumber(0),
         mNextReprocessShutterFrameNumber(0),
-        mListener(NULL)
+        mListener(NULL),
+        mVendorTagId(CAMERA_METADATA_INVALID_VENDOR_ID)
 {
     ATRACE_CALL();
     camera3_callback_ops::notify = &sNotify;
@@ -202,6 +203,8 @@
     //       for now use 3_4 to keep legacy devices working
     mDeviceVersion = CAMERA_DEVICE_API_VERSION_3_4;
     mInterface = std::make_unique<HalInterface>(session);
+    std::string providerType;
+    mVendorTagId = manager->getProviderTagIdLocked(mId.string());
 
     return initializeCommonLocked();
 }
@@ -225,6 +228,8 @@
     /** Create buffer manager */
     mBufferManager = new Camera3BufferManager();
 
+    mTagMonitor.initialize(mVendorTagId);
+
     bool aeLockAvailable = false;
     camera_metadata_entry aeLockAvailableEntry = mDeviceInfo.find(
             ANDROID_CONTROL_AE_LOCK_AVAILABLE);
@@ -495,7 +500,7 @@
     return dataSpace;
 }
 
-ConsumerUsageFlags Camera3Device::mapToConsumerUsage(
+BufferUsageFlags Camera3Device::mapToConsumerUsage(
         uint32_t usage) {
     return usage;
 }
@@ -550,12 +555,12 @@
 }
 
 uint32_t Camera3Device::mapConsumerToFrameworkUsage(
-        ConsumerUsageFlags usage) {
+        BufferUsageFlags usage) {
     return usage;
 }
 
 uint32_t Camera3Device::mapProducerToFrameworkUsage(
-        ProducerUsageFlags usage) {
+        BufferUsageFlags usage) {
     return usage;
 }
 
@@ -1399,15 +1404,6 @@
     return OK;
 }
 
-status_t Camera3Device::createReprocessStreamFromStream(int outputId, int *id) {
-    ATRACE_CALL();
-    (void)outputId; (void)id;
-
-    CLOGE("Unimplemented");
-    return INVALID_OPERATION;
-}
-
-
 status_t Camera3Device::getStreamInfo(int id,
         uint32_t *width, uint32_t *height,
         uint32_t *format, android_dataspace *dataSpace) {
@@ -1523,14 +1519,6 @@
     return res;
 }
 
-status_t Camera3Device::deleteReprocessStream(int id) {
-    ATRACE_CALL();
-    (void)id;
-
-    CLOGE("Unimplemented");
-    return INVALID_OPERATION;
-}
-
 status_t Camera3Device::configureStreams(int operatingMode) {
     ATRACE_CALL();
     ALOGV("%s: E", __FUNCTION__);
@@ -1604,6 +1592,7 @@
         return res;
     }
 
+    set_camera_metadata_vendor_id(rawRequest, mVendorTagId);
     mRequestTemplateCache[templateId].acquire(rawRequest);
 
     // Derive some new keys for backward compatibility
@@ -1856,15 +1845,6 @@
                                         sizeof(trigger)/sizeof(trigger[0]));
 }
 
-status_t Camera3Device::pushReprocessBuffer(int reprocessStreamId,
-        buffer_handle_t *buffer, wp<BufferReleasedListener> listener) {
-    ATRACE_CALL();
-    (void)reprocessStreamId; (void)buffer; (void)listener;
-
-    CLOGE("Unimplemented");
-    return INVALID_OPERATION;
-}
-
 status_t Camera3Device::flush(int64_t *frameNumber) {
     ATRACE_CALL();
     ALOGV("%s: Camera %s: Flushing all requests", __FUNCTION__, mId.string());
@@ -2563,6 +2543,11 @@
             const AeTriggerCancelOverride_t &aeTriggerCancelOverride) {
     if (result == nullptr) return;
 
+    camera_metadata_t *meta = const_cast<camera_metadata_t *>(
+            result->mMetadata.getAndLock());
+    set_camera_metadata_vendor_id(meta, mVendorTagId);
+    result->mMetadata.unlock(meta);
+
     if (result->mMetadata.update(ANDROID_REQUEST_FRAME_COUNT,
             (int32_t*)&frameNumber, 1) != OK) {
         SET_ERR("Failed to set frame number %d in metadata", frameNumber);
@@ -2934,6 +2919,13 @@
                     InFlightRequest &r = mInFlightMap.editValueAt(idx);
                     r.requestStatus = msg.error_code;
                     resultExtras = r.resultExtras;
+                    if (hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT ==
+                            errorCode) {
+                        // In case of a missing result, check whether the
+                        // buffers have already returned. If they have,
+                        // remove the in-flight request.
+                        removeInFlightRequestIfReadyLocked(idx);
+                    }
                 } else {
                     resultExtras.frameNumber = msg.frame_number;
                     ALOGE("Camera %s: %s: cannot find in-flight request on "
@@ -3154,7 +3146,9 @@
             Stream &dst = requestedConfiguration.streams[i];
             camera3_stream_t *src = config->streams[i];
 
-            int streamId = Camera3Stream::cast(src)->getId();
+            Camera3Stream* cam3stream = Camera3Stream::cast(src);
+            cam3stream->setBufferFreedListener(this);
+            int streamId = cam3stream->getId();
             StreamType streamType;
             switch (src->stream_type) {
                 case CAMERA3_STREAM_OUTPUT:
@@ -3359,9 +3353,21 @@
         wrapAsHidlRequest(requests[i], /*out*/&captureRequests[i], /*out*/&handlesCreated);
     }
 
+    std::vector<device::V3_2::BufferCache> cachesToRemove;
+    {
+        std::lock_guard<std::mutex> lock(mBufferIdMapLock);
+        for (auto& pair : mFreedBuffers) {
+            // The stream might have been removed since onBufferFreed
+            if (mBufferIdMaps.find(pair.first) != mBufferIdMaps.end()) {
+                cachesToRemove.push_back({pair.first, pair.second});
+            }
+        }
+        mFreedBuffers.clear();
+    }
+
     common::V1_0::Status status = common::V1_0::Status::INTERNAL_ERROR;
     *numRequestProcessed = 0;
-    mHidlSession->processCaptureRequest(captureRequests,
+    mHidlSession->processCaptureRequest(captureRequests, cachesToRemove,
             [&status, &numRequestProcessed] (auto s, uint32_t n) {
                 status = s;
                 *numRequestProcessed = n;
@@ -3469,12 +3475,40 @@
     auto it = bIdMap.find(buf);
     if (it == bIdMap.end()) {
         bIdMap[buf] = mNextBufferId++;
+        ALOGV("stream %d now have %zu buffer caches, buf %p",
+                streamId, bIdMap.size(), buf);
         return std::make_pair(true, mNextBufferId - 1);
     } else {
         return std::make_pair(false, it->second);
     }
 }
 
+void Camera3Device::HalInterface::onBufferFreed(
+        int streamId, const native_handle_t* handle) {
+    std::lock_guard<std::mutex> lock(mBufferIdMapLock);
+    uint64_t bufferId = BUFFER_ID_NO_BUFFER;
+    auto mapIt = mBufferIdMaps.find(streamId);
+    if (mapIt == mBufferIdMaps.end()) {
+        // streamId might be from a deleted stream here
+        ALOGI("%s: stream %d has been removed",
+                __FUNCTION__, streamId);
+        return;
+    }
+    BufferIdMap& bIdMap = mapIt->second;
+    auto it = bIdMap.find(handle);
+    if (it == bIdMap.end()) {
+        ALOGW("%s: cannot find buffer %p in stream %d",
+                __FUNCTION__, handle, streamId);
+        return;
+    } else {
+        bufferId = it->second;
+        bIdMap.erase(it);
+        ALOGV("%s: stream %d now have %zu buffer caches after removing buf %p",
+                __FUNCTION__, streamId, bIdMap.size(), handle);
+    }
+    mFreedBuffers.push_back(std::make_pair(streamId, bufferId));
+}
+
 /**
  * RequestThread inner class methods
  */
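To summarize the buffer-cache eviction handshake added above (ids illustrative): when a stream reports a freed gralloc buffer, HalInterface::onBufferFreed drops it from the per-stream BufferIdMap and queues the (streamId, bufferId) pair in mFreedBuffers; the next batch of capture requests converts each surviving pair into a device::V3_2::BufferCache entry passed alongside processCaptureRequest, letting the HAL release its imported copy of that buffer.

    // Shape of one eviction entry sent with the next capture request
    // (field values are illustrative only).
    device::V3_2::BufferCache entry{/*streamId*/ 1, /*bufferId*/ 42};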
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index d873b27..9c0210b 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -39,6 +39,7 @@
 #include "device3/StatusTracker.h"
 #include "device3/Camera3BufferManager.h"
 #include "utils/TagMonitor.h"
+#include <camera_metadata_hidden.h>
 
 /**
  * Function pointer types with C calling convention to
@@ -125,7 +126,6 @@
     status_t createInputStream(
             uint32_t width, uint32_t height, int format,
             int *id) override;
-    status_t createReprocessStreamFromStream(int outputId, int *id) override;
 
     status_t getStreamInfo(int id,
             uint32_t *width, uint32_t *height,
@@ -133,7 +133,6 @@
     status_t setStreamTransform(int id, int transform) override;
 
     status_t deleteStream(int id) override;
-    status_t deleteReprocessStream(int id) override;
 
     status_t configureStreams(int operatingMode =
             static_cast<int>(hardware::camera::device::V3_2::StreamConfigurationMode::NORMAL_MODE))
@@ -155,9 +154,6 @@
     status_t triggerCancelAutofocus(uint32_t id) override;
     status_t triggerPrecaptureMetering(uint32_t id) override;
 
-    status_t pushReprocessBuffer(int reprocessStreamId,
-            buffer_handle_t *buffer, wp<BufferReleasedListener> listener) override;
-
     status_t flush(int64_t *lastFrameNumber = NULL) override;
 
     status_t prepare(int streamId) override;
@@ -228,7 +224,7 @@
      * Adapter for legacy HAL / HIDL HAL interface calls; calls either into legacy HALv3 or the
      * HIDL HALv3 interfaces.
      */
-    class HalInterface {
+    class HalInterface : public camera3::Camera3StreamBufferFreedListener {
       public:
         HalInterface(camera3_device_t *device);
         HalInterface(sp<hardware::camera::device::V3_2::ICameraDeviceSession> &session);
@@ -326,6 +322,10 @@
         //       buffer_handle_t's FD won't change.
         // return pair of (newlySeenBuffer?, bufferId)
         std::pair<bool, uint64_t> getBufferId(const buffer_handle_t& buf, int streamId);
+
+        virtual void onBufferFreed(int streamId, const native_handle_t* handle) override;
+
+        std::vector<std::pair<int, uint64_t>> mFreedBuffers;
     };
 
     std::unique_ptr<HalInterface> mInterface;
@@ -598,7 +598,7 @@
     static hardware::graphics::common::V1_0::PixelFormat mapToPixelFormat(int frameworkFormat);
     static hardware::camera::device::V3_2::DataspaceFlags mapToHidlDataspace(
             android_dataspace dataSpace);
-    static hardware::camera::device::V3_2::ConsumerUsageFlags mapToConsumerUsage(uint32_t usage);
+    static hardware::camera::device::V3_2::BufferUsageFlags mapToConsumerUsage(uint32_t usage);
     static hardware::camera::device::V3_2::StreamRotation mapToStreamRotation(
             camera3_stream_rotation_t rotation);
     // Returns a negative error code if the passed-in operation mode is not valid.
@@ -607,9 +607,9 @@
     static camera3_buffer_status_t mapHidlBufferStatus(hardware::camera::device::V3_2::BufferStatus status);
     static int mapToFrameworkFormat(hardware::graphics::common::V1_0::PixelFormat pixelFormat);
     static uint32_t mapConsumerToFrameworkUsage(
-            hardware::camera::device::V3_2::ConsumerUsageFlags usage);
+            hardware::camera::device::V3_2::BufferUsageFlags usage);
     static uint32_t mapProducerToFrameworkUsage(
-            hardware::camera::device::V3_2::ProducerUsageFlags usage);
+            hardware::camera::device::V3_2::BufferUsageFlags usage);
 
     struct RequestTrigger {
         // Metadata tag number, e.g. android.control.aePrecaptureTrigger
@@ -1065,6 +1065,8 @@
     void monitorMetadata(TagMonitor::eventSource source, int64_t frameNumber,
             nsecs_t timestamp, const CameraMetadata& metadata);
 
+    metadata_vendor_id_t mVendorTagId;
+
     /**
      * Static callback forwarding methods from HAL to instance
      */
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index 1469b74..4eb15ad 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -263,6 +263,8 @@
         mConsumer->setName(String8::format("Camera3-InputStream-%d", mId));
 
         mProducer = producer;
+
+        mConsumer->setBufferFreedListener(this);
     }
 
     res = mConsumer->setDefaultBufferSize(camera3_stream::width,
@@ -288,6 +290,17 @@
     return OK;
 }
 
+void Camera3InputStream::onBufferFreed(const wp<GraphicBuffer>& gb) {
+    const sp<GraphicBuffer> buffer = gb.promote();
+    if (buffer != nullptr) {
+        if (mBufferFreedListener != nullptr) {
+            mBufferFreedListener->onBufferFreed(mId, buffer->handle);
+        }
+    } else {
+        ALOGE("%s: GraphicBuffer is freed before onBufferFreed callback finishes!", __FUNCTION__);
+    }
+}
+
 }; // namespace camera3
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.h b/services/camera/libcameraservice/device3/Camera3InputStream.h
index 9f3de10..8f5b431 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.h
@@ -34,7 +34,8 @@
  * buffers by feeding them into the HAL, as well as releasing the buffers
  * back once the HAL is done with them.
  */
-class Camera3InputStream : public Camera3IOStreamBase {
+class Camera3InputStream : public Camera3IOStreamBase,
+                           public BufferItemConsumer::BufferFreedListener {
   public:
     /**
      * Set up a stream for formats that have fixed size, such as RAW and YUV.
@@ -77,6 +78,11 @@
 
     virtual status_t getEndpointUsage(uint32_t *usage) const;
 
+    /**
+     * BufferItemConsumer::BufferFreedListener interface
+     */
+    virtual void onBufferFreed(const wp<GraphicBuffer>&) override;
+
 }; // class Camera3InputStream
 
 }; // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 51dc20a..e46d55e 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -347,7 +347,9 @@
 
     // Configure consumer-side ANativeWindow interface. The listener may be used
     // to notify buffer manager (if it is used) of the returned buffers.
-    res = mConsumer->connect(NATIVE_WINDOW_API_CAMERA, /*listener*/mBufferReleasedListener);
+    res = mConsumer->connect(NATIVE_WINDOW_API_CAMERA,
+            /*listener*/mBufferReleasedListener,
+            /*reportBufferRemoval*/true);
     if (res != OK) {
         ALOGE("%s: Unable to connect to native window for stream %d",
                 __FUNCTION__, mId);
@@ -543,6 +545,14 @@
         }
     }
 
+    if (res == OK) {
+        std::vector<sp<GraphicBuffer>> removedBuffers;
+        res = mConsumer->getAndFlushRemovedBuffers(&removedBuffers);
+        if (res == OK) {
+            onBuffersRemovedLocked(removedBuffers);
+        }
+    }
+
     return res;
 }
 
@@ -686,6 +696,16 @@
     }
 }
 
+void Camera3OutputStream::onBuffersRemovedLocked(
+        const std::vector<sp<GraphicBuffer>>& removedBuffers) {
+    Camera3StreamBufferFreedListener* callback = mBufferFreedListener;
+    if (callback != nullptr) {
+        for (auto gb : removedBuffers) {
+            callback->onBufferFreed(mId, gb->handle);
+        }
+    }
+}
+
 status_t Camera3OutputStream::detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd) {
     Mutex::Autolock l(mLock);
 
@@ -718,7 +738,12 @@
         }
     }
 
-    return OK;
+    std::vector<sp<GraphicBuffer>> removedBuffers;
+    res = mConsumer->getAndFlushRemovedBuffers(&removedBuffers);
+    if (res == OK) {
+        onBuffersRemovedLocked(removedBuffers);
+    }
+    return res;
 }
 
 status_t Camera3OutputStream::notifyBufferReleased(ANativeWindowBuffer* /*anwBuffer*/) {
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 24e4e05..86676e4 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -263,6 +263,8 @@
 
     virtual status_t getEndpointUsage(uint32_t *usage) const;
 
+    void onBuffersRemovedLocked(const std::vector<sp<GraphicBuffer>>&);
+
 }; // class Camera3OutputStream
 
 } // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 53a3168..2b1a899 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -812,6 +812,18 @@
     }
 }
 
+void Camera3Stream::setBufferFreedListener(
+        Camera3StreamBufferFreedListener* listener) {
+    Mutex::Autolock l(mLock);
+    // Only allow setting the listener during stream configuration: the stream
+    // is guaranteed to be IDLE in that state, so setBufferFreedListener won't
+    // race with onBufferFreed callbacks.
+    if (mState != STATE_IN_CONFIG && mState != STATE_IN_RECONFIG) {
+        ALOGE("%s: listener must be set during stream configuration!", __FUNCTION__);
+        return;
+    }
+    mBufferFreedListener = listener;
+}
+
 }; // namespace camera3
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 56cb827..27ef86d 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -365,6 +365,11 @@
     void             removeBufferListener(
             const sp<Camera3StreamBufferListener>& listener);
 
+
+    // Setting a listener will replace the previous listener (if one exists)
+    virtual void     setBufferFreedListener(
+            Camera3StreamBufferFreedListener* listener) override;
+
     /**
      * Return if the buffer queue of the stream is abandoned.
      */
@@ -408,6 +413,8 @@
             android_dataspace dataSpace, camera3_stream_rotation_t rotation,
             int setId);
 
+    Camera3StreamBufferFreedListener* mBufferFreedListener = nullptr;
+
     /**
      * Interface to be implemented by derived classes
      */
diff --git a/services/camera/libcameraservice/device3/Camera3StreamBufferFreedListener.h b/services/camera/libcameraservice/device3/Camera3StreamBufferFreedListener.h
new file mode 100644
index 0000000..478a752
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3StreamBufferFreedListener.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_STREAMBUFFERFREEDLISTENER_H
+#define ANDROID_SERVERS_CAMERA3_STREAMBUFFERFREEDLISTENER_H
+
+#include <gui/Surface.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+namespace camera3 {
+
+class Camera3StreamBufferFreedListener {
+public:
+    // onBufferFreed is called when a buffer is no longer being managed
+    // by this stream. It is not called when all buffers are freed due to
+    // stream disconnection.
+    //
+    // The input handle may be deleted after this callback returns, so
+    // dereferencing the handle after the callback is illegal and may lead
+    // to a crash.
+    //
+    // This callback is invoked with Camera3Stream's lock held, so calling
+    // other Camera3Stream APIs from within this callback will lead to
+    // deadlock.
+    virtual void onBufferFreed(int streamId, const native_handle_t* handle) = 0;
+
+    virtual ~Camera3StreamBufferFreedListener() {}
+};
+
+}; //namespace camera3
+}; //namespace android
+
+#endif
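For illustration, here is a minimal sketch of a listener that honors the constraints
documented above; SampleBufferFreedListener and its members are hypothetical names,
not part of this change. It does not keep the raw handle past the callback and does
not call back into Camera3Stream while the stream's lock is held.

    #include <atomic>

    #include <cutils/native_handle.h>

    #include "Camera3StreamBufferFreedListener.h"

    // Hypothetical listener sketch -- not part of this change.
    class SampleBufferFreedListener :
            public android::camera3::Camera3StreamBufferFreedListener {
      public:
        void onBufferFreed(int streamId, const native_handle_t* handle) override {
            // The handle may be deleted once this callback returns; if it were
            // needed later, it would have to be copied first (e.g. with
            // native_handle_clone()).
            (void) handle;
            // Camera3Stream's lock is held here, so only record the event and
            // defer any call back into the stream to another thread.
            mLastFreedStreamId = streamId;
            mFreedCount++;
        }
      private:
        std::atomic<int> mLastFreedStreamId{-1};
        std::atomic<int> mFreedCount{0};
    };

A pointer to such a listener would be handed to setBufferFreedListener() during
stream configuration and must remain valid for the lifetime of the stream.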
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index f7b092f..37b7c36 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -19,6 +19,7 @@
 
 #include <utils/RefBase.h>
 #include "Camera3StreamBufferListener.h"
+#include "Camera3StreamBufferFreedListener.h"
 
 struct camera3_stream_buffer;
 
@@ -287,6 +288,15 @@
             wp<Camera3StreamBufferListener> listener) = 0;
     virtual void     removeBufferListener(
             const sp<Camera3StreamBufferListener>& listener) = 0;
+
+    /**
+     * Setting a listener will replace the previous listener (if one exists).
+     * The listener may only be set during stream configuration, because the
+     * stream is guaranteed to be IDLE in that state, so setBufferFreedListener
+     * won't race with onBufferFreed callbacks.
+     * The client is responsible for keeping the listener object alive
+     * throughout the lifetime of this Camera3Stream.
+     */
+    virtual void setBufferFreedListener(Camera3StreamBufferFreedListener* listener) = 0;
 };
 
 } // namespace camera3
diff --git a/services/camera/libcameraservice/tests/Android.mk b/services/camera/libcameraservice/tests/Android.mk
index 179643b..37a05c2 100644
--- a/services/camera/libcameraservice/tests/Android.mk
+++ b/services/camera/libcameraservice/tests/Android.mk
@@ -22,6 +22,9 @@
     libcameraservice \
     libhidlbase \
     liblog \
+    libhidltransport \
+    libcamera_client \
+    libcamera_metadata \
     libutils \
     android.hardware.camera.common@1.0 \
     android.hardware.camera.provider@2.4 \
@@ -29,6 +32,7 @@
     android.hardware.camera.device@3.2
 
 LOCAL_C_INCLUDES += \
+    system/media/private/camera/include \
 
 LOCAL_CFLAGS += -Wall -Wextra -Werror
 
diff --git a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
index eb934ba..c1d6e85 100644
--- a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
+++ b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
@@ -20,38 +20,104 @@
 #include "../common/CameraProviderManager.h"
 #include <android/hidl/manager/1.0/IServiceManager.h>
 #include <android/hidl/manager/1.0/IServiceNotification.h>
-
+#include <android/hardware/camera/device/3.2/ICameraDeviceCallback.h>
+#include <android/hardware/camera/device/3.2/ICameraDeviceSession.h>
+#include <camera_metadata_hidden.h>
 #include <gtest/gtest.h>
 
 using namespace android;
 using namespace android::hardware::camera;
 using android::hardware::camera::common::V1_0::Status;
+using android::hardware::camera::common::V1_0::VendorTag;
+using android::hardware::camera::common::V1_0::VendorTagSection;
+using android::hardware::camera::common::V1_0::CameraMetadataType;
+using android::hardware::camera::device::V3_2::ICameraDeviceCallback;
+using android::hardware::camera::device::V3_2::ICameraDeviceSession;
+
+/**
+ * Basic test implementation of a camera ver. 3.2 device interface
+ */
+struct TestDeviceInterface : public device::V3_2::ICameraDevice {
+    std::vector<hardware::hidl_string> mDeviceNames;
+    TestDeviceInterface(std::vector<hardware::hidl_string> deviceNames) :
+        mDeviceNames(deviceNames) {}
+    using getResourceCost_cb = std::function<void(
+            hardware::camera::common::V1_0::Status status,
+            const hardware::camera::common::V1_0::CameraResourceCost& resourceCost)>;
+    virtual ::android::hardware::Return<void> getResourceCost(
+            getResourceCost_cb _hidl_cb) override {
+        hardware::camera::common::V1_0::CameraResourceCost resourceCost = {100,
+                mDeviceNames};
+        _hidl_cb(Status::OK, resourceCost);
+        return hardware::Void();
+    }
+
+    using getCameraCharacteristics_cb = std::function<void(
+            hardware::camera::common::V1_0::Status status,
+            const hardware::hidl_vec<uint8_t>& cameraCharacteristics)>;
+    hardware::Return<void> getCameraCharacteristics(
+            getCameraCharacteristics_cb _hidl_cb) override {
+        hardware::hidl_vec<uint8_t> cameraCharacteristics;
+        _hidl_cb(Status::OK, cameraCharacteristics);
+        return hardware::Void();
+    }
+
+    hardware::Return<hardware::camera::common::V1_0::Status> setTorchMode(
+            ::android::hardware::camera::common::V1_0::TorchMode) override {
+        return Status::OK;
+    }
+
+    using open_cb = std::function<void(
+            ::android::hardware::camera::common::V1_0::Status status,
+             const ::android::sp<ICameraDeviceSession>& session)>;
+    hardware::Return<void> open(
+            const ::android::sp<ICameraDeviceCallback>&,
+            open_cb _hidl_cb) override {
+        sp<ICameraDeviceSession> deviceSession = nullptr;
+        _hidl_cb(Status::OK, deviceSession);
+        return hardware::Void();
+    }
+
+    hardware::Return<void> dumpState(
+            const ::android::hardware::hidl_handle&) override {
+        return hardware::Void();
+    }
+};
 
 /**
  * Basic test implementation of a camera provider
  */
 struct TestICameraProvider : virtual public provider::V2_4::ICameraProvider {
-    sp<provider::V2_4::ICameraProviderCallbacks> mCallbacks;
-
+    sp<provider::V2_4::ICameraProviderCallback> mCallbacks;
     std::vector<hardware::hidl_string> mDeviceNames;
+    sp<device::V3_2::ICameraDevice> mDeviceInterface;
+    hardware::hidl_vec<common::V1_0::VendorTagSection> mVendorTagSections;
 
-    TestICameraProvider() {
-        mDeviceNames.push_back("device@3.2/test/0");
-        mDeviceNames.push_back("device@1.0/test/0");
-        mDeviceNames.push_back("device@3.2/test/1");
-    }
+    TestICameraProvider(const std::vector<hardware::hidl_string> &devices,
+            const hardware::hidl_vec<common::V1_0::VendorTagSection> &vendorSection) :
+        mDeviceNames(devices),
+        mDeviceInterface(new TestDeviceInterface(devices)),
+        mVendorTagSections (vendorSection) {}
 
-    virtual hardware::Return<Status> setCallbacks(
-            const sp<provider::V2_4::ICameraProviderCallbacks>& callbacks) override {
+    virtual hardware::Return<Status> setCallback(
+            const sp<provider::V2_4::ICameraProviderCallback>& callbacks) override {
         mCallbacks = callbacks;
         return hardware::Return<Status>(Status::OK);
     }
 
     using getVendorTags_cb = std::function<void(Status status,
             const hardware::hidl_vec<common::V1_0::VendorTagSection>& sections)>;
-    virtual hardware::Return<void> getVendorTags(getVendorTags_cb _hidl_cb) override {
-        hardware::hidl_vec<common::V1_0::VendorTagSection> sections;
-        _hidl_cb(Status::OK, sections);
+    hardware::Return<void> getVendorTags(getVendorTags_cb _hidl_cb) override {
+        _hidl_cb(Status::OK, mVendorTagSections);
+        return hardware::Void();
+    }
+
+    using isSetTorchModeSupported_cb = std::function<void(
+            ::android::hardware::camera::common::V1_0::Status status,
+             bool support)>;
+    virtual ::hardware::Return<void> isSetTorchModeSupported(
+            isSetTorchModeSupported_cb _hidl_cb) override {
+        _hidl_cb(Status::OK, false);
         return hardware::Void();
     }
 
@@ -68,17 +134,17 @@
             const hardware::hidl_string& cameraDeviceName,
             getCameraDeviceInterface_V1_x_cb _hidl_cb) override {
         (void) cameraDeviceName;
-        _hidl_cb(Status::OK, nullptr);
+        _hidl_cb(Status::OK, nullptr); //TODO: implement a ver. 1.0 device interface;
+                                       //      otherwise enumeration will fail.
         return hardware::Void();
     }
 
     using getCameraDeviceInterface_V3_x_cb = std::function<void(Status status,
             const sp<device::V3_2::ICameraDevice>& device)>;
     virtual hardware::Return<void> getCameraDeviceInterface_V3_x(
-            const hardware::hidl_string& cameraDeviceName,
+            const hardware::hidl_string&,
             getCameraDeviceInterface_V3_x_cb _hidl_cb) override {
-        (void) cameraDeviceName;
-        _hidl_cb(Status::OK, nullptr);
+        _hidl_cb(Status::OK, mDeviceInterface);
         return hardware::Void();
     }
 
@@ -90,12 +156,13 @@
  */
 struct TestInteractionProxy : public CameraProviderManager::ServiceInteractionProxy {
     sp<hidl::manager::V1_0::IServiceNotification> mManagerNotificationInterface;
-    const sp<TestICameraProvider> mTestCameraProvider;
+    sp<TestICameraProvider> mTestCameraProvider;
 
-    TestInteractionProxy() :
-            mTestCameraProvider(new TestICameraProvider()) {
-
+    TestInteractionProxy() {}
+    void setProvider(sp<TestICameraProvider> provider) {
+        mTestCameraProvider = provider;
     }
+
     std::string mLastRequestedServiceName;
 
     virtual ~TestInteractionProxy() {}
@@ -116,13 +183,30 @@
 
 };
 
-TEST(CameraProviderManagerTest, InitializeTest) {
+struct TestStatusListener : public CameraProviderManager::StatusListener {
+    ~TestStatusListener() {}
 
+    void onDeviceStatusChanged(const String8 &,
+            hardware::camera::common::V1_0::CameraDeviceStatus) override {}
+    void onTorchStatusChanged(const String8 &,
+            hardware::camera::common::V1_0::TorchModeStatus) override {}
+};
+
+TEST(CameraProviderManagerTest, InitializeTest) {
+    std::vector<hardware::hidl_string> deviceNames;
+    deviceNames.push_back("device@3.2/test/0");
+    deviceNames.push_back("device@1.0/test/0");
+    deviceNames.push_back("device@3.2/test/1");
+    hardware::hidl_vec<common::V1_0::VendorTagSection> vendorSection;
     status_t res;
     sp<CameraProviderManager> providerManager = new CameraProviderManager();
-    TestInteractionProxy serviceProxy{};
+    sp<TestStatusListener> statusListener = new TestStatusListener();
+    TestInteractionProxy serviceProxy;
+    sp<TestICameraProvider> provider =  new TestICameraProvider(deviceNames,
+            vendorSection);
+    serviceProxy.setProvider(provider);
 
-    res = providerManager->initialize(&serviceProxy);
+    res = providerManager->initialize(statusListener, &serviceProxy);
     ASSERT_EQ(res, OK) << "Unable to initialize provider manager";
 
     hardware::hidl_string legacyInstanceName = "legacy/0";
@@ -139,3 +223,145 @@
     ASSERT_EQ(serviceProxy.mLastRequestedServiceName, testProviderInstanceName) <<
             "Incorrect instance requested from service manager";
 }
+
+TEST(CameraProviderManagerTest, MultipleVendorTagTest) {
+    hardware::hidl_string sectionName = "VendorTestSection";
+    hardware::hidl_string tagName = "VendorTestTag";
+    uint32_t tagId = VENDOR_SECTION << 16;
+    hardware::hidl_vec<common::V1_0::VendorTagSection> vendorSection;
+    CameraMetadataType tagType = CameraMetadataType::BYTE;
+    vendorSection.resize(1);
+    vendorSection[0].sectionName = sectionName;
+    vendorSection[0].tags.resize(1);
+    vendorSection[0].tags[0].tagId = tagId;
+    vendorSection[0].tags[0].tagName = tagName;
+    vendorSection[0].tags[0].tagType = tagType;
+    std::vector<hardware::hidl_string> deviceNames = {"device@3.2/test/0"};
+
+    sp<CameraProviderManager> providerManager = new CameraProviderManager();
+    sp<TestStatusListener> statusListener = new TestStatusListener();
+    TestInteractionProxy serviceProxy;
+
+    sp<TestICameraProvider> provider =  new TestICameraProvider(deviceNames,
+            vendorSection);
+    serviceProxy.setProvider(provider);
+
+    auto res = providerManager->initialize(statusListener, &serviceProxy);
+    ASSERT_EQ(res, OK) << "Unable to initialize provider manager";
+
+    hardware::hidl_string testProviderInstanceName = "test/0";
+    hardware::hidl_string testProviderFqInterfaceName =
+            "android.hardware.camera.provider@2.4::ICameraProvider";
+    serviceProxy.mManagerNotificationInterface->onRegistration(
+            testProviderFqInterfaceName, testProviderInstanceName, false);
+    ASSERT_EQ(serviceProxy.mLastRequestedServiceName, testProviderInstanceName) <<
+            "Incorrect instance requested from service manager";
+
+    hardware::hidl_string sectionNameSecond = "SecondVendorTestSection";
+    hardware::hidl_string secondTagName = "SecondVendorTestTag";
+    CameraMetadataType secondTagType = CameraMetadataType::DOUBLE;
+    vendorSection[0].sectionName = sectionNameSecond;
+    vendorSection[0].tags[0].tagId = tagId;
+    vendorSection[0].tags[0].tagName = secondTagName;
+    vendorSection[0].tags[0].tagType = secondTagType;
+    deviceNames = {"device@3.2/test2/1"};
+
+    sp<TestICameraProvider> secondProvider =  new TestICameraProvider(
+            deviceNames, vendorSection);
+    serviceProxy.setProvider(secondProvider);
+    hardware::hidl_string testProviderSecondInstanceName = "test2/0";
+    serviceProxy.mManagerNotificationInterface->onRegistration(
+            testProviderFqInterfaceName, testProviderSecondInstanceName, false);
+    ASSERT_EQ(serviceProxy.mLastRequestedServiceName,
+              testProviderSecondInstanceName) <<
+            "Incorrect instance requested from service manager";
+
+    ASSERT_EQ(NO_ERROR , providerManager->setUpVendorTags());
+    sp<VendorTagDescriptorCache> vendorCache =
+            VendorTagDescriptorCache::getGlobalVendorTagCache();
+    ASSERT_NE(nullptr, vendorCache.get());
+
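+    // The ids used by the vendor tag cache are expected to match what
+    // CameraProviderManager derives for each provider: a std::hash of the
+    // provider instance name.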
+    metadata_vendor_id_t vendorId = std::hash<std::string> {} (
+            testProviderInstanceName.c_str());
+    metadata_vendor_id_t vendorIdSecond = std::hash<std::string> {} (
+            testProviderSecondInstanceName.c_str());
+
+    hardware::hidl_string resultTag = vendorCache->getTagName(tagId, vendorId);
+    ASSERT_EQ(resultTag, tagName);
+
+    resultTag = vendorCache->getTagName(tagId, vendorIdSecond);
+    ASSERT_EQ(resultTag, secondTagName);
+
+    // Check whether we can create two separate CameraMetadata instances
+    // using different vendor tag providers.
+    camera_metadata *metaBuffer = allocate_camera_metadata(10, 20);
+    ASSERT_NE(nullptr, metaBuffer);
+    set_camera_metadata_vendor_id(metaBuffer, vendorId);
+    CameraMetadata metadata(metaBuffer);
+
+    uint8_t byteVal = 10;
+    ASSERT_TRUE(metadata.isEmpty());
+    ASSERT_EQ(OK, metadata.update(tagId, &byteVal, 1));
+    ASSERT_FALSE(metadata.isEmpty());
+    ASSERT_TRUE(metadata.exists(tagId));
+
+    metaBuffer = allocate_camera_metadata(10, 20);
+    ASSERT_NE(nullptr, metaBuffer);
+    set_camera_metadata_vendor_id(metaBuffer, vendorIdSecond);
+    CameraMetadata secondMetadata(metaBuffer);
+
+    ASSERT_TRUE(secondMetadata.isEmpty());
+    double doubleVal = 1.0f;
+    ASSERT_EQ(OK, secondMetadata.update(tagId, &doubleVal, 1));
+    ASSERT_FALSE(secondMetadata.isEmpty());
+    ASSERT_TRUE(secondMetadata.exists(tagId));
+
+    // Check whether CameraMetadata copying works as expected
+    CameraMetadata metadataCopy(metadata);
+    ASSERT_FALSE(metadataCopy.isEmpty());
+    ASSERT_TRUE(metadataCopy.exists(tagId));
+    ASSERT_EQ(OK, metadataCopy.update(tagId, &byteVal, 1));
+    ASSERT_TRUE(metadataCopy.exists(tagId));
+
+    // Check whether values are as expected
+    camera_metadata_entry_t entry = metadata.find(tagId);
+    ASSERT_EQ(1u, entry.count);
+    ASSERT_EQ(byteVal, entry.data.u8[0]);
+    entry = secondMetadata.find(tagId);
+    ASSERT_EQ(1u, entry.count);
+    ASSERT_EQ(doubleVal, entry.data.d[0]);
+
+    // Swap and erase
+    secondMetadata.swap(metadataCopy);
+    ASSERT_TRUE(metadataCopy.exists(tagId));
+    ASSERT_TRUE(secondMetadata.exists(tagId));
+    ASSERT_EQ(OK, secondMetadata.erase(tagId));
+    ASSERT_TRUE(secondMetadata.isEmpty());
+    doubleVal = 0.0f;
+    ASSERT_EQ(OK, metadataCopy.update(tagId, &doubleVal, 1));
+    entry = metadataCopy.find(tagId);
+    ASSERT_EQ(1u, entry.count);
+    ASSERT_EQ(doubleVal, entry.data.d[0]);
+
+    // Append
+    uint8_t sceneMode = ANDROID_CONTROL_SCENE_MODE_ACTION;
+    secondMetadata.update(ANDROID_CONTROL_SCENE_MODE, &sceneMode, 1);
+    // Appending across two different vendor tag providers is not supported!
+    ASSERT_NE(OK, metadataCopy.append(secondMetadata));
+    ASSERT_EQ(OK, metadataCopy.erase(tagId));
+    metadataCopy.update(ANDROID_CONTROL_SCENE_MODE, &sceneMode, 1);
+    // However, appending from the same vendor tag provider should be fine
+    ASSERT_EQ(OK, metadata.append(secondMetadata));
+    // Appending from metadata without a vendor tag provider should be supported
+    CameraMetadata regularMetadata(10, 20);
+    uint8_t controlMode = ANDROID_CONTROL_MODE_AUTO;
+    regularMetadata.update(ANDROID_CONTROL_MODE, &controlMode, 1);
+    ASSERT_EQ(OK, secondMetadata.append(regularMetadata));
+    ASSERT_EQ(2u, secondMetadata.entryCount());
+    ASSERT_EQ(2u, metadata.entryCount());
+
+    // Dump
+    metadata.dump(1, 2);
+    metadataCopy.dump(1, 2);
+    secondMetadata.dump(1, 2);
+}
diff --git a/services/camera/libcameraservice/utils/TagMonitor.cpp b/services/camera/libcameraservice/utils/TagMonitor.cpp
index f1b65bd..dec97d7 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.cpp
+++ b/services/camera/libcameraservice/utils/TagMonitor.cpp
@@ -23,12 +23,14 @@
 #include <inttypes.h>
 #include <utils/Log.h>
 #include <camera/VendorTagDescriptor.h>
+#include <camera_metadata_hidden.h>
 
 namespace android {
 
 TagMonitor::TagMonitor():
         mMonitoringEnabled(false),
-        mMonitoringEvents(kMaxMonitorEvents)
+        mMonitoringEvents(kMaxMonitorEvents),
+        mVendorTagId(CAMERA_METADATA_INVALID_VENDOR_ID)
 {}
 
 const char* TagMonitor::k3aTags =
@@ -55,6 +57,13 @@
 
     sp<VendorTagDescriptor> vTags =
             VendorTagDescriptor::getGlobalVendorTagDescriptor();
+    if ((nullptr == vTags.get()) || (0 >= vTags->getTagCount())) {
+        sp<VendorTagDescriptorCache> cache =
+                VendorTagDescriptorCache::getGlobalVendorTagCache();
+        if (cache.get()) {
+            cache->getVendorTagDescriptor(mVendorTagId, &vTags);
+        }
+    }
 
     bool gotTag = false;
 
@@ -104,6 +113,15 @@
         camera_metadata_ro_entry entry = metadata.find(tag);
         CameraMetadata &lastValues = (source == REQUEST) ?
                 mLastMonitoredRequestValues : mLastMonitoredResultValues;
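+        // Lazily initialize the cached metadata with this monitor's vendor tag
+        // id so that vendor tag lookups in lastValues resolve correctly.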
+        if (lastValues.isEmpty()) {
+            lastValues = CameraMetadata(mMonitoredTagList.size());
+            const camera_metadata_t *metaBuffer =
+                    lastValues.getAndLock();
+            set_camera_metadata_vendor_id(
+                    const_cast<camera_metadata_t *> (metaBuffer), mVendorTagId);
+            lastValues.unlock(metaBuffer);
+        }
+
         camera_metadata_entry lastEntry = lastValues.find(tag);
 
         if (entry.count > 0) {
@@ -129,16 +147,21 @@
             }
 
             if (isDifferent) {
-                ALOGV("%s: Tag %s changed", __FUNCTION__, get_camera_metadata_tag_name(tag));
+                ALOGV("%s: Tag %s changed", __FUNCTION__,
+                      get_local_camera_metadata_tag_name_vendor_id(
+                              tag, mVendorTagId));
                 lastValues.update(entry);
                 mMonitoringEvents.emplace(source, frameNumber, timestamp, entry);
             }
         } else if (lastEntry.count > 0) {
             // Value has been removed
-            ALOGV("%s: Tag %s removed", __FUNCTION__, get_camera_metadata_tag_name(tag));
+            ALOGV("%s: Tag %s removed", __FUNCTION__,
+                  get_local_camera_metadata_tag_name_vendor_id(
+                          tag, mVendorTagId));
             lastValues.erase(tag);
             entry.tag = tag;
-            entry.type = get_camera_metadata_tag_type(tag);
+            entry.type = get_local_camera_metadata_tag_type_vendor_id(tag,
+                    mVendorTagId);
             entry.count = 0;
             mMonitoringEvents.emplace(source, frameNumber, timestamp, entry);
         }
@@ -152,8 +175,10 @@
         dprintf(fd, "     Tag monitoring enabled for tags:\n");
         for (uint32_t tag : mMonitoredTagList) {
             dprintf(fd, "        %s.%s\n",
-                    get_camera_metadata_section_name(tag),
-                    get_camera_metadata_tag_name(tag));
+                    get_local_camera_metadata_section_name_vendor_id(tag,
+                            mVendorTagId),
+                    get_local_camera_metadata_tag_name_vendor_id(tag,
+                            mVendorTagId));
         }
     } else {
         dprintf(fd, "     Tag monitoring disabled (enable with -m <name1,..,nameN>)\n");
@@ -166,8 +191,10 @@
                     event.frameNumber, event.timestamp,
                     indentation,
                     event.source == REQUEST ? "REQ:" : "RES:",
-                    get_camera_metadata_section_name(event.tag),
-                    get_camera_metadata_tag_name(event.tag));
+                    get_local_camera_metadata_section_name_vendor_id(event.tag,
+                            mVendorTagId),
+                    get_local_camera_metadata_tag_name_vendor_id(event.tag,
+                            mVendorTagId));
             if (event.newData.size() == 0) {
                 dprintf(fd, " (Removed)\n");
             } else {
diff --git a/services/camera/libcameraservice/utils/TagMonitor.h b/services/camera/libcameraservice/utils/TagMonitor.h
index d7aa419..7155314 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.h
+++ b/services/camera/libcameraservice/utils/TagMonitor.h
@@ -27,6 +27,7 @@
 
 #include <media/RingBuffer.h>
 #include <system/camera_metadata.h>
+#include <system/camera_vendor_tags.h>
 #include <camera/CameraMetadata.h>
 
 namespace android {
@@ -44,6 +45,8 @@
 
     TagMonitor();
 
+    void initialize(metadata_vendor_id_t id) { mVendorTagId = id; }
+
     // Parse tag name list (comma-separated) and if valid, enable monitoring
     // If invalid, do nothing.
     // Recognizes "3a" as a shortcut for enabling tracking 3A state, mode, and
@@ -100,6 +103,7 @@
 
     // 3A fields to use with the "3a" option
     static const char *k3aTags;
+    metadata_vendor_id_t mVendorTagId;
 };
 
 } // namespace android
diff --git a/services/mediaanalytics/Android.mk b/services/mediaanalytics/Android.mk
index ef49df4..9e2813e 100644
--- a/services/mediaanalytics/Android.mk
+++ b/services/mediaanalytics/Android.mk
@@ -5,7 +5,12 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:= \
-    main_mediametrics.cpp          \
+    main_mediametrics.cpp              \
+    MetricsSummarizerCodec.cpp         \
+    MetricsSummarizerExtractor.cpp     \
+    MetricsSummarizerPlayer.cpp        \
+    MetricsSummarizerRecorder.cpp      \
+    MetricsSummarizer.cpp              \
     MediaAnalyticsService.cpp
 
 LOCAL_SHARED_LIBRARIES := \
@@ -18,6 +23,7 @@
     libgui                      \
     libmedia                    \
     libmediautils               \
+    libmediametrics             \
     libstagefright_foundation   \
     libutils
 
diff --git a/services/mediaanalytics/MediaAnalyticsService.cpp b/services/mediaanalytics/MediaAnalyticsService.cpp
index 35c1f5b..876c685 100644
--- a/services/mediaanalytics/MediaAnalyticsService.cpp
+++ b/services/mediaanalytics/MediaAnalyticsService.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2017 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -20,6 +20,7 @@
 #define LOG_TAG "MediaAnalyticsService"
 #include <utils/Log.h>
 
+#include <stdint.h>
 #include <inttypes.h>
 #include <sys/types.h>
 #include <sys/stat.h>
@@ -70,11 +71,28 @@
 
 #include "MediaAnalyticsService.h"
 
+#include "MetricsSummarizer.h"
+#include "MetricsSummarizerCodec.h"
+#include "MetricsSummarizerExtractor.h"
+#include "MetricsSummarizerPlayer.h"
+#include "MetricsSummarizerRecorder.h"
+
 
 namespace android {
 
 
-#define DEBUG_QUEUE     0
+
+// summarized records
+// up to 48 sets, each covering an hour -- at least 2 days of coverage
+// (will be longer if there are hours without any media action)
+static const nsecs_t kNewSetIntervalNs = 3600*(1000*1000*1000ll);
+static const int kMaxRecordSets = 48;
+// individual records kept in memory
+static const int kMaxRecords    = 100;
+
+
+static const char *kServiceName = "media.metrics";
+
 
 //using android::status_t;
 //using android::OK;
@@ -85,18 +103,67 @@
 
 void MediaAnalyticsService::instantiate() {
     defaultServiceManager()->addService(
-            String16("media.metrics"), new MediaAnalyticsService());
+            String16(kServiceName), new MediaAnalyticsService());
 }
 
-// XXX: add dynamic controls for mMaxRecords
+// handle sets of summarizers
+MediaAnalyticsService::SummarizerSet::SummarizerSet() {
+    mSummarizers = new List<MetricsSummarizer *>();
+}
+MediaAnalyticsService::SummarizerSet::~SummarizerSet() {
+    // empty the list
+    List<MetricsSummarizer *> *l = mSummarizers;
+    while (l->size() > 0) {
+        MetricsSummarizer *summarizer = *(l->begin());
+        l->erase(l->begin());
+        delete summarizer;
+    }
+}
+
+void MediaAnalyticsService::newSummarizerSet() {
+    ALOGD("MediaAnalyticsService::newSummarizerSet");
+    MediaAnalyticsService::SummarizerSet *set = new MediaAnalyticsService::SummarizerSet();
+    nsecs_t now = systemTime(SYSTEM_TIME_REALTIME);
+    set->setStarted(now);
+
+    set->appendSummarizer(new MetricsSummarizerExtractor("extractor"));
+    set->appendSummarizer(new MetricsSummarizerCodec("codec"));
+    set->appendSummarizer(new MetricsSummarizerPlayer("nuplayer"));
+    set->appendSummarizer(new MetricsSummarizerRecorder("recorder"));
+
+    // ALWAYS at the end, since it catches everything
+    set->appendSummarizer(new MetricsSummarizer(NULL));
+
+    // inject this set at the BACK of the list.
+    mSummarizerSets->push_back(set);
+    mCurrentSet = set;
+
+    // limit the # that we have
+    if (mMaxRecordSets > 0) {
+        List<SummarizerSet *> *l = mSummarizerSets;
+        while (l->size() > (size_t) mMaxRecordSets) {
+            ALOGD("Deleting oldest record set....");
+            MediaAnalyticsService::SummarizerSet *oset = *(l->begin());
+            l->erase(l->begin());
+            delete oset;
+            mSetsDiscarded++;
+        }
+    }
+}
+
 MediaAnalyticsService::MediaAnalyticsService()
-        : mMaxRecords(100) {
+        : mMaxRecords(kMaxRecords),
+          mMaxRecordSets(kMaxRecordSets),
+          mNewSetInterval(kNewSetIntervalNs) {
 
     ALOGD("MediaAnalyticsService created");
     // clear our queues
     mOpen = new List<MediaAnalyticsItem *>();
     mFinalized = new List<MediaAnalyticsItem *>();
 
+    mSummarizerSets = new List<MediaAnalyticsService::SummarizerSet *>();
+    newSummarizerSet();
+
     mItemsSubmitted = 0;
     mItemsFinalized = 0;
     mItemsDiscarded = 0;
@@ -109,7 +176,13 @@
 MediaAnalyticsService::~MediaAnalyticsService() {
         ALOGD("MediaAnalyticsService destroyed");
 
-    // XXX: clean out mOpen and mFinalized
+    // clean out mOpen and mFinalized
+    delete mOpen;
+    mOpen = NULL;
+    delete mFinalized;
+    mFinalized = NULL;
+
+    // XXX: clean out the summaries
 }
 
 
@@ -145,7 +218,7 @@
         case AID_MEDIA_EX:
         case AID_MEDIA_DRM:
             // trusted source, only override default values
             isTrusted = true;
             if (uid_given == (-1)) {
                 item->setUid(uid);
             }
@@ -197,10 +270,12 @@
                 oitem = NULL;
             } else {
                 oitem->setFinalized(true);
+                summarize(oitem);
                 saveItem(mFinalized, oitem, 0);
             }
             // new record could itself be marked finalized...
             if (finalizing) {
+                summarize(item);
                 saveItem(mFinalized, item, 0);
                 mItemsFinalized++;
             } else {
@@ -211,6 +286,7 @@
             // combine the records, send it to finalized if appropriate
             oitem->merge(item);
             if (finalizing) {
+                summarize(oitem);
                 saveItem(mFinalized, oitem, 0);
                 mItemsFinalized++;
             }
@@ -229,6 +305,7 @@
                 delete item;
                 item = NULL;
             } else {
+                summarize(item);
                 saveItem(mFinalized, item, 0);
                 mItemsFinalized++;
             }
@@ -239,26 +316,6 @@
     return id;
 }
 
-List<MediaAnalyticsItem *> *MediaAnalyticsService::getMediaAnalyticsItemList(bool finished, nsecs_t ts) {
-    // this might never get called; the binder interface maps to the full parm list
-    // on the client side before making the binder call.
-    // but this lets us be sure...
-    List<MediaAnalyticsItem*> *list;
-    list = getMediaAnalyticsItemList(finished, ts, MediaAnalyticsItem::kKeyAny);
-    return list;
-}
-
-List<MediaAnalyticsItem *> *MediaAnalyticsService::getMediaAnalyticsItemList(bool , nsecs_t , MediaAnalyticsItem::Key ) {
-
-    // XXX: implement the get-item-list semantics
-
-    List<MediaAnalyticsItem *> *list = NULL;
-    // set up our query on the persistent data
-    // slurp in all of the pieces
-    // return that
-    return list;
-}
-
 status_t MediaAnalyticsService::dump(int fd, const Vector<String16>& args)
 {
     const size_t SIZE = 512;
@@ -277,15 +334,21 @@
 
     // crack any parameters
     bool clear = false;
+    bool summary = false;
     nsecs_t ts_since = 0;
+    String16 summaryOption("-summary");
     String16 clearOption("-clear");
     String16 sinceOption("-since");
     String16 helpOption("-help");
+    String16 onlyOption("-only");
+    const char *only = NULL;
     int n = args.size();
     for (int i = 0; i < n; i++) {
         String8 myarg(args[i]);
         if (args[i] == clearOption) {
             clear = true;
+        } else if (args[i] == summaryOption) {
+            summary = true;
         } else if (args[i] == sinceOption) {
             i++;
             if (i < n) {
@@ -301,12 +364,27 @@
             }
             // command line is milliseconds; internal units are nano-seconds
             ts_since *= 1000*1000;
+        } else if (args[i] == onlyOption) {
+            i++;
+            if (i < n) {
+                String8 value(args[i]);
+                const char *p = value.string();
+                char *q = strdup(p);
+                if (q != NULL) {
+                    if (only != NULL) {
+                        free((void*)only);
+                    }
+                only = q;
+                }
+            }
         } else if (args[i] == helpOption) {
             result.append("Recognized parameters:\n");
             result.append("-help        this help message\n");
+            result.append("-summary     show summary info\n");
             result.append("-clear       clears out saved records\n");
-            result.append("-since XXX   include records since XXX\n");
-            result.append("             (XXX is milliseconds since the UNIX epoch)\n");
+            result.append("-only X      process records for component X\n");
+            result.append("-since X     include records since X\n");
+            result.append("             (X is milliseconds since the UNIX epoch)\n");
             write(fd, result.string(), result.size());
             return NO_ERROR;
         }
@@ -314,9 +392,42 @@
 
     Mutex::Autolock _l(mLock);
 
-    snprintf(buffer, SIZE, "Dump of the mediametrics process:\n");
+    // we ALWAYS dump this piece
+    snprintf(buffer, SIZE, "Dump of the %s process:\n", kServiceName);
     result.append(buffer);
 
+    dumpHeaders(result, ts_since);
+
+    // print only one of the two sections, to avoid confusing folks that parse the output
+    if (summary) {
+        dumpSummaries(result, ts_since, only);
+    } else {
+        dumpRecent(result, ts_since, only);
+    }
+
+
+    if (clear) {
+        // remove everything from the finalized queue
+        while (mFinalized->size() > 0) {
+            MediaAnalyticsItem * oitem = *(mFinalized->begin());
+            mFinalized->erase(mFinalized->begin());
+            delete oitem;
+            mItemsDiscarded++;
+        }
+
+        // shall we clear the summary data too?
+
+    }
+
+    write(fd, result.string(), result.size());
+    return NO_ERROR;
+}
+
+// dump headers
+void MediaAnalyticsService::dumpHeaders(String8 &result, nsecs_t ts_since) {
+    const size_t SIZE = 512;
+    char buffer[SIZE];
+
     int enabled = MediaAnalyticsItem::isEnabled();
     if (enabled) {
         snprintf(buffer, SIZE, "Metrics gathering: enabled\n");
@@ -331,50 +442,71 @@
         " Discarded: %" PRId64 "\n",
         mItemsSubmitted, mItemsFinalized, mItemsDiscarded);
     result.append(buffer);
+    snprintf(buffer, SIZE,
+        "Summary Sets Discarded: %" PRId64 "\n", mSetsDiscarded);
+    result.append(buffer);
     if (ts_since != 0) {
         snprintf(buffer, SIZE,
             "Dumping Queue entries more recent than: %" PRId64 "\n",
             (int64_t) ts_since);
         result.append(buffer);
     }
+}
+
+// dump summary info
+void MediaAnalyticsService::dumpSummaries(String8 &result, nsecs_t ts_since, const char *only) {
+    const size_t SIZE = 512;
+    char buffer[SIZE];
+    int slot = 0;
+
+    snprintf(buffer, SIZE, "\nSummarized Metrics:\n");
+    result.append(buffer);
+
+    // have each of the distillers dump records
+    if (mSummarizerSets != NULL) {
+        List<SummarizerSet *>::iterator itSet = mSummarizerSets->begin();
+        for (; itSet != mSummarizerSets->end(); itSet++) {
+            nsecs_t when = (*itSet)->getStarted();
+            if (when < ts_since) {
+                continue;
+            }
+            List<MetricsSummarizer *> *list = (*itSet)->getSummarizers();
+            List<MetricsSummarizer *>::iterator it = list->begin();
+            for (; it != list->end(); it++) {
+                if (only != NULL && strcmp(only, (*it)->getKey()) != 0) {
+                    ALOGV("Told to omit '%s'", (*it)->getKey());
+                }
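+                // dumpSummary() filters individual records against 'only'
+                // itself, so it is still invoked below.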
+                AString distilled = (*it)->dumpSummary(slot, only);
+                result.append(distilled.c_str());
+            }
+        }
+    }
+}
+
+// the recent, detailed queues
+void MediaAnalyticsService::dumpRecent(String8 &result, nsecs_t ts_since, const char * only) {
+    const size_t SIZE = 512;
+    char buffer[SIZE];
 
     // show the recently recorded records
     snprintf(buffer, sizeof(buffer), "\nFinalized Metrics (oldest first):\n");
     result.append(buffer);
-    result.append(this->dumpQueue(mFinalized, ts_since));
+    result.append(this->dumpQueue(mFinalized, ts_since, only));
 
     snprintf(buffer, sizeof(buffer), "\nIn-Progress Metrics (newest first):\n");
     result.append(buffer);
-    result.append(this->dumpQueue(mOpen, ts_since));
+    result.append(this->dumpQueue(mOpen, ts_since, only));
 
     // show who is connected and injecting records?
     // talk about # records fed to the 'readers'
     // talk about # records we discarded, perhaps "discarded w/o reading" too
-
-    if (clear) {
-        // remove everything from the finalized queue
-        while (mFinalized->size() > 0) {
-            MediaAnalyticsItem * oitem = *(mFinalized->begin());
-            if (DEBUG_QUEUE) {
-                ALOGD("zap old record: key %s sessionID %" PRId64 " ts %" PRId64 "",
-                    oitem->getKey().c_str(), oitem->getSessionID(),
-                    oitem->getTimestamp());
-            }
-            mFinalized->erase(mFinalized->begin());
-            mItemsDiscarded++;
-        }
-    }
-
-    write(fd, result.string(), result.size());
-    return NO_ERROR;
 }
-
 // caller has locked mLock...
 String8 MediaAnalyticsService::dumpQueue(List<MediaAnalyticsItem *> *theList) {
-    return dumpQueue(theList, (nsecs_t) 0);
+    return dumpQueue(theList, (nsecs_t) 0, NULL);
 }
 
-String8 MediaAnalyticsService::dumpQueue(List<MediaAnalyticsItem *> *theList, nsecs_t ts_since) {
+String8 MediaAnalyticsService::dumpQueue(List<MediaAnalyticsItem *> *theList, nsecs_t ts_since, const char * only) {
     String8 result;
     int slot = 0;
 
@@ -387,6 +519,11 @@
             if (when < ts_since) {
                 continue;
             }
+            if (only != NULL &&
+                strcmp(only, (*it)->getKey().c_str()) != 0) {
+                ALOGV("Omit '%s', it's not '%s'", (*it)->getKey().c_str(), only);
+                continue;
+            }
             AString entry = (*it)->toString();
             result.appendFormat("%5d: %s\n", slot, entry.c_str());
             slot++;
@@ -405,13 +542,6 @@
 
     Mutex::Autolock _l(mLock);
 
-    if (DEBUG_QUEUE) {
-        ALOGD("Inject a record: session %" PRId64 " ts %" PRId64 "",
-            item->getSessionID(), item->getTimestamp());
-        String8 before = dumpQueue(l);
-        ALOGD("Q before insert: %s", before.string());
-    }
-
     // adding at back of queue (fifo order)
     if (front)  {
         l->push_front(item);
@@ -419,30 +549,15 @@
         l->push_back(item);
     }
 
-    if (DEBUG_QUEUE) {
-        String8 after = dumpQueue(l);
-        ALOGD("Q after insert: %s", after.string());
-    }
-
     // keep removing old records the front until we're in-bounds
     if (mMaxRecords > 0) {
         while (l->size() > (size_t) mMaxRecords) {
             MediaAnalyticsItem * oitem = *(l->begin());
-            if (DEBUG_QUEUE) {
-                ALOGD("zap old record: key %s sessionID %" PRId64 " ts %" PRId64 "",
-                    oitem->getKey().c_str(), oitem->getSessionID(),
-                    oitem->getTimestamp());
-            }
             l->erase(l->begin());
             delete oitem;
             mItemsDiscarded++;
         }
     }
-
-    if (DEBUG_QUEUE) {
-        String8 after = dumpQueue(l);
-        ALOGD("Q after cleanup: %s", after.string());
-    }
 }
 
 // are they alike enough that nitem can be folded into oitem?
@@ -515,29 +630,14 @@
 
     Mutex::Autolock _l(mLock);
 
-    if(DEBUG_QUEUE) {
-        String8 before = dumpQueue(l);
-        ALOGD("Q before delete: %s", before.string());
-    }
-
     for (List<MediaAnalyticsItem *>::iterator it = l->begin();
         it != l->end(); it++) {
         if ((*it)->getSessionID() != item->getSessionID())
             continue;
-
-        if (DEBUG_QUEUE) {
-            ALOGD(" --- removing record for SessionID %" PRId64 "", item->getSessionID());
-            ALOGD("drop record at %s:%d", __FILE__, __LINE__);
-        }
         delete *it;
         l->erase(it);
         break;
     }
-
-    if (DEBUG_QUEUE) {
-        String8 after = dumpQueue(l);
-        ALOGD("Q after delete: %s", after.string());
-    }
 }
 
 static AString allowedKeys[] =
@@ -579,5 +679,43 @@
     return false;
 }
 
+// hand the record to the appropriate summarizer.
+// the summarizer path makes its own copy to save/summarize; the caller
+// retains ownership of the item.
+void MediaAnalyticsService::summarize(MediaAnalyticsItem *item) {
+
+    ALOGV("MediaAnalyticsService::summarize()");
+
+    if (item == NULL) {
+        return;
+    }
+
+    nsecs_t now = systemTime(SYSTEM_TIME_REALTIME);
+    if (mCurrentSet == NULL
+        || (mCurrentSet->getStarted() + mNewSetInterval < now)) {
+        newSummarizerSet();
+    }
+
+    if (mCurrentSet == NULL) {
+        return;
+    }
+
+    List<MetricsSummarizer *> *summarizers = mCurrentSet->getSummarizers();
+    List<MetricsSummarizer *>::iterator it = summarizers->begin();
+    for (; it != summarizers->end(); it++) {
+        if ((*it)->isMine(*item)) {
+            break;
+        }
+    }
+    if (it == summarizers->end()) {
+        ALOGD("no handler for type %s", item->getKey().c_str());
+        return;               // no handler
+    }
+
+    // invoke the summarizer. summarizer will make whatever copies
+    // it wants; the caller retains ownership of item.
+
+    (*it)->handleRecord(item);
+
+}
 
 } // namespace android
diff --git a/services/mediaanalytics/MediaAnalyticsService.h b/services/mediaanalytics/MediaAnalyticsService.h
index d2b0f09..6685967 100644
--- a/services/mediaanalytics/MediaAnalyticsService.h
+++ b/services/mediaanalytics/MediaAnalyticsService.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2017 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -28,6 +28,8 @@
 
 #include <media/IMediaAnalyticsService.h>
 
+#include "MetricsSummarizer.h"
+
 
 namespace android {
 
@@ -39,12 +41,6 @@
     // on this side, caller surrenders ownership
     virtual int64_t submit(MediaAnalyticsItem *item, bool forcenew);
 
-    virtual List<MediaAnalyticsItem *>
-            *getMediaAnalyticsItemList(bool finished, int64_t ts);
-    virtual List<MediaAnalyticsItem *>
-            *getMediaAnalyticsItemList(bool finished, int64_t ts, MediaAnalyticsItem::Key key);
-
-
     static  void            instantiate();
     virtual status_t        dump(int fd, const Vector<String16>& args);
 
@@ -58,6 +54,7 @@
     int64_t mItemsSubmitted;
     int64_t mItemsFinalized;
     int64_t mItemsDiscarded;
+    int64_t mSetsDiscarded;
     MediaAnalyticsItem::SessionID_t mLastSessionID;
 
     // partitioned a bit so we don't over serialize
@@ -67,6 +64,10 @@
     // the most we hold in memory
     // up to this many in each queue (open, finalized)
     int32_t mMaxRecords;
+    // # of sets of summaries
+    int32_t mMaxRecordSets;
+    // nsecs until we start a new record set
+    nsecs_t mNewSetInterval;
 
     // input validation after arrival from client
     bool contentValid(MediaAnalyticsItem *item, bool isTrusted);
@@ -82,12 +83,47 @@
     MediaAnalyticsItem *findItem(List<MediaAnalyticsItem *> *,
                                      MediaAnalyticsItem *, bool removeit);
 
+    // summarizers
+    void summarize(MediaAnalyticsItem *item);
+    class SummarizerSet {
+        nsecs_t mStarted;
+        List<MetricsSummarizer *> *mSummarizers;
+
+      public:
+        void appendSummarizer(MetricsSummarizer *s) {
+            if (s) {
+                mSummarizers->push_back(s);
+            }
+        };
+        nsecs_t getStarted() { return mStarted;}
+        void setStarted(nsecs_t started) {mStarted = started;}
+        List<MetricsSummarizer *> *getSummarizers() { return mSummarizers;}
+
+        SummarizerSet();
+        ~SummarizerSet();
+    };
+    void newSummarizerSet();
+    List<SummarizerSet *> *mSummarizerSets;
+    SummarizerSet *mCurrentSet;
+    List<MetricsSummarizer *> *getFirstSet() {
+        List<SummarizerSet *>::iterator first = mSummarizerSets->begin();
+        if (first != mSummarizerSets->end()) {
+            return (*first)->getSummarizers();
+        }
+        return NULL;
+    }
+
     void saveItem(MediaAnalyticsItem);
     void saveItem(List<MediaAnalyticsItem *> *, MediaAnalyticsItem *, int);
     void deleteItem(List<MediaAnalyticsItem *> *, MediaAnalyticsItem *);
 
+    // support for generating output
     String8 dumpQueue(List<MediaAnalyticsItem*> *);
-    String8 dumpQueue(List<MediaAnalyticsItem*> *, nsecs_t);
+    String8 dumpQueue(List<MediaAnalyticsItem*> *, nsecs_t, const char *only);
+
+    void dumpHeaders(String8 &result, nsecs_t ts_since);
+    void dumpSummaries(String8 &result, nsecs_t ts_since, const char * only);
+    void dumpRecent(String8 &result, nsecs_t ts_since, const char * only);
 
 };
 
diff --git a/services/mediaanalytics/MetricsSummarizer.cpp b/services/mediaanalytics/MetricsSummarizer.cpp
new file mode 100644
index 0000000..fc8f594
--- /dev/null
+++ b/services/mediaanalytics/MetricsSummarizer.cpp
@@ -0,0 +1,285 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MetricsSummarizer"
+#include <utils/Log.h>
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <utils/threads.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+
+#include <media/IMediaAnalyticsService.h>
+
+#include "MetricsSummarizer.h"
+
+
+namespace android {
+
+#define DEBUG_SORT      0
+#define DEBUG_QUEUE     0
+
+
+MetricsSummarizer::MetricsSummarizer(const char *key)
+    : mIgnorables(NULL)
+{
+    ALOGV("MetricsSummarizer::MetricsSummarizer");
+
+    if (key == NULL) {
+        mKey = key;
+    } else {
+        mKey = strdup(key);
+    }
+
+    mSummaries = new List<MediaAnalyticsItem *>();
+}
+
+MetricsSummarizer::~MetricsSummarizer()
+{
+    ALOGV("MetricsSummarizer::~MetricsSummarizer");
+    if (mKey) {
+        free((void *)mKey);
+        mKey = NULL;
+    }
+
+    // clear the list of items we have saved
+    while (mSummaries->size() > 0) {
+        MediaAnalyticsItem * oitem = *(mSummaries->begin());
+        if (DEBUG_QUEUE) {
+            ALOGD("zap old record: key %s sessionID %" PRId64 " ts %" PRId64 "",
+                oitem->getKey().c_str(), oitem->getSessionID(),
+                oitem->getTimestamp());
+        }
+        mSummaries->erase(mSummaries->begin());
+        delete oitem;
+    }
+}
+
+// so we know what summarizer we were using
+const char *MetricsSummarizer::getKey() {
+    const char *value = mKey;
+    if (value == NULL) {
+        value = "unknown";
+    }
+    return value;
+}
+
+// should the record be given to this summarizer
+bool MetricsSummarizer::isMine(MediaAnalyticsItem &item)
+{
+    const char *incoming = item.getKey().c_str();
+    if (incoming == NULL) {
+        incoming = "unspecified";
+    }
+    if (mKey == NULL)
+        return true;
+    if (strcmp(mKey, incoming) != 0) {
+        return false;
+    }
+    // since nothing failed....
+    return true;
+}
+
+AString MetricsSummarizer::dumpSummary(int &slot)
+{
+    return dumpSummary(slot, NULL);
+}
+
+AString MetricsSummarizer::dumpSummary(int &slot, const char *only)
+{
+    AString value = "";
+
+    List<MediaAnalyticsItem *>::iterator it = mSummaries->begin();
+    if (it != mSummaries->end()) {
+        char buf[16];   // enough for "#####: "
+        for (; it != mSummaries->end(); it++) {
+            if (only != NULL && strcmp(only, (*it)->getKey().c_str()) != 0) {
+                continue;
+            }
+            AString entry = (*it)->toString();
+            snprintf(buf, sizeof(buf), "%5d: ", slot);
+            value.append(buf);
+            value.append(entry.c_str());
+            value.append("\n");
+            slot++;
+        }
+    }
+    return value;
+}
+
+void MetricsSummarizer::setIgnorables(const char **ignorables) {
+    mIgnorables = ignorables;
+}
+
+const char **MetricsSummarizer::getIgnorables() {
+    return mIgnorables;
+}
+
+void MetricsSummarizer::handleRecord(MediaAnalyticsItem *item) {
+
+    ALOGV("MetricsSummarizer::handleRecord() for %s",
+                item == NULL ? "<nothing>" : item->toString().c_str());
+
+    if (item == NULL) {
+        return;
+    }
+
+    List<MediaAnalyticsItem *>::iterator it = mSummaries->begin();
+    for (; it != mSummaries->end(); it++) {
+        bool good = sameAttributes((*it), item, getIgnorables());
+        ALOGV("Match against %s says %d",
+              (*it)->toString().c_str(), good);
+        if (good)
+            break;
+    }
+    if (it == mSummaries->end()) {
+        ALOGV("save new record");
+        item = item->dup();
+        if (item == NULL) {
+            ALOGE("unable to save MediaMetrics record");
+            return;
+        }
+        sortProps(item);
+        item->setInt32("count", 1);
+        mSummaries->push_back(item);
+    } else {
+        ALOGV("increment existing record");
+        (*it)->addInt32("count", 1);
+        mergeRecord(*(*it), *item);
+    }
+}
+
+void MetricsSummarizer::mergeRecord(MediaAnalyticsItem &/*have*/, MediaAnalyticsItem &/*item*/) {
+    // default is no further massaging.
+    ALOGV("MetricsSummarizer::mergeRecord() [default]");
+    return;
+}
+
+
+//
+// Comparators
+//
+
+// testing that all of 'single' is in 'summ'
+// and that the values match.
+// 'summ' may have extra fields.
+// 'ignorable' is a set of things that we don't worry about matching up
+// (usually time- or count-based values we'll sum elsewhere)
+bool MetricsSummarizer::sameAttributes(MediaAnalyticsItem *summ, MediaAnalyticsItem *single, const char **ignorable) {
+
+    if (single == NULL || summ == NULL) {
+        return false;
+    }
+    ALOGV("MetricsSummarizer::sameAttributes(): summ %s", summ->toString().c_str());
+    ALOGV("MetricsSummarizer::sameAttributes(): single %s", single->toString().c_str());
+
+    // this can be made better.
+    for (size_t i = 0; i < single->mPropCount; i++) {
+        MediaAnalyticsItem::Prop *prop1 = &(single->mProps[i]);
+        const char *attrName = prop1->mName;
+        ALOGV("compare on attr '%s'", attrName);
+
+        // is it something we should ignore
+        if (ignorable != NULL) {
+            const char **ig = ignorable;
+            while (*ig) {
+                if (strcmp(*ig, attrName) == 0) {
+                    break;
+                }
+                ig++;
+            }
+            if (*ig) {
+                ALOGV("we don't mind that it has attr '%s'", attrName);
+                continue;
+            }
+        }
+
+        MediaAnalyticsItem::Prop *prop2 = summ->findProp(attrName);
+        if (prop2 == NULL) {
+            ALOGV("summ doesn't have this attr");
+            return false;
+        }
+        if (prop1->mType != prop2->mType) {
+            ALOGV("mismatched attr types");
+            return false;
+        }
+        switch (prop1->mType) {
+            case MediaAnalyticsItem::kTypeInt32:
+                if (prop1->u.int32Value != prop2->u.int32Value)
+                    return false;
+                break;
+            case MediaAnalyticsItem::kTypeInt64:
+                if (prop1->u.int64Value != prop2->u.int64Value)
+                    return false;
+                break;
+            case MediaAnalyticsItem::kTypeDouble:
+                // XXX: watch out for floating point comparisons!
+                if (prop1->u.doubleValue != prop2->u.doubleValue)
+                    return false;
+                break;
+            case MediaAnalyticsItem::kTypeCString:
+                if (strcmp(prop1->u.CStringValue, prop2->u.CStringValue) != 0)
+                    return false;
+                break;
+            case MediaAnalyticsItem::kTypeRate:
+                if (prop1->u.rate.count != prop2->u.rate.count)
+                    return false;
+                if (prop1->u.rate.duration != prop2->u.rate.duration)
+                    return false;
+                break;
+            default:
+                return false;
+        }
+    }
+
+    return true;
+}
+
+bool MetricsSummarizer::sameAttributesId(MediaAnalyticsItem *summ, MediaAnalyticsItem *single, const char **ignorable) {
+
+    // verify they come from the same process
+    if (summ->mPid != single->mPid)
+        return false;
+
+    // and finally do the more expensive validation of the attributes
+    return sameAttributes(summ, single, ignorable);
+}
+
+int MetricsSummarizer::PropSorter(const void *a, const void *b) {
+    MediaAnalyticsItem::Prop *ai = (MediaAnalyticsItem::Prop *)a;
+    MediaAnalyticsItem::Prop *bi = (MediaAnalyticsItem::Prop *)b;
+    return strcmp(ai->mName, bi->mName);
+}
+
+// We sort the properties within each summary so that the dumpsys output reads cleanly.
+void MetricsSummarizer::sortProps(MediaAnalyticsItem *item) {
+    if (item->mPropCount != 0) {
+        if (DEBUG_SORT) {
+            ALOGD("sortProps(pre): %s", item->toString().c_str());
+        }
+        qsort(item->mProps, item->mPropCount,
+              sizeof(MediaAnalyticsItem::Prop), MetricsSummarizer::PropSorter);
+        if (DEBUG_SORT) {
+            ALOGD("sortProps(pst): %s", item->toString().c_str());
+        }
+    }
+}
+
+} // namespace android
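
The comparator above boils down to: every non-ignorable attribute of the incoming record must exist in the summary record with an equal value, while the summary may carry extras. Below is a minimal, self-contained sketch of that rule using plain std::map properties rather than MediaAnalyticsItem; the types and helper names are illustrative stand-ins, not part of this change.

```cpp
#include <iostream>
#include <map>
#include <string>

// Simplified stand-in for a record's attributes.
using Props = std::map<std::string, std::string>;

// True if every attribute of 'single' (except ignorable ones) is present
// in 'summ' with the same value; 'summ' may carry extra attributes.
static bool sameAttributes(const Props& summ, const Props& single,
                           const char* const* ignorable) {
    for (const auto& [name, value] : single) {
        bool skip = false;
        for (const char* const* ig = ignorable; ig && *ig; ig++) {
            if (name == *ig) { skip = true; break; }
        }
        if (skip) continue;
        auto it = summ.find(name);
        if (it == summ.end() || it->second != value) return false;
    }
    return true;
}

int main() {
    static const char* ignorable[] = {"playingMs", nullptr};
    Props summary  = {{"codec", "aac"}, {"playingMs", "1200"}, {"count", "3"}};
    Props incoming = {{"codec", "aac"}, {"playingMs", "40"}};
    std::cout << std::boolalpha
              << sameAttributes(summary, incoming, ignorable) << "\n"; // true
    return 0;
}
```

The null-terminated ignorable array mirrors how player_ignorable is declared in MetricsSummarizerPlayer.cpp.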
diff --git a/services/mediaanalytics/MetricsSummarizer.h b/services/mediaanalytics/MetricsSummarizer.h
new file mode 100644
index 0000000..0b64eac
--- /dev/null
+++ b/services/mediaanalytics/MetricsSummarizer.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_METRICSSUMMARIZER_H
+#define ANDROID_METRICSSUMMARIZER_H
+
+#include <utils/threads.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+
+#include <media/IMediaAnalyticsService.h>
+
+
+namespace android {
+
+class MetricsSummarizer
+{
+
+ public:
+
+    MetricsSummarizer(const char *key);
+    virtual ~MetricsSummarizer();
+
+    // show the key
+    const char * getKey();
+
+    // should the record be given to this summarizer
+    bool isMine(MediaAnalyticsItem &item);
+
+    // hand the record to this summarizer
+    void handleRecord(MediaAnalyticsItem *item);
+
+    virtual void mergeRecord(MediaAnalyticsItem &have, MediaAnalyticsItem &incoming);
+
+    // dump the summarized records (for dumpsys)
+    AString dumpSummary(int &slot);
+    AString dumpSummary(int &slot, const char *only);
+
+    void setIgnorables(const char **);
+    const char **getIgnorables();
+
+ protected:
+
+    // various comparators
+    // "do these records have same attributes and values in those attrs"
+    // ditto, but watch for "error" fields
+    bool sameAttributes(MediaAnalyticsItem *summ, MediaAnalyticsItem *single, const char **ignorable);
+    // attributes + from the same app/userid
+    bool sameAttributesId(MediaAnalyticsItem *summ, MediaAnalyticsItem *single, const char **ignorable);
+
+    static int PropSorter(const void *a, const void *b);
+    void sortProps(MediaAnalyticsItem *item);
+
+ private:
+    const char *mKey;
+    const char **mIgnorables;
+    List<MediaAnalyticsItem *> *mSummaries;
+
+
+};
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
+
+#endif // ANDROID_METRICSSUMMARIZER_H
diff --git a/services/mediaanalytics/MetricsSummarizerCodec.cpp b/services/mediaanalytics/MetricsSummarizerCodec.cpp
new file mode 100644
index 0000000..8c74782
--- /dev/null
+++ b/services/mediaanalytics/MetricsSummarizerCodec.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MetricsSummarizerCodec"
+#include <utils/Log.h>
+
+
+#include <utils/threads.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+
+#include <media/IMediaAnalyticsService.h>
+
+#include "MetricsSummarizer.h"
+#include "MetricsSummarizerCodec.h"
+
+
+
+
+namespace android {
+
+MetricsSummarizerCodec::MetricsSummarizerCodec(const char *key)
+    : MetricsSummarizer(key)
+{
+    ALOGV("MetricsSummarizerCodec::MetricsSummarizerCodec");
+}
+
+
+} // namespace android
diff --git a/services/mediaanalytics/MetricsSummarizerCodec.h b/services/mediaanalytics/MetricsSummarizerCodec.h
new file mode 100644
index 0000000..c01196f
--- /dev/null
+++ b/services/mediaanalytics/MetricsSummarizerCodec.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_METRICSSUMMARIZERCODEC_H
+#define ANDROID_METRICSSUMMARIZERCODEC_H
+
+#include <utils/threads.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+
+#include <media/IMediaAnalyticsService.h>
+#include "MetricsSummarizer.h"
+
+
+namespace android {
+
+class MetricsSummarizerCodec : public MetricsSummarizer
+{
+
+ public:
+
+    MetricsSummarizerCodec(const char *key);
+    virtual ~MetricsSummarizerCodec() {};
+
+};
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
+
+#endif // ANDROID_METRICSSUMMARIZERCODEC_H
diff --git a/services/mediaanalytics/MetricsSummarizerExtractor.cpp b/services/mediaanalytics/MetricsSummarizerExtractor.cpp
new file mode 100644
index 0000000..190f87d
--- /dev/null
+++ b/services/mediaanalytics/MetricsSummarizerExtractor.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MetricsSummarizerExtractor"
+#include <utils/Log.h>
+
+#include <utils/threads.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+
+#include <media/IMediaAnalyticsService.h>
+
+#include "MetricsSummarizer.h"
+#include "MetricsSummarizerExtractor.h"
+
+
+
+
+namespace android {
+
+MetricsSummarizerExtractor::MetricsSummarizerExtractor(const char *key)
+    : MetricsSummarizer(key)
+{
+    ALOGV("MetricsSummarizerExtractor::MetricsSummarizerExtractor");
+}
+
+} // namespace android
diff --git a/services/mediaanalytics/MetricsSummarizerExtractor.h b/services/mediaanalytics/MetricsSummarizerExtractor.h
new file mode 100644
index 0000000..eee052b
--- /dev/null
+++ b/services/mediaanalytics/MetricsSummarizerExtractor.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_METRICSSUMMARIZEREXTRACTOR_H
+#define ANDROID_METRICSSUMMARIZEREXTRACTOR_H
+
+#include <utils/threads.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+
+#include <media/IMediaAnalyticsService.h>
+#include "MetricsSummarizer.h"
+
+
+namespace android {
+
+class MetricsSummarizerExtractor : public MetricsSummarizer
+{
+
+ public:
+
+    MetricsSummarizerExtractor(const char *key);
+    virtual ~MetricsSummarizerExtractor() {};
+
+};
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
+
+#endif // ANDROID_METRICSSUMMARIZEREXTRACTOR_H
diff --git a/services/mediaanalytics/MetricsSummarizerPlayer.cpp b/services/mediaanalytics/MetricsSummarizerPlayer.cpp
new file mode 100644
index 0000000..5162059
--- /dev/null
+++ b/services/mediaanalytics/MetricsSummarizerPlayer.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MetricsSummarizerPlayer"
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <utils/threads.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+
+#include <media/IMediaAnalyticsService.h>
+
+#include "MetricsSummarizer.h"
+#include "MetricsSummarizerPlayer.h"
+
+
+
+
+namespace android {
+
+static const char *player_ignorable[] = {
+    "android.media.mediaplayer.durationMs",
+    "android.media.mediaplayer.playingMs",
+    "android.media.mediaplayer.frames",
+    "android.media.mediaplayer.dropped",
+    0
+};
+
+MetricsSummarizerPlayer::MetricsSummarizerPlayer(const char *key)
+    : MetricsSummarizer(key)
+{
+    ALOGV("MetricsSummarizerPlayer::MetricsSummarizerPlayer");
+    setIgnorables(player_ignorable);
+}
+
+void MetricsSummarizerPlayer::mergeRecord(MediaAnalyticsItem &summation, MediaAnalyticsItem &item) {
+
+    ALOGV("MetricsSummarizerPlayer::mergeRecord()");
+
+    //
+    // We sum times and frame counts.
+    // Be careful about the special "-1" values that indicate 'unknown':
+    // treat those as 0, i.e. do not sum them into the totals.
+    int64_t duration = 0;
+    if (item.getInt64("android.media.mediaplayer.durationMs", &duration)) {
+        ALOGV("found durationMs of %" PRId64, duration);
+        if (duration >= 0) {
+            summation.addInt64("android.media.mediaplayer.durationMs", duration);
+        }
+    }
+    int64_t playing = 0;
+    if (item.getInt64("android.media.mediaplayer.playingMs", &playing)) {
+        ALOGV("found playingMs of %" PRId64, playing);
+        if (playing >= 0) {
+            summation.addInt64("android.media.mediaplayer.playingMs", playing);
+        }
+    }
+    int64_t frames = 0;
+    if (item.getInt64("android.media.mediaplayer.frames", &frames)) {
+        ALOGV("found frames of %" PRId64, frames);
+        if (frames >= 0) {
+            summation.addInt64("android.media.mediaplayer.frames", frames);
+        }
+    }
+    int64_t dropped = 0;
+    if (item.getInt64("android.media.mediaplayer.dropped", &dropped)) {
+        ALOGV("found dropped of %" PRId64, dropped);
+        if (dropped >= 0) {
+            summation.addInt64("android.media.mediaplayer.dropped", dropped);
+        }
+    }
+}
+
+} // namespace android
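
The merge rule above is just conditional accumulation: add a field into the running total only when the incoming value is known, treating the "-1" sentinel as a no-op. A tiny self-contained sketch of that behaviour follows; accumulate() is a made-up helper, not the MediaAnalyticsItem API.

```cpp
#include <cstdint>
#include <iostream>

// Illustrative accumulator; -1 marks "unknown" in the incoming record.
static void accumulate(int64_t& total, int64_t incoming) {
    if (incoming >= 0) {          // skip the -1 sentinel
        total += incoming;
    }
}

int main() {
    int64_t playingMsTotal = 0;
    accumulate(playingMsTotal, 1500);    // known value: added
    accumulate(playingMsTotal, -1);      // unknown: ignored
    accumulate(playingMsTotal, 250);     // known value: added
    std::cout << playingMsTotal << "\n"; // 1750
    return 0;
}
```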
diff --git a/services/mediaanalytics/MetricsSummarizerPlayer.h b/services/mediaanalytics/MetricsSummarizerPlayer.h
new file mode 100644
index 0000000..ad1bf74
--- /dev/null
+++ b/services/mediaanalytics/MetricsSummarizerPlayer.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_METRICSSUMMARIZERPLAYER_H
+#define ANDROID_METRICSSUMMARIZERPLAYER_H
+
+#include <utils/threads.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+
+#include <media/IMediaAnalyticsService.h>
+#include "MetricsSummarizer.h"
+
+
+namespace android {
+
+class MetricsSummarizerPlayer : public MetricsSummarizer
+{
+
+ public:
+
+    MetricsSummarizerPlayer(const char *key);
+    virtual ~MetricsSummarizerPlayer() {};
+
+    virtual void mergeRecord(MediaAnalyticsItem &have, MediaAnalyticsItem &incoming);
+
+};
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
+
+#endif // ANDROID_METRICSSUMMARIZERPLAYER_H
diff --git a/services/mediaanalytics/MetricsSummarizerRecorder.cpp b/services/mediaanalytics/MetricsSummarizerRecorder.cpp
new file mode 100644
index 0000000..c2919c3
--- /dev/null
+++ b/services/mediaanalytics/MetricsSummarizerRecorder.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MetricsSummarizerRecorder"
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <utils/threads.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+
+#include <media/IMediaAnalyticsService.h>
+
+#include "MetricsSummarizer.h"
+#include "MetricsSummarizerRecorder.h"
+
+
+
+
+namespace android {
+
+MetricsSummarizerRecorder::MetricsSummarizerRecorder(const char *key)
+    : MetricsSummarizer(key)
+{
+    ALOGV("MetricsSummarizerRecorder::MetricsSummarizerRecorder");
+}
+
+} // namespace android
diff --git a/services/mediaanalytics/MetricsSummarizerRecorder.h b/services/mediaanalytics/MetricsSummarizerRecorder.h
new file mode 100644
index 0000000..963baab
--- /dev/null
+++ b/services/mediaanalytics/MetricsSummarizerRecorder.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_METRICSSUMMARIZERRECORDER_H
+#define ANDROID_METRICSSUMMARIZERRECORDER_H
+
+#include <utils/threads.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+
+#include <media/IMediaAnalyticsService.h>
+#include "MetricsSummarizer.h"
+
+
+namespace android {
+
+class MetricsSummarizerRecorder : public MetricsSummarizer
+{
+
+ public:
+
+    MetricsSummarizerRecorder(const char *key);
+    virtual ~MetricsSummarizerRecorder() {};
+
+};
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
+
+#endif // ANDROID_METRICSSUMMARIZERRECORDER_H
diff --git a/services/oboeservice/AAudioEndpointManager.cpp b/services/oboeservice/AAudioEndpointManager.cpp
new file mode 100644
index 0000000..84fa227
--- /dev/null
+++ b/services/oboeservice/AAudioEndpointManager.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <map>
+#include <mutex>
+#include <utils/Singleton.h>
+
+#include "AAudioEndpointManager.h"
+#include "AAudioServiceEndpoint.h"
+
+using namespace android;
+using namespace aaudio;
+
+ANDROID_SINGLETON_STATIC_INSTANCE(AAudioEndpointManager);
+
+AAudioEndpointManager::AAudioEndpointManager()
+        : Singleton<AAudioEndpointManager>() {
+}
+
+AAudioServiceEndpoint *AAudioEndpointManager::findEndpoint(AAudioService &audioService, int32_t deviceId,
+                                                           aaudio_direction_t direction) {
+    AAudioServiceEndpoint *endpoint = nullptr;
+    std::lock_guard<std::mutex> lock(mLock);
+    switch (direction) {
+        case AAUDIO_DIRECTION_INPUT:
+            endpoint = mInputs[deviceId];
+            break;
+        case AAUDIO_DIRECTION_OUTPUT:
+            endpoint = mOutputs[deviceId];
+            break;
+        default:
+            assert(false); // There are only two possible directions.
+            break;
+    }
+
+    // If we can't find an existing one then open one.
+    ALOGD("AAudioEndpointManager::findEndpoint(), found %p", endpoint);
+    if (endpoint == nullptr) {
+        endpoint = new AAudioServiceEndpoint(audioService);
+        if (endpoint->open(deviceId, direction) != AAUDIO_OK) {
+            ALOGD("AAudioEndpointManager::findEndpoint(), open failed");
+            delete endpoint;
+            endpoint = nullptr;
+        } else {
+            switch(direction) {
+                case AAUDIO_DIRECTION_INPUT:
+                    mInputs[deviceId] = endpoint;
+                    break;
+                case AAUDIO_DIRECTION_OUTPUT:
+                    mOutputs[deviceId] = endpoint;
+                    break;
+            }
+        }
+    }
+    return endpoint;
+}
+
+// FIXME add a reference counter for serviceEndpoints and remove them on last use.
+
+void AAudioEndpointManager::removeEndpoint(AAudioServiceEndpoint *serviceEndpoint) {
+    aaudio_direction_t direction = serviceEndpoint->getDirection();
+    int32_t deviceId = serviceEndpoint->getDeviceId();
+
+    std::lock_guard<std::mutex> lock(mLock);
+    switch(direction) {
+        case AAUDIO_DIRECTION_INPUT:
+            mInputs.erase(deviceId);
+            break;
+        case AAUDIO_DIRECTION_OUTPUT:
+            mOutputs.erase(deviceId);
+            break;
+    }
+}
\ No newline at end of file
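findEndpoint() above is a find-or-create lookup guarded by a mutex: return the endpoint already mapped to the device, otherwise open a new one and cache it. A compact sketch of that pattern with placeholder types (Endpoint and findOrCreate() are illustrative, not the service classes):

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <memory>
#include <mutex>

struct Endpoint {                       // placeholder for a service endpoint
    int32_t deviceId = 0;
    bool open() { return true; }        // pretend the device always opens
};

static std::mutex gLock;
static std::map<int32_t, std::shared_ptr<Endpoint>> gOutputs;

// Return the endpoint for deviceId, creating and opening one on first use.
static std::shared_ptr<Endpoint> findOrCreate(int32_t deviceId) {
    std::lock_guard<std::mutex> lock(gLock);
    auto& slot = gOutputs[deviceId];    // default-constructs an empty slot
    if (!slot) {
        auto candidate = std::make_shared<Endpoint>();
        candidate->deviceId = deviceId;
        if (!candidate->open()) return nullptr;  // leave the slot empty
        slot = candidate;
    }
    return slot;
}

int main() {
    auto a = findOrCreate(3);
    auto b = findOrCreate(3);
    std::cout << (a == b) << "\n";      // 1: same endpoint reused
    return 0;
}
```

As in the service code, repeated lookups for the same device return the same endpoint, and a failed open leaves nothing usable behind.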
diff --git a/services/oboeservice/AAudioEndpointManager.h b/services/oboeservice/AAudioEndpointManager.h
new file mode 100644
index 0000000..48b27f0
--- /dev/null
+++ b/services/oboeservice/AAudioEndpointManager.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_ENDPOINT_MANAGER_H
+#define AAUDIO_AAUDIO_ENDPOINT_MANAGER_H
+
+#include <map>
+#include <mutex>
+#include <utils/Singleton.h>
+
+#include "binding/AAudioServiceMessage.h"
+#include "AAudioServiceEndpoint.h"
+
+namespace aaudio {
+
+class AAudioEndpointManager : public android::Singleton<AAudioEndpointManager>{
+public:
+    AAudioEndpointManager();
+    ~AAudioEndpointManager() = default;
+
+    /**
+     * Find a service endpoint for the given deviceId and direction.
+     * If an endpoint does not already exist then it will try to create one.
+     *
+     * @param deviceId
+     * @param direction
+     * @return endpoint or nullptr
+     */
+    AAudioServiceEndpoint *findEndpoint(android::AAudioService &audioService,
+                                        int32_t deviceId,
+                                        aaudio_direction_t direction);
+
+    void removeEndpoint(AAudioServiceEndpoint *serviceEndpoint);
+
+private:
+
+    std::mutex    mLock;
+
+    // We need separate inputs and outputs because they may both have device==0.
+    // TODO review
+    std::map<int32_t, AAudioServiceEndpoint *> mInputs;
+    std::map<int32_t, AAudioServiceEndpoint *> mOutputs;
+
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_AAUDIO_ENDPOINT_MANAGER_H
diff --git a/services/oboeservice/AAudioMixer.cpp b/services/oboeservice/AAudioMixer.cpp
new file mode 100644
index 0000000..70da339
--- /dev/null
+++ b/services/oboeservice/AAudioMixer.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <cstring>
+#include "AAudioMixer.h"
+
+using android::WrappingBuffer;
+using android::FifoBuffer;
+using android::fifo_frames_t;
+
+AAudioMixer::~AAudioMixer() {
+    delete[] mOutputBuffer;
+}
+
+void AAudioMixer::allocate(int32_t samplesPerFrame, int32_t framesPerBurst) {
+    mSamplesPerFrame = samplesPerFrame;
+    mFramesPerBurst = framesPerBurst;
+    int32_t samplesPerBuffer = samplesPerFrame * framesPerBurst;
+    mOutputBuffer = new float[samplesPerBuffer];
+    mBufferSizeInBytes = samplesPerBuffer * sizeof(float);
+}
+
+void AAudioMixer::clear() {
+    memset(mOutputBuffer, 0, mBufferSizeInBytes);
+}
+
+void AAudioMixer::mix(FifoBuffer *fifo, float volume) {
+    WrappingBuffer wrappingBuffer;
+    float *destination = mOutputBuffer;
+    fifo_frames_t framesLeft = mFramesPerBurst;
+
+    // Gather the data from the client. May be in two parts.
+    fifo->getFullDataAvailable(&wrappingBuffer);
+
+    // Mix data in one or two parts.
+    int partIndex = 0;
+    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
+        fifo_frames_t framesToMix = framesLeft;
+        fifo_frames_t framesAvailable = wrappingBuffer.numFrames[partIndex];
+        if (framesAvailable > 0) {
+            if (framesToMix > framesAvailable) {
+                framesToMix = framesAvailable;
+            }
+            mixPart(destination, (float *)wrappingBuffer.data[partIndex], framesToMix, volume);
+
+            destination += framesToMix * mSamplesPerFrame;
+            framesLeft -= framesToMix;
+        }
+        partIndex++;
+    }
+    fifo->getFifoControllerBase()->advanceReadIndex(mFramesPerBurst - framesLeft);
+    if (framesLeft > 0) {
+        ALOGW("AAudioMixer::mix() UNDERFLOW by %d / %d frames ----- UNDERFLOW !!!!!!!!!!",
+              framesLeft, mFramesPerBurst);
+    }
+}
+
+void AAudioMixer::mixPart(float *destination, float *source, int32_t numFrames, float volume) {
+    int32_t numSamples = numFrames * mSamplesPerFrame;
+    // TODO maybe optimize using SIMD
+    for (int sampleIndex = 0; sampleIndex < numSamples; sampleIndex++) {
+        *destination++ += *source++ * volume;
+    }
+}
+
+float *AAudioMixer::getOutputBuffer() {
+    return mOutputBuffer;
+}
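
The core of mix()/mixPart() above is a multiply-accumulate over interleaved float samples. Here is a standalone version of that inner loop, independent of FifoBuffer/WrappingBuffer; the buffer sizes and volumes below are arbitrary demo values.

```cpp
#include <array>
#include <cstdint>
#include <iostream>

// Add 'source' into 'destination' scaled by 'volume' (numSamples floats each).
static void mixPart(float* destination, const float* source,
                    int32_t numSamples, float volume) {
    for (int32_t i = 0; i < numSamples; i++) {
        destination[i] += source[i] * volume;
    }
}

int main() {
    std::array<float, 4> mixBus  = {0.0f, 0.0f, 0.0f, 0.0f};
    std::array<float, 4> clientA = {1.0f, 1.0f, 1.0f, 1.0f};
    std::array<float, 4> clientB = {0.5f, 0.5f, 0.5f, 0.5f};

    mixPart(mixBus.data(), clientA.data(), 4, 0.5f);   // first stream
    mixPart(mixBus.data(), clientB.data(), 4, 0.5f);   // second stream

    for (float s : mixBus) std::cout << s << " ";      // 0.75 0.75 0.75 0.75
    std::cout << "\n";
    return 0;
}
```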
diff --git a/services/oboeservice/AAudioMixer.h b/services/oboeservice/AAudioMixer.h
new file mode 100644
index 0000000..2191183
--- /dev/null
+++ b/services/oboeservice/AAudioMixer.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_MIXER_H
+#define AAUDIO_AAUDIO_MIXER_H
+
+#include <stdint.h>
+
+#include <aaudio/AAudio.h>
+#include <fifo/FifoBuffer.h>
+
+class AAudioMixer {
+public:
+    AAudioMixer() {}
+    ~AAudioMixer();
+
+    void allocate(int32_t samplesPerFrame, int32_t framesPerBurst);
+
+    void clear();
+
+    void mix(android::FifoBuffer *fifo, float volume);
+
+    void mixPart(float *destination, float *source, int32_t numFrames, float volume);
+
+    float *getOutputBuffer();
+
+private:
+    float   *mOutputBuffer = nullptr;
+    int32_t  mSamplesPerFrame = 0;
+    int32_t  mFramesPerBurst = 0;
+    int32_t  mBufferSizeInBytes = 0;
+};
+
+
+#endif //AAUDIO_AAUDIO_MIXER_H
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 99b0b4d..e4fa1c5 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -18,28 +18,29 @@
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
-#include <time.h>
-#include <pthread.h>
+//#include <time.h>
+//#include <pthread.h>
 
 #include <aaudio/AAudioDefinitions.h>
+#include <mediautils/SchedulingPolicyService.h>
+#include <utils/String16.h>
 
-#include "HandleTracker.h"
-#include "IAAudioService.h"
-#include "AAudioServiceDefinitions.h"
+#include "binding/AAudioServiceMessage.h"
 #include "AAudioService.h"
-#include "AAudioServiceStreamFakeHal.h"
+#include "AAudioServiceStreamMMAP.h"
+#include "AAudioServiceStreamShared.h"
+#include "AAudioServiceStreamMMAP.h"
+#include "binding/IAAudioService.h"
+#include "utility/HandleTracker.h"
 
 using namespace android;
 using namespace aaudio;
 
 typedef enum
 {
-    AAUDIO_HANDLE_TYPE_DUMMY1, // TODO remove DUMMYs
-    AAUDIO_HANDLE_TYPE_DUMMY2, // make server handles different than client
-    AAUDIO_HANDLE_TYPE_STREAM,
-    AAUDIO_HANDLE_TYPE_COUNT
+    AAUDIO_HANDLE_TYPE_STREAM
 } aaudio_service_handle_type_t;
-static_assert(AAUDIO_HANDLE_TYPE_COUNT <= HANDLE_TRACKER_MAX_TYPES, "Too many handle types.");
+static_assert(AAUDIO_HANDLE_TYPE_STREAM < HANDLE_TRACKER_MAX_TYPES, "Too many handle types.");
 
 android::AAudioService::AAudioService()
     : BnAAudioService() {
@@ -48,18 +49,50 @@
 AAudioService::~AAudioService() {
 }
 
-aaudio_handle_t AAudioService::openStream(aaudio::AAudioStreamRequest &request,
-                                                aaudio::AAudioStreamConfiguration &configuration) {
-    AAudioServiceStreamBase *serviceStream =  new AAudioServiceStreamFakeHal();
-    ALOGD("AAudioService::openStream(): created serviceStream = %p", serviceStream);
-    aaudio_result_t result = serviceStream->open(request, configuration);
-    if (result < 0) {
-        ALOGE("AAudioService::openStream(): open returned %d", result);
+aaudio_handle_t AAudioService::openStream(const aaudio::AAudioStreamRequest &request,
+                                          aaudio::AAudioStreamConfiguration &configurationOutput) {
+    aaudio_result_t result = AAUDIO_OK;
+    AAudioServiceStreamBase *serviceStream = nullptr;
+    const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
+    aaudio_sharing_mode_t sharingMode = configurationInput.getSharingMode();
+    ALOGE("AAudioService::openStream(): sharingMode = %d", sharingMode);
+
+    if (sharingMode != AAUDIO_SHARING_MODE_EXCLUSIVE && sharingMode != AAUDIO_SHARING_MODE_SHARED) {
+        ALOGE("AAudioService::openStream(): unrecognized sharing mode = %d", sharingMode);
+        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+    }
+
+    if (sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE) {
+        ALOGD("AAudioService::openStream(), sharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE");
+        serviceStream = new AAudioServiceStreamMMAP();
+        result = serviceStream->open(request, configurationOutput);
+        if (result != AAUDIO_OK) {
+            // fall back to using a shared stream
+            ALOGD("AAudioService::openStream(), EXCLUSIVE mode failed");
+            delete serviceStream;
+            serviceStream = nullptr;
+        } else {
+            configurationOutput.setSharingMode(AAUDIO_SHARING_MODE_EXCLUSIVE);
+        }
+    }
+
+    // if SHARED requested or if EXCLUSIVE failed
+    if (serviceStream == nullptr) {
+        ALOGD("AAudioService::openStream(), sharingMode = AAUDIO_SHARING_MODE_SHARED");
+        serviceStream =  new AAudioServiceStreamShared(*this);
+        result = serviceStream->open(request, configurationOutput);
+        configurationOutput.setSharingMode(AAUDIO_SHARING_MODE_SHARED);
+    }
+
+    if (result != AAUDIO_OK) {
+        delete serviceStream;
+        ALOGE("AAudioService::openStream(): failed, return %d", result);
         return result;
     } else {
         aaudio_handle_t handle = mHandleTracker.put(AAUDIO_HANDLE_TYPE_STREAM, serviceStream);
         ALOGD("AAudioService::openStream(): handle = 0x%08X", handle);
         if (handle < 0) {
+            ALOGE("AAudioService::openStream(): handle table full");
             delete serviceStream;
         }
         return handle;
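
The open logic above follows a try-preferred-then-fallback shape: attempt an EXCLUSIVE MMAP stream first and, if that fails, fall back to a SHARED stream served by the mixer. The sketch below shows that control flow with placeholder stream types, not the actual AAudio classes.

```cpp
#include <iostream>
#include <memory>

struct Stream {                              // placeholder stream type
    virtual ~Stream() = default;
    virtual bool open() = 0;
    virtual const char* kind() const = 0;
};

struct ExclusiveStream : Stream {            // pretend exclusive open can fail
    bool open() override { return false; }
    const char* kind() const override { return "EXCLUSIVE"; }
};

struct SharedStream : Stream {
    bool open() override { return true; }
    const char* kind() const override { return "SHARED"; }
};

// Try the preferred (exclusive) path first, then fall back to shared.
static std::unique_ptr<Stream> openStream(bool wantExclusive) {
    if (wantExclusive) {
        auto exclusive = std::make_unique<ExclusiveStream>();
        if (exclusive->open()) return exclusive;
        // fall through: exclusive not available on this device
    }
    auto shared = std::make_unique<SharedStream>();
    if (!shared->open()) return nullptr;
    return shared;
}

int main() {
    auto stream = openStream(true);
    std::cout << (stream ? stream->kind() : "none") << "\n"; // SHARED
    return 0;
}
```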
@@ -72,7 +105,7 @@
                                   streamHandle);
     ALOGD("AAudioService.closeStream(0x%08X)", streamHandle);
     if (serviceStream != nullptr) {
-        ALOGD("AAudioService::closeStream(): deleting serviceStream = %p", serviceStream);
+        serviceStream->close();
         delete serviceStream;
         return AAUDIO_OK;
     }
@@ -89,27 +122,32 @@
                 aaudio_handle_t streamHandle,
                 aaudio::AudioEndpointParcelable &parcelable) {
     AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
-    ALOGD("AAudioService::getStreamDescription(), serviceStream = %p", serviceStream);
     if (serviceStream == nullptr) {
+        ALOGE("AAudioService::getStreamDescription(), illegal stream handle = 0x%0x", streamHandle);
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
-    return serviceStream->getDescription(parcelable);
+    ALOGD("AAudioService::getStreamDescription(), handle = 0x%08x", streamHandle);
+    aaudio_result_t result = serviceStream->getDescription(parcelable);
+    ALOGD("AAudioService::getStreamDescription(), result = %d", result);
+    // parcelable.dump();
+    return result;
 }
 
 aaudio_result_t AAudioService::startStream(aaudio_handle_t streamHandle) {
     AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
-    ALOGD("AAudioService::startStream(), serviceStream = %p", serviceStream);
     if (serviceStream == nullptr) {
+        ALOGE("AAudioService::startStream(), illegal stream handle = 0x%0x", streamHandle);
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
     aaudio_result_t result = serviceStream->start();
+    ALOGD("AAudioService::startStream(), serviceStream->start() returned %d", result);
     return result;
 }
 
 aaudio_result_t AAudioService::pauseStream(aaudio_handle_t streamHandle) {
     AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
-    ALOGD("AAudioService::pauseStream(), serviceStream = %p", serviceStream);
     if (serviceStream == nullptr) {
+        ALOGE("AAudioService::pauseStream(), illegal stream handle = 0x%0x", streamHandle);
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
     aaudio_result_t result = serviceStream->pause();
@@ -118,35 +156,33 @@
 
 aaudio_result_t AAudioService::flushStream(aaudio_handle_t streamHandle) {
     AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
-    ALOGD("AAudioService::flushStream(), serviceStream = %p", serviceStream);
     if (serviceStream == nullptr) {
+        ALOGE("AAudioService::flushStream(), illegal stream handle = 0x%0x", streamHandle);
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
     return serviceStream->flush();
 }
 
 aaudio_result_t AAudioService::registerAudioThread(aaudio_handle_t streamHandle,
+                                                         pid_t clientProcessId,
                                                          pid_t clientThreadId,
                                                          int64_t periodNanoseconds) {
     AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
     ALOGD("AAudioService::registerAudioThread(), serviceStream = %p", serviceStream);
     if (serviceStream == nullptr) {
-        ALOGE("AAudioService::registerAudioThread(), serviceStream == nullptr");
+        ALOGE("AAudioService::registerAudioThread(), illegal stream handle = 0x%0x", streamHandle);
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
     if (serviceStream->getRegisteredThread() != AAudioServiceStreamBase::ILLEGAL_THREAD_ID) {
         ALOGE("AAudioService::registerAudioThread(), thread already registered");
-        return AAUDIO_ERROR_INVALID_ORDER;
+        return AAUDIO_ERROR_INVALID_STATE;
     }
     serviceStream->setRegisteredThread(clientThreadId);
-    // Boost client thread to SCHED_FIFO
-    struct sched_param sp;
-    memset(&sp, 0, sizeof(sp));
-    sp.sched_priority = 2; // TODO use 'requestPriority' function from frameworks/av/media/utils
-    int err = sched_setscheduler(clientThreadId, SCHED_FIFO, &sp);
+    int err = android::requestPriority(clientProcessId, clientThreadId,
+                                       DEFAULT_AUDIO_PRIORITY, true /* isForApp */);
     if (err != 0){
-        ALOGE("AAudioService::sched_setscheduler() failed, errno = %d, priority = %d",
-              errno, sp.sched_priority);
+        ALOGE("AAudioService::registerAudioThread() failed, errno = %d, priority = %d",
+              errno, DEFAULT_AUDIO_PRIORITY);
         return AAUDIO_ERROR_INTERNAL;
     } else {
         return AAUDIO_OK;
@@ -154,11 +190,13 @@
 }
 
 aaudio_result_t AAudioService::unregisterAudioThread(aaudio_handle_t streamHandle,
-                                                           pid_t clientThreadId) {
+                                                     pid_t clientProcessId,
+                                                     pid_t clientThreadId) {
     AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
     ALOGI("AAudioService::unregisterAudioThread(), serviceStream = %p", serviceStream);
     if (serviceStream == nullptr) {
-        ALOGE("AAudioService::unregisterAudioThread(), serviceStream == nullptr");
+        ALOGE("AAudioService::unregisterAudioThread(), illegal stream handle = 0x%0x",
+              streamHandle);
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
     if (serviceStream->getRegisteredThread() != clientThreadId) {
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
index a520d7a..5a7a2b6 100644
--- a/services/oboeservice/AAudioService.h
+++ b/services/oboeservice/AAudioService.h
@@ -22,17 +22,19 @@
 
 #include <binder/BinderService.h>
 
-#include <aaudio/AAudioDefinitions.h>
 #include <aaudio/AAudio.h>
 #include "utility/HandleTracker.h"
-#include "IAAudioService.h"
+#include "binding/IAAudioService.h"
+#include "binding/AAudioServiceInterface.h"
+
 #include "AAudioServiceStreamBase.h"
 
 namespace android {
 
 class AAudioService :
     public BinderService<AAudioService>,
-    public BnAAudioService
+    public BnAAudioService,
+    public aaudio::AAudioServiceInterface
 {
     friend class BinderService<AAudioService>;
 
@@ -40,9 +42,9 @@
     AAudioService();
     virtual ~AAudioService();
 
-    static const char* getServiceName() { return "media.audio_aaudio"; }
+    static const char* getServiceName() { return AAUDIO_SERVICE_NAME; }
 
-    virtual aaudio_handle_t openStream(aaudio::AAudioStreamRequest &request,
+    virtual aaudio_handle_t openStream(const aaudio::AAudioStreamRequest &request,
                                      aaudio::AAudioStreamConfiguration &configuration);
 
     virtual aaudio_result_t closeStream(aaudio_handle_t streamHandle);
@@ -58,9 +60,11 @@
     virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle);
 
     virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
-                                              pid_t pid, int64_t periodNanoseconds) ;
+                                              pid_t pid, pid_t tid,
+                                              int64_t periodNanoseconds) ;
 
-    virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle, pid_t pid);
+    virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
+                                                  pid_t pid, pid_t tid);
 
 private:
 
@@ -68,6 +72,9 @@
 
     HandleTracker mHandleTracker;
 
+    enum constants {
+        DEFAULT_AUDIO_PRIORITY = 2
+    };
 };
 
 } /* namespace android */
diff --git a/services/oboeservice/AAudioServiceDefinitions.h b/services/oboeservice/AAudioServiceDefinitions.h
deleted file mode 100644
index f98acbf..0000000
--- a/services/oboeservice/AAudioServiceDefinitions.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef AAUDIO_AAUDIO_SERVICE_DEFINITIONS_H
-#define AAUDIO_AAUDIO_SERVICE_DEFINITIONS_H
-
-#include <stdint.h>
-
-#include <aaudio/AAudio.h>
-
-#include "binding/RingBufferParcelable.h"
-
-namespace aaudio {
-
-// TODO move this an "include" folder for the service.
-
-struct AAudioMessageTimestamp {
-    int64_t position;
-    int64_t                deviceOffset; // add to client position to get device position
-    int64_t     timestamp;
-};
-
-typedef enum aaudio_service_event_e : uint32_t {
-    AAUDIO_SERVICE_EVENT_STARTED,
-    AAUDIO_SERVICE_EVENT_PAUSED,
-    AAUDIO_SERVICE_EVENT_FLUSHED,
-    AAUDIO_SERVICE_EVENT_CLOSED,
-    AAUDIO_SERVICE_EVENT_DISCONNECTED
-} aaudio_service_event_t;
-
-struct AAudioMessageEvent {
-    aaudio_service_event_t event;
-    int32_t data1;
-    int64_t data2;
-};
-
-typedef struct AAudioServiceMessage_s {
-    enum class code : uint32_t {
-        NOTHING,
-        TIMESTAMP,
-        EVENT,
-    };
-
-    code what;
-    union {
-        AAudioMessageTimestamp timestamp;
-        AAudioMessageEvent event;
-    };
-} AAudioServiceMessage;
-
-} /* namespace aaudio */
-
-#endif //AAUDIO_AAUDIO_SERVICE_DEFINITIONS_H
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
new file mode 100644
index 0000000..80551c9
--- /dev/null
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <mutex>
+#include <vector>
+
+#include "core/AudioStreamBuilder.h"
+#include "AAudioServiceEndpoint.h"
+#include "AAudioServiceStreamShared.h"
+
+using namespace android;  // TODO just import names needed
+using namespace aaudio;   // TODO just import names needed
+
+#define MIN_TIMEOUT_NANOS        (1000 * AAUDIO_NANOS_PER_MILLISECOND)
+
+// Wait at least this many times longer than the operation should take.
+#define MIN_TIMEOUT_OPERATIONS    4
+
+// The mStreamInternal will use a service interface that does not go through Binder.
+AAudioServiceEndpoint::AAudioServiceEndpoint(AAudioService &audioService)
+        : mStreamInternal(audioService, true)
+        {
+}
+
+AAudioServiceEndpoint::~AAudioServiceEndpoint() {
+}
+
+// Set up an EXCLUSIVE MMAP stream that will be shared.
+aaudio_result_t AAudioServiceEndpoint::open(int32_t deviceId, aaudio_direction_t direction) {
+    AudioStreamBuilder builder;
+    builder.setSharingMode(AAUDIO_SHARING_MODE_EXCLUSIVE);
+    builder.setDeviceId(deviceId);
+    builder.setDirection(direction);
+    aaudio_result_t result = mStreamInternal.open(builder);
+    if (result == AAUDIO_OK) {
+        mMixer.allocate(mStreamInternal.getSamplesPerFrame(), mStreamInternal.getFramesPerBurst());
+    }
+    return result;
+}
+
+aaudio_result_t AAudioServiceEndpoint::close() {
+    return mStreamInternal.close();
+}
+
+// TODO, maybe use an interface to reduce exposure
+aaudio_result_t AAudioServiceEndpoint::registerStream(AAudioServiceStreamShared *sharedStream) {
+    ALOGD("AAudioServiceEndpoint::registerStream(%p)", sharedStream);
+    // TODO use real-time technique to avoid mutex, eg. atomic command FIFO
+    std::lock_guard<std::mutex> lock(mLockStreams);
+    mRegisteredStreams.push_back(sharedStream);
+    return AAUDIO_OK;
+}
+
+aaudio_result_t AAudioServiceEndpoint::unregisterStream(AAudioServiceStreamShared *sharedStream) {
+    ALOGD("AAudioServiceEndpoint::unregisterStream(%p)", sharedStream);
+    std::lock_guard<std::mutex> lock(mLockStreams);
+    mRegisteredStreams.erase(std::remove(mRegisteredStreams.begin(), mRegisteredStreams.end(), sharedStream),
+              mRegisteredStreams.end());
+    return AAUDIO_OK;
+}
+
+aaudio_result_t AAudioServiceEndpoint::startStream(AAudioServiceStreamShared *sharedStream) {
+    // TODO use real-time technique to avoid mutex, eg. atomic command FIFO
+    ALOGD("AAudioServiceEndpoint(): startStream() entering");
+    std::lock_guard<std::mutex> lock(mLockStreams);
+    mRunningStreams.push_back(sharedStream);
+    if (mRunningStreams.size() == 1) {
+        startMixer_l();
+    }
+    return AAUDIO_OK;
+}
+
+aaudio_result_t AAudioServiceEndpoint::stopStream(AAudioServiceStreamShared *sharedStream) {
+    std::lock_guard<std::mutex> lock(mLockStreams);
+    mRunningStreams.erase(std::remove(mRunningStreams.begin(), mRunningStreams.end(), sharedStream),
+              mRunningStreams.end());
+    if (mRunningStreams.size() == 0) {
+        stopMixer_l();
+    }
+    return AAUDIO_OK;
+}
+
+static void *aaudio_mixer_thread_proc(void *context) {
+    AAudioServiceEndpoint *stream = (AAudioServiceEndpoint *) context;
+    //LOGD("AudioStreamAAudio(): oboe_callback_thread, stream = %p", stream);
+    if (stream != NULL) {
+        return stream->callbackLoop();
+    } else {
+        return NULL;
+    }
+}
+
+// Render audio in the application callback and then write the data to the stream.
+void *AAudioServiceEndpoint::callbackLoop() {
+    aaudio_result_t result = AAUDIO_OK;
+
+    ALOGD("AAudioServiceEndpoint(): callbackLoop() entering");
+
+    result = mStreamInternal.requestStart();
+    ALOGD("AAudioServiceEndpoint(): callbackLoop() after requestStart()  %d, isPlaying() = %d",
+          result, (int) mStreamInternal.isPlaying());
+
+    // result might be a frame count
+    while (mCallbackEnabled.load() && mStreamInternal.isPlaying() && (result >= 0)) {
+        // Mix data from each active stream.
+        {
+            mMixer.clear();
+            std::lock_guard<std::mutex> lock(mLockStreams);
+            for(AAudioServiceStreamShared *sharedStream : mRunningStreams) {
+                FifoBuffer *fifo = sharedStream->getDataFifoBuffer();
+                float volume = 0.5; // TODO get from system
+                mMixer.mix(fifo, volume);
+            }
+        }
+
+        // Write audio data to stream using a blocking write.
+        ALOGD("AAudioServiceEndpoint(): callbackLoop() write(%d)", getFramesPerBurst());
+        int64_t timeoutNanos = calculateReasonableTimeout(mStreamInternal.getFramesPerBurst());
+        result = mStreamInternal.write(mMixer.getOutputBuffer(), getFramesPerBurst(), timeoutNanos);
+        if (result == AAUDIO_ERROR_DISCONNECTED) {
+            disconnectRegisteredStreams();
+            break;
+        } else if (result != getFramesPerBurst()) {
+            ALOGW("AAudioServiceEndpoint(): callbackLoop() wrote %d / %d",
+                  result, getFramesPerBurst());
+            break;
+        }
+    }
+
+    ALOGD("AAudioServiceEndpoint(): callbackLoop() exiting, result = %d, isPlaying() = %d",
+          result, (int) mStreamInternal.isPlaying());
+
+    result = mStreamInternal.requestStop();
+
+    return NULL; // TODO review
+}
+
+aaudio_result_t AAudioServiceEndpoint::startMixer_l() {
+    // Launch the callback loop thread.
+    int64_t periodNanos = mStreamInternal.getFramesPerBurst()
+                          * AAUDIO_NANOS_PER_SECOND
+                          / getSampleRate();
+    mCallbackEnabled.store(true);
+    return mStreamInternal.createThread(periodNanos, aaudio_mixer_thread_proc, this);
+}
+
+aaudio_result_t AAudioServiceEndpoint::stopMixer_l() {
+    mCallbackEnabled.store(false);
+    return mStreamInternal.joinThread(NULL, calculateReasonableTimeout(mStreamInternal.getFramesPerBurst()));
+}
+
+// TODO Call method in AudioStreamInternal when that callback CL is merged.
+int64_t AAudioServiceEndpoint::calculateReasonableTimeout(int32_t framesPerOperation) {
+
+    // Wait for at least a second or some number of callbacks to join the thread.
+    int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS * framesPerOperation * AAUDIO_NANOS_PER_SECOND)
+                                 / getSampleRate();
+    if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) { // arbitrary number of seconds
+        timeoutNanoseconds = MIN_TIMEOUT_NANOS;
+    }
+    return timeoutNanoseconds;
+}
+
+void AAudioServiceEndpoint::disconnectRegisteredStreams() {
+    std::lock_guard<std::mutex> lock(mLockStreams);
+    for(AAudioServiceStreamShared *sharedStream : mRunningStreams) {
+        sharedStream->onStop();
+    }
+    mRunningStreams.clear();
+    for(AAudioServiceStreamShared *sharedStream : mRegisteredStreams) {
+        sharedStream->onDisconnect();
+    }
+    mRegisteredStreams.clear();
+}
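
To make calculateReasonableTimeout() above concrete: the timeout is MIN_TIMEOUT_OPERATIONS bursts' worth of time at the stream's sample rate, floored at MIN_TIMEOUT_NANOS (one second). A standalone check of that arithmetic, with the constants copied from this file:

```cpp
#include <cstdint>
#include <iostream>

constexpr int64_t kNanosPerSecond       = 1000000000LL;
constexpr int64_t kNanosPerMillisecond  = 1000000LL;
constexpr int64_t kMinTimeoutNanos      = 1000 * kNanosPerMillisecond; // 1 second
constexpr int64_t kMinTimeoutOperations = 4;

// Same shape as calculateReasonableTimeout(): N operations' worth of frames,
// converted to nanoseconds at the sample rate, floored at one second.
static int64_t reasonableTimeout(int32_t framesPerOperation, int32_t sampleRate) {
    int64_t timeout = (kMinTimeoutOperations * framesPerOperation * kNanosPerSecond)
                      / sampleRate;
    return (timeout < kMinTimeoutNanos) ? kMinTimeoutNanos : timeout;
}

int main() {
    // 192-frame bursts at 48 kHz: 4 * 192 / 48000 s = 16 ms, so the 1 s floor wins.
    std::cout << reasonableTimeout(192, 48000) << " ns\n";   // 1000000000 ns
    // Very large bursts exceed the floor: 4 * 24000 / 48000 s = 2 s.
    std::cout << reasonableTimeout(24000, 48000) << " ns\n"; // 2000000000 ns
    return 0;
}
```

For typical burst sizes the one-second floor dominates; only very large bursts push the timeout higher.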
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
new file mode 100644
index 0000000..020d38a
--- /dev/null
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_SERVICE_ENDPOINT_H
+#define AAUDIO_SERVICE_ENDPOINT_H
+
+#include <atomic>
+#include <functional>
+#include <mutex>
+#include <vector>
+
+#include "client/AudioStreamInternal.h"
+#include "binding/AAudioServiceMessage.h"
+#include "AAudioServiceStreamShared.h"
+#include "AAudioServiceStreamMMAP.h"
+#include "AAudioMixer.h"
+#include "AAudioService.h"
+
+namespace aaudio {
+
+class AAudioServiceEndpoint {
+public:
+    explicit AAudioServiceEndpoint(android::AAudioService &audioService);
+    virtual ~AAudioServiceEndpoint();
+
+    aaudio_result_t open(int32_t deviceId, aaudio_direction_t direction);
+
+    int32_t getSampleRate() const { return mStreamInternal.getSampleRate(); }
+    int32_t getSamplesPerFrame() const { return mStreamInternal.getSamplesPerFrame();  }
+    int32_t getFramesPerBurst() const { return mStreamInternal.getFramesPerBurst();  }
+
+    aaudio_result_t registerStream(AAudioServiceStreamShared *sharedStream);
+    aaudio_result_t unregisterStream(AAudioServiceStreamShared *sharedStream);
+    aaudio_result_t startStream(AAudioServiceStreamShared *sharedStream);
+    aaudio_result_t stopStream(AAudioServiceStreamShared *sharedStream);
+    aaudio_result_t close();
+
+    int32_t getDeviceId() const { return mStreamInternal.getDeviceId(); }
+
+    aaudio_direction_t getDirection() const { return mStreamInternal.getDirection(); }
+
+    void disconnectRegisteredStreams();
+
+    void *callbackLoop();
+
+private:
+    aaudio_result_t startMixer_l();
+    aaudio_result_t stopMixer_l();
+
+    int64_t calculateReasonableTimeout(int32_t framesPerOperation);
+
+    AudioStreamInternal      mStreamInternal;
+    AAudioMixer              mMixer;
+    AAudioServiceStreamMMAP  mStreamMMAP;
+
+    std::atomic<bool>        mCallbackEnabled;
+
+    std::mutex               mLockStreams;
+    std::vector<AAudioServiceStreamShared *> mRegisteredStreams;
+    std::vector<AAudioServiceStreamShared *> mRunningStreams;
+};
+
+} /* namespace aaudio */
+
+
+#endif //AAUDIO_SERVICE_ENDPOINT_H
diff --git a/services/oboeservice/AAudioServiceMain.cpp b/services/oboeservice/AAudioServiceMain.cpp
deleted file mode 100644
index aa89180..0000000
--- a/services/oboeservice/AAudioServiceMain.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "AAudioService"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include <stdint.h>
-#include <math.h>
-
-#include <utils/RefBase.h>
-#include <binder/TextOutput.h>
-
-#include <binder/IInterface.h>
-#include <binder/IBinder.h>
-#include <binder/ProcessState.h>
-#include <binder/IServiceManager.h>
-#include <binder/IPCThreadState.h>
-
-#include <cutils/ashmem.h>
-#include <sys/mman.h>
-
-#include "AAudioServiceDefinitions.h"
-#include "IAAudioService.h"
-#include "AAudioService.h"
-
-using namespace android;
-using namespace aaudio;
-
-/**
- * This is used to test the AAudioService as a standalone application.
- * It is not used when the AAudioService is integrated with AudioFlinger.
- */
-int main(int argc, char **argv) {
-    printf("Test AAudioService %s\n", argv[1]);
-    ALOGD("This is the AAudioService");
-
-    defaultServiceManager()->addService(String16("AAudioService"), new AAudioService());
-    android::ProcessState::self()->startThreadPool();
-    printf("AAudioService service is now ready\n");
-    IPCThreadState::self()->joinThreadPool();
-    printf("AAudioService service thread joined\n");
-
-    return 0;
-}
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index a7938dc..b15043d 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -18,43 +18,138 @@
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
-#include "IAAudioService.h"
-#include "AAudioServiceDefinitions.h"
-#include "AAudioServiceStreamBase.h"
-#include "AudioEndpointParcelable.h"
+#include <mutex>
 
-using namespace android;
-using namespace aaudio;
+#include "binding/IAAudioService.h"
+#include "binding/AAudioServiceMessage.h"
+#include "utility/AudioClock.h"
+
+#include "AAudioServiceStreamBase.h"
+#include "TimestampScheduler.h"
+
+using namespace android;  // TODO just import names needed
+using namespace aaudio;   // TODO just import names needed
 
 /**
- * Construct the AudioCommandQueues and the AudioDataQueue
- * and fill in the endpoint parcelable.
+ * Base class for streams in the service.
  */
 
 AAudioServiceStreamBase::AAudioServiceStreamBase()
         : mUpMessageQueue(nullptr)
-{
-    // TODO could fail so move out of constructor
-    mUpMessageQueue = new SharedRingBuffer();
-    mUpMessageQueue->allocate(sizeof(AAudioServiceMessage), QUEUE_UP_CAPACITY_COMMANDS);
+        , mAAudioThread() {
 }
 
 AAudioServiceStreamBase::~AAudioServiceStreamBase() {
-    Mutex::Autolock _l(mLockUpMessageQueue);
-    delete mUpMessageQueue;
+    close();
 }
 
-void AAudioServiceStreamBase::sendServiceEvent(aaudio_service_event_t event,
-                              int32_t data1,
-                              int64_t data2) {
+aaudio_result_t AAudioServiceStreamBase::open(const aaudio::AAudioStreamRequest &request,
+                     aaudio::AAudioStreamConfiguration &configurationOutput) {
+    std::lock_guard<std::mutex> lock(mLockUpMessageQueue);
+    if (mUpMessageQueue != nullptr) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    } else {
+        mUpMessageQueue = new SharedRingBuffer();
+        return mUpMessageQueue->allocate(sizeof(AAudioServiceMessage), QUEUE_UP_CAPACITY_COMMANDS);
+    }
+}
 
-    Mutex::Autolock _l(mLockUpMessageQueue);
+aaudio_result_t AAudioServiceStreamBase::close() {
+    std::lock_guard<std::mutex> lock(mLockUpMessageQueue);
+    delete mUpMessageQueue;
+    mUpMessageQueue = nullptr;
+    return AAUDIO_OK;
+}
+
+aaudio_result_t AAudioServiceStreamBase::start() {
+    sendServiceEvent(AAUDIO_SERVICE_EVENT_STARTED);
+    mState = AAUDIO_STREAM_STATE_STARTED;
+    mThreadEnabled.store(true);
+    return mAAudioThread.start(this);
+}
+
+aaudio_result_t AAudioServiceStreamBase::pause() {
+
+    sendCurrentTimestamp();
+    mThreadEnabled.store(false);
+    aaudio_result_t result = mAAudioThread.stop();
+    if (result != AAUDIO_OK) {
+        processError();
+        return result;
+    }
+    sendServiceEvent(AAUDIO_SERVICE_EVENT_PAUSED);
+    mState = AAUDIO_STREAM_STATE_PAUSED;
+    return result;
+}
+
+// implement Runnable
+void AAudioServiceStreamBase::run() {
+    ALOGD("AAudioServiceStreamMMAP::run() entering ----------------");
+    TimestampScheduler timestampScheduler;
+    timestampScheduler.setBurstPeriod(mFramesPerBurst, mSampleRate);
+    timestampScheduler.start(AudioClock::getNanoseconds());
+    int64_t nextTime = timestampScheduler.nextAbsoluteTime();
+    while(mThreadEnabled.load()) {
+        if (AudioClock::getNanoseconds() >= nextTime) {
+            aaudio_result_t result = sendCurrentTimestamp();
+            if (result != AAUDIO_OK) {
+                break;
+            }
+            nextTime = timestampScheduler.nextAbsoluteTime();
+        } else {
+            // Sleep until it is time to send the next timestamp.
+            AudioClock::sleepUntilNanoTime(nextTime);
+        }
+    }
+    ALOGD("AAudioServiceStreamMMAP::run() exiting ----------------");
+}
+
+void AAudioServiceStreamBase::processError() {
+    sendServiceEvent(AAUDIO_SERVICE_EVENT_DISCONNECTED);
+}
+
+aaudio_result_t AAudioServiceStreamBase::sendServiceEvent(aaudio_service_event_t event,
+                                               double  dataDouble,
+                                               int64_t dataLong) {
     AAudioServiceMessage command;
     command.what = AAudioServiceMessage::code::EVENT;
     command.event.event = event;
-    command.event.data1 = data1;
-    command.event.data2 = data2;
-    mUpMessageQueue->getFifoBuffer()->write(&command, 1);
+    command.event.dataDouble = dataDouble;
+    command.event.dataLong = dataLong;
+    return writeUpMessageQueue(&command);
+}
+
+aaudio_result_t AAudioServiceStreamBase::writeUpMessageQueue(AAudioServiceMessage *command) {
+    std::lock_guard<std::mutex> lock(mLockUpMessageQueue);
+    int32_t count = mUpMessageQueue->getFifoBuffer()->write(command, 1);
+    if (count != 1) {
+        ALOGE("writeUpMessageQueue(): Queue full. Did client die?");
+        return AAUDIO_ERROR_WOULD_BLOCK;
+    } else {
+        return AAUDIO_OK;
+    }
+}
+
+aaudio_result_t AAudioServiceStreamBase::sendCurrentTimestamp() {
+    AAudioServiceMessage command;
+    aaudio_result_t result = getFreeRunningPosition(&command.timestamp.position,
+                                                    &command.timestamp.timestamp);
+    if (result == AAUDIO_OK) {
+        command.what = AAudioServiceMessage::code::TIMESTAMP;
+        result = writeUpMessageQueue(&command);
+    }
+    return result;
 }
 
 
+/**
+ * Get an immutable description of the in-memory queues
+ * used to communicate with the underlying HAL or Service.
+ */
+aaudio_result_t AAudioServiceStreamBase::getDescription(AudioEndpointParcelable &parcelable) {
+    // Gather information on the message queue.
+    mUpMessageQueue->fillParcelable(parcelable,
+                                    parcelable.mUpMessageQueueParcelable);
+    return getDownDataDescription(parcelable);
+}
\ No newline at end of file
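
The run() loop above follows a common pattern for a housekeeping thread: compute the next burst-aligned wake-up time, sleep until then, do the work, and exit when an atomic flag is cleared. Below is a minimal standalone sketch of that pattern, assuming only the C++ standard library; std::thread and std::chrono stand in for AAudioThread and AudioClock, and a printf stands in for sendCurrentTimestamp(). It illustrates the technique and is not code from this patch.

    #include <atomic>
    #include <chrono>
    #include <cstdint>
    #include <cstdio>
    #include <thread>

    // Simplified stand-in for the service's periodic timestamp thread.
    class TimestampPump {
    public:
        void start(int framesPerBurst, int sampleRate) {
            mBurstNanos = static_cast<int64_t>(framesPerBurst) * 1000000000LL / sampleRate;
            mEnabled.store(true);
            mThread = std::thread([this] { run(); });
        }
        void stop() {
            mEnabled.store(false);                        // tell the worker to exit
            if (mThread.joinable()) mThread.join();
        }
    private:
        void run() {
            using Clock = std::chrono::steady_clock;
            auto next = std::chrono::time_point_cast<std::chrono::nanoseconds>(Clock::now());
            while (mEnabled.load()) {
                if (Clock::now() >= next) {
                    std::printf("send timestamp\n");      // stands in for sendCurrentTimestamp()
                    next += std::chrono::nanoseconds(mBurstNanos);
                } else {
                    std::this_thread::sleep_until(next);  // sleep until the next burst boundary
                }
            }
        }
        std::thread       mThread;
        std::atomic<bool> mEnabled{false};
        int64_t           mBurstNanos = 0;
    };

    int main() {
        TimestampPump pump;
        pump.start(192 /* frames per burst */, 48000 /* Hz */);
        std::this_thread::sleep_for(std::chrono::milliseconds(20));
        pump.stop();
        return 0;
    }

As in the patch, the stop flag is std::atomic because one thread writes it while the worker thread reads it.
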
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 7a812f9..91eec35 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -17,13 +17,15 @@
 #ifndef AAUDIO_AAUDIO_SERVICE_STREAM_BASE_H
 #define AAUDIO_AAUDIO_SERVICE_STREAM_BASE_H
 
-#include <utils/Mutex.h>
+#include <mutex>
 
-#include "IAAudioService.h"
-#include "AAudioServiceDefinitions.h"
 #include "fifo/FifoBuffer.h"
+#include "binding/IAAudioService.h"
+#include "binding/AudioEndpointParcelable.h"
+#include "binding/AAudioServiceMessage.h"
+#include "utility/AAudioUtilities.h"
+
 #include "SharedRingBuffer.h"
-#include "AudioEndpointParcelable.h"
 #include "AAudioThread.h"
 
 namespace aaudio {
@@ -32,7 +34,11 @@
 // This should be way more than we need.
 #define QUEUE_UP_CAPACITY_COMMANDS (128)
 
-class AAudioServiceStreamBase {
+/**
+ * Base class for a stream in the AAudio service.
+ */
+class AAudioServiceStreamBase
+    : public Runnable  {
 
 public:
     AAudioServiceStreamBase();
@@ -42,16 +48,14 @@
         ILLEGAL_THREAD_ID = 0
     };
 
-    /**
-     * Fill in a parcelable description of stream.
-     */
-    virtual aaudio_result_t getDescription(aaudio::AudioEndpointParcelable &parcelable) = 0;
-
+    // -------------------------------------------------------------------
     /**
      * Open the device.
      */
-    virtual aaudio_result_t open(aaudio::AAudioStreamRequest &request,
-                               aaudio::AAudioStreamConfiguration &configuration) = 0;
+    virtual aaudio_result_t open(const aaudio::AAudioStreamRequest &request,
+                                 aaudio::AAudioStreamConfiguration &configurationOutput) = 0;
+
+    virtual aaudio_result_t close();
 
     /**
      * Start the flow of data.
@@ -68,39 +72,69 @@
      */
     virtual aaudio_result_t flush() = 0;
 
-    virtual aaudio_result_t close() = 0;
+    // -------------------------------------------------------------------
 
-    virtual void sendCurrentTimestamp() = 0;
+    /**
+     * Send a message to the client.
+     */
+    aaudio_result_t sendServiceEvent(aaudio_service_event_t event,
+                                     double  dataDouble = 0.0,
+                                     int64_t dataLong = 0);
 
-    int32_t getFramesPerBurst() {
-        return mFramesPerBurst;
-    }
+    /**
+     * Fill in a parcelable description of stream.
+     */
+    aaudio_result_t getDescription(AudioEndpointParcelable &parcelable);
 
-    virtual void sendServiceEvent(aaudio_service_event_t event,
-                                  int32_t data1 = 0,
-                                  int64_t data2 = 0);
 
-    virtual void setRegisteredThread(pid_t pid) {
+    void setRegisteredThread(pid_t pid) {
         mRegisteredClientThread = pid;
     }
 
-    virtual pid_t getRegisteredThread() {
+    pid_t getRegisteredThread() const {
         return mRegisteredClientThread;
     }
 
+    int32_t getFramesPerBurst() const {
+        return mFramesPerBurst;
+    }
+
+    int32_t calculateBytesPerFrame() const {
+        return mSamplesPerFrame * AAudioConvert_formatToSizeInBytes(mAudioFormat);
+    }
+
+    void run() override; // to implement Runnable
+
+    void processError();
+
 protected:
+    aaudio_result_t writeUpMessageQueue(AAudioServiceMessage *command);
+
+    aaudio_result_t sendCurrentTimestamp();
+
+    virtual aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) = 0;
+
+    virtual aaudio_result_t getDownDataDescription(AudioEndpointParcelable &parcelable) = 0;
+
+    aaudio_stream_state_t               mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
 
     pid_t              mRegisteredClientThread = ILLEGAL_THREAD_ID;
 
     SharedRingBuffer*  mUpMessageQueue;
+    std::mutex         mLockUpMessageQueue;
 
-    int32_t            mSampleRate = 0;
-    int32_t            mBytesPerFrame = 0;
+    AAudioThread        mAAudioThread;
+    // This is used by one thread to tell another thread to exit. So it must be atomic.
+    std::atomic<bool>   mThreadEnabled;
+
+
+    int                mAudioDataFileDescriptor = -1;
+
+    aaudio_audio_format_t mAudioFormat = AAUDIO_FORMAT_UNSPECIFIED;
     int32_t            mFramesPerBurst = 0;
-    int32_t            mCapacityInFrames = 0;
-    int32_t            mCapacityInBytes = 0;
-
-    android::Mutex     mLockUpMessageQueue;
+    int32_t            mSamplesPerFrame = AAUDIO_UNSPECIFIED;
+    int32_t            mSampleRate = AAUDIO_UNSPECIFIED;
+    int32_t            mCapacityInFrames = AAUDIO_UNSPECIFIED;
 };
 
 } /* namespace aaudio */
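
The header above shows the split of responsibilities: the base class owns the up-message queue, the timestamp thread, and sendCurrentTimestamp(), while each subclass only supplies getFreeRunningPosition() and getDownDataDescription(). A minimal standalone sketch of that template-method split follows, with invented names and a printf standing in for the message queue; it is only an illustration of the structure, assuming nothing beyond the C++ standard library.

    #include <cstdint>
    #include <cstdio>

    // Base class drives timestamp reporting; subclasses supply the position source.
    class StreamBase {
    public:
        virtual ~StreamBase() = default;

        // Non-virtual driver, analogous to sendCurrentTimestamp() in the patch.
        int sendCurrentTimestamp() {
            int64_t frames = 0;
            int64_t timeNanos = 0;
            int result = getFreeRunningPosition(&frames, &timeNanos);
            if (result == 0) {
                std::printf("timestamp: %lld frames at %lld ns\n",
                            static_cast<long long>(frames),
                            static_cast<long long>(timeNanos));
            }
            return result;
        }

    protected:
        // Each stream type reports position differently (HAL counter vs. FIFO read counter).
        virtual int getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) = 0;
    };

    // Toy subclass that fakes a free-running counter.
    class FakeStream : public StreamBase {
    protected:
        int getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override {
            mFrames += 192;                    // pretend one burst elapsed
            *positionFrames = mFrames;
            *timeNanos = mFrames * 1000000LL;  // fake clock
            return 0;
        }
    private:
        int64_t mFrames = 0;
    };

    int main() {
        FakeStream stream;
        stream.sendCurrentTimestamp();
        stream.sendCurrentTimestamp();
        return 0;
    }
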
diff --git a/services/oboeservice/AAudioServiceStreamExclusive.h b/services/oboeservice/AAudioServiceStreamExclusive.h
new file mode 100644
index 0000000..db382a3
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamExclusive.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_SERVICE_STREAM_EXCLUSIVE_H
+#define AAUDIO_AAUDIO_SERVICE_STREAM_EXCLUSIVE_H
+
+#include "AAudioServiceStreamMMAP.h"
+
+namespace aaudio {
+
+/**
+ * Exclusive mode stream in the AAudio service.
+ *
+ * This is currently a stub.
+ * We may move code from AAudioServiceStreamMMAP into this class.
+ * If not, then it will be removed.
+ */
+class AAudioServiceStreamExclusive : public AAudioServiceStreamMMAP {
+
+public:
+    AAudioServiceStreamExclusive() = default;
+    virtual ~AAudioServiceStreamExclusive() = default;
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_AAUDIO_SERVICE_STREAM_EXCLUSIVE_H
diff --git a/services/oboeservice/AAudioServiceStreamFakeHal.cpp b/services/oboeservice/AAudioServiceStreamFakeHal.cpp
deleted file mode 100644
index 71d3542..0000000
--- a/services/oboeservice/AAudioServiceStreamFakeHal.cpp
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "AAudioService"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include <atomic>
-
-#include "AudioClock.h"
-#include "AudioEndpointParcelable.h"
-
-#include "AAudioServiceStreamBase.h"
-#include "AAudioServiceStreamFakeHal.h"
-
-#include "FakeAudioHal.h"
-
-using namespace android;
-using namespace aaudio;
-
-// HACK values for Marlin
-#define CARD_ID              0
-#define DEVICE_ID            19
-
-/**
- * Construct the audio message queuues and message queues.
- */
-
-AAudioServiceStreamFakeHal::AAudioServiceStreamFakeHal()
-        : AAudioServiceStreamBase()
-        , mStreamId(nullptr)
-        , mPreviousFrameCounter(0)
-        , mAAudioThread()
-{
-}
-
-AAudioServiceStreamFakeHal::~AAudioServiceStreamFakeHal() {
-    ALOGD("AAudioServiceStreamFakeHal::~AAudioServiceStreamFakeHal() call close()");
-    close();
-}
-
-aaudio_result_t AAudioServiceStreamFakeHal::open(aaudio::AAudioStreamRequest &request,
-                                       aaudio::AAudioStreamConfiguration &configurationOutput) {
-    // Open stream on HAL and pass information about the ring buffer to the client.
-    mmap_buffer_info mmapInfo;
-    aaudio_result_t error;
-
-    // Open HAL
-    int bufferCapacity = request.getConfiguration().getBufferCapacity();
-    error = fake_hal_open(CARD_ID, DEVICE_ID, bufferCapacity, &mStreamId);
-    if(error < 0) {
-        ALOGE("Could not open card %d, device %d", CARD_ID, DEVICE_ID);
-        return error;
-    }
-
-    // Get information about the shared audio buffer.
-    error = fake_hal_get_mmap_info(mStreamId, &mmapInfo);
-    if (error < 0) {
-        ALOGE("fake_hal_get_mmap_info returned %d", error);
-        fake_hal_close(mStreamId);
-        mStreamId = nullptr;
-        return error;
-    }
-    mHalFileDescriptor = mmapInfo.fd;
-    mFramesPerBurst = mmapInfo.burst_size_in_frames;
-    mCapacityInFrames = mmapInfo.buffer_capacity_in_frames;
-    mCapacityInBytes = mmapInfo.buffer_capacity_in_bytes;
-    mSampleRate = mmapInfo.sample_rate;
-    mBytesPerFrame = mmapInfo.channel_count * sizeof(int16_t); // FIXME based on data format
-    ALOGD("AAudioServiceStreamFakeHal::open() mmapInfo.burst_size_in_frames = %d",
-         mmapInfo.burst_size_in_frames);
-    ALOGD("AAudioServiceStreamFakeHal::open() mmapInfo.buffer_capacity_in_frames = %d",
-         mmapInfo.buffer_capacity_in_frames);
-    ALOGD("AAudioServiceStreamFakeHal::open() mmapInfo.buffer_capacity_in_bytes = %d",
-         mmapInfo.buffer_capacity_in_bytes);
-
-    // Fill in AAudioStreamConfiguration
-    configurationOutput.setSampleRate(mSampleRate);
-    configurationOutput.setSamplesPerFrame(mmapInfo.channel_count);
-    configurationOutput.setAudioFormat(AAUDIO_FORMAT_PCM_I16);
-
-    return AAUDIO_OK;
-}
-
-/**
- * Get an immutable description of the in-memory queues
- * used to communicate with the underlying HAL or Service.
- */
-aaudio_result_t AAudioServiceStreamFakeHal::getDescription(AudioEndpointParcelable &parcelable) {
-    // Gather information on the message queue.
-    mUpMessageQueue->fillParcelable(parcelable,
-                                    parcelable.mUpMessageQueueParcelable);
-
-    // Gather information on the data queue.
-    // TODO refactor into a SharedRingBuffer?
-    int fdIndex = parcelable.addFileDescriptor(mHalFileDescriptor, mCapacityInBytes);
-    parcelable.mDownDataQueueParcelable.setupMemory(fdIndex, 0, mCapacityInBytes);
-    parcelable.mDownDataQueueParcelable.setBytesPerFrame(mBytesPerFrame);
-    parcelable.mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
-    parcelable.mDownDataQueueParcelable.setCapacityInFrames(mCapacityInFrames);
-    return AAUDIO_OK;
-}
-
-/**
- * Start the flow of data.
- */
-aaudio_result_t AAudioServiceStreamFakeHal::start() {
-    if (mStreamId == nullptr) return AAUDIO_ERROR_NULL;
-    aaudio_result_t result = fake_hal_start(mStreamId);
-    sendServiceEvent(AAUDIO_SERVICE_EVENT_STARTED);
-    mState = AAUDIO_STREAM_STATE_STARTED;
-    if (result == AAUDIO_OK) {
-        mThreadEnabled.store(true);
-        result = mAAudioThread.start(this);
-    }
-    return result;
-}
-
-/**
- * Stop the flow of data such that start() can resume with loss of data.
- */
-aaudio_result_t AAudioServiceStreamFakeHal::pause() {
-    if (mStreamId == nullptr) return AAUDIO_ERROR_NULL;
-    sendCurrentTimestamp();
-    aaudio_result_t result = fake_hal_pause(mStreamId);
-    sendServiceEvent(AAUDIO_SERVICE_EVENT_PAUSED);
-    mState = AAUDIO_STREAM_STATE_PAUSED;
-    mFramesRead.reset32();
-    ALOGD("AAudioServiceStreamFakeHal::pause() sent AAUDIO_SERVICE_EVENT_PAUSED");
-    mThreadEnabled.store(false);
-    result = mAAudioThread.stop();
-    return result;
-}
-
-/**
- *  Discard any data held by the underlying HAL or Service.
- */
-aaudio_result_t AAudioServiceStreamFakeHal::flush() {
-    if (mStreamId == nullptr) return AAUDIO_ERROR_NULL;
-    // TODO how do we flush an MMAP/NOIRQ buffer? sync pointers?
-    ALOGD("AAudioServiceStreamFakeHal::pause() send AAUDIO_SERVICE_EVENT_FLUSHED");
-    sendServiceEvent(AAUDIO_SERVICE_EVENT_FLUSHED);
-    mState = AAUDIO_STREAM_STATE_FLUSHED;
-    return AAUDIO_OK;
-}
-
-aaudio_result_t AAudioServiceStreamFakeHal::close() {
-    aaudio_result_t result = AAUDIO_OK;
-    if (mStreamId != nullptr) {
-        result = fake_hal_close(mStreamId);
-        mStreamId = nullptr;
-    }
-    return result;
-}
-
-void AAudioServiceStreamFakeHal::sendCurrentTimestamp() {
-    int frameCounter = 0;
-    int error = fake_hal_get_frame_counter(mStreamId, &frameCounter);
-    if (error < 0) {
-        ALOGE("AAudioServiceStreamFakeHal::sendCurrentTimestamp() error %d",
-                error);
-    } else if (frameCounter != mPreviousFrameCounter) {
-        AAudioServiceMessage command;
-        command.what = AAudioServiceMessage::code::TIMESTAMP;
-        mFramesRead.update32(frameCounter);
-        command.timestamp.position = mFramesRead.get();
-        ALOGD("AAudioServiceStreamFakeHal::sendCurrentTimestamp() HAL frames = %d, pos = %d",
-                frameCounter, (int)mFramesRead.get());
-        command.timestamp.timestamp = AudioClock::getNanoseconds();
-        mUpMessageQueue->getFifoBuffer()->write(&command, 1);
-        mPreviousFrameCounter = frameCounter;
-    }
-}
-
-// implement Runnable
-void AAudioServiceStreamFakeHal::run() {
-    TimestampScheduler timestampScheduler;
-    timestampScheduler.setBurstPeriod(mFramesPerBurst, mSampleRate);
-    timestampScheduler.start(AudioClock::getNanoseconds());
-    while(mThreadEnabled.load()) {
-        int64_t nextTime = timestampScheduler.nextAbsoluteTime();
-        if (AudioClock::getNanoseconds() >= nextTime) {
-            sendCurrentTimestamp();
-        } else  {
-            // Sleep until it is time to send the next timestamp.
-            AudioClock::sleepUntilNanoTime(nextTime);
-        }
-    }
-}
-
diff --git a/services/oboeservice/AAudioServiceStreamFakeHal.h b/services/oboeservice/AAudioServiceStreamFakeHal.h
deleted file mode 100644
index e9480fb..0000000
--- a/services/oboeservice/AAudioServiceStreamFakeHal.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef AAUDIO_AAUDIO_SERVICE_STREAM_FAKE_HAL_H
-#define AAUDIO_AAUDIO_SERVICE_STREAM_FAKE_HAL_H
-
-#include "AAudioServiceDefinitions.h"
-#include "AAudioServiceStreamBase.h"
-#include "FakeAudioHal.h"
-#include "MonotonicCounter.h"
-#include "AudioEndpointParcelable.h"
-#include "TimestampScheduler.h"
-
-namespace aaudio {
-
-class AAudioServiceStreamFakeHal
-    : public AAudioServiceStreamBase
-    , public Runnable {
-
-public:
-    AAudioServiceStreamFakeHal();
-    virtual ~AAudioServiceStreamFakeHal();
-
-    virtual aaudio_result_t getDescription(AudioEndpointParcelable &parcelable) override;
-
-    virtual aaudio_result_t open(aaudio::AAudioStreamRequest &request,
-                                 aaudio::AAudioStreamConfiguration &configurationOutput) override;
-
-    /**
-     * Start the flow of data.
-     */
-    virtual aaudio_result_t start() override;
-
-    /**
-     * Stop the flow of data such that start() can resume with loss of data.
-     */
-    virtual aaudio_result_t pause() override;
-
-    /**
-     *  Discard any data held by the underlying HAL or Service.
-     */
-    virtual aaudio_result_t flush() override;
-
-    virtual aaudio_result_t close() override;
-
-    void sendCurrentTimestamp();
-
-    virtual void run() override; // to implement Runnable
-
-private:
-    fake_hal_stream_ptr    mStreamId; // Move to HAL
-
-    MonotonicCounter       mFramesWritten;
-    MonotonicCounter       mFramesRead;
-    int                    mHalFileDescriptor = -1;
-    int                    mPreviousFrameCounter = 0;   // from HAL
-
-    aaudio_stream_state_t    mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
-
-    AAudioThread             mAAudioThread;
-    std::atomic<bool>      mThreadEnabled;
-};
-
-} // namespace aaudio
-
-#endif //AAUDIO_AAUDIO_SERVICE_STREAM_FAKE_HAL_H
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
new file mode 100644
index 0000000..b70c625
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <atomic>
+#include <stdint.h>
+
+#include <utils/String16.h>
+#include <media/nbaio/AudioStreamOutSink.h>
+#include <media/MmapStreamInterface.h>
+
+#include "AAudioServiceStreamBase.h"
+#include "AAudioServiceStreamMMAP.h"
+#include "binding/AudioEndpointParcelable.h"
+#include "SharedMemoryProxy.h"
+#include "utility/AAudioUtilities.h"
+
+using namespace android;
+using namespace aaudio;
+
+#define AAUDIO_BUFFER_CAPACITY_MIN    (4 * 512)
+#define AAUDIO_SAMPLE_RATE_DEFAULT    48000
+
+/**
+ * Stream that uses an MMAP buffer.
+ */
+
+AAudioServiceStreamMMAP::AAudioServiceStreamMMAP()
+        : AAudioServiceStreamBase()
+        , mMmapStreamCallback(new MyMmapStreamCallback(*this))
+        , mPreviousFrameCounter(0)
+        , mMmapStream(nullptr) {
+}
+
+AAudioServiceStreamMMAP::~AAudioServiceStreamMMAP() {
+    close();
+}
+
+aaudio_result_t AAudioServiceStreamMMAP::close() {
+    ALOGD("AAudioServiceStreamMMAP::close() called, %p", mMmapStream.get());
+    mMmapStream.clear(); // TODO review. Is that all we have to do?
+    return AAudioServiceStreamBase::close();
+}
+
+// Open stream on HAL and pass information about the shared memory buffer back to the client.
+aaudio_result_t AAudioServiceStreamMMAP::open(const aaudio::AAudioStreamRequest &request,
+                                       aaudio::AAudioStreamConfiguration &configurationOutput) {
+    const audio_attributes_t attributes = {
+        .content_type = AUDIO_CONTENT_TYPE_MUSIC,
+        .usage = AUDIO_USAGE_MEDIA,
+        .source = AUDIO_SOURCE_DEFAULT,
+        .flags = AUDIO_FLAG_LOW_LATENCY,
+        .tags = ""
+    };
+    audio_config_base_t config;
+
+    aaudio_result_t result = AAudioServiceStreamBase::open(request, configurationOutput);
+    if (result != AAUDIO_OK) {
+        ALOGE("AAudioServiceStreamBase open returned %d", result);
+        return result;
+    }
+
+    const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
+    audio_port_handle_t deviceId = configurationInput.getDeviceId();
+
+    ALOGI("open request dump()");
+    request.dump();
+
+    mMmapClient.clientUid = request.getUserId();
+    mMmapClient.clientPid = request.getProcessId();
+    aaudio_direction_t direction = request.getDirection();
+
+    // Fill in config
+    aaudio_audio_format_t aaudioFormat = configurationInput.getAudioFormat();
+    if (aaudioFormat == AAUDIO_UNSPECIFIED || aaudioFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+        ALOGI("open forcing use of AAUDIO_FORMAT_PCM_I16");
+        aaudioFormat = AAUDIO_FORMAT_PCM_I16;
+    }
+    config.format = AAudioConvert_aaudioToAndroidDataFormat(aaudioFormat);
+
+    int32_t aaudioSampleRate = configurationInput.getSampleRate();
+    if (aaudioSampleRate == AAUDIO_UNSPECIFIED) {
+        aaudioSampleRate = AAUDIO_SAMPLE_RATE_DEFAULT;
+    }
+    config.sample_rate = aaudioSampleRate;
+
+    int32_t aaudioSamplesPerFrame = configurationInput.getSamplesPerFrame();
+
+    if (direction == AAUDIO_DIRECTION_OUTPUT) {
+        config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
+                            ? AUDIO_CHANNEL_OUT_STEREO
+                            : audio_channel_out_mask_from_count(aaudioSamplesPerFrame);
+    } else if (direction == AAUDIO_DIRECTION_INPUT) {
+        config.channel_mask =  (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
+                            ? AUDIO_CHANNEL_IN_STEREO
+                            : audio_channel_in_mask_from_count(aaudioSamplesPerFrame);
+    } else {
+        ALOGE("openMmapStream - invalid direction = %d", direction);
+        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+    }
+
+    mMmapClient.packageName.setTo(String16("aaudio_service")); // FIXME what should we do here?
+
+    MmapStreamInterface::stream_direction_t streamDirection = (direction == AAUDIO_DIRECTION_OUTPUT)
+        ? MmapStreamInterface::DIRECTION_OUTPUT : MmapStreamInterface::DIRECTION_INPUT;
+
+    // Open HAL stream.
+    status_t status = MmapStreamInterface::openMmapStream(streamDirection,
+                                                          &attributes,
+                                                          &config,
+                                                          mMmapClient,
+                                                          &deviceId,
+                                                          mMmapStreamCallback,
+                                                          mMmapStream);
+    if (status != OK) {
+        ALOGE("openMmapStream returned status %d", status);
+        return AAUDIO_ERROR_UNAVAILABLE;
+    }
+
+    // Create MMAP/NOIRQ buffer.
+    int32_t minSizeFrames = configurationInput.getBufferCapacity();
+    if (minSizeFrames == 0) { // zero will get rejected
+        minSizeFrames = AAUDIO_BUFFER_CAPACITY_MIN;
+    }
+    status = mMmapStream->createMmapBuffer(minSizeFrames, &mMmapBufferinfo);
+    if (status != OK) {
+        ALOGE("%s: createMmapBuffer() returned status %d, return AAUDIO_ERROR_UNAVAILABLE",
+              __FILE__, status);
+        return AAUDIO_ERROR_UNAVAILABLE;
+    } else {
+        ALOGD("createMmapBuffer status %d shared_address = %p buffer_size %d burst_size %d",
+              status, mMmapBufferinfo.shared_memory_address,
+              mMmapBufferinfo.buffer_size_frames,
+              mMmapBufferinfo.burst_size_frames);
+    }
+
+    // Get information about the stream and pass it back to the caller.
+    mSamplesPerFrame = (direction == AAUDIO_DIRECTION_OUTPUT)
+                           ? audio_channel_count_from_out_mask(config.channel_mask)
+                           : audio_channel_count_from_in_mask(config.channel_mask);
+
+    mAudioDataFileDescriptor = mMmapBufferinfo.shared_memory_fd;
+    mFramesPerBurst = mMmapBufferinfo.burst_size_frames;
+    mCapacityInFrames = mMmapBufferinfo.buffer_size_frames;
+    mAudioFormat = AAudioConvert_androidToAAudioDataFormat(config.format);
+    mSampleRate = config.sample_rate;
+
+    // Fill in AAudioStreamConfiguration
+    configurationOutput.setSampleRate(mSampleRate);
+    configurationOutput.setSamplesPerFrame(mSamplesPerFrame);
+    configurationOutput.setAudioFormat(mAudioFormat);
+    configurationOutput.setDeviceId(deviceId);
+
+    return AAUDIO_OK;
+}
+
+
+/**
+ * Start the flow of data.
+ */
+aaudio_result_t AAudioServiceStreamMMAP::start() {
+    if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
+    aaudio_result_t result = mMmapStream->start(mMmapClient, &mPortHandle);
+    if (result != AAUDIO_OK) {
+        ALOGE("AAudioServiceStreamMMAP::start() mMmapStream->start() returned %d", result);
+        processError();
+    } else {
+        result = AAudioServiceStreamBase::start();
+    }
+    return result;
+}
+
+/**
+ * Stop the flow of data such that start() can resume with loss of data.
+ */
+aaudio_result_t AAudioServiceStreamMMAP::pause() {
+    if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
+
+    aaudio_result_t result1 = AAudioServiceStreamBase::pause();
+    aaudio_result_t result2 = mMmapStream->stop(mPortHandle);
+    mFramesRead.reset32();
+    return (result1 != AAUDIO_OK) ? result1 : result2;
+}
+
+/**
+ *  Discard any data held by the underlying HAL or Service.
+ */
+aaudio_result_t AAudioServiceStreamMMAP::flush() {
+    if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
+    // TODO how do we flush an MMAP/NOIRQ buffer? sync pointers?
+    ALOGD("AAudioServiceStreamMMAP::pause() send AAUDIO_SERVICE_EVENT_FLUSHED");
+    sendServiceEvent(AAUDIO_SERVICE_EVENT_FLUSHED);
+    mState = AAUDIO_STREAM_STATE_FLUSHED;
+    return AAUDIO_OK;
+}
+
+
+aaudio_result_t AAudioServiceStreamMMAP::getFreeRunningPosition(int64_t *positionFrames,
+                                                                int64_t *timeNanos) {
+    struct audio_mmap_position position;
+    if (mMmapStream == nullptr) {
+        processError();
+        return AAUDIO_ERROR_NULL;
+    }
+    status_t status = mMmapStream->getMmapPosition(&position);
+    if (status != OK) {
+        ALOGE("sendCurrentTimestamp(): getMmapPosition() returned %d", status);
+        processError();
+        return AAudioConvert_androidToAAudioResult(status);
+    } else {
+        mFramesRead.update32(position.position_frames);
+        *positionFrames = mFramesRead.get();
+        *timeNanos = position.time_nanoseconds;
+    }
+    return AAUDIO_OK;
+}
+
+void AAudioServiceStreamMMAP::onTearDown() {
+    ALOGD("AAudioServiceStreamMMAP::onTearDown() called - TODO");
+}
+
+void AAudioServiceStreamMMAP::onVolumeChanged(audio_channel_mask_t channels,
+                     android::Vector<float> values) {
+    // TODO do we really need a different volume for each channel?
+    float volume = values[0];
+    ALOGD("AAudioServiceStreamMMAP::onVolumeChanged() volume[0] = %f", volume);
+    sendServiceEvent(AAUDIO_SERVICE_EVENT_VOLUME, volume);
+}
+
+void AAudioServiceStreamMMAP::onRoutingChanged(audio_port_handle_t deviceId) {
+    ALOGD("AAudioServiceStreamMMAP::onRoutingChanged() called with %d, old = %d",
+          deviceId, mPortHandle);
+    if (mPortHandle > 0 && mPortHandle != deviceId) {
+        sendServiceEvent(AAUDIO_SERVICE_EVENT_DISCONNECTED);
+    }
+    mPortHandle = deviceId;
+}
+
+/**
+ * Get an immutable description of the data queue from the HAL.
+ */
+aaudio_result_t AAudioServiceStreamMMAP::getDownDataDescription(AudioEndpointParcelable &parcelable)
+{
+    // Gather information on the data queue based on HAL info.
+    int32_t bytesPerFrame = calculateBytesPerFrame();
+    int32_t capacityInBytes = mCapacityInFrames * bytesPerFrame;
+    int fdIndex = parcelable.addFileDescriptor(mAudioDataFileDescriptor, capacityInBytes);
+    parcelable.mDownDataQueueParcelable.setupMemory(fdIndex, 0, capacityInBytes);
+    parcelable.mDownDataQueueParcelable.setBytesPerFrame(bytesPerFrame);
+    parcelable.mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
+    parcelable.mDownDataQueueParcelable.setCapacityInFrames(mCapacityInFrames);
+    return AAUDIO_OK;
+}
\ No newline at end of file
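
getFreeRunningPosition() above feeds the HAL's wrapping 32-bit position into MonotonicCounter::update32() to obtain an ever-increasing 64-bit frame count. The following self-contained sketch shows the wrap-handling idea; the class and method names are illustrative and are not the MonotonicCounter implementation from libaaudio.

    #include <cstdint>
    #include <cstdio>

    // Extend a wrapping unsigned 32-bit counter to a monotonic 64-bit value.
    class Wrapping32To64 {
    public:
        void reset32(uint32_t counter32) { mLast32 = counter32; }  // set baseline, no advance
        int64_t update32(uint32_t counter32) {
            uint32_t delta = counter32 - mLast32;  // unsigned math handles the wrap
            mValue64 += delta;
            mLast32 = counter32;
            return mValue64;
        }
        int64_t get() const { return mValue64; }
    private:
        uint32_t mLast32 = 0;
        int64_t  mValue64 = 0;
    };

    int main() {
        Wrapping32To64 frames;
        frames.reset32(0xFFFFFF00u);   // HAL counter is close to the 32-bit limit
        frames.update32(0x00000100u);  // counter wrapped; forward delta is 0x200 frames
        std::printf("frames = %lld\n", static_cast<long long>(frames.get()));  // prints 512
        return 0;
    }
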
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.h b/services/oboeservice/AAudioServiceStreamMMAP.h
new file mode 100644
index 0000000..f121c5c
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamMMAP.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_SERVICE_STREAM_MMAP_H
+#define AAUDIO_AAUDIO_SERVICE_STREAM_MMAP_H
+
+#include <atomic>
+
+#include <media/audiohal/StreamHalInterface.h>
+#include <media/MmapStreamCallback.h>
+#include <media/MmapStreamInterface.h>
+#include <utils/RefBase.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+
+#include "binding/AAudioServiceMessage.h"
+#include "AAudioServiceStreamBase.h"
+#include "binding/AudioEndpointParcelable.h"
+#include "SharedMemoryProxy.h"
+#include "TimestampScheduler.h"
+#include "utility/MonotonicCounter.h"
+
+namespace aaudio {
+
+/**
+ * Manage one memory mapped buffer that originated from a HAL.
+ */
+class AAudioServiceStreamMMAP
+    : public AAudioServiceStreamBase
+    , public android::MmapStreamCallback {
+
+public:
+    AAudioServiceStreamMMAP();
+    virtual ~AAudioServiceStreamMMAP();
+
+
+    aaudio_result_t open(const aaudio::AAudioStreamRequest &request,
+                                 aaudio::AAudioStreamConfiguration &configurationOutput) override;
+
+    /**
+     * Start the flow of audio data.
+     *
+     * This is not guaranteed to be synchronous but it currently is.
+     * An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
+     */
+    aaudio_result_t start() override;
+
+    /**
+     * Stop the flow of data so that start() can resume without loss of data.
+     *
+     * This is not guaranteed to be synchronous but it currently is.
+     * An AAUDIO_SERVICE_EVENT_PAUSED will be sent to the client when complete.
+    */
+    aaudio_result_t pause() override;
+
+    /**
+     *  Discard any data held by the underlying HAL or Service.
+     *
+     * This is not guaranteed to be synchronous but it currently is.
+     * An AAUDIO_SERVICE_EVENT_FLUSHED will be sent to the client when complete.
+     */
+    aaudio_result_t flush() override;
+
+    aaudio_result_t close() override;
+
+    /**
+     * Send a MMAP/NOIRQ buffer timestamp to the client.
+     */
+    aaudio_result_t sendCurrentTimestamp();
+
+    // -------------- Callback functions ---------------------
+    void onTearDown() override;
+
+    void onVolumeChanged(audio_channel_mask_t channels,
+                         android::Vector<float> values) override;
+
+    void onRoutingChanged(audio_port_handle_t deviceId) override;
+
+protected:
+
+    aaudio_result_t getDownDataDescription(AudioEndpointParcelable &parcelable) override;
+
+    aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
+
+private:
+    // This proxy class was needed to prevent a crash in AudioFlinger
+    // when the stream was closed.
+    class MyMmapStreamCallback : public android::MmapStreamCallback {
+    public:
+        explicit MyMmapStreamCallback(android::MmapStreamCallback &serviceCallback)
+            : mServiceCallback(serviceCallback){}
+        virtual ~MyMmapStreamCallback() = default;
+
+        void onTearDown() override {
+            mServiceCallback.onTearDown();
+        };
+
+        void onVolumeChanged(audio_channel_mask_t channels, android::Vector<float> values) override
+        {
+            mServiceCallback.onVolumeChanged(channels, values);
+        };
+
+        void onRoutingChanged(audio_port_handle_t deviceId) override {
+            mServiceCallback.onRoutingChanged(deviceId);
+        };
+
+    private:
+        android::MmapStreamCallback &mServiceCallback;
+    };
+
+    android::sp<MyMmapStreamCallback>   mMmapStreamCallback;
+    MonotonicCounter                    mFramesWritten;
+    MonotonicCounter                    mFramesRead;
+    int32_t                             mPreviousFrameCounter = 0;   // from HAL
+
+    // Interface to the AudioFlinger MMAP support.
+    android::sp<android::MmapStreamInterface> mMmapStream;
+    struct audio_mmap_buffer_info             mMmapBufferinfo;
+    android::MmapStreamInterface::Client      mMmapClient;
+    audio_port_handle_t                       mPortHandle = -1; // TODO review best default
+};
+
+} // namespace aaudio
+
+#endif //AAUDIO_AAUDIO_SERVICE_STREAM_MMAP_H
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
new file mode 100644
index 0000000..cd9336b
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <mutex>
+
+#include <aaudio/AAudio.h>
+
+#include "binding/IAAudioService.h"
+
+#include "binding/AAudioServiceMessage.h"
+#include "AAudioServiceStreamBase.h"
+#include "AAudioServiceStreamShared.h"
+#include "AAudioEndpointManager.h"
+#include "AAudioService.h"
+#include "AAudioServiceEndpoint.h"
+
+using namespace android;
+using namespace aaudio;
+
+#define MIN_BURSTS_PER_BUFFER   2
+#define MAX_BURSTS_PER_BUFFER   32
+
+AAudioServiceStreamShared::AAudioServiceStreamShared(AAudioService &audioService)
+    : mAudioService(audioService) {
+}
+
+AAudioServiceStreamShared::~AAudioServiceStreamShared() {
+    close();
+}
+
+aaudio_result_t AAudioServiceStreamShared::open(const aaudio::AAudioStreamRequest &request,
+                     aaudio::AAudioStreamConfiguration &configurationOutput)  {
+
+    aaudio_result_t result = AAudioServiceStreamBase::open(request, configurationOutput);
+    if (result != AAUDIO_OK) {
+        ALOGE("AAudioServiceStreamBase open returned %d", result);
+        return result;
+    }
+
+    const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
+    int32_t deviceId = configurationInput.getDeviceId();
+    aaudio_direction_t direction = request.getDirection();
+
+    ALOGD("AAudioServiceStreamShared::open(), direction = %d", direction);
+    AAudioEndpointManager &mEndpointManager = AAudioEndpointManager::getInstance();
+    mServiceEndpoint = mEndpointManager.findEndpoint(mAudioService, deviceId, direction);
+    ALOGD("AAudioServiceStreamShared::open(), mServiceEndPoint = %p", mServiceEndpoint);
+    if (mServiceEndpoint == nullptr) {
+        return AAUDIO_ERROR_UNAVAILABLE;
+    }
+
+    // Is the request compatible with the shared endpoint?
+    mAudioFormat = configurationInput.getAudioFormat();
+    if (mAudioFormat == AAUDIO_FORMAT_UNSPECIFIED) {
+        mAudioFormat = AAUDIO_FORMAT_PCM_FLOAT;
+    } else if (mAudioFormat != AAUDIO_FORMAT_PCM_FLOAT) {
+        return AAUDIO_ERROR_INVALID_FORMAT;
+    }
+
+    mSampleRate = configurationInput.getSampleRate();
+    if (mSampleRate == AAUDIO_UNSPECIFIED) {
+        mSampleRate = mServiceEndpoint->getSampleRate();
+    } else if (mSampleRate != mServiceEndpoint->getSampleRate()) {
+        return AAUDIO_ERROR_INVALID_RATE;
+    }
+
+    mSamplesPerFrame = configurationInput.getSamplesPerFrame();
+    if (mSamplesPerFrame == AAUDIO_UNSPECIFIED) {
+        mSamplesPerFrame = mServiceEndpoint->getSamplesPerFrame();
+    } else if (mSamplesPerFrame != mServiceEndpoint->getSamplesPerFrame()) {
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+
+    // Determine this stream's shared memory buffer capacity.
+    mFramesPerBurst = mServiceEndpoint->getFramesPerBurst();
+    int32_t minCapacityFrames = configurationInput.getBufferCapacity();
+    int32_t numBursts = (minCapacityFrames + mFramesPerBurst - 1) / mFramesPerBurst;
+    if (numBursts < MIN_BURSTS_PER_BUFFER) {
+        numBursts = MIN_BURSTS_PER_BUFFER;
+    } else if (numBursts > MAX_BURSTS_PER_BUFFER) {
+        numBursts = MAX_BURSTS_PER_BUFFER;
+    }
+    mCapacityInFrames = numBursts * mFramesPerBurst;
+    ALOGD("AAudioServiceStreamShared::open(), mCapacityInFrames = %d", mCapacityInFrames);
+
+    // Create audio data shared memory buffer for client.
+    mAudioDataQueue = new SharedRingBuffer();
+    mAudioDataQueue->allocate(calculateBytesPerFrame(), mCapacityInFrames);
+
+    // Fill in configuration for client.
+    configurationOutput.setSampleRate(mSampleRate);
+    configurationOutput.setSamplesPerFrame(mSamplesPerFrame);
+    configurationOutput.setAudioFormat(mAudioFormat);
+    configurationOutput.setDeviceId(deviceId);
+
+    mServiceEndpoint->registerStream(this);
+
+    return AAUDIO_OK;
+}
+
+/**
+ * Start the flow of audio data.
+ *
+ * An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
+ */
+aaudio_result_t AAudioServiceStreamShared::start()  {
+    // Add this stream to the mixer.
+    aaudio_result_t result = mServiceEndpoint->startStream(this);
+    if (result != AAUDIO_OK) {
+        ALOGE("AAudioServiceStreamShared::start() mServiceEndpoint returned %d", result);
+        processError();
+    } else {
+        result = AAudioServiceStreamBase::start();
+    }
+    return result;
+}
+
+/**
+ * Stop the flow of data so that start() can resume without loss of data.
+ *
+ * An AAUDIO_SERVICE_EVENT_PAUSED will be sent to the client when complete.
+*/
+aaudio_result_t AAudioServiceStreamShared::pause()  {
+    // Remove this stream from the mixer.
+    aaudio_result_t result = mServiceEndpoint->stopStream(this);
+    if (result != AAUDIO_OK) {
+        ALOGE("AAudioServiceStreamShared::pause() mServiceEndpoint returned %d", result);
+        processError();
+    } else {
+        result = AAudioServiceStreamBase::pause();
+    }
+    return result;
+}
+
+/**
+ *  Discard any data held by the underlying HAL or Service.
+ *
+ * An AAUDIO_SERVICE_EVENT_FLUSHED will be sent to the client when complete.
+ */
+aaudio_result_t AAudioServiceStreamShared::flush()  {
+    // TODO make sure we are paused
+    return AAUDIO_OK;
+}
+
+aaudio_result_t AAudioServiceStreamShared::close()  {
+    pause();
+    // TODO wait for pause() to synchronize
+    mServiceEndpoint->unregisterStream(this);
+    mServiceEndpoint->close();
+    mServiceEndpoint = nullptr;
+    return AAudioServiceStreamBase::close();
+}
+
+/**
+ * Get an immutable description of the data queue created by this service.
+ */
+aaudio_result_t AAudioServiceStreamShared::getDownDataDescription(AudioEndpointParcelable &parcelable)
+{
+    // Gather information on the data queue.
+    mAudioDataQueue->fillParcelable(parcelable,
+                                    parcelable.mDownDataQueueParcelable);
+    parcelable.mDownDataQueueParcelable.setFramesPerBurst(getFramesPerBurst());
+    return AAUDIO_OK;
+}
+
+void AAudioServiceStreamShared::onStop() {
+}
+
+void AAudioServiceStreamShared::onDisconnect() {
+    mServiceEndpoint->close();
+    mServiceEndpoint = nullptr;
+}
+
+
+aaudio_result_t AAudioServiceStreamShared::getFreeRunningPosition(int64_t *positionFrames,
+                                                                int64_t *timeNanos) {
+    *positionFrames = mAudioDataQueue->getFifoBuffer()->getReadCounter();
+    *timeNanos = AudioClock::getNanoseconds();
+    return AAUDIO_OK;
+}
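
The capacity calculation in AAudioServiceStreamShared::open() above rounds the requested capacity up to a whole number of bursts and clamps the burst count before allocating the shared ring buffer. A tiny standalone sketch of that sizing rule follows; the helper name is invented and the default limits mirror MIN_BURSTS_PER_BUFFER and MAX_BURSTS_PER_BUFFER.

    #include <cstdio>

    // Round a requested capacity up to whole bursts, clamped to [minBursts, maxBursts].
    static int calculateCapacityInFrames(int minCapacityFrames, int framesPerBurst,
                                         int minBursts = 2, int maxBursts = 32) {
        int numBursts = (minCapacityFrames + framesPerBurst - 1) / framesPerBurst; // ceiling
        if (numBursts < minBursts) numBursts = minBursts;
        if (numBursts > maxBursts) numBursts = maxBursts;
        return numBursts * framesPerBurst;
    }

    int main() {
        // 1000 frames with 192-frame bursts rounds up to 6 bursts = 1152 frames.
        std::printf("%d\n", calculateCapacityInFrames(1000, 192));
        // 0 frames still gets the 2-burst minimum = 384 frames.
        std::printf("%d\n", calculateCapacityInFrames(0, 192));
        return 0;
    }
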
diff --git a/services/oboeservice/AAudioServiceStreamShared.h b/services/oboeservice/AAudioServiceStreamShared.h
new file mode 100644
index 0000000..f6df7ce
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamShared.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_SERVICE_STREAM_SHARED_H
+#define AAUDIO_AAUDIO_SERVICE_STREAM_SHARED_H
+
+#include "fifo/FifoBuffer.h"
+#include "binding/AAudioServiceMessage.h"
+#include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioStreamConfiguration.h"
+
+#include "AAudioService.h"
+#include "AAudioServiceStreamBase.h"
+
+namespace aaudio {
+
+// We expect the queue to only have a few commands.
+// This should be way more than we need.
+#define QUEUE_UP_CAPACITY_COMMANDS (128)
+
+class AAudioEndpointManager;
+class AAudioServiceEndpoint;
+class SharedRingBuffer;
+
+/**
+ * One of these is created for every MODE_SHARED stream in the AAudioService.
+ *
+ * Each Shared stream will register itself with an AAudioServiceEndpoint when it is opened.
+ */
+class AAudioServiceStreamShared : public AAudioServiceStreamBase {
+
+public:
+    AAudioServiceStreamShared(android::AAudioService &aAudioService);
+    virtual ~AAudioServiceStreamShared();
+
+    aaudio_result_t open(const aaudio::AAudioStreamRequest &request,
+                         aaudio::AAudioStreamConfiguration &configurationOutput) override;
+
+    /**
+     * Start the flow of audio data.
+     *
+     * This is not guaranteed to be synchronous but it currently is.
+     * An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
+     */
+    aaudio_result_t start() override;
+
+    /**
+     * Stop the flow of data so that start() can resume without loss of data.
+     *
+     * This is not guaranteed to be synchronous but it currently is.
+     * An AAUDIO_SERVICE_EVENT_PAUSED will be sent to the client when complete.
+    */
+    aaudio_result_t pause() override;
+
+    /**
+     *  Discard any data held by the underlying HAL or Service.
+     *
+     * This is not guaranteed to be synchronous but it currently is.
+     * An AAUDIO_SERVICE_EVENT_FLUSHED will be sent to the client when complete.
+     */
+    aaudio_result_t flush() override;
+
+    aaudio_result_t close() override;
+
+    android::FifoBuffer *getDataFifoBuffer() { return mAudioDataQueue->getFifoBuffer(); }
+
+    void onStop();
+
+    void onDisconnect();
+
+protected:
+
+    aaudio_result_t getDownDataDescription(AudioEndpointParcelable &parcelable) override;
+
+    aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
+
+private:
+    android::AAudioService  &mAudioService;
+    AAudioServiceEndpoint   *mServiceEndpoint = nullptr;
+    SharedRingBuffer        *mAudioDataQueue;
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_AAUDIO_SERVICE_STREAM_SHARED_H
diff --git a/services/oboeservice/AAudioThread.cpp b/services/oboeservice/AAudioThread.cpp
index f5e5784..b1b563d 100644
--- a/services/oboeservice/AAudioThread.cpp
+++ b/services/oboeservice/AAudioThread.cpp
@@ -21,14 +21,17 @@
 #include <pthread.h>
 
 #include <aaudio/AAudioDefinitions.h>
+#include <utility/AAudioUtilities.h>
 
 #include "AAudioThread.h"
 
 using namespace aaudio;
 
 
-AAudioThread::AAudioThread() {
-    // mThread is a pthread_t of unknown size so we need memset.
+AAudioThread::AAudioThread()
+    : mRunnable(nullptr)
+    , mHasThread(false) {
+    // mThread is a pthread_t of unknown size so we need memset().
     memset(&mThread, 0, sizeof(mThread));
 }
 
@@ -50,14 +53,16 @@
 
 aaudio_result_t AAudioThread::start(Runnable *runnable) {
     if (mHasThread) {
+        ALOGE("AAudioThread::start() - mHasThread.load() already true");
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    mRunnable = runnable; // TODO use atomic?
+    // mRunnable will be read by the new thread when it starts.
+    // pthread_create() forces a memory synchronization so mRunnable does not need to be atomic.
+    mRunnable = runnable;
     int err = pthread_create(&mThread, nullptr, AAudioThread_internalThreadProc, this);
     if (err != 0) {
-        ALOGE("AAudioThread::pthread_create() returned %d", err);
-        // TODO convert errno to aaudio_result_t
-        return AAUDIO_ERROR_INTERNAL;
+        ALOGE("AAudioThread::start() - pthread_create() returned %d %s", err, strerror(err));
+        return AAudioConvert_androidToAAudioResult(-err);
     } else {
         mHasThread = true;
         return AAUDIO_OK;
@@ -70,7 +75,11 @@
     }
     int err = pthread_join(mThread, nullptr);
     mHasThread = false;
-    // TODO convert errno to aaudio_result_t
-    return err ? AAUDIO_ERROR_INTERNAL : AAUDIO_OK;
+    if (err != 0) {
+        ALOGE("AAudioThread::stop() - pthread_join() returned %d %s", err, strerror(err));
+        return AAudioConvert_androidToAAudioResult(-err);
+    } else {
+        return AAUDIO_OK;
+    }
 }
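
pthread_create() and pthread_join() report failure by returning a positive errno value rather than setting errno, which is why the code above negates err before passing it to AAudioConvert_androidToAAudioResult(). A minimal sketch of that convention, using a stand-in conversion function rather than the real AAudioUtilities helper:

    #include <cerrno>
    #include <cstdio>
    #include <cstring>
    #include <pthread.h>

    // Stand-in result convention: 0 on success, negative error codes otherwise.
    static int convertErrnoToResult(int negativeErrno) {
        switch (negativeErrno) {
            case 0:       return 0;
            case -EAGAIN: return -2;   // e.g. a "would block" style code
            case -EINVAL: return -3;   // e.g. an "illegal argument" style code
            default:      return -1;   // generic internal error
        }
    }

    static void *threadProc(void *) { return nullptr; }

    int main() {
        pthread_t thread;
        int err = pthread_create(&thread, nullptr, threadProc, nullptr);
        if (err != 0) {
            // pthread functions return the errno value directly; negate it before converting.
            std::printf("create failed: %d %s -> result %d\n",
                        err, std::strerror(err), convertErrnoToResult(-err));
            return 1;
        }
        pthread_join(thread, nullptr);
        return 0;
    }
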
 
diff --git a/services/oboeservice/AAudioThread.h b/services/oboeservice/AAudioThread.h
index a5d43a4..dd9f640 100644
--- a/services/oboeservice/AAudioThread.h
+++ b/services/oboeservice/AAudioThread.h
@@ -24,16 +24,20 @@
 
 namespace aaudio {
 
+/**
+ * Abstract class similar to Java Runnable.
+ */
 class Runnable {
 public:
     Runnable() {};
     virtual ~Runnable() = default;
 
-    virtual void run() {}
+    virtual void run() = 0;
 };
 
 /**
- * Abstraction for a host thread.
+ * Abstraction for a host dependent thread.
+ * TODO Consider using Android "Thread" class or std::thread instead.
  */
 class AAudioThread
 {
@@ -62,9 +66,9 @@
     void dispatch(); // called internally from 'C' thread wrapper
 
 private:
-    Runnable*          mRunnable = nullptr; // TODO make atomic with memory barrier?
-    bool               mHasThread = false;
-    pthread_t          mThread; // initialized in constructor
+    Runnable    *mRunnable;
+    bool         mHasThread;
+    pthread_t    mThread; // initialized in constructor
 
 };
 
diff --git a/services/oboeservice/Android.mk b/services/oboeservice/Android.mk
index 5cd9121..a9c80ae 100644
--- a/services/oboeservice/Android.mk
+++ b/services/oboeservice/Android.mk
@@ -3,52 +3,54 @@
 # AAudio Service
 include $(CLEAR_VARS)
 
-LOCAL_MODULE := aaudioservice
+LOCAL_MODULE := libaaudioservice
 LOCAL_MODULE_TAGS := optional
 
 LIBAAUDIO_DIR := ../../media/libaaudio
 LIBAAUDIO_SRC_DIR := $(LIBAAUDIO_DIR)/src
 
 LOCAL_C_INCLUDES := \
+    $(TOPDIR)frameworks/av/services/audioflinger \
     $(call include-path-for, audio-utils) \
     frameworks/native/include \
     system/core/base/include \
     $(TOP)/frameworks/native/media/libaaudio/include/include \
     $(TOP)/frameworks/av/media/libaaudio/include \
+    $(TOP)/frameworks/av/media/utils/include \
     frameworks/native/include \
     $(TOP)/external/tinyalsa/include \
-    $(TOP)/frameworks/av/media/libaaudio/src \
-    $(TOP)/frameworks/av/media/libaaudio/src/binding \
-    $(TOP)/frameworks/av/media/libaaudio/src/client \
-    $(TOP)/frameworks/av/media/libaaudio/src/core \
-    $(TOP)/frameworks/av/media/libaaudio/src/fifo \
-    $(TOP)/frameworks/av/media/libaaudio/src/utility
+    $(TOP)/frameworks/av/media/libaaudio/src
 
-# TODO These could be in a libaaudio_common library
 LOCAL_SRC_FILES += \
     $(LIBAAUDIO_SRC_DIR)/utility/HandleTracker.cpp \
-    $(LIBAAUDIO_SRC_DIR)/utility/AAudioUtilities.cpp \
-    $(LIBAAUDIO_SRC_DIR)/fifo/FifoBuffer.cpp \
-    $(LIBAAUDIO_SRC_DIR)/fifo/FifoControllerBase.cpp \
-    $(LIBAAUDIO_SRC_DIR)/binding/SharedMemoryParcelable.cpp \
-    $(LIBAAUDIO_SRC_DIR)/binding/SharedRegionParcelable.cpp \
-    $(LIBAAUDIO_SRC_DIR)/binding/RingBufferParcelable.cpp \
-    $(LIBAAUDIO_SRC_DIR)/binding/AudioEndpointParcelable.cpp \
-    $(LIBAAUDIO_SRC_DIR)/binding/AAudioStreamRequest.cpp \
-    $(LIBAAUDIO_SRC_DIR)/binding/AAudioStreamConfiguration.cpp \
-    $(LIBAAUDIO_SRC_DIR)/binding/IAAudioService.cpp \
+    SharedMemoryProxy.cpp \
     SharedRingBuffer.cpp \
-    FakeAudioHal.cpp \
+    AAudioEndpointManager.cpp \
+    AAudioMixer.cpp \
     AAudioService.cpp \
+    AAudioServiceEndpoint.cpp \
     AAudioServiceStreamBase.cpp \
-    AAudioServiceStreamFakeHal.cpp \
+    AAudioServiceStreamMMAP.cpp \
+    AAudioServiceStreamShared.cpp \
     TimestampScheduler.cpp \
-    AAudioServiceMain.cpp \
     AAudioThread.cpp
 
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
+# LOCAL_CFLAGS += -fvisibility=hidden
 LOCAL_CFLAGS += -Wno-unused-parameter
 LOCAL_CFLAGS += -Wall -Werror
 
-LOCAL_SHARED_LIBRARIES :=  libbinder libcutils libutils liblog libtinyalsa
+LOCAL_SHARED_LIBRARIES :=  \
+    libaaudio \
+    libaudioflinger \
+    libbinder \
+    libcutils \
+    libmediautils \
+    libutils \
+    liblog \
+    libtinyalsa
 
-include $(BUILD_EXECUTABLE)
+include $(BUILD_SHARED_LIBRARY)
diff --git a/services/oboeservice/FakeAudioHal.cpp b/services/oboeservice/FakeAudioHal.cpp
deleted file mode 100644
index 122671e..0000000
--- a/services/oboeservice/FakeAudioHal.cpp
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Simple fake HAL that supports ALSA MMAP/NOIRQ mode.
- */
-
-#include <iostream>
-#include <math.h>
-#include <limits>
-#include <string.h>
-#include <unistd.h>
-
-#include <sound/asound.h>
-
-#include "tinyalsa/asoundlib.h"
-
-#include "FakeAudioHal.h"
-
-//using namespace aaudio;
-
-using sample_t = int16_t;
-using std::cout;
-using std::endl;
-
-#undef SNDRV_PCM_IOCTL_SYNC_PTR
-#define SNDRV_PCM_IOCTL_SYNC_PTR 0xc0884123
-#define PCM_ERROR_MAX 128
-
-const int SAMPLE_RATE = 48000;       // Hz
-const int CHANNEL_COUNT = 2;
-
-struct pcm {
-    int fd;
-    unsigned int flags;
-    int running:1;
-    int prepared:1;
-    int underruns;
-    unsigned int buffer_size;
-    unsigned int boundary;
-    char error[PCM_ERROR_MAX];
-    struct pcm_config config;
-    struct snd_pcm_mmap_status *mmap_status;
-    struct snd_pcm_mmap_control *mmap_control;
-    struct snd_pcm_sync_ptr *sync_ptr;
-    void *mmap_buffer;
-    unsigned int noirq_frames_per_msec;
-    int wait_for_avail_min;
-};
-
-static int pcm_sync_ptr(struct pcm *pcm, int flags) {
-    if (pcm->sync_ptr) {
-        pcm->sync_ptr->flags = flags;
-        if (ioctl(pcm->fd, SNDRV_PCM_IOCTL_SYNC_PTR, pcm->sync_ptr) < 0)
-            return -1;
-    }
-    return 0;
-}
-
-int pcm_get_hw_ptr(struct pcm* pcm, unsigned int* hw_ptr) {
-    if (!hw_ptr || !pcm) return -EINVAL;
-
-    int result = pcm_sync_ptr(pcm, SNDRV_PCM_SYNC_PTR_HWSYNC);
-    if (!result) {
-        *hw_ptr = pcm->sync_ptr->s.status.hw_ptr;
-    }
-
-    return result;
-}
-
-typedef struct stream_tracker {
-    struct pcm * pcm;
-    int          framesPerBurst;
-    sample_t   * hwBuffer;
-    int32_t      capacityInFrames;
-    int32_t      capacityInBytes;
-} stream_tracker_t;
-
-#define FRAMES_PER_BURST_QUALCOMM 192
-#define FRAMES_PER_BURST_NVIDIA   128
-
-int fake_hal_open(int card_id, int device_id,
-                  int frameCapacity,
-                  fake_hal_stream_ptr *streamPP) {
-    int framesPerBurst = FRAMES_PER_BURST_QUALCOMM; // TODO update as needed
-    int periodCountRequested = frameCapacity / framesPerBurst;
-    int periodCount = 32;
-    unsigned int offset1;
-    unsigned int frames1;
-    void *area = nullptr;
-    int mmapAvail = 0;
-
-    // Try to match requested size with a power of 2.
-    while (periodCount < periodCountRequested && periodCount < 1024) {
-        periodCount *= 2;
-    }
-    std::cout << "fake_hal_open() requested frameCapacity = " << frameCapacity << std::endl;
-    std::cout << "fake_hal_open() periodCountRequested = " << periodCountRequested << std::endl;
-    std::cout << "fake_hal_open() periodCount = " << periodCount << std::endl;
-
-    // Configuration for an ALSA stream.
-    pcm_config cfg;
-    memset(&cfg, 0, sizeof(cfg));
-    cfg.channels = CHANNEL_COUNT;
-    cfg.format = PCM_FORMAT_S16_LE;
-    cfg.rate = SAMPLE_RATE;
-    cfg.period_count = periodCount;
-    cfg.period_size = framesPerBurst;
-    cfg.start_threshold = 0; // for NOIRQ, should just start, was     framesPerBurst;
-    cfg.stop_threshold = INT32_MAX;
-    cfg.silence_size = 0;
-    cfg.silence_threshold = 0;
-    cfg.avail_min = framesPerBurst;
-
-    stream_tracker_t *streamTracker = (stream_tracker_t *) malloc(sizeof(stream_tracker_t));
-    if (streamTracker == nullptr) {
-        return -1;
-    }
-    memset(streamTracker, 0, sizeof(stream_tracker_t));
-
-    streamTracker->pcm = pcm_open(card_id, device_id, PCM_OUT | PCM_MMAP | PCM_NOIRQ, &cfg);
-    if (streamTracker->pcm == nullptr) {
-        cout << "Could not open device." << endl;
-        free(streamTracker);
-        return -1;
-    }
-
-    streamTracker->framesPerBurst = cfg.period_size; // Get from ALSA
-    streamTracker->capacityInFrames = pcm_get_buffer_size(streamTracker->pcm);
-    streamTracker->capacityInBytes = pcm_frames_to_bytes(streamTracker->pcm, streamTracker->capacityInFrames);
-    std::cout << "fake_hal_open() streamTracker->framesPerBurst = " << streamTracker->framesPerBurst << std::endl;
-    std::cout << "fake_hal_open() streamTracker->capacityInFrames = " << streamTracker->capacityInFrames << std::endl;
-
-    if (pcm_is_ready(streamTracker->pcm) < 0) {
-        cout << "Device is not ready." << endl;
-        goto error;
-    }
-
-    if (pcm_prepare(streamTracker->pcm) < 0) {
-        cout << "Device could not be prepared." << endl;
-        cout << "For Marlin, please enter:" << endl;
-        cout << "   adb shell" << endl;
-        cout << "   tinymix \"QUAT_MI2S_RX Audio Mixer MultiMedia8\" 1" << endl;
-        goto error;
-    }
-    mmapAvail = pcm_mmap_avail(streamTracker->pcm);
-    if (mmapAvail <= 0) {
-        cout << "fake_hal_open() mmap_avail is <=0" << endl;
-        goto error;
-    }
-    cout << "fake_hal_open() mmap_avail = " << mmapAvail << endl;
-
-    // Where is the memory mapped area?
-    if (pcm_mmap_begin(streamTracker->pcm, &area, &offset1, &frames1) < 0)  {
-        cout << "fake_hal_open() pcm_mmap_begin failed" << endl;
-        goto error;
-    }
-
-    // Clear the buffer.
-    memset((sample_t*) area, 0, streamTracker->capacityInBytes);
-    streamTracker->hwBuffer = (sample_t*) area;
-    streamTracker->hwBuffer[0] = 32000; // impulse
-
-    // Prime the buffer so it can start.
-    if (pcm_mmap_commit(streamTracker->pcm, 0, framesPerBurst) < 0) {
-        cout << "fake_hal_open() pcm_mmap_commit failed" << endl;
-        goto error;
-    }
-
-    *streamPP = streamTracker;
-    return 1;
-
-error:
-    fake_hal_close(streamTracker);
-    return -1;
-}
-
-int fake_hal_get_mmap_info(fake_hal_stream_ptr stream, mmap_buffer_info *info) {
-    stream_tracker_t *streamTracker = (stream_tracker_t *) stream;
-    info->fd = streamTracker->pcm->fd; // TODO use tinyalsa function
-    info->hw_buffer = streamTracker->hwBuffer;
-    info->burst_size_in_frames = streamTracker->framesPerBurst;
-    info->buffer_capacity_in_frames = streamTracker->capacityInFrames;
-    info->buffer_capacity_in_bytes = streamTracker->capacityInBytes;
-    info->sample_rate = SAMPLE_RATE;
-    info->channel_count = CHANNEL_COUNT;
-    return 0;
-}
-
-int fake_hal_start(fake_hal_stream_ptr stream) {
-    stream_tracker_t *streamTracker = (stream_tracker_t *) stream;
-    if (pcm_start(streamTracker->pcm) < 0) {
-        cout << "fake_hal_start failed" << endl;
-        return -1;
-    }
-    return 0;
-}
-
-int fake_hal_pause(fake_hal_stream_ptr stream) {
-    stream_tracker_t *streamTracker = (stream_tracker_t *) stream;
-    if (pcm_stop(streamTracker->pcm) < 0) {
-        cout << "fake_hal_stop failed" << endl;
-        return -1;
-    }
-    return 0;
-}
-
-int fake_hal_get_frame_counter(fake_hal_stream_ptr stream, int *frame_counter) {
-    stream_tracker_t *streamTracker = (stream_tracker_t *) stream;
-    if (pcm_get_hw_ptr(streamTracker->pcm, (unsigned int *)frame_counter) < 0) {
-        cout << "fake_hal_get_frame_counter failed" << endl;
-        return -1;
-    }
-    return 0;
-}
-
-int fake_hal_close(fake_hal_stream_ptr stream) {
-    stream_tracker_t *streamTracker = (stream_tracker_t *) stream;
-    pcm_close(streamTracker->pcm);
-    free(streamTracker);
-    return 0;
-}
-
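
For reference, the heart of the removed file is the SNDRV_PCM_IOCTL_SYNC_PTR polling that pcm_get_hw_ptr() wrapped by reaching into tinyalsa's private struct pcm. A minimal sketch of the same query against a raw ALSA PCM file descriptor is shown below; it assumes the kernel UAPI header sound/asound.h and a descriptor opened elsewhere, and is illustrative rather than a drop-in replacement for the deleted code.

    #include <errno.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sound/asound.h>

    // Ask the driver to refresh its hardware pointer and report it.
    // pcmFd is a hypothetical, already-opened ALSA PCM file descriptor.
    static int queryHardwarePointer(int pcmFd, unsigned long *hwPtr) {
        struct snd_pcm_sync_ptr syncPtr;
        memset(&syncPtr, 0, sizeof(syncPtr));
        syncPtr.flags = SNDRV_PCM_SYNC_PTR_HWSYNC; // hwsync before reading status
        if (ioctl(pcmFd, SNDRV_PCM_IOCTL_SYNC_PTR, &syncPtr) < 0) {
            return -errno;
        }
        *hwPtr = syncPtr.s.status.hw_ptr; // frames consumed by the DMA engine
        return 0;
    }
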
diff --git a/services/oboeservice/FakeAudioHal.h b/services/oboeservice/FakeAudioHal.h
deleted file mode 100644
index d3aa4e8..0000000
--- a/services/oboeservice/FakeAudioHal.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Simple fake HAL that supports ALSA MMAP/NOIRQ mode.
- */
-
-#ifndef FAKE_AUDIO_HAL_H
-#define FAKE_AUDIO_HAL_H
-
-//namespace aaudio {
-
-using sample_t = int16_t;
-struct mmap_buffer_info {
-    int       fd;
-    int32_t   burst_size_in_frames;
-    int32_t   buffer_capacity_in_frames;
-    int32_t   buffer_capacity_in_bytes;
-    int32_t   sample_rate;
-    int32_t   channel_count;
-    sample_t *hw_buffer;
-};
-
-typedef void *fake_hal_stream_ptr;
-
-//extern "C"
-//{
-
-int fake_hal_open(int card_id, int device_id,
-                  int frameCapacity,
-                  fake_hal_stream_ptr *stream_pp);
-
-int fake_hal_get_mmap_info(fake_hal_stream_ptr stream, mmap_buffer_info *info);
-
-int fake_hal_start(fake_hal_stream_ptr stream);
-
-int fake_hal_pause(fake_hal_stream_ptr stream);
-
-int fake_hal_get_frame_counter(fake_hal_stream_ptr stream, int *frame_counter);
-
-int fake_hal_close(fake_hal_stream_ptr stream);
-
-//} /* "C" */
-
-//} /* namespace aaudio */
-
-#endif // FAKE_AUDIO_HAL_H
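
The deleted header declared a small C-style API. Purely for illustration, here is a sketch of how a test harness might have driven it end to end; the card/device IDs and frame capacity are hypothetical, and the code targets the declarations above, which no longer exist in the tree.

    #include <iostream>
    #include "FakeAudioHal.h"

    int main() {
        fake_hal_stream_ptr stream = nullptr;
        // Hypothetical card/device IDs and buffer capacity.
        if (fake_hal_open(/*card_id=*/0, /*device_id=*/0, /*frameCapacity=*/8192, &stream) < 0) {
            std::cout << "fake_hal_open failed" << std::endl;
            return 1;
        }
        mmap_buffer_info info;
        if (fake_hal_get_mmap_info(stream, &info) == 0) {
            std::cout << "burst = " << info.burst_size_in_frames
                      << " frames, capacity = " << info.buffer_capacity_in_frames
                      << " frames" << std::endl;
        }
        fake_hal_start(stream);
        int frameCounter = 0;
        fake_hal_get_frame_counter(stream, &frameCounter); // hardware read position
        fake_hal_pause(stream);
        fake_hal_close(stream);
        return 0;
    }
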
diff --git a/services/oboeservice/SharedMemoryProxy.cpp b/services/oboeservice/SharedMemoryProxy.cpp
new file mode 100644
index 0000000..83ae1d4
--- /dev/null
+++ b/services/oboeservice/SharedMemoryProxy.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <aaudio/AAudioDefinitions.h>
+#include "SharedMemoryProxy.h"
+
+using namespace android;
+using namespace aaudio;
+
+SharedMemoryProxy::~SharedMemoryProxy()
+{
+    if (mOriginalSharedMemory != nullptr) {
+        munmap(mOriginalSharedMemory, mSharedMemorySizeInBytes);
+        mOriginalSharedMemory = nullptr;
+    }
+    if (mProxySharedMemory != nullptr) {
+        munmap(mProxySharedMemory, mSharedMemorySizeInBytes);
+        close(mProxyFileDescriptor);
+        mProxySharedMemory = nullptr;
+    }
+}
+
+aaudio_result_t SharedMemoryProxy::open(int originalFD, int32_t capacityInBytes) {
+    mOriginalFileDescriptor = originalFD;
+    mSharedMemorySizeInBytes = capacityInBytes;
+
+    mProxyFileDescriptor = ashmem_create_region("AAudioProxyDataBuffer", mSharedMemorySizeInBytes);
+    if (mProxyFileDescriptor < 0) {
+        ALOGE("SharedMemoryProxy::open() ashmem_create_region() failed %d", errno);
+        return AAUDIO_ERROR_INTERNAL;
+    }
+    int err = ashmem_set_prot_region(mProxyFileDescriptor, PROT_READ|PROT_WRITE);
+    if (err < 0) {
+        ALOGE("SharedMemoryProxy::open() ashmem_set_prot_region() failed %d", errno);
+        close(mProxyFileDescriptor);
+        mProxyFileDescriptor = -1;
+        return AAUDIO_ERROR_INTERNAL; // TODO convert errno to a better AAUDIO_ERROR;
+    }
+
+    // Get original memory address.
+    mOriginalSharedMemory = (uint8_t *) mmap(0, mSharedMemorySizeInBytes,
+                         PROT_READ|PROT_WRITE,
+                         MAP_SHARED,
+                         mOriginalFileDescriptor, 0);
+    if (mOriginalSharedMemory == MAP_FAILED) {
+        ALOGE("SharedMemoryProxy::open() original mmap(%d) failed %d (%s)",
+                mOriginalFileDescriptor, errno, strerror(errno));
+        return AAUDIO_ERROR_INTERNAL; // TODO convert errno to a better AAUDIO_ERROR;
+    }
+
+    // Map the proxy fd at the same address, passing the original mapping's address as a hint.
+    mProxySharedMemory = (uint8_t *) mmap(mOriginalSharedMemory, mSharedMemorySizeInBytes,
+                         PROT_READ|PROT_WRITE,
+                         MAP_SHARED,
+                         mProxyFileDescriptor, 0);
+    if (mProxySharedMemory != mOriginalSharedMemory) {
+        ALOGE("SharedMemoryProxy::open() proxy mmap(%d) failed %d", mProxyFileDescriptor, errno);
+        munmap(mOriginalSharedMemory, mSharedMemorySizeInBytes);
+        mOriginalSharedMemory = nullptr;
+        close(mProxyFileDescriptor);
+        mProxyFileDescriptor = -1;
+        return AAUDIO_ERROR_INTERNAL; // TODO convert errno to a better AAUDIO_ERROR;
+    }
+
+    return AAUDIO_OK;
+}
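
The address-hint trick in open() builds on a standard ashmem setup. In isolation, and with hypothetical names, the region-creation half of that pattern looks like the sketch below (assuming libcutils and <sys/mman.h>); the class above then adds a second mmap() at the original buffer's address and rejects the result if the hint is not honoured.

    #include <cutils/ashmem.h>
    #include <sys/mman.h>
    #include <unistd.h>

    // Create a shared, read/write ashmem region and map it.
    // Returns the fd, or -1 on failure; the caller owns the fd and the mapping.
    static int createSharedRegion(size_t sizeInBytes, void **outAddress) {
        int fd = ashmem_create_region("ExampleRegion", sizeInBytes);
        if (fd < 0) {
            return -1;
        }
        if (ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE) < 0) {
            close(fd);
            return -1;
        }
        void *address = mmap(nullptr, sizeInBytes, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (address == MAP_FAILED) {
            close(fd);
            return -1;
        }
        *outAddress = address;
        return fd;
    }
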
diff --git a/services/oboeservice/SharedMemoryProxy.h b/services/oboeservice/SharedMemoryProxy.h
new file mode 100644
index 0000000..99bfdea
--- /dev/null
+++ b/services/oboeservice/SharedMemoryProxy.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_SHARED_MEMORY_PROXY_H
+#define AAUDIO_SHARED_MEMORY_PROXY_H
+
+#include <stdint.h>
+#include <cutils/ashmem.h>
+#include <sys/mman.h>
+
+#include <aaudio/AAudioDefinitions.h>
+
+namespace aaudio {
+
+/**
+ * Proxy for sharing memory between two file descriptors.
+ */
+class SharedMemoryProxy {
+public:
+    SharedMemoryProxy() {}
+
+    ~SharedMemoryProxy();
+
+    aaudio_result_t open(int fd, int32_t capacityInBytes);
+
+    int getFileDescriptor() const {
+        return mProxyFileDescriptor;
+    }
+
+private:
+    int            mOriginalFileDescriptor = -1;
+    int            mProxyFileDescriptor = -1;
+    uint8_t       *mOriginalSharedMemory = nullptr;
+    uint8_t       *mProxySharedMemory = nullptr;
+    int32_t        mSharedMemorySizeInBytes = 0;
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_SHARED_MEMORY_PROXY_H
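
A hedged usage sketch of the new class: the origin of originalFd is outside this patch and the helper name is invented. Since the destructor tears down the mappings and closes the proxy descriptor, the proxy object must outlive any use of the returned fd.

    #include "SharedMemoryProxy.h"

    // 'proxy' must outlive any use of the returned descriptor; its destructor
    // unmaps the shared region and closes the proxy fd.
    static int openProxyFd(aaudio::SharedMemoryProxy &proxy,
                           int originalFd, int32_t capacityInBytes) {
        if (proxy.open(originalFd, capacityInBytes) != AAUDIO_OK) {
            return -1;
        }
        // Maps the same pages as originalFd; suitable for handing to a client.
        return proxy.getFileDescriptor();
    }
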
diff --git a/services/oboeservice/SharedRingBuffer.cpp b/services/oboeservice/SharedRingBuffer.cpp
index 9ac8fdf..efcc9d6 100644
--- a/services/oboeservice/SharedRingBuffer.cpp
+++ b/services/oboeservice/SharedRingBuffer.cpp
@@ -18,11 +18,8 @@
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
-#include "AudioClock.h"
-#include "AudioEndpointParcelable.h"
-
-//#include "AAudioServiceStreamBase.h"
-//#include "AAudioServiceStreamFakeHal.h"
+#include "binding/RingBufferParcelable.h"
+#include "binding/AudioEndpointParcelable.h"
 
 #include "SharedRingBuffer.h"
 
diff --git a/services/oboeservice/SharedRingBuffer.h b/services/oboeservice/SharedRingBuffer.h
index 75f138b..a2c3766 100644
--- a/services/oboeservice/SharedRingBuffer.h
+++ b/services/oboeservice/SharedRingBuffer.h
@@ -22,8 +22,8 @@
 #include <sys/mman.h>
 
 #include "fifo/FifoBuffer.h"
-#include "RingBufferParcelable.h"
-#include "AudioEndpointParcelable.h"
+#include "binding/RingBufferParcelable.h"
+#include "binding/AudioEndpointParcelable.h"
 
 namespace aaudio {
 
@@ -41,22 +41,22 @@
 
     virtual ~SharedRingBuffer();
 
-    aaudio_result_t allocate(fifo_frames_t bytesPerFrame, fifo_frames_t capacityInFrames);
+    aaudio_result_t allocate(android::fifo_frames_t bytesPerFrame, android::fifo_frames_t capacityInFrames);
 
     void fillParcelable(AudioEndpointParcelable &endpointParcelable,
                         RingBufferParcelable &ringBufferParcelable);
 
-    FifoBuffer * getFifoBuffer() {
+    android::FifoBuffer * getFifoBuffer() {
         return mFifoBuffer;
     }
 
 private:
-    int            mFileDescriptor = -1;
-    FifoBuffer   * mFifoBuffer = nullptr;
-    uint8_t      * mSharedMemory = nullptr;
-    int32_t        mSharedMemorySizeInBytes = 0;
-    int32_t        mDataMemorySizeInBytes = 0;
-    fifo_frames_t  mCapacityInFrames = 0;
+    int                    mFileDescriptor = -1;
+    android::FifoBuffer   *mFifoBuffer = nullptr;
+    uint8_t               *mSharedMemory = nullptr;
+    int32_t                mSharedMemorySizeInBytes = 0;
+    int32_t                mDataMemorySizeInBytes = 0;
+    android::fifo_frames_t mCapacityInFrames = 0;
 };
 
 } /* namespace aaudio */
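
With FifoBuffer and fifo_frames_t now qualified under android::, a caller of SharedRingBuffer looks roughly like the sketch below. This is a sketch under assumptions: the frame size and capacity are illustrative, and the parcelable types are taken to live in the aaudio namespace as the header above implies.

    #include <aaudio/AAudioDefinitions.h>
    #include "SharedRingBuffer.h"
    #include "binding/AudioEndpointParcelable.h"
    #include "binding/RingBufferParcelable.h"

    // Allocate the shared FIFO and describe it to a client; sizes are illustrative.
    static aaudio_result_t setUpRing(aaudio::SharedRingBuffer &ring,
                                     aaudio::AudioEndpointParcelable &endpoint,
                                     aaudio::RingBufferParcelable &ringParcelable) {
        aaudio_result_t result = ring.allocate(/*bytesPerFrame=*/8, /*capacityInFrames=*/2048);
        if (result != AAUDIO_OK) {
            return result;
        }
        android::FifoBuffer *fifo = ring.getFifoBuffer(); // handed to the data path elsewhere
        (void) fifo;
        ring.fillParcelable(endpoint, ringParcelable);
        return AAUDIO_OK;
    }
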
diff --git a/services/oboeservice/TimestampScheduler.h b/services/oboeservice/TimestampScheduler.h
index 91a2477..325bee4 100644
--- a/services/oboeservice/TimestampScheduler.h
+++ b/services/oboeservice/TimestampScheduler.h
@@ -17,15 +17,8 @@
 #ifndef AAUDIO_TIMESTAMP_SCHEDULER_H
 #define AAUDIO_TIMESTAMP_SCHEDULER_H
 
-
-
-#include "IAAudioService.h"
-#include "AAudioServiceDefinitions.h"
-#include "AudioStream.h"
-#include "fifo/FifoBuffer.h"
-#include "SharedRingBuffer.h"
-#include "AudioEndpointParcelable.h"
-#include "utility/AudioClock.h"
+#include <aaudio/AAudioDefinitions.h>
+#include <utility/AudioClock.h>
 
 namespace aaudio {
 
@@ -47,8 +40,7 @@
     void start(int64_t startTime);
 
     /**
-     * Calculate the next time that the read position should be
-     * measured.
+     * Calculate the next time that the read position should be measured.
      */
     int64_t nextAbsoluteTime();
 
@@ -68,8 +60,8 @@
 private:
     // Start with an arbitrary default so we do not divide by zero.
     int64_t mBurstPeriod = AAUDIO_NANOS_PER_MILLISECOND;
-    int64_t mStartTime;
-    int64_t mLastTime;
+    int64_t mStartTime = 0;
+    int64_t mLastTime = 0;
 };
 
 } /* namespace aaudio */
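
The slimmed-down TimestampScheduler now depends only on AAudioDefinitions and AudioClock. Its intended call pattern, sketched with a hypothetical helper and a caller-supplied monotonic start time, is to anchor the schedule once with start() and then repeatedly ask for the next wake-up time.

    #include "TimestampScheduler.h"

    // Drive the scheduler for a few bursts; startNanos comes from the caller's
    // monotonic clock and numBursts is arbitrary.
    static void runTimestampLoop(aaudio::TimestampScheduler &scheduler,
                                 int64_t startNanos, int numBursts) {
        scheduler.start(startNanos);
        for (int i = 0; i < numBursts; i++) {
            int64_t wakeTime = scheduler.nextAbsoluteTime();
            // Sleep until wakeTime, then sample and publish the stream position (not shown).
            (void) wakeTime;
        }
    }
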