Merge "Change drm/crypto service names to "default"" into oc-dev
diff --git a/camera/ndk/Android.bp b/camera/ndk/Android.bp
index c5fc646..ade0d72 100644
--- a/camera/ndk/Android.bp
+++ b/camera/ndk/Android.bp
@@ -17,7 +17,7 @@
 // frameworks/av/include.
 
 ndk_library {
-    name: "libcamera2ndk.ndk",
+    name: "libcamera2ndk",
     symbol_file: "libcamera2ndk.map.txt",
     first_version: "24",
     unversioned_until: "current",
diff --git a/include/media/omx/1.0/WOmxNode.h b/include/media/omx/1.0/WOmxNode.h
index 1d575e7..eebc8c6 100644
--- a/include/media/omx/1.0/WOmxNode.h
+++ b/include/media/omx/1.0/WOmxNode.h
@@ -102,9 +102,6 @@
             const char *parameter_name,
             OMX_INDEXTYPE *index) override;
     status_t dispatchMessage(const omx_message &msg) override;
-
-    // TODO: this is temporary, will be removed when quirks move to OMX side.
-    status_t setQuirks(OMX_U32 quirks) override;
 };
 
 struct TWOmxNode : public IOmxNode {
@@ -153,7 +150,6 @@
             hidl_string const& parameterName,
             getExtensionIndex_cb _hidl_cb) override;
     Return<Status> dispatchMessage(Message const& msg) override;
-    Return<void> setQuirks(uint32_t quirks) override;
 };
 
 }  // namespace utils
diff --git a/include/media/vndk/xmlparser/1.0/MediaCodecsXmlParser.h b/include/media/vndk/xmlparser/1.0/MediaCodecsXmlParser.h
new file mode 100644
index 0000000..b324cd8
--- /dev/null
+++ b/include/media/vndk/xmlparser/1.0/MediaCodecsXmlParser.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_CODECS_XML_PARSER_H_
+
+#define MEDIA_CODECS_XML_PARSER_H_
+
+#include <map>
+#include <vector>
+
+#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/AString.h>
+
+#include <sys/types.h>
+#include <utils/Errors.h>
+#include <utils/Vector.h>
+#include <utils/StrongPointer.h>
+
+namespace android {
+
+struct AMessage;
+
+// Quirks that are still supported, even though they are deprecated.
+enum Quirks {
+    kRequiresAllocateBufferOnInputPorts   = 1,
+    kRequiresAllocateBufferOnOutputPorts  = 2,
+
+    kQuirksMask = kRequiresAllocateBufferOnInputPorts
+                | kRequiresAllocateBufferOnOutputPorts,
+};
+
+// Lightweight struct for querying components.
+struct TypeInfo {
+    AString mName;
+    std::map<AString, AString> mStringFeatures;
+    std::map<AString, bool> mBoolFeatures;
+    std::map<AString, AString> mDetails;
+};
+
+struct ProfileLevel {
+    uint32_t mProfile;
+    uint32_t mLevel;
+};
+
+struct CodecInfo {
+    std::vector<TypeInfo> mTypes;
+    std::vector<ProfileLevel> mProfileLevels;
+    std::vector<uint32_t> mColorFormats;
+    uint32_t mFlags;
+    bool mIsEncoder;
+};
+
+class MediaCodecsXmlParser {
+public:
+    MediaCodecsXmlParser();
+    ~MediaCodecsXmlParser();
+
+    void getGlobalSettings(std::map<AString, AString> *settings) const;
+
+    status_t getCodecInfo(const char *name, CodecInfo *info) const;
+
+    status_t getQuirks(const char *name, std::vector<AString> *quirks) const;
+
+private:
+    enum Section {
+        SECTION_TOPLEVEL,
+        SECTION_SETTINGS,
+        SECTION_DECODERS,
+        SECTION_DECODER,
+        SECTION_DECODER_TYPE,
+        SECTION_ENCODERS,
+        SECTION_ENCODER,
+        SECTION_ENCODER_TYPE,
+        SECTION_INCLUDE,
+    };
+
+    status_t mInitCheck;
+    Section mCurrentSection;
+    bool mUpdate;
+    Vector<Section> mPastSections;
+    int32_t mDepth;
+    AString mHrefBase;
+
+    std::map<AString, AString> mGlobalSettings;
+
+    // name -> CodecInfo
+    std::map<AString, CodecInfo> mCodecInfos;
+    std::map<AString, std::vector<AString>> mQuirks;
+    AString mCurrentName;
+    std::vector<TypeInfo>::iterator mCurrentType;
+
+    status_t initCheck() const;
+    void parseTopLevelXMLFile(const char *path, bool ignore_errors = false);
+
+    void parseXMLFile(const char *path);
+
+    static void StartElementHandlerWrapper(
+            void *me, const char *name, const char **attrs);
+
+    static void EndElementHandlerWrapper(void *me, const char *name);
+
+    void startElementHandler(const char *name, const char **attrs);
+    void endElementHandler(const char *name);
+
+    status_t includeXMLFile(const char **attrs);
+    status_t addSettingFromAttributes(const char **attrs);
+    status_t addMediaCodecFromAttributes(bool encoder, const char **attrs);
+    void addMediaCodec(bool encoder, const char *name, const char *type = NULL);
+
+    status_t addQuirk(const char **attrs);
+    status_t addTypeFromAttributes(const char **attrs, bool encoder);
+    status_t addLimit(const char **attrs);
+    status_t addFeature(const char **attrs);
+    void addType(const char *name);
+
+    DISALLOW_EVIL_CONSTRUCTORS(MediaCodecsXmlParser);
+};
+
+}  // namespace android
+
+#endif  // MEDIA_CODECS_XML_PARSER_H_
+
diff --git a/media/audioserver/Android.mk b/media/audioserver/Android.mk
index 63fa16b..afd1189 100644
--- a/media/audioserver/Android.mk
+++ b/media/audioserver/Android.mk
@@ -6,6 +6,7 @@
 	main_audioserver.cpp
 
 LOCAL_SHARED_LIBRARIES := \
+	libaaudioservice \
 	libaudioflinger \
 	libaudiopolicyservice \
 	libbinder \
@@ -18,6 +19,7 @@
 	libutils \
 	libhwbinder
 
+# TODO oboeservice is the old folder name for aaudioservice. It will be changed.
 LOCAL_C_INCLUDES := \
 	frameworks/av/services/audioflinger \
 	frameworks/av/services/audiopolicy \
@@ -26,8 +28,12 @@
 	frameworks/av/services/audiopolicy/engine/interface \
 	frameworks/av/services/audiopolicy/service \
 	frameworks/av/services/medialog \
+	frameworks/av/services/oboeservice \
 	frameworks/av/services/radio \
 	frameworks/av/services/soundtrigger \
+	frameworks/av/media/libaaudio/include \
+	frameworks/av/media/libaaudio/src \
+	frameworks/av/media/libaaudio/src/binding \
 	$(call include-path-for, audio-utils) \
 	external/sonic \
 
diff --git a/media/audioserver/main_audioserver.cpp b/media/audioserver/main_audioserver.cpp
index bcd0342..ee02d23 100644
--- a/media/audioserver/main_audioserver.cpp
+++ b/media/audioserver/main_audioserver.cpp
@@ -34,6 +34,7 @@
 // from LOCAL_C_INCLUDES
 #include "AudioFlinger.h"
 #include "AudioPolicyService.h"
+#include "AAudioService.h"
 #include "MediaLogService.h"
 #include "RadioService.h"
 #include "SoundTriggerHwService.h"
@@ -131,6 +132,7 @@
         ALOGI("ServiceManager: %p", sm.get());
         AudioFlinger::instantiate();
         AudioPolicyService::instantiate();
+        AAudioService::instantiate();
         RadioService::instantiate();
         SoundTriggerHwService::instantiate();
         ProcessState::self()->startThreadPool();
diff --git a/media/libaaudio/Android.bp b/media/libaaudio/Android.bp
index e41d62b..f539ba9 100644
--- a/media/libaaudio/Android.bp
+++ b/media/libaaudio/Android.bp
@@ -21,7 +21,7 @@
 }
 
 ndk_library {
-    name: "libaaudio.ndk",
+    name: "libaaudio",
     symbol_file: "libaaudio.map.txt",
     first_version: "26",
     unversioned_until: "current",
diff --git a/media/libaaudio/examples/input_monitor/Android.mk b/media/libaaudio/examples/input_monitor/Android.mk
new file mode 100644
index 0000000..b56328b
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/Android.mk
@@ -0,0 +1,6 @@
+# include $(call all-subdir-makefiles)
+
+# Just include static/ for now.
+LOCAL_PATH := $(call my-dir)
+#include $(LOCAL_PATH)/jni/Android.mk
+include $(LOCAL_PATH)/static/Android.mk
diff --git a/media/libaaudio/examples/input_monitor/README.md b/media/libaaudio/examples/input_monitor/README.md
new file mode 100644
index 0000000..3e54ef0
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/README.md
@@ -0,0 +1 @@
+Monitor the input level and print the peak value.
diff --git a/media/libaaudio/examples/input_monitor/jni/Android.mk b/media/libaaudio/examples/input_monitor/jni/Android.mk
new file mode 100644
index 0000000..51a5a85
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/jni/Android.mk
@@ -0,0 +1,35 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/liboboe/include
+
+LOCAL_SRC_FILES:= frameworks/av/media/liboboe/src/write_sine.cpp
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia libtinyalsa \
+        libbinder libcutils libutils
+LOCAL_STATIC_LIBRARIES := libsndfile
+LOCAL_MODULE := write_sine_ndk
+LOCAL_SHARED_LIBRARIES += liboboe_prebuilt
+include $(BUILD_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/liboboe/include
+
+LOCAL_SRC_FILES:= frameworks/av/media/liboboe/src/write_sine_threaded.cpp
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia libtinyalsa \
+        libbinder libcutils libutils
+LOCAL_STATIC_LIBRARIES := libsndfile
+LOCAL_MODULE := write_sine_threaded_ndk
+LOCAL_SHARED_LIBRARIES += liboboe_prebuilt
+include $(BUILD_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := liboboe_prebuilt
+LOCAL_SRC_FILES := liboboe.so
+LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
+include $(PREBUILT_SHARED_LIBRARY)
diff --git a/media/libaaudio/examples/input_monitor/jni/Application.mk b/media/libaaudio/examples/input_monitor/jni/Application.mk
new file mode 100644
index 0000000..e74475c
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/jni/Application.mk
@@ -0,0 +1,3 @@
+# TODO remove this when we support other architectures
+APP_ABI := arm64-v8a
+APP_CPPFLAGS += -std=c++11
diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
new file mode 100644
index 0000000..545496f
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Record input using AAudio and display the peak amplitudes.
+
+#include <new>
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
+
+#define SAMPLE_RATE        48000
+#define NUM_SECONDS        10
+#define NANOS_PER_MICROSECOND ((int64_t)1000)
+#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
+#define NANOS_PER_SECOND   (NANOS_PER_MILLISECOND * 1000)
+
+#define DECAY_FACTOR       0.999
+#define MIN_FRAMES_TO_READ 48  /* arbitrary, 1 msec at 48000 Hz */
+
+static const char *getSharingModeText(aaudio_sharing_mode_t mode) {
+    const char *modeText = "unknown";
+    switch (mode) {
+    case AAUDIO_SHARING_MODE_EXCLUSIVE:
+        modeText = "EXCLUSIVE";
+        break;
+    case AAUDIO_SHARING_MODE_SHARED:
+        modeText = "SHARED";
+        break;
+    default:
+        break;
+    }
+    return modeText;
+}
+
+int main(int argc, char **argv)
+{
+    (void)argc; // unused
+
+    aaudio_result_t result;
+
+    int actualSamplesPerFrame;
+    int actualSampleRate;
+    const aaudio_audio_format_t requestedDataFormat = AAUDIO_FORMAT_PCM_I16;
+    aaudio_audio_format_t actualDataFormat;
+
+    const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_SHARED;
+    aaudio_sharing_mode_t actualSharingMode;
+
+    AAudioStreamBuilder *aaudioBuilder = nullptr;
+    AAudioStream *aaudioStream = nullptr;
+    aaudio_stream_state_t state;
+    int32_t framesPerBurst = 0;
+    int32_t framesPerRead = 0;
+    int32_t framesToRecord = 0;
+    int32_t framesLeft = 0;
+    int32_t xRunCount = 0;
+    int16_t *data = nullptr;
+    float peakLevel = 0.0;
+    int loopCounter = 0;
+
+    // Make printf print immediately so that debug info is not stuck
+    // in a buffer if we hang or crash.
+    setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+    printf("%s - Monitor input level using AAudio\n", argv[0]);
+
+    // Use an AAudioStreamBuilder to contain requested parameters.
+    result = AAudio_createStreamBuilder(&aaudioBuilder);
+    if (result != AAUDIO_OK) {
+        goto finish;
+    }
+
+    // Request stream properties.
+    AAudioStreamBuilder_setDirection(aaudioBuilder, AAUDIO_DIRECTION_INPUT);
+    AAudioStreamBuilder_setFormat(aaudioBuilder, requestedDataFormat);
+    AAudioStreamBuilder_setSharingMode(aaudioBuilder, requestedSharingMode);
+
+    // Create an AAudioStream using the Builder.
+    result = AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream);
+    if (result != AAUDIO_OK) {
+        goto finish;
+    }
+
+    actualSamplesPerFrame = AAudioStream_getSamplesPerFrame(aaudioStream);
+    printf("SamplesPerFrame = %d\n", actualSamplesPerFrame);
+    actualSampleRate = AAudioStream_getSampleRate(aaudioStream);
+    printf("SamplesPerFrame = %d\n", actualSampleRate);
+
+    actualSharingMode = AAudioStream_getSharingMode(aaudioStream);
+    printf("SharingMode: requested = %s, actual = %s\n",
+            getSharingModeText(requestedSharingMode),
+            getSharingModeText(actualSharingMode));
+
+    // This is the number of frames that are written in one chunk by a DMA controller
+    // or a DSP.
+    framesPerBurst = AAudioStream_getFramesPerBurst(aaudioStream);
+    printf("DataFormat: framesPerBurst = %d\n",framesPerBurst);
+
+    // Some DMA might use very short bursts of 16 frames. We don't need to read such small
+    // buffers. But it helps to use a multiple of the burst size for predictable scheduling.
+    framesPerRead = framesPerBurst;
+    while (framesPerRead < MIN_FRAMES_TO_READ) {
+        framesPerRead *= 2;
+    }
+    printf("DataFormat: framesPerRead = %d\n",framesPerRead);
+
+    actualDataFormat = AAudioStream_getFormat(aaudioStream);
+    printf("DataFormat: requested = %d, actual = %d\n", requestedDataFormat, actualDataFormat);
+    // TODO handle other data formats
+    assert(actualDataFormat == AAUDIO_FORMAT_PCM_I16);
+
+    // Allocate a buffer for the audio data.
+    data = new(std::nothrow) int16_t[framesPerRead * actualSamplesPerFrame];
+    if (data == nullptr) {
+        fprintf(stderr, "ERROR - could not allocate data buffer\n");
+        result = AAUDIO_ERROR_NO_MEMORY;
+        goto finish;
+    }
+
+    // Start the stream.
+    printf("call AAudioStream_requestStart()\n");
+    result = AAudioStream_requestStart(aaudioStream);
+    if (result != AAUDIO_OK) {
+        fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d\n", result);
+        goto finish;
+    }
+
+    state = AAudioStream_getState(aaudioStream);
+    printf("after start, state = %s\n", AAudio_convertStreamStateToText(state));
+
+    // Play for a while.
+    framesToRecord = actualSampleRate * NUM_SECONDS;
+    framesLeft = framesToRecord;
+    while (framesLeft > 0) {
+        // Read audio data from the stream.
+        int64_t timeoutNanos = 100 * NANOS_PER_MILLISECOND;
+        int minFrames = (framesToRecord < framesPerRead) ? framesToRecord : framesPerRead;
+        int actual = AAudioStream_read(aaudioStream, data, minFrames, timeoutNanos);
+        if (actual < 0) {
+            fprintf(stderr, "ERROR - AAudioStream_read() returned %zd\n", actual);
+            goto finish;
+        } else if (actual == 0) {
+            fprintf(stderr, "WARNING - AAudioStream_read() returned %zd\n", actual);
+            goto finish;
+        }
+        framesLeft -= actual;
+
+        // Peak follower.
+        for (int frameIndex = 0; frameIndex < actual; frameIndex++) {
+            float sample = data[frameIndex * actualSamplesPerFrame] * (1.0/32768);
+            peakLevel *= DECAY_FACTOR;
+            if (sample > peakLevel) {
+                peakLevel = sample;
+            }
+        }
+
+        // Display level as stars, e.g. "******".
+        if ((loopCounter++ % 10) == 0) {
+            printf("%5.3f ", peakLevel);
+            int numStars = (int)(peakLevel * 50);
+            for (int i = 0; i < numStars; i++) {
+                printf("*");
+            }
+            printf("\n");
+        }
+    }
+
+    xRunCount = AAudioStream_getXRunCount(aaudioStream);
+    printf("AAudioStream_getXRunCount %d\n", xRunCount);
+
+finish:
+    delete[] data;
+    AAudioStream_close(aaudioStream);
+    AAudioStreamBuilder_delete(aaudioBuilder);
+    printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+    return (result != AAUDIO_OK) ? EXIT_FAILURE : EXIT_SUCCESS;
+}
+
diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
new file mode 100644
index 0000000..8d40d94
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
@@ -0,0 +1,284 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Record input using AAudio and display the peak amplitudes.
+
+#include <assert.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <math.h>
+#include <time.h>
+#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
+
+#define NUM_SECONDS           10
+#define NANOS_PER_MICROSECOND ((int64_t)1000)
+#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
+#define NANOS_PER_SECOND      (NANOS_PER_MILLISECOND * 1000)
+
+//#define SHARING_MODE  AAUDIO_SHARING_MODE_EXCLUSIVE
+#define SHARING_MODE  AAUDIO_SHARING_MODE_SHARED
+
+/**
+ * Simple wrapper for AAudio that opens a default input stream and then calls
+ * a callback function to process the recorded data.
+ */
+class SimpleAAudioPlayer {
+public:
+    SimpleAAudioPlayer() {}
+    ~SimpleAAudioPlayer() {
+        close();
+    };
+
+    /**
+     * Call this before calling open().
+     * @param requestedSharingMode
+     */
+    void setSharingMode(aaudio_sharing_mode_t requestedSharingMode) {
+        mRequestedSharingMode = requestedSharingMode;
+    }
+
+    /**
+     * Also known as "sample rate"
+     * Only call this after open() has been called.
+     */
+    int32_t getFramesPerSecond() {
+        if (mStream == nullptr) {
+            return AAUDIO_ERROR_INVALID_STATE;
+        }
+        return AAudioStream_getSampleRate(mStream);
+    }
+
+    /**
+     * Only call this after open() has been called.
+     */
+    int32_t getSamplesPerFrame() {
+        if (mStream == nullptr) {
+            return AAUDIO_ERROR_INVALID_STATE;
+        }
+        return AAudioStream_getSamplesPerFrame(mStream);
+    }
+
+    /**
+     * Open a stream
+     */
+    aaudio_result_t open(AAudioStream_dataCallback proc, void *userContext) {
+        aaudio_result_t result = AAUDIO_OK;
+
+        // Use an AAudioStreamBuilder to contain requested parameters.
+        result = AAudio_createStreamBuilder(&mBuilder);
+        if (result != AAUDIO_OK) return result;
+
+        AAudioStreamBuilder_setDirection(mBuilder, AAUDIO_DIRECTION_INPUT);
+        AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
+        AAudioStreamBuilder_setDataCallback(mBuilder, proc, userContext);
+        AAudioStreamBuilder_setFormat(mBuilder, AAUDIO_FORMAT_PCM_I16);
+
+        // Open an AAudioStream using the Builder.
+        result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
+        if (result != AAUDIO_OK) {
+            fprintf(stderr, "ERROR - AAudioStreamBuilder_openStream() returned %d %s\n",
+                    result, AAudio_convertResultToText(result));
+            goto finish1;
+        }
+
+        printf("AAudioStream_getFramesPerBurst() = %d\n",
+               AAudioStream_getFramesPerBurst(mStream));
+        printf("AAudioStream_getBufferSizeInFrames() = %d\n",
+               AAudioStream_getBufferSizeInFrames(mStream));
+        printf("AAudioStream_getBufferCapacityInFrames() = %d\n",
+               AAudioStream_getBufferCapacityInFrames(mStream));
+        return result;
+
+     finish1:
+        AAudioStreamBuilder_delete(mBuilder);
+        mBuilder = nullptr;
+        return result;
+    }
+
+    aaudio_result_t close() {
+        if (mStream != nullptr) {
+            printf("call AAudioStream_close(%p)\n", mStream);  fflush(stdout);
+            AAudioStream_close(mStream);
+            mStream = nullptr;
+            AAudioStreamBuilder_delete(mBuilder);
+            mBuilder = nullptr;
+        }
+        return AAUDIO_OK;
+    }
+
+    // Write zero data to fill up the buffer and prevent underruns.
+    // Assume format is PCM_I16. TODO use floats.
+    aaudio_result_t prime() {
+        int32_t samplesPerFrame = AAudioStream_getSamplesPerFrame(mStream);
+        const int numFrames = 32; // arbitrary
+        int16_t zeros[numFrames * samplesPerFrame];
+        memset(zeros, 0, sizeof(zeros));
+        aaudio_result_t result = numFrames;
+        while (result == numFrames) {
+            result = AAudioStream_write(mStream, zeros, numFrames, 0);
+        }
+        return result;
+    }
+
+    // Start the stream. AAudio will start calling your callback function.
+    aaudio_result_t start() {
+        aaudio_result_t result = AAudioStream_requestStart(mStream);
+        if (result != AAUDIO_OK) {
+            fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d %s\n",
+                    result, AAudio_convertResultToText(result));
+        }
+        return result;
+    }
+
+    // Stop the stream. AAudio will stop calling your callback function.
+    aaudio_result_t stop() {
+        aaudio_result_t result = AAudioStream_requestStop(mStream);
+        if (result != AAUDIO_OK) {
+            fprintf(stderr, "ERROR - AAudioStream_requestStop() returned %d %s\n",
+                    result, AAudio_convertResultToText(result));
+        }
+        int32_t xRunCount = AAudioStream_getXRunCount(mStream);
+        printf("AAudioStream_getXRunCount %d\n", xRunCount);
+        return result;
+    }
+
+private:
+    AAudioStreamBuilder    *mBuilder = nullptr;
+    AAudioStream           *mStream = nullptr;
+    aaudio_sharing_mode_t   mRequestedSharingMode = SHARING_MODE;
+};
+
+// Application data that gets passed to the callback.
+typedef struct PeakTrackerData {
+    float peakLevel;
+} PeakTrackerData_t;
+
+#define DECAY_FACTOR   0.999
+
+// Callback function that fills the audio output buffer.
+aaudio_data_callback_result_t MyDataCallbackProc(
+        AAudioStream *stream,
+        void *userData,
+        void *audioData,
+        int32_t numFrames
+        ) {
+
+    PeakTrackerData_t *data = (PeakTrackerData_t *) userData;
+    // printf("MyCallbackProc(): frameCount = %d\n", numFrames);
+    int32_t samplesPerFrame = AAudioStream_getSamplesPerFrame(stream);
+    float sample;
+    // This code assumes mono or stereo.
+    switch (AAudioStream_getFormat(stream)) {
+        case AAUDIO_FORMAT_PCM_I16: {
+            int16_t *audioBuffer = (int16_t *) audioData;
+            // Peak follower
+            for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
+                sample = audioBuffer[frameIndex * samplesPerFrame] * (1.0/32768);
+                data->peakLevel *= DECAY_FACTOR;
+                if (sample > data->peakLevel) {
+                    data->peakLevel = sample;
+                }
+            }
+        }
+        break;
+        case AAUDIO_FORMAT_PCM_FLOAT: {
+            float *audioBuffer = (float *) audioData;
+            // Peak follower
+            for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
+                sample = audioBuffer[frameIndex * samplesPerFrame];
+                data->peakLevel *= DECAY_FACTOR;
+                if (sample > data->peakLevel) {
+                    data->peakLevel = sample;
+                }
+            }
+        }
+        break;
+        default:
+            return AAUDIO_CALLBACK_RESULT_STOP;
+    }
+
+    return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+void displayPeakLevel(float peakLevel) {
+    printf("%5.3f ", peakLevel);
+    const int maxStars = 50; // arbitrary, fits on one line
+    int numStars = (int) (peakLevel * maxStars);
+    for (int i = 0; i < numStars; i++) {
+        printf("*");
+    }
+    printf("\n");
+}
+
+int main(int argc, char **argv)
+{
+    (void)argc; // unused
+    SimpleAAudioPlayer player;
+    PeakTrackerData_t myData = {0.0};
+    aaudio_result_t result;
+    const int displayRateHz = 20; // arbitrary
+    const int loopsNeeded = NUM_SECONDS * displayRateHz;
+
+    // Make printf print immediately so that debug info is not stuck
+    // in a buffer if we hang or crash.
+    setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+    printf("%s - Display audio input using an AAudio callback\n", argv[0]);
+
+    player.setSharingMode(SHARING_MODE);
+
+    result = player.open(MyDataCallbackProc, &myData);
+    if (result != AAUDIO_OK) {
+        fprintf(stderr, "ERROR -  player.open() returned %d\n", result);
+        goto error;
+    }
+    printf("player.getFramesPerSecond() = %d\n", player.getFramesPerSecond());
+    printf("player.getSamplesPerFrame() = %d\n", player.getSamplesPerFrame());
+
+    result = player.start();
+    if (result != AAUDIO_OK) {
+        fprintf(stderr, "ERROR -  player.start() returned %d\n", result);
+        goto error;
+    }
+
+    printf("Sleep for %d seconds while audio plays in a callback thread.\n", NUM_SECONDS);
+    for (int i = 0; i < loopsNeeded; i++)
+    {
+        const struct timespec request = { .tv_sec = 0,
+                .tv_nsec = NANOS_PER_SECOND / displayRateHz };
+        (void) clock_nanosleep(CLOCK_MONOTONIC, 0 /*flags*/, &request, NULL /*remain*/);
+        displayPeakLevel(myData.peakLevel);
+    }
+    printf("Woke up now.\n");
+
+    result = player.stop();
+    if (result != AAUDIO_OK) {
+        goto error;
+    }
+    result = player.close();
+    if (result != AAUDIO_OK) {
+        goto error;
+    }
+
+    printf("SUCCESS\n");
+    return EXIT_SUCCESS;
+error:
+    player.close();
+    printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+    return EXIT_FAILURE;
+}
+
diff --git a/media/libaaudio/examples/input_monitor/static/Android.mk b/media/libaaudio/examples/input_monitor/static/Android.mk
new file mode 100644
index 0000000..e83f179
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/static/Android.mk
@@ -0,0 +1,35 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := examples
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/libaaudio/include
+
+# TODO reorganize folders to avoid using ../
+LOCAL_SRC_FILES:= ../src/input_monitor.cpp
+
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
+                          libbinder libcutils libutils \
+                          libaudioclient liblog libtinyalsa
+LOCAL_STATIC_LIBRARIES := libaaudio
+
+LOCAL_MODULE := input_monitor
+include $(BUILD_EXECUTABLE)
+
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/libaaudio/include
+
+LOCAL_SRC_FILES:= ../src/input_monitor_callback.cpp
+
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
+                          libbinder libcutils libutils \
+                          libaudioclient liblog
+LOCAL_STATIC_LIBRARIES := libaaudio
+
+LOCAL_MODULE := input_monitor_callback
+include $(BUILD_EXECUTABLE)
diff --git a/media/libaaudio/examples/input_monitor/static/README.md b/media/libaaudio/examples/input_monitor/static/README.md
new file mode 100644
index 0000000..6e26d7b
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/static/README.md
@@ -0,0 +1,2 @@
+Makefile for building simple command line examples.
+They link with AAudio as a static library.
diff --git a/media/libaaudio/examples/write_sine/Android.mk b/media/libaaudio/examples/write_sine/Android.mk
index b56328b..5053e7d 100644
--- a/media/libaaudio/examples/write_sine/Android.mk
+++ b/media/libaaudio/examples/write_sine/Android.mk
@@ -1,6 +1 @@
-# include $(call all-subdir-makefiles)
-
-# Just include static/ for now.
-LOCAL_PATH := $(call my-dir)
-#include $(LOCAL_PATH)/jni/Android.mk
-include $(LOCAL_PATH)/static/Android.mk
+include $(call all-subdir-makefiles)
diff --git a/media/libaaudio/examples/write_sine/jni/Android.mk b/media/libaaudio/examples/write_sine/jni/Android.mk
index 51a5a85..8cd0f03 100644
--- a/media/libaaudio/examples/write_sine/jni/Android.mk
+++ b/media/libaaudio/examples/write_sine/jni/Android.mk
@@ -4,32 +4,27 @@
 LOCAL_MODULE_TAGS := tests
 LOCAL_C_INCLUDES := \
     $(call include-path-for, audio-utils) \
-    frameworks/av/media/liboboe/include
+    frameworks/av/media/libaaudio/include
 
-LOCAL_SRC_FILES:= frameworks/av/media/liboboe/src/write_sine.cpp
-LOCAL_SHARED_LIBRARIES := libaudioutils libmedia libtinyalsa \
-        libbinder libcutils libutils
-LOCAL_STATIC_LIBRARIES := libsndfile
+# NDK recommends using this kind of relative path instead of an absolute path.
+LOCAL_SRC_FILES:= ../src/write_sine.cpp
+LOCAL_SHARED_LIBRARIES := libaaudio
 LOCAL_MODULE := write_sine_ndk
-LOCAL_SHARED_LIBRARIES += liboboe_prebuilt
 include $(BUILD_EXECUTABLE)
 
 include $(CLEAR_VARS)
 LOCAL_MODULE_TAGS := tests
 LOCAL_C_INCLUDES := \
     $(call include-path-for, audio-utils) \
-    frameworks/av/media/liboboe/include
+    frameworks/av/media/libaaudio/include
 
-LOCAL_SRC_FILES:= frameworks/av/media/liboboe/src/write_sine_threaded.cpp
-LOCAL_SHARED_LIBRARIES := libaudioutils libmedia libtinyalsa \
-        libbinder libcutils libutils
-LOCAL_STATIC_LIBRARIES := libsndfile
+LOCAL_SRC_FILES:= ../src/write_sine_threaded.cpp
+LOCAL_SHARED_LIBRARIES := libaaudio
 LOCAL_MODULE := write_sine_threaded_ndk
-LOCAL_SHARED_LIBRARIES += liboboe_prebuilt
 include $(BUILD_EXECUTABLE)
 
 include $(CLEAR_VARS)
-LOCAL_MODULE := liboboe_prebuilt
-LOCAL_SRC_FILES := liboboe.so
+LOCAL_MODULE := libaaudio_prebuilt
+LOCAL_SRC_FILES := libaaudio.so
 LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
 include $(PREBUILT_SHARED_LIBRARY)
diff --git a/media/libaaudio/examples/write_sine/src/SineGenerator.h b/media/libaaudio/examples/write_sine/src/SineGenerator.h
index ade7527..64b772d 100644
--- a/media/libaaudio/examples/write_sine/src/SineGenerator.h
+++ b/media/libaaudio/examples/write_sine/src/SineGenerator.h
@@ -79,7 +79,7 @@
         }
     }
 
-    double mAmplitude = 0.01;
+    double mAmplitude = 0.05;  // unitless scaler
     double mPhase = 0.0;
     double mPhaseIncrement = 440 * M_PI * 2 / 48000;
     double mFrameRate = 48000;
diff --git a/media/libaaudio/examples/write_sine/src/write_sine.cpp b/media/libaaudio/examples/write_sine/src/write_sine.cpp
index 80b6252..d8e5ec1 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine.cpp
@@ -19,7 +19,6 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <math.h>
-#include <aaudio/AAudioDefinitions.h>
 #include <aaudio/AAudio.h>
 #include "SineGenerator.h"
 
@@ -44,6 +43,7 @@
     return modeText;
 }
 
+// TODO move to a common utility library
 static int64_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
     struct timespec time;
     int result = clock_gettime(clockId, &time);
@@ -74,6 +74,8 @@
     AAudioStream *aaudioStream = nullptr;
     aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNINITIALIZED;
     int32_t framesPerBurst = 0;
+    int32_t framesPerWrite = 0;
+    int32_t bufferCapacity = 0;
     int32_t framesToPlay = 0;
     int32_t framesLeft = 0;
     int32_t xRunCount = 0;
@@ -100,7 +102,6 @@
     AAudioStreamBuilder_setFormat(aaudioBuilder, requestedDataFormat);
     AAudioStreamBuilder_setSharingMode(aaudioBuilder, requestedSharingMode);
 
-
     // Create an AAudioStream using the Builder.
     result = AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream);
     if (result != AAUDIO_OK) {
@@ -129,21 +130,25 @@
     // This is the number of frames that are read in one chunk by a DMA controller
     // or a DSP or a mixer.
     framesPerBurst = AAudioStream_getFramesPerBurst(aaudioStream);
-    printf("DataFormat: original framesPerBurst = %d\n",framesPerBurst);
+    printf("DataFormat: framesPerBurst = %d\n",framesPerBurst);
+    bufferCapacity = AAudioStream_getBufferCapacityInFrames(aaudioStream);
+    printf("DataFormat: bufferCapacity = %d, remainder = %d\n",
+           bufferCapacity, bufferCapacity % framesPerBurst);
 
     // Some DMA might use very short bursts of 16 frames. We don't need to write such small
     // buffers. But it helps to use a multiple of the burst size for predictable scheduling.
-    while (framesPerBurst < 48) {
-        framesPerBurst *= 2;
+    framesPerWrite = framesPerBurst;
+    while (framesPerWrite < 48) {
+        framesPerWrite *= 2;
     }
-    printf("DataFormat: final framesPerBurst = %d\n",framesPerBurst);
+    printf("DataFormat: framesPerWrite = %d\n",framesPerWrite);
 
     actualDataFormat = AAudioStream_getFormat(aaudioStream);
     printf("DataFormat: requested = %d, actual = %d\n", requestedDataFormat, actualDataFormat);
     // TODO handle other data formats
 
     // Allocate a buffer for the audio data.
-    data = new int16_t[framesPerBurst * actualSamplesPerFrame];
+    data = new int16_t[framesPerWrite * actualSamplesPerFrame];
     if (data == nullptr) {
         fprintf(stderr, "ERROR - could not allocate data buffer\n");
         result = AAUDIO_ERROR_NO_MEMORY;
@@ -166,14 +171,14 @@
     framesLeft = framesToPlay;
     while (framesLeft > 0) {
         // Render sine waves to left and right channels.
-        sineOsc1.render(&data[0], actualSamplesPerFrame, framesPerBurst);
+        sineOsc1.render(&data[0], actualSamplesPerFrame, framesPerWrite);
         if (actualSamplesPerFrame > 1) {
-            sineOsc2.render(&data[1], actualSamplesPerFrame, framesPerBurst);
+            sineOsc2.render(&data[1], actualSamplesPerFrame, framesPerWrite);
         }
 
         // Write audio data to the stream.
         int64_t timeoutNanos = 100 * NANOS_PER_MILLISECOND;
-        int minFrames = (framesToPlay < framesPerBurst) ? framesToPlay : framesPerBurst;
+        int minFrames = (framesToPlay < framesPerWrite) ? framesToPlay : framesPerWrite;
         int actual = AAudioStream_write(aaudioStream, data, minFrames, timeoutNanos);
         if (actual < 0) {
             fprintf(stderr, "ERROR - AAudioStream_write() returned %zd\n", actual);
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
new file mode 100644
index 0000000..9414236
--- /dev/null
+++ b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
@@ -0,0 +1,320 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Play sine waves using an AAudio callback.
+
+#include <assert.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sched.h>
+#include <stdio.h>
+#include <math.h>
+#include <time.h>
+#include <aaudio/AAudio.h>
+#include "SineGenerator.h"
+
+#define NUM_SECONDS              5
+
+//#define SHARING_MODE  AAUDIO_SHARING_MODE_EXCLUSIVE
+#define SHARING_MODE  AAUDIO_SHARING_MODE_SHARED
+
+#define  CALLBACK_SIZE_FRAMES    128
+
+// TODO refactor common code into a single SimpleAAudio class
+/**
+ * Simple wrapper for AAudio that opens a default stream and then calls
+ * a callback function to fill the output buffers.
+ */
+class SimpleAAudioPlayer {
+public:
+    SimpleAAudioPlayer() {}
+    ~SimpleAAudioPlayer() {
+        close();
+    };
+
+    /**
+     * Call this before calling open().
+     * @param requestedSharingMode
+     */
+    void setSharingMode(aaudio_sharing_mode_t requestedSharingMode) {
+        mRequestedSharingMode = requestedSharingMode;
+    }
+
+    /**
+     * Also known as "sample rate"
+     * Only call this after open() has been called.
+     */
+    int32_t getFramesPerSecond() {
+        if (mStream == nullptr) {
+            return AAUDIO_ERROR_INVALID_STATE;
+        }
+        return AAudioStream_getSampleRate(mStream);
+    }
+
+    /**
+     * Only call this after open() has been called.
+     */
+    int32_t getSamplesPerFrame() {
+        if (mStream == nullptr) {
+            return AAUDIO_ERROR_INVALID_STATE;
+        }
+        return AAudioStream_getSamplesPerFrame(mStream);
+    }
+
+    /**
+     * Open a stream
+     */
+    aaudio_result_t open(AAudioStream_dataCallback dataProc, void *userContext) {
+        aaudio_result_t result = AAUDIO_OK;
+
+        // Use an AAudioStreamBuilder to contain requested parameters.
+        result = AAudio_createStreamBuilder(&mBuilder);
+        if (result != AAUDIO_OK) return result;
+
+        AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
+        AAudioStreamBuilder_setDataCallback(mBuilder, dataProc, userContext);
+        AAudioStreamBuilder_setFormat(mBuilder, AAUDIO_FORMAT_PCM_FLOAT);
+        AAudioStreamBuilder_setFramesPerDataCallback(mBuilder, CALLBACK_SIZE_FRAMES);
+ //       AAudioStreamBuilder_setBufferCapacityInFrames(mBuilder, CALLBACK_SIZE_FRAMES * 4);
+
+        // Open an AAudioStream using the Builder.
+        result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
+        if (result != AAUDIO_OK) goto finish1;
+
+        printf("AAudioStream_getFramesPerBurst() = %d\n",
+               AAudioStream_getFramesPerBurst(mStream));
+        printf("AAudioStream_getBufferSizeInFrames() = %d\n",
+               AAudioStream_getBufferSizeInFrames(mStream));
+        printf("AAudioStream_getBufferCapacityInFrames() = %d\n",
+               AAudioStream_getBufferCapacityInFrames(mStream));
+        return result;
+
+     finish1:
+        AAudioStreamBuilder_delete(mBuilder);
+        mBuilder = nullptr;
+        return result;
+    }
+
+    aaudio_result_t close() {
+        if (mStream != nullptr) {
+            printf("call AAudioStream_close(%p)\n", mStream);  fflush(stdout);
+            AAudioStream_close(mStream);
+            mStream = nullptr;
+            AAudioStreamBuilder_delete(mBuilder);
+            mBuilder = nullptr;
+        }
+        return AAUDIO_OK;
+    }
+
+    // Write zero data to fill up the buffer and prevent underruns.
+    aaudio_result_t prime() {
+        int32_t samplesPerFrame = AAudioStream_getSamplesPerFrame(mStream);
+        const int numFrames = 32;
+        float zeros[numFrames * samplesPerFrame];
+        memset(zeros, 0, sizeof(zeros));
+        aaudio_result_t result = numFrames;
+        while (result == numFrames) {
+            result = AAudioStream_write(mStream, zeros, numFrames, 0);
+        }
+        return result;
+    }
+
+    // Start the stream. AAudio will start calling your callback function.
+    aaudio_result_t start() {
+        aaudio_result_t result = AAudioStream_requestStart(mStream);
+        if (result != AAUDIO_OK) {
+            fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d %s\n",
+                    result, AAudio_convertResultToText(result));
+        }
+        return result;
+    }
+
+    // Stop the stream. AAudio will stop calling your callback function.
+    aaudio_result_t stop() {
+        aaudio_result_t result = AAudioStream_requestStop(mStream);
+        if (result != AAUDIO_OK) {
+            fprintf(stderr, "ERROR - AAudioStream_requestStop() returned %d %s\n",
+                    result, AAudio_convertResultToText(result));
+        }
+        int32_t xRunCount = AAudioStream_getXRunCount(mStream);
+        printf("AAudioStream_getXRunCount %d\n", xRunCount);
+        return result;
+    }
+
+    AAudioStream *getStream() const {
+        return mStream;
+    }
+
+private:
+    AAudioStreamBuilder    *mBuilder = nullptr;
+    AAudioStream           *mStream = nullptr;
+    aaudio_sharing_mode_t   mRequestedSharingMode = SHARING_MODE;
+};
+
+// Application data that gets passed to the callback.
+#define MAX_FRAME_COUNT_RECORDS    256
+typedef struct SineThreadedData_s {
+    SineGenerator  sineOsc1;
+    SineGenerator  sineOsc2;
+    // TODO Remove these variables; they are only used for testing.
+    int32_t        numFrameCounts;
+    int32_t        frameCounts[MAX_FRAME_COUNT_RECORDS];
+    int            scheduler;
+    bool           schedulerChecked;
+} SineThreadedData_t;
+
+// Callback function that fills the audio output buffer.
+aaudio_data_callback_result_t MyDataCallbackProc(
+        AAudioStream *stream,
+        void *userData,
+        void *audioData,
+        int32_t numFrames
+        ) {
+
+    SineThreadedData_t *sineData = (SineThreadedData_t *) userData;
+
+    if (sineData->numFrameCounts < MAX_FRAME_COUNT_RECORDS) {
+        sineData->frameCounts[sineData->numFrameCounts++] = numFrames;
+    }
+
+    if (!sineData->schedulerChecked) {
+        sineData->scheduler = sched_getscheduler(gettid());
+        sineData->schedulerChecked = true;
+    }
+
+    int32_t samplesPerFrame = AAudioStream_getSamplesPerFrame(stream);
+    // This code only plays on the first one or two channels.
+    // TODO Support arbitrary number of channels.
+    switch (AAudioStream_getFormat(stream)) {
+        case AAUDIO_FORMAT_PCM_I16: {
+            int16_t *audioBuffer = (int16_t *) audioData;
+            // Render sine waves as shorts to first channel.
+            sineData->sineOsc1.render(&audioBuffer[0], samplesPerFrame, numFrames);
+            // Render sine waves to second channel if there is one.
+            if (samplesPerFrame > 1) {
+                sineData->sineOsc2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+            }
+        }
+        break;
+        case AAUDIO_FORMAT_PCM_FLOAT: {
+            float *audioBuffer = (float *) audioData;
+            // Render sine waves as floats to first channel.
+            sineData->sineOsc1.render(&audioBuffer[0], samplesPerFrame, numFrames);
+            // Render sine waves to second channel if there is one.
+            if (samplesPerFrame > 1) {
+                sineData->sineOsc2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+            }
+        }
+        break;
+        default:
+            return AAUDIO_CALLBACK_RESULT_STOP;
+    }
+
+    return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+int main(int argc, char **argv)
+{
+    (void)argc; // unused
+    SimpleAAudioPlayer player;
+    SineThreadedData_t myData;
+    aaudio_result_t result;
+
+    // Make printf print immediately so that debug info is not stuck
+    // in a buffer if we hang or crash.
+    setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+    printf("%s - Play a sine sweep using an AAudio callback\n", argv[0]);
+
+    player.setSharingMode(SHARING_MODE);
+
+    myData.numFrameCounts = 0;
+    myData.schedulerChecked = false;
+
+    result = player.open(MyDataCallbackProc, &myData);
+    if (result != AAUDIO_OK) {
+        fprintf(stderr, "ERROR -  player.open() returned %d\n", result);
+        goto error;
+    }
+    printf("player.getFramesPerSecond() = %d\n", player.getFramesPerSecond());
+    printf("player.getSamplesPerFrame() = %d\n", player.getSamplesPerFrame());
+    myData.sineOsc1.setup(440.0, 48000);
+    myData.sineOsc1.setSweep(300.0, 600.0, 5.0);
+    myData.sineOsc2.setup(660.0, 48000);
+    myData.sineOsc2.setSweep(350.0, 900.0, 7.0);
+
+#if 0
+    result = player.prime(); // FIXME crashes AudioTrack.cpp
+    if (result != AAUDIO_OK) {
+        fprintf(stderr, "ERROR - player.prime() returned %d\n", result);
+        goto error;
+    }
+#endif
+
+    result = player.start();
+    if (result != AAUDIO_OK) {
+        fprintf(stderr, "ERROR - player.start() returned %d\n", result);
+        goto error;
+    }
+
+    printf("Sleep for %d seconds while audio plays in a callback thread.\n", NUM_SECONDS);
+    for (int second = 0; second < NUM_SECONDS; second++)
+    {
+        const struct timespec request = { .tv_sec = 1, .tv_nsec = 0 };
+        (void) clock_nanosleep(CLOCK_MONOTONIC, 0 /*flags*/, &request, NULL /*remain*/);
+
+        aaudio_stream_state_t state;
+        result = AAudioStream_waitForStateChange(player.getStream(),
+                                                 AAUDIO_STREAM_STATE_CLOSED,
+                                                 &state,
+                                                 0);
+        if (result != AAUDIO_OK) {
+            fprintf(stderr, "ERROR - AAudioStream_waitForStateChange() returned %d\n", result);
+            goto error;
+        }
+        if (state != AAUDIO_STREAM_STATE_STARTING && state != AAUDIO_STREAM_STATE_STARTED) {
+            printf("Stream state is %d %s!\n", state, AAudio_convertStreamStateToText(state));
+            break;
+        }
+    }
+    printf("Woke up now.\n");
+
+    result = player.stop();
+    if (result != AAUDIO_OK) {
+        goto error;
+    }
+    result = player.close();
+    if (result != AAUDIO_OK) {
+        goto error;
+    }
+
+    // Report data gathered in the callback.
+    for (int i = 0; i < myData.numFrameCounts; i++) {
+        printf("numFrames[%4d] = %4d\n", i, myData.frameCounts[i]);
+    }
+    if (myData.schedulerChecked) {
+        printf("scheduler = 0x%08x, SCHED_FIFO = 0x%08X\n",
+               myData.scheduler,
+               SCHED_FIFO);
+    }
+
+    printf("SUCCESS\n");
+    return EXIT_SUCCESS;
+error:
+    player.close();
+    printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+    return EXIT_FAILURE;
+}
+
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp b/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp
index 40e5016..9bc5886 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp
@@ -16,24 +16,24 @@
 
 // Play sine waves using an AAudio background thread.
 
-#include <assert.h>
+//#include <assert.h>
+#include <atomic>
 #include <unistd.h>
 #include <stdlib.h>
 #include <stdio.h>
 #include <math.h>
 #include <time.h>
-#include <aaudio/AAudioDefinitions.h>
 #include <aaudio/AAudio.h>
 #include "SineGenerator.h"
 
-#define NUM_SECONDS           10
+#define NUM_SECONDS           5
 #define NANOS_PER_MICROSECOND ((int64_t)1000)
 #define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
 #define MILLIS_PER_SECOND     1000
 #define NANOS_PER_SECOND      (NANOS_PER_MILLISECOND * MILLIS_PER_SECOND)
 
-//#define SHARING_MODE  AAUDIO_SHARING_MODE_EXCLUSIVE
-#define SHARING_MODE  AAUDIO_SHARING_MODE_SHARED
+#define SHARING_MODE  AAUDIO_SHARING_MODE_EXCLUSIVE
+//#define SHARING_MODE  AAUDIO_SHARING_MODE_SHARED
 
 // Prototype for a callback.
 typedef int audio_callback_proc_t(float *outputBuffer,
@@ -42,6 +42,16 @@
 
 static void *SimpleAAudioPlayerThreadProc(void *arg);
 
+// TODO merge into common code
+static int64_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
+    struct timespec time;
+    int result = clock_gettime(clockId, &time);
+    if (result < 0) {
+        return -errno; // TODO standardize return value
+    }
+    return (time.tv_sec * NANOS_PER_SECOND) + time.tv_nsec;
+}
+
 /**
  * Simple wrapper for AAudio that opens a default stream and then calls
  * a callback function to fill the output buffers.
@@ -49,7 +59,7 @@
 class SimpleAAudioPlayer {
 public:
     SimpleAAudioPlayer() {}
-    virtual ~SimpleAAudioPlayer() {
+    ~SimpleAAudioPlayer() {
         close();
     };
 
@@ -80,21 +90,25 @@
         if (result != AAUDIO_OK) return result;
 
         AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
+        AAudioStreamBuilder_setSampleRate(mBuilder, 48000);
 
         // Open an AAudioStream using the Builder.
         result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
-        if (result != AAUDIO_OK) goto finish1;
+        if (result != AAUDIO_OK) goto error;
+
+        printf("Requested sharing mode = %d\n", mRequestedSharingMode);
+        printf("Actual    sharing mode = %d\n", AAudioStream_getSharingMode(mStream));
 
         // Check to see what kind of stream we actually got.
         mFramesPerSecond = AAudioStream_getSampleRate(mStream);
-        printf("open() mFramesPerSecond = %d\n", mFramesPerSecond);
+        printf("Actual    framesPerSecond = %d\n", mFramesPerSecond);
 
         mSamplesPerFrame = AAudioStream_getSamplesPerFrame(mStream);
-        printf("open() mSamplesPerFrame = %d\n", mSamplesPerFrame);
+        printf("Actual    samplesPerFrame = %d\n", mSamplesPerFrame);
 
         {
             int32_t bufferCapacity = AAudioStream_getBufferCapacityInFrames(mStream);
-            printf("open() got bufferCapacity = %d\n", bufferCapacity);
+            printf("Actual    bufferCapacity = %d\n", bufferCapacity);
         }
 
         // This is the number of frames that are read in one chunk by a DMA controller
@@ -105,9 +119,10 @@
         while (mFramesPerBurst < 48) {
             mFramesPerBurst *= 2;
         }
-        printf("DataFormat: final framesPerBurst = %d\n",mFramesPerBurst);
+        printf("Actual    framesPerBurst = %d\n",mFramesPerBurst);
 
         mDataFormat = AAudioStream_getFormat(mStream);
+        printf("Actual    dataFormat = %d\n", mDataFormat);
 
         // Allocate a buffer for the audio data.
         mOutputBuffer = new float[mFramesPerBurst * mSamplesPerFrame];
@@ -118,6 +133,7 @@
 
         // If needed allocate a buffer for converting float to int16_t.
         if (mDataFormat == AAUDIO_FORMAT_PCM_I16) {
+            printf("Allocate data conversion buffer for float=>pcm16\n");
             mConversionBuffer = new int16_t[mFramesPerBurst * mSamplesPerFrame];
             if (mConversionBuffer == nullptr) {
                 fprintf(stderr, "ERROR - could not allocate conversion buffer\n");
@@ -126,7 +142,7 @@
         }
         return result;
 
-     finish1:
+    error:
         AAudioStreamBuilder_delete(mBuilder);
         mBuilder = nullptr;
         return result;
@@ -150,7 +166,7 @@
 
     // Start a thread that will call the callback proc.
     aaudio_result_t start() {
-        mEnabled = true;
+        mEnabled.store(true);
         int64_t nanosPerBurst = mFramesPerBurst * NANOS_PER_SECOND
                                            / mFramesPerSecond;
         return AAudioStream_createThread(mStream, nanosPerBurst,
@@ -160,56 +176,106 @@
 
     // Tell the thread to stop.
     aaudio_result_t stop() {
-        mEnabled = false;
+        mEnabled.store(false);
         return AAudioStream_joinThread(mStream, nullptr, 2 * NANOS_PER_SECOND);
     }
 
-    aaudio_result_t callbackLoop() {
-        int32_t framesWritten = 0;
-        int32_t xRunCount = 0;
-        aaudio_result_t result = AAUDIO_OK;
+    bool isEnabled() const {
+        return mEnabled.load();
+    }
 
-        result = AAudioStream_requestStart(mStream);
-        if (result != AAUDIO_OK) {
-            fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d\n", result);
-            return result;
-        }
+    aaudio_result_t callbackLoop() {
+        aaudio_result_t result = 0;
+        int64_t framesWritten = 0;
+        int32_t xRunCount = 0;
+        bool    started = false;
+        int64_t framesInBuffer =
+                AAudioStream_getFramesWritten(mStream) -
+                AAudioStream_getFramesRead(mStream);
+        int64_t framesAvailable =
+                AAudioStream_getBufferSizeInFrames(mStream) - framesInBuffer;
+
+        int64_t startTime = 0;
+        int64_t startPosition = 0;
+        int32_t loopCount = 0;
 
         // Give up after several burst periods have passed.
         const int burstsPerTimeout = 8;
-        int64_t nanosPerTimeout =
-                        burstsPerTimeout * mFramesPerBurst * NANOS_PER_SECOND
-                        / mFramesPerSecond;
+        int64_t nanosPerTimeout = 0;
+        int64_t runningNanosPerTimeout = 500 * NANOS_PER_MILLISECOND;
 
-        while (mEnabled && result >= 0) {
+        while (isEnabled() && result >= 0) {
             // Call application's callback function to fill the buffer.
             if (mCallbackProc(mOutputBuffer, mFramesPerBurst, mUserContext)) {
-                mEnabled = false;
+                mEnabled.store(false);
             }
+
             // if needed, convert from float to int16_t PCM
+            //printf("app callbackLoop writing %d frames, state = %s\n", mFramesPerBurst,
+            //       AAudio_convertStreamStateToText(AAudioStream_getState(mStream)));
             if (mConversionBuffer != nullptr) {
                 int32_t numSamples = mFramesPerBurst * mSamplesPerFrame;
                 for (int i = 0; i < numSamples; i++) {
                     mConversionBuffer[i] = (int16_t)(32767.0 * mOutputBuffer[i]);
                 }
                 // Write the application data to stream.
-                result = AAudioStream_write(mStream, mConversionBuffer, mFramesPerBurst, nanosPerTimeout);
+                result = AAudioStream_write(mStream, mConversionBuffer,
+                                            mFramesPerBurst, nanosPerTimeout);
             } else {
                 // Write the application data to stream.
-                result = AAudioStream_write(mStream, mOutputBuffer, mFramesPerBurst, nanosPerTimeout);
+                result = AAudioStream_write(mStream, mOutputBuffer,
+                                            mFramesPerBurst, nanosPerTimeout);
             }
-            framesWritten += result;
+
             if (result < 0) {
-                fprintf(stderr, "ERROR - AAudioStream_write() returned %zd\n", result);
+                fprintf(stderr, "ERROR - AAudioStream_write() returned %d %s\n", result,
+                        AAudio_convertResultToText(result));
+                break;
+            } else if (started && result != mFramesPerBurst) {
+                fprintf(stderr, "ERROR - AAudioStream_write() timed out! %d\n", result);
+                break;
+            } else {
+                framesWritten += result;
+            }
+
+            if (startTime > 0 && ((loopCount & 0x01FF) == 0)) {
+                double elapsedFrames = (double)(framesWritten - startPosition);
+                int64_t elapsedTime = getNanoseconds() - startTime;
+                double measuredRate = elapsedFrames * NANOS_PER_SECOND / elapsedTime;
+                printf("app callbackLoop write() measured rate %f\n", measuredRate);
+            }
+            loopCount++;
+
+            if (!started && framesWritten >= framesAvailable) {
+                // Start the stream once the buffer is fully primed.
+                result = AAudioStream_requestStart(mStream);
+                printf("app callbackLoop requestStart returned %d\n", result);
+                if (result != AAUDIO_OK) {
+                    fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d %s\n", result,
+                            AAudio_convertResultToText(result));
+                    mEnabled.store(false);
+                    return result;
+                }
+                started = true;
+                nanosPerTimeout = runningNanosPerTimeout;
+                startPosition = framesWritten;
+                startTime = getNanoseconds();
+            }
+
+            {
+                int32_t tempXRunCount = AAudioStream_getXRunCount(mStream);
+                if (tempXRunCount != xRunCount) {
+                    xRunCount = tempXRunCount;
+                    printf("AAudioStream_getXRunCount returns %d at frame %d\n",
+                           xRunCount, (int) framesWritten);
+                }
             }
         }
 
-        xRunCount = AAudioStream_getXRunCount(mStream);
-        printf("AAudioStream_getXRunCount %d\n", xRunCount);
-
         result = AAudioStream_requestStop(mStream);
         if (result != AAUDIO_OK) {
-            fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d\n", result);
+            fprintf(stderr, "ERROR - AAudioStream_requestStop() returned %d %s\n", result,
+                    AAudio_convertResultToText(result));
             return result;
         }
 
@@ -230,7 +296,7 @@
     int32_t               mFramesPerBurst = 0;
     aaudio_audio_format_t mDataFormat = AAUDIO_FORMAT_PCM_I16;
 
-    volatile bool         mEnabled = false; // used to request that callback exit its loop
+    std::atomic<bool>     mEnabled{false}; // used to request that callback exit its loop
 };
 
 static void *SimpleAAudioPlayerThreadProc(void *arg) {
@@ -289,19 +355,21 @@
     }
 
     printf("Sleep for %d seconds while audio plays in a background thread.\n", NUM_SECONDS);
-    {
+    for (int i = 0; i < NUM_SECONDS && player.isEnabled(); i++) {
         // FIXME sleep is not an NDK API
         // sleep(NUM_SECONDS);
-        const struct timespec request = { .tv_sec = NUM_SECONDS, .tv_nsec = 0 };
+        const struct timespec request = { .tv_sec = 1, .tv_nsec = 0 };
         (void) clock_nanosleep(CLOCK_MONOTONIC, 0 /*flags*/, &request, NULL /*remain*/);
     }
-    printf("Woke up now.\n");
+    printf("Woke up now!\n");
 
     result = player.stop();
     if (result != AAUDIO_OK) {
         fprintf(stderr, "ERROR -  player.stop() returned %d\n", result);
         goto error;
     }
+
+    printf("Player stopped.\n");
     result = player.close();
     if (result != AAUDIO_OK) {
         fprintf(stderr, "ERROR -  player.close() returned %d\n", result);
diff --git a/media/libaaudio/examples/write_sine/static/Android.mk b/media/libaaudio/examples/write_sine/static/Android.mk
index 139b70a..c02b91c 100644
--- a/media/libaaudio/examples/write_sine/static/Android.mk
+++ b/media/libaaudio/examples/write_sine/static/Android.mk
@@ -6,7 +6,7 @@
     $(call include-path-for, audio-utils) \
     frameworks/av/media/libaaudio/include
 
-# TODO reorganize folders to avoid using ../
+# NDK recommends using this kind of relative path instead of an absolute path.
 LOCAL_SRC_FILES:= ../src/write_sine.cpp
 
 LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
@@ -17,6 +17,8 @@
 LOCAL_MODULE := write_sine
 include $(BUILD_EXECUTABLE)
 
+
+
 include $(CLEAR_VARS)
 LOCAL_MODULE_TAGS := tests
 LOCAL_C_INCLUDES := \
@@ -27,8 +29,26 @@
 
 LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
                           libbinder libcutils libutils \
-                          libaudioclient liblog libtinyalsa
+                          libaudioclient liblog
 LOCAL_STATIC_LIBRARIES := libaaudio
 
 LOCAL_MODULE := write_sine_threaded
 include $(BUILD_EXECUTABLE)
+
+
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/libaaudio/include
+
+LOCAL_SRC_FILES:= ../src/write_sine_callback.cpp
+
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
+                          libbinder libcutils libutils \
+                          libaudioclient liblog
+LOCAL_STATIC_LIBRARIES := libaaudio
+
+LOCAL_MODULE := write_sine_callback
+include $(BUILD_EXECUTABLE)
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 551dcc9..25ad5f8 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -89,7 +89,8 @@
 * Request an audio device identified by an ID.
  * On Android, for example, the ID could be obtained from the Java AudioManager.
  *
- * By default, the primary device will be used.
+ * The default, if you do not call this function, is AAUDIO_DEVICE_UNSPECIFIED,
+ * in which case the primary device will be used.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param deviceId device identifier or AAUDIO_DEVICE_UNSPECIFIED
@@ -98,52 +99,71 @@
                                                      int32_t deviceId);
 
 /**
- * Request a sample rate in Hz.
+ * Request a sample rate in Hertz.
+ *
  * The stream may be opened with a different sample rate.
  * So the application should query for the actual rate after the stream is opened.
  *
  * Technically, this should be called the "frame rate" or "frames per second",
  * because it refers to the number of complete frames transferred per second.
- * But it is traditionally called "sample rate". Se we use that term.
+ * But it is traditionally called "sample rate". So we use that term.
  *
- * Default is AAUDIO_UNSPECIFIED.
-
+ * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param sampleRate frames per second. Common rates include 44100 and 48000 Hz.
  */
 AAUDIO_API void AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder* builder,
                                                        int32_t sampleRate);
 
 /**
  * Request a number of samples per frame.
+ *
  * The stream may be opened with a different value.
  * So the application should query for the actual value after the stream is opened.
  *
- * Default is AAUDIO_UNSPECIFIED.
+ * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
  *
  * Note, this quantity is sometimes referred to as "channel count".
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param samplesPerFrame Number of samples in one frame, i.e. numChannels.
  */
 AAUDIO_API void AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder* builder,
                                                    int32_t samplesPerFrame);
 
 /**
  * Request a sample data format, for example AAUDIO_FORMAT_PCM_I16.
- * The application should query for the actual format after the stream is opened.
+ *
+ * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
+ *
+ * The stream may be opened with a different value.
+ * So the application should query for the actual value after the stream is opened.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param format Most common formats are AAUDIO_FORMAT_PCM_FLOAT and AAUDIO_FORMAT_PCM_I16.
  */
 AAUDIO_API void AAudioStreamBuilder_setFormat(AAudioStreamBuilder* builder,
                                                    aaudio_audio_format_t format);
 
 /**
  * Request a mode for sharing the device.
+ *
+ * The default, if you do not call this function, is AAUDIO_SHARING_MODE_SHARED.
+ *
  * The requested sharing mode may not be available.
- * So the application should query for the actual mode after the stream is opened.
+ * The application can query for the actual mode after the stream is opened.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
- * @param sharingMode AAUDIO_SHARING_MODE_LEGACY or AAUDIO_SHARING_MODE_EXCLUSIVE
+ * @param sharingMode AAUDIO_SHARING_MODE_SHARED or AAUDIO_SHARING_MODE_EXCLUSIVE
  */
 AAUDIO_API void AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder* builder,
                                                         aaudio_sharing_mode_t sharingMode);
 
 /**
- * Request the direction for a stream. The default is AAUDIO_DIRECTION_OUTPUT.
+ * Request the direction for a stream.
+ *
+ * The default, if you do not call this function, is AAUDIO_DIRECTION_OUTPUT.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param direction AAUDIO_DIRECTION_OUTPUT or AAUDIO_DIRECTION_INPUT
@@ -152,16 +172,162 @@
                                                             aaudio_direction_t direction);
 
 /**
- * Set the requested maximum buffer capacity in frames.
+ * Set the requested buffer capacity in frames.
  * The final AAudioStream capacity may differ, but will probably be at least this big.
  *
- * Default is AAUDIO_UNSPECIFIED.
+ * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
- * @param frames the desired buffer capacity in frames or AAUDIO_UNSPECIFIED
+ * @param numFrames the desired buffer capacity in frames or AAUDIO_UNSPECIFIED
  */
 AAUDIO_API void AAudioStreamBuilder_setBufferCapacityInFrames(AAudioStreamBuilder* builder,
-                                                                 int32_t frames);
+                                                                 int32_t numFrames);
+/**
+ * Return one of these values from the data callback function.
+ */
+enum {
+
+    /**
+     * Continue calling the callback.
+     */
+    AAUDIO_CALLBACK_RESULT_CONTINUE = 0,
+
+    /**
+     * Stop calling the callback.
+     *
+     * The application will still need to call AAudioStream_requestPause()
+     * or AAudioStream_requestStop().
+     */
+    AAUDIO_CALLBACK_RESULT_STOP,
+
+};
+typedef int32_t aaudio_data_callback_result_t;
+
+/**
+ * Prototype for the data function that is passed to AAudioStreamBuilder_setDataCallback().
+ *
+ * For an output stream, this function should render and write numFrames of data
+ * in the stream's current data format to the audioData buffer.
+ *
+ * For an input stream, this function should read and process numFrames of data
+ * from the audioData buffer.
+ *
+ * Note that this callback function should be considered a "real-time" function.
+ * It must not do anything that could cause an unbounded delay because that can cause the
+ * audio to glitch or pop.
+ *
+ * These are things the function should NOT do:
+ * <ul>
+ * <li>allocate memory using, for example, malloc() or new</li>
+ * <li>any file operations such as opening, closing, reading or writing</li>
+ * <li>any network operations such as streaming</li>
+ * <li>use any mutexes or other synchronization primitives</li>
+ * <li>sleep</li>
+ * </ul>
+ *
+ * If you need to move data, e.g. MIDI commands, in or out of the callback function then
+ * we recommend the use of non-blocking techniques such as an atomic FIFO.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @param userData the same address that was passed to AAudioStreamBuilder_setDataCallback()
+ * @param audioData a pointer to the audio data
+ * @param numFrames the number of frames to be processed
+ * @return AAUDIO_CALLBACK_RESULT_*
+ */
+typedef aaudio_data_callback_result_t (*AAudioStream_dataCallback)(
+        AAudioStream *stream,
+        void *userData,
+        void *audioData,
+        int32_t numFrames);
+
+/**
+ * Request that AAudio call this function when the stream is running.
+ *
+ * Note that when using this callback, the audio data will be passed in or out
+ * of the function as an argument.
+ * So you cannot call AAudioStream_write() or AAudioStream_read() on the same stream
+ * that has an active data callback.
+ *
+ * The callback function will start being called after AAudioStream_requestStart() is called.
+ * It will stop being called after AAudioStream_requestPause() or
+ * AAudioStream_requestStop() is called.
+ *
+ * This callback function will be called on a real-time thread owned by AAudio. See
+ * {@link AAudioStream_dataCallback} for more information.
+ *
+ * Note that the AAudio callbacks will never be called simultaneously from multiple threads.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param callback pointer to a function that will process audio data.
+ * @param userData pointer to an application data structure that will be passed
+ *          to the callback functions.
+ */
+AAUDIO_API void AAudioStreamBuilder_setDataCallback(AAudioStreamBuilder* builder,
+                                                 AAudioStream_dataCallback callback,
+                                                 void *userData);
+
+/**
+ * Set the requested data callback buffer size in frames.
+ * See {@link AAudioStream_dataCallback}.
+ *
+ * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
+ *
+ * For the lowest possible latency, do not call this function. AAudio will then
+ * call the dataProc callback function with whatever size is optimal.
+ * That size may vary from one callback to another.
+ *
+ * Only use this function if the application requires a specific number of frames for processing.
+ * The application might, for example, be using an FFT that requires
+ * a specific power-of-two sized buffer.
+ *
+ * AAudio may need to add additional buffering in order to adapt between the internal
+ * buffer size and the requested buffer size.
+ *
+ * If you do call this function then the requested size should be less than
+ * half the buffer capacity, to allow double buffering.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param numFrames the desired buffer size in frames or AAUDIO_UNSPECIFIED
+ */
+AAUDIO_API void AAudioStreamBuilder_setFramesPerDataCallback(AAudioStreamBuilder* builder,
+                                                             int32_t numFrames);
+
+/**
+ * Prototype for the callback function that is passed to
+ * AAudioStreamBuilder_setErrorCallback().
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @param userData the same address that was passed to AAudioStreamBuilder_setErrorCallback()
+ * @param error an AAUDIO_ERROR_* value.
+ */
+typedef void (*AAudioStream_errorCallback)(
+        AAudioStream *stream,
+        void *userData,
+        aaudio_result_t error);
+
+/**
+ * Request that AAudio call this function if any error occurs on a callback thread.
+ *
+ * It will be called, for example, if a headset or a USB device is unplugged causing the stream's
+ * device to be unavailable.
+ * In response, this function could signal or launch another thread to reopen a
+ * stream on another device. Do not reopen the stream in this callback.
+ *
+ * This will not be called because of actions by the application, such as stopping
+ * or closing a stream.
+ *
+ * Another possible cause of error would be a timeout or an unanticipated internal error.
+ *
+ * Note that the AAudio callbacks will never be called simultaneously from multiple threads.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param callback pointer to a function that will be called if an error occurs.
+ * @param userData pointer to an application data structure that will be passed
+ *          to the callback functions.
+ */
+AAUDIO_API void AAudioStreamBuilder_setErrorCallback(AAudioStreamBuilder* builder,
+                                                AAudioStream_errorCallback callback,
+                                                void *userData);
 
 /**
  * Open a stream based on the options in the StreamBuilder.
@@ -333,9 +499,14 @@
 // High priority audio threads
 // ============================================================
 
+/**
+ * @deprecated Use AAudioStreamBuilder_setDataCallback()
+ */
 typedef void *(*aaudio_audio_thread_proc_t)(void *);
 
 /**
+ * @deprecated Use AAudioStreamBuilder_setDataCallback()
+ *
  * Create a thread associated with a stream. The thread has special properties for
  * low latency audio performance. This thread can be used to implement a callback API.
  *
@@ -360,6 +531,8 @@
                                      void *arg);
 
 /**
+ * @deprecated Use AAudioStreamBuilder_setDataCallback()
+ *
  * Wait until the thread exits or an error occurs.
  *
  * @param stream A stream created using AAudioStreamBuilder_openStream().
@@ -388,11 +561,11 @@
  * Call AAudioStream_getBufferSizeInFrames() to see what the actual final size is.
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
- * @param requestedFrames requested number of frames that can be filled without blocking
+ * @param numFrames requested number of frames that can be filled without blocking
  * @return actual buffer size in frames or a negative error
  */
 AAUDIO_API aaudio_result_t AAudioStream_setBufferSizeInFrames(AAudioStream* stream,
-                                                      int32_t requestedFrames);
+                                                      int32_t numFrames);
 
 /**
  * Query the maximum number of frames that can be filled without blocking.
@@ -421,11 +594,32 @@
  * Query maximum buffer capacity in frames.
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
- * @return  the buffer capacity in frames
+ * @return  buffer capacity in frames
  */
 AAUDIO_API int32_t AAudioStream_getBufferCapacityInFrames(AAudioStream* stream);
 
 /**
+ * Query the size of the buffer that will be passed to the dataProc callback
+ * in the numFrames parameter.
+ *
+ * This call can be used if the application needs to know the value of numFrames before
+ * the stream is started. This is not normally necessary.
+ *
+ * If a specific size was requested by calling AAudioStreamBuilder_setFramesPerDataCallback()
+ * then this will be the same size.
+ *
+ * If AAudioStreamBuilder_setFramesPerDataCallback() was not called then this will
+ * return the size chosen by AAudio, or AAUDIO_UNSPECIFIED.
+ *
+ * AAUDIO_UNSPECIFIED indicates that the callback buffer size for this stream
+ * may vary from one dataProc callback to the next.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return callback buffer size in frames or AAUDIO_UNSPECIFIED
+ */
+AAUDIO_API int32_t AAudioStream_getFramesPerDataCallback(AAudioStream* stream);
+
+/**
  * An XRun is an Underrun or an Overrun.
  * During playing, an underrun will occur if the stream is not written in time
  * and the system runs out of valid data.
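
Taken together, the additions above give a complete callback-driven path through the
builder. A hedged sketch of how an application might wire it up; the
AAudio_createStreamBuilder() and AAudioStreamBuilder_openStream() signatures are assumed
from the rest of AAudio.h and are not part of the hunks shown here:

#include <aaudio/AAudio.h>

// Render callback: must be real-time safe (no malloc, files, network, locks or sleep).
static aaudio_data_callback_result_t myDataCallback(AAudioStream *stream,
                                                    void *userData,
                                                    void *audioData,
                                                    int32_t numFrames) {
    (void) stream; (void) userData;
    float *out = (float *) audioData;
    const int32_t samplesPerFrame = 2;          // sketch assumes stereo float PCM
    for (int32_t i = 0; i < numFrames * samplesPerFrame; i++) {
        out[i] = 0.0f;                          // a real app would synthesize audio here
    }
    return AAUDIO_CALLBACK_RESULT_CONTINUE;
}

// Error callback: record the problem; do not reopen the stream from this thread.
static void myErrorCallback(AAudioStream *stream, void *userData, aaudio_result_t error) {
    (void) stream; (void) userData; (void) error;
}

static aaudio_result_t openWithCallbacks(AAudioStream **streamOut) {
    AAudioStreamBuilder *builder = nullptr;
    aaudio_result_t result = AAudio_createStreamBuilder(&builder);   // assumed signature
    if (result != AAUDIO_OK) return result;
    AAudioStreamBuilder_setFormat(builder, AAUDIO_FORMAT_PCM_FLOAT);
    AAudioStreamBuilder_setSamplesPerFrame(builder, 2);
    AAudioStreamBuilder_setDataCallback(builder, myDataCallback, nullptr);
    AAudioStreamBuilder_setErrorCallback(builder, myErrorCallback, nullptr);
    // Leave frames-per-callback unspecified for the lowest possible latency (see above).
    return AAudioStreamBuilder_openStream(builder, streamOut);       // assumed signature
}
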
diff --git a/media/libaaudio/include/aaudio/AAudioDefinitions.h b/media/libaaudio/include/aaudio/AAudioDefinitions.h
index fbd284c..57e3dbd 100644
--- a/media/libaaudio/include/aaudio/AAudioDefinitions.h
+++ b/media/libaaudio/include/aaudio/AAudioDefinitions.h
@@ -39,7 +39,7 @@
  * and would accept whatever it was given.
  */
 #define AAUDIO_UNSPECIFIED           0
-#define AAUDIO_DEVICE_UNSPECIFIED    ((int32_t) -1)
+#define AAUDIO_DEVICE_UNSPECIFIED    0
 
 enum {
     AAUDIO_DIRECTION_OUTPUT,
@@ -82,9 +82,10 @@
     AAUDIO_ERROR_NULL,
     AAUDIO_ERROR_TIMEOUT,
     AAUDIO_ERROR_WOULD_BLOCK,
-    AAUDIO_ERROR_INVALID_ORDER,
+    AAUDIO_ERROR_INVALID_FORMAT,
     AAUDIO_ERROR_OUT_OF_RANGE,
-    AAUDIO_ERROR_NO_SERVICE
+    AAUDIO_ERROR_NO_SERVICE,
+    AAUDIO_ERROR_INVALID_RATE
 };
 typedef int32_t  aaudio_result_t;
 
@@ -103,9 +104,11 @@
     AAUDIO_STREAM_STATE_STOPPED,
     AAUDIO_STREAM_STATE_CLOSING,
     AAUDIO_STREAM_STATE_CLOSED,
+    AAUDIO_STREAM_STATE_DISCONNECTED
 };
 typedef int32_t aaudio_stream_state_t;
 
+
 enum {
     /**
      * This will be the only stream using a particular source or sink.
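
The header now defines AAUDIO_STREAM_STATE_DISCONNECTED alongside the new error codes. A
hedged sketch of checking for it from a control thread; AAudioStream_getState() appears
only in a commented-out trace earlier in this change, so its exact signature is assumed:

#include <stdio.h>
#include <aaudio/AAudio.h>

// Sketch only: poll the stream state from a non-callback thread and bail out on disconnect.
static bool isStreamUsable(AAudioStream *stream) {
    aaudio_stream_state_t state = AAudioStream_getState(stream);   // assumed signature
    if (state == AAUDIO_STREAM_STATE_DISCONNECTED) {
        fprintf(stderr, "stream state: %s - open a new stream on another device\n",
                AAudio_convertStreamStateToText(state));
        return false;
    }
    return true;
}
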
diff --git a/media/libaaudio/libaaudio.map.txt b/media/libaaudio/libaaudio.map.txt
index a9e9109..f22fdfe 100644
--- a/media/libaaudio/libaaudio.map.txt
+++ b/media/libaaudio/libaaudio.map.txt
@@ -4,6 +4,9 @@
     AAudio_convertStreamStateToText;
     AAudio_createStreamBuilder;
     AAudioStreamBuilder_setDeviceId;
+    AAudioStreamBuilder_setDataCallback;
+    AAudioStreamBuilder_setErrorCallback;
+    AAudioStreamBuilder_setFramesPerDataCallback;
     AAudioStreamBuilder_setSampleRate;
     AAudioStreamBuilder_setSamplesPerFrame;
     AAudioStreamBuilder_setFormat;
@@ -25,6 +28,7 @@
     AAudioStream_joinThread;
     AAudioStream_setBufferSizeInFrames;
     AAudioStream_getBufferSizeInFrames;
+    AAudioStream_getFramesPerDataCallback;
     AAudioStream_getFramesPerBurst;
     AAudioStream_getBufferCapacityInFrames;
     AAudioStream_getXRunCount;
diff --git a/media/libaaudio/src/Android.mk b/media/libaaudio/src/Android.mk
index a016b49..b5bb75f 100644
--- a/media/libaaudio/src/Android.mk
+++ b/media/libaaudio/src/Android.mk
@@ -26,26 +26,32 @@
     $(LOCAL_PATH)/legacy \
     $(LOCAL_PATH)/utility
 
+# If you add a file here then also add it below in the SHARED target
 LOCAL_SRC_FILES = \
     core/AudioStream.cpp \
     core/AudioStreamBuilder.cpp \
     core/AAudioAudio.cpp \
+    legacy/AudioStreamLegacy.cpp \
     legacy/AudioStreamRecord.cpp \
     legacy/AudioStreamTrack.cpp \
     utility/HandleTracker.cpp \
     utility/AAudioUtilities.cpp \
+    utility/FixedBlockAdapter.cpp \
+    utility/FixedBlockReader.cpp \
+    utility/FixedBlockWriter.cpp \
     fifo/FifoBuffer.cpp \
     fifo/FifoControllerBase.cpp \
     client/AudioEndpoint.cpp \
     client/AudioStreamInternal.cpp \
     client/IsochronousClockModel.cpp \
-    binding/SharedMemoryParcelable.cpp \
-    binding/SharedRegionParcelable.cpp \
-    binding/RingBufferParcelable.cpp \
     binding/AudioEndpointParcelable.cpp \
+    binding/AAudioBinderClient.cpp \
     binding/AAudioStreamRequest.cpp \
     binding/AAudioStreamConfiguration.cpp \
-    binding/IAAudioService.cpp
+    binding/IAAudioService.cpp \
+    binding/RingBufferParcelable.cpp \
+    binding/SharedMemoryParcelable.cpp \
+    binding/SharedRegionParcelable.cpp
 
 LOCAL_CFLAGS += -Wno-unused-parameter -Wall -Werror
 
@@ -79,22 +85,27 @@
 LOCAL_SRC_FILES = core/AudioStream.cpp \
     core/AudioStreamBuilder.cpp \
     core/AAudioAudio.cpp \
+    legacy/AudioStreamLegacy.cpp \
     legacy/AudioStreamRecord.cpp \
     legacy/AudioStreamTrack.cpp \
     utility/HandleTracker.cpp \
     utility/AAudioUtilities.cpp \
+    utility/FixedBlockAdapter.cpp \
+    utility/FixedBlockReader.cpp \
+    utility/FixedBlockWriter.cpp \
     fifo/FifoBuffer.cpp \
     fifo/FifoControllerBase.cpp \
     client/AudioEndpoint.cpp \
     client/AudioStreamInternal.cpp \
     client/IsochronousClockModel.cpp \
-    binding/SharedMemoryParcelable.cpp \
-    binding/SharedRegionParcelable.cpp \
-    binding/RingBufferParcelable.cpp \
     binding/AudioEndpointParcelable.cpp \
+    binding/AAudioBinderClient.cpp \
     binding/AAudioStreamRequest.cpp \
     binding/AAudioStreamConfiguration.cpp \
-    binding/IAAudioService.cpp
+    binding/IAAudioService.cpp \
+    binding/RingBufferParcelable.cpp \
+    binding/SharedMemoryParcelable.cpp \
+    binding/SharedRegionParcelable.cpp
 
 LOCAL_CFLAGS += -Wno-unused-parameter -Wall -Werror
 
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.cpp b/media/libaaudio/src/binding/AAudioBinderClient.cpp
new file mode 100644
index 0000000..8315c40
--- /dev/null
+++ b/media/libaaudio/src/binding/AAudioBinderClient.cpp
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <binder/IServiceManager.h>
+#include <utils/Mutex.h>
+#include <utils/RefBase.h>
+
+#include <aaudio/AAudio.h>
+
+#include "AudioEndpointParcelable.h"
+#include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioStreamConfiguration.h"
+#include "binding/IAAudioService.h"
+#include "binding/AAudioServiceMessage.h"
+
+#include "AAudioBinderClient.h"
+#include "AAudioServiceInterface.h"
+
+using android::String16;
+using android::IServiceManager;
+using android::defaultServiceManager;
+using android::interface_cast;
+using android::IAAudioService;
+using android::Mutex;
+using android::sp;
+
+using namespace aaudio;
+
+static android::Mutex gServiceLock;
+static sp<IAAudioService>  gAAudioService;
+
+// TODO Share code with other service clients.
+// Helper function to get access to the "AAudioService" service.
+// This code was modeled after frameworks/av/media/libaudioclient/AudioSystem.cpp
+static const sp<IAAudioService> getAAudioService() {
+    sp<IBinder> binder;
+    Mutex::Autolock _l(gServiceLock);
+    if (gAAudioService == 0) {
+        sp<IServiceManager> sm = defaultServiceManager();
+        // Try several times to get the service.
+        int retries = 4;
+        do {
+            binder = sm->getService(String16(AAUDIO_SERVICE_NAME)); // This will wait a while.
+            if (binder != 0) {
+                break;
+            }
+        } while (retries-- > 0);
+
+        if (binder != 0) {
+            // TODO Add linkToDeath() like in frameworks/av/media/libaudioclient/AudioSystem.cpp
+            // TODO Create a DeathRecipient that disconnects all active streams.
+            gAAudioService = interface_cast<IAAudioService>(binder);
+        } else {
+            ALOGE("AudioStreamInternal could not get %s", AAUDIO_SERVICE_NAME);
+        }
+    }
+    return gAAudioService;
+}
+
+
+AAudioBinderClient::AAudioBinderClient()
+        : AAudioServiceInterface() {}
+
+AAudioBinderClient::~AAudioBinderClient() {}
+
+/**
+* @param request info needed to create the stream
+* @param configuration contains information about the created stream
+* @return handle to the stream or a negative error
+*/
+aaudio_handle_t AAudioBinderClient::openStream(const AAudioStreamRequest &request,
+                                               AAudioStreamConfiguration &configurationOutput) {
+
+    const sp<IAAudioService> &service = getAAudioService();
+    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+    return service->openStream(request, configurationOutput);
+}
+
+aaudio_result_t AAudioBinderClient::closeStream(aaudio_handle_t streamHandle) {
+
+    const sp<IAAudioService> &service = getAAudioService();
+    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+    return service->closeStream(streamHandle);
+}
+
+/* Get an immutable description of the in-memory queues
+* used to communicate with the underlying HAL or Service.
+*/
+aaudio_result_t AAudioBinderClient::getStreamDescription(aaudio_handle_t streamHandle,
+                                                         AudioEndpointParcelable &parcelable) {
+
+    const sp<IAAudioService> &service = getAAudioService();
+    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+    return service->getStreamDescription(streamHandle, parcelable);
+}
+
+/**
+* Start the flow of data.
+*/
+aaudio_result_t AAudioBinderClient::startStream(aaudio_handle_t streamHandle) {
+    const sp<IAAudioService> &service = getAAudioService();
+    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+    return service->startStream(streamHandle);
+}
+
+/**
+* Stop the flow of data such that start() can resume without loss of data.
+*/
+aaudio_result_t AAudioBinderClient::pauseStream(aaudio_handle_t streamHandle) {
+    const sp<IAAudioService> &service = getAAudioService();
+    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+    return service->pauseStream(streamHandle);
+}
+
+/**
+*  Discard any data held by the underlying HAL or Service.
+*/
+aaudio_result_t AAudioBinderClient::flushStream(aaudio_handle_t streamHandle) {
+    const sp<IAAudioService> &service = getAAudioService();
+    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+    return service->flushStream(streamHandle);
+}
+
+/**
+* Manage the specified thread as a low latency audio thread.
+*/
+aaudio_result_t AAudioBinderClient::registerAudioThread(aaudio_handle_t streamHandle,
+                                                        pid_t clientProcessId,
+                                                        pid_t clientThreadId,
+                                                        int64_t periodNanoseconds) {
+    const sp<IAAudioService> &service = getAAudioService();
+    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+    return service->registerAudioThread(streamHandle,
+                                        clientProcessId,
+                                        clientThreadId,
+                                        periodNanoseconds);
+}
+
+aaudio_result_t AAudioBinderClient::unregisterAudioThread(aaudio_handle_t streamHandle,
+                                                          pid_t clientProcessId,
+                                                          pid_t clientThreadId) {
+    const sp<IAAudioService> &service = getAAudioService();
+    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+    return service->unregisterAudioThread(streamHandle,
+                                          clientProcessId,
+                                          clientThreadId);
+}
+
+
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.h b/media/libaaudio/src/binding/AAudioBinderClient.h
new file mode 100644
index 0000000..5613d5b
--- /dev/null
+++ b/media/libaaudio/src/binding/AAudioBinderClient.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_BINDER_CLIENT_H
+#define AAUDIO_AAUDIO_BINDER_CLIENT_H
+
+#include <aaudio/AAudioDefinitions.h>
+#include "AAudioServiceDefinitions.h"
+#include "AAudioServiceInterface.h"
+#include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioStreamConfiguration.h"
+#include "binding/AudioEndpointParcelable.h"
+
+/**
+ * Implements the AAudioServiceInterface by talking to the actual service through Binder.
+ */
+
+namespace aaudio {
+
+class AAudioBinderClient : public AAudioServiceInterface {
+
+public:
+
+    AAudioBinderClient();
+
+    virtual ~AAudioBinderClient();
+
+    /**
+     * @param request info needed to create the stream
+     * @param configuration contains resulting information about the created stream
+     * @return handle to the stream or a negative error
+     */
+    aaudio_handle_t openStream(const AAudioStreamRequest &request,
+                               AAudioStreamConfiguration &configurationOutput) override;
+
+    aaudio_result_t closeStream(aaudio_handle_t streamHandle) override;
+
+    /* Get an immutable description of the in-memory queues
+    * used to communicate with the underlying HAL or Service.
+    */
+    aaudio_result_t getStreamDescription(aaudio_handle_t streamHandle,
+                                                 AudioEndpointParcelable &parcelable) override;
+
+    /**
+     * Start the flow of data.
+     * This is asynchronous. When complete, the service will send a STARTED event.
+     */
+    aaudio_result_t startStream(aaudio_handle_t streamHandle) override;
+
+    /**
+     * Stop the flow of data such that start() can resume without loss of data.
+     * This is asynchronous. When complete, the service will send a PAUSED event.
+     */
+    aaudio_result_t pauseStream(aaudio_handle_t streamHandle) override;
+
+    /**
+     *  Discard any data held by the underlying HAL or Service.
+     * This is asynchronous. When complete, the service will send a FLUSHED event.
+     */
+    aaudio_result_t flushStream(aaudio_handle_t streamHandle) override;
+
+    /**
+     * Manage the specified thread as a low latency audio thread.
+     * TODO Consider passing this information as part of the startStream() call.
+     */
+    aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
+                                                pid_t clientProcessId,
+                                                pid_t clientThreadId,
+                                                int64_t periodNanoseconds) override;
+
+    aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
+                                                  pid_t clientProcessId,
+                                                  pid_t clientThreadId) override;
+};
+
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_AAUDIO_BINDER_CLIENT_H
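
The new AAudioBinderClient hides the Binder plumbing behind AAudioServiceInterface. A
hedged usage sketch follows; setProcessId()/setDirection() appear in the
AAudioStreamRequest changes later in this patch, while configuration setters other than
setSharingMode() are assumed from AAudioStreamConfiguration.h:

#include <unistd.h>
#include "binding/AAudioBinderClient.h"

// Sketch only: open, start, pause and close a stream through the Binder client.
static aaudio_result_t exerciseBinderClient() {
    aaudio::AAudioBinderClient client;

    aaudio::AAudioStreamRequest request;
    request.setProcessId(getpid());
    request.setDirection(AAUDIO_DIRECTION_OUTPUT);
    request.getConfiguration().setSharingMode(AAUDIO_SHARING_MODE_SHARED);

    aaudio::AAudioStreamConfiguration configurationOutput;
    aaudio_handle_t handle = client.openStream(request, configurationOutput);
    if (handle < 0) {
        return handle;                       // a negative handle is an error code
    }
    aaudio_result_t result = client.startStream(handle);
    // ... exchange audio through the shared-memory queues described by
    //     client.getStreamDescription(handle, parcelable) ...
    client.pauseStream(handle);
    client.closeStream(handle);
    return result;
}
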
diff --git a/media/libaaudio/src/binding/AAudioServiceDefinitions.h b/media/libaaudio/src/binding/AAudioServiceDefinitions.h
index b58d170..0d5bae5 100644
--- a/media/libaaudio/src/binding/AAudioServiceDefinitions.h
+++ b/media/libaaudio/src/binding/AAudioServiceDefinitions.h
@@ -48,25 +48,6 @@
 
 #define AAUDIO_HANDLE_INVALID  ((aaudio_handle_t) -1)
 
-enum aaudio_commands_t {
-    OPEN_STREAM = IBinder::FIRST_CALL_TRANSACTION,
-    CLOSE_STREAM,
-    GET_STREAM_DESCRIPTION,
-    START_STREAM,
-    PAUSE_STREAM,
-    FLUSH_STREAM,
-    REGISTER_AUDIO_THREAD,
-    UNREGISTER_AUDIO_THREAD
-};
-
-// TODO Expand this to include all the open parameters.
-typedef struct AAudioServiceStreamInfo_s {
-    int32_t               deviceId;
-    int32_t               samplesPerFrame;  // number of channels
-    int32_t               sampleRate;
-    aaudio_audio_format_t audioFormat;
-} AAudioServiceStreamInfo;
-
 // This must be a fixed width so it can be in shared memory.
 enum RingbufferFlags : uint32_t {
     NONE = 0,
diff --git a/media/libaaudio/src/binding/AAudioServiceInterface.h b/media/libaaudio/src/binding/AAudioServiceInterface.h
new file mode 100644
index 0000000..62fd894
--- /dev/null
+++ b/media/libaaudio/src/binding/AAudioServiceInterface.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_BINDING_AAUDIO_SERVICE_INTERFACE_H
+#define AAUDIO_BINDING_AAUDIO_SERVICE_INTERFACE_H
+
+#include "binding/AAudioServiceDefinitions.h"
+#include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioStreamConfiguration.h"
+#include "binding/AudioEndpointParcelable.h"
+
+/**
+ * This has the same methods as IAAudioService but without the Binder features.
+ *
+ * It allows us to abstract the Binder interface and use an AudioStreamInternal
+ * both in the client and in the service.
+ */
+namespace aaudio {
+
+class AAudioServiceInterface {
+public:
+
+    AAudioServiceInterface() {}
+    virtual ~AAudioServiceInterface() = default;
+
+    /**
+     * @param request info needed to create the stream
+     * @param configuration contains information about the created stream
+     * @return handle to the stream or a negative error
+     */
+    virtual aaudio_handle_t openStream(const AAudioStreamRequest &request,
+                                       AAudioStreamConfiguration &configuration) = 0;
+
+    virtual aaudio_result_t closeStream(aaudio_handle_t streamHandle) = 0;
+
+    /* Get an immutable description of the in-memory queues
+    * used to communicate with the underlying HAL or Service.
+    */
+    virtual aaudio_result_t getStreamDescription(aaudio_handle_t streamHandle,
+                                                 AudioEndpointParcelable &parcelable) = 0;
+
+    /**
+     * Start the flow of data.
+     */
+    virtual aaudio_result_t startStream(aaudio_handle_t streamHandle) = 0;
+
+    /**
+     * Stop the flow of data such that start() can resume without loss of data.
+     */
+    virtual aaudio_result_t pauseStream(aaudio_handle_t streamHandle) = 0;
+
+    /**
+     *  Discard any data held by the underlying HAL or Service.
+     */
+    virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle) = 0;
+
+    /**
+     * Manage the specified thread as a low latency audio thread.
+     */
+    virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
+                                                pid_t clientProcessId,
+                                                pid_t clientThreadId,
+                                                int64_t periodNanoseconds) = 0;
+
+    virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
+                                                  pid_t clientProcessId,
+                                                  pid_t clientThreadId) = 0;
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_BINDING_AAUDIO_SERVICE_INTERFACE_H
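
Because AAudioServiceInterface is a pure abstract class, anything that speaks the protocol
can stand in for the real service. A hedged sketch of a do-nothing implementation (for
example, for unit tests) shows the shape a backend takes; it is not part of this change:

#include "binding/AAudioServiceInterface.h"

namespace aaudio {

// Sketch only: a stub backend that accepts every call without touching a real stream.
class FakeAAudioService : public AAudioServiceInterface {
public:
    aaudio_handle_t openStream(const AAudioStreamRequest &request,
                               AAudioStreamConfiguration &configuration) override {
        configuration = request.getConstantConfiguration();  // echo the request back
        return 1;                                             // pretend stream handle
    }
    aaudio_result_t closeStream(aaudio_handle_t) override { return AAUDIO_OK; }
    aaudio_result_t getStreamDescription(aaudio_handle_t,
                                         AudioEndpointParcelable &) override {
        return AAUDIO_OK;                                     // leaves the parcelable empty
    }
    aaudio_result_t startStream(aaudio_handle_t) override { return AAUDIO_OK; }
    aaudio_result_t pauseStream(aaudio_handle_t) override { return AAUDIO_OK; }
    aaudio_result_t flushStream(aaudio_handle_t) override { return AAUDIO_OK; }
    aaudio_result_t registerAudioThread(aaudio_handle_t, pid_t, pid_t,
                                        int64_t) override { return AAUDIO_OK; }
    aaudio_result_t unregisterAudioThread(aaudio_handle_t, pid_t,
                                          pid_t) override { return AAUDIO_OK; }
};

} /* namespace aaudio */
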
diff --git a/media/libaaudio/src/binding/AAudioServiceMessage.h b/media/libaaudio/src/binding/AAudioServiceMessage.h
index cc77d59..b74b6c2 100644
--- a/media/libaaudio/src/binding/AAudioServiceMessage.h
+++ b/media/libaaudio/src/binding/AAudioServiceMessage.h
@@ -25,10 +25,11 @@
 
 // TODO move this to an "include" folder for the service.
 
+// Used to send information about the HAL to the client.
 struct AAudioMessageTimestamp {
-    int64_t    position;
-    int64_t    deviceOffset; // add to client position to get device position
-    int64_t    timestamp;
+    int64_t position;     // number of frames transferred so far
+    int64_t deviceOffset; // add to client position to get device position
+    int64_t timestamp;    // time when that position was reached
 };
 
 typedef enum aaudio_service_event_e : uint32_t {
@@ -36,13 +37,14 @@
     AAUDIO_SERVICE_EVENT_PAUSED,
     AAUDIO_SERVICE_EVENT_FLUSHED,
     AAUDIO_SERVICE_EVENT_CLOSED,
-    AAUDIO_SERVICE_EVENT_DISCONNECTED
+    AAUDIO_SERVICE_EVENT_DISCONNECTED,
+    AAUDIO_SERVICE_EVENT_VOLUME
 } aaudio_service_event_t;
 
 struct AAudioMessageEvent {
     aaudio_service_event_t event;
-    int32_t                data1;
-    int64_t                data2;
+    double                 dataDouble;
+    int64_t                dataLong;
 };
 
 typedef struct AAudioServiceMessage_s {
@@ -54,12 +56,11 @@
 
     code what;
     union {
-        AAudioMessageTimestamp timestamp;
-        AAudioMessageEvent event;
+        AAudioMessageTimestamp timestamp; // what == TIMESTAMP
+        AAudioMessageEvent event;         // what == EVENT
     };
 } AAudioServiceMessage;
 
-
 } /* namespace aaudio */
 
 #endif //AAUDIO_AAUDIO_SERVICE_MESSAGE_H
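
The service message now carries either a timestamp or an event in a union tagged by
`what`. A hedged sketch of how a reader might dispatch on it; the enumerator spelling
(code::TIMESTAMP, code::EVENT) is inferred from the inline comments above and is an
assumption, since the enum itself is outside the hunks shown:

#define LOG_TAG "AAudio"
#include <utils/Log.h>
#include "binding/AAudioServiceMessage.h"

// Sketch only: consume one message from the up message queue.
static void handleServiceMessage(const aaudio::AAudioServiceMessage &message) {
    switch (message.what) {
        case aaudio::AAudioServiceMessage::code::TIMESTAMP:     // assumed enumerator
            ALOGV("timestamp: position %lld at %lld ns",
                  (long long) message.timestamp.position,
                  (long long) message.timestamp.timestamp);
            break;
        case aaudio::AAudioServiceMessage::code::EVENT:         // assumed enumerator
            if (message.event.event == aaudio::AAUDIO_SERVICE_EVENT_VOLUME) {
                ALOGV("volume event, dataDouble = %f", message.event.dataDouble);
            }
            break;
        default:
            break;
    }
}
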
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
index fe3a59f..ba41a3b 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
@@ -35,26 +35,50 @@
 AAudioStreamConfiguration::~AAudioStreamConfiguration() {}
 
 status_t AAudioStreamConfiguration::writeToParcel(Parcel* parcel) const {
-    parcel->writeInt32(mDeviceId);
-    parcel->writeInt32(mSampleRate);
-    parcel->writeInt32(mSamplesPerFrame);
-    parcel->writeInt32((int32_t) mAudioFormat);
-    parcel->writeInt32(mBufferCapacity);
-    return NO_ERROR; // TODO check for errors above
+    status_t status;
+    status = parcel->writeInt32(mDeviceId);
+    if (status != NO_ERROR) goto error;
+    status = parcel->writeInt32(mSampleRate);
+    if (status != NO_ERROR) goto error;
+    status = parcel->writeInt32(mSamplesPerFrame);
+    if (status != NO_ERROR) goto error;
+    status = parcel->writeInt32((int32_t) mSharingMode);
+    ALOGD("AAudioStreamConfiguration.writeToParcel(): mSharingMode = %d", mSharingMode);
+    if (status != NO_ERROR) goto error;
+    status = parcel->writeInt32((int32_t) mAudioFormat);
+    if (status != NO_ERROR) goto error;
+    status = parcel->writeInt32(mBufferCapacity);
+    if (status != NO_ERROR) goto error;
+    return NO_ERROR;
+error:
+    ALOGE("AAudioStreamConfiguration.writeToParcel(): write failed = %d", status);
+    return status;
 }
 
 status_t AAudioStreamConfiguration::readFromParcel(const Parcel* parcel) {
     int32_t temp;
-    parcel->readInt32(&mDeviceId);
-    parcel->readInt32(&mSampleRate);
-    parcel->readInt32(&mSamplesPerFrame);
-    parcel->readInt32(&temp);
+    status_t status = parcel->readInt32(&mDeviceId);
+    if (status != NO_ERROR) goto error;
+    status = parcel->readInt32(&mSampleRate);
+    if (status != NO_ERROR) goto error;
+    status = parcel->readInt32(&mSamplesPerFrame);
+    if (status != NO_ERROR) goto error;
+    status = parcel->readInt32(&temp);
+    if (status != NO_ERROR) goto error;
+    mSharingMode = (aaudio_sharing_mode_t) temp;
+    ALOGD("AAudioStreamConfiguration.readFromParcel(): mSharingMode = %d", mSharingMode);
+    status = parcel->readInt32(&temp);
+    if (status != NO_ERROR) goto error;
     mAudioFormat = (aaudio_audio_format_t) temp;
-    parcel->readInt32(&mBufferCapacity);
-    return NO_ERROR; // TODO check for errors above
+    status = parcel->readInt32(&mBufferCapacity);
+    if (status != NO_ERROR) goto error;
+    return NO_ERROR;
+error:
+    ALOGE("AAudioStreamConfiguration.readFromParcel(): read failed = %d", status);
+    return status;
 }
 
-aaudio_result_t AAudioStreamConfiguration::validate() {
+aaudio_result_t AAudioStreamConfiguration::validate() const {
     // Validate results of the open.
     if (mSampleRate < 0 || mSampleRate >= 8 * 48000) { // TODO review limits
         ALOGE("AAudioStreamConfiguration.validate(): invalid sampleRate = %d", mSampleRate);
@@ -84,9 +108,11 @@
     return AAUDIO_OK;
 }
 
-void AAudioStreamConfiguration::dump() {
-    ALOGD("AAudioStreamConfiguration mSampleRate      = %d -----", mSampleRate);
+void AAudioStreamConfiguration::dump() const {
+    ALOGD("AAudioStreamConfiguration mDeviceId        = %d", mDeviceId);
+    ALOGD("AAudioStreamConfiguration mSampleRate      = %d", mSampleRate);
     ALOGD("AAudioStreamConfiguration mSamplesPerFrame = %d", mSamplesPerFrame);
+    ALOGD("AAudioStreamConfiguration mSharingMode     = %d", (int)mSharingMode);
     ALOGD("AAudioStreamConfiguration mAudioFormat     = %d", (int)mAudioFormat);
     ALOGD("AAudioStreamConfiguration mBufferCapacity  = %d", mBufferCapacity);
 }
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.h b/media/libaaudio/src/binding/AAudioStreamConfiguration.h
index 57b1c59..b68d8b2 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.h
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.h
@@ -66,6 +66,14 @@
         mAudioFormat = audioFormat;
     }
 
+    aaudio_sharing_mode_t getSharingMode() const {
+        return mSharingMode;
+    }
+
+    void setSharingMode(aaudio_sharing_mode_t sharingMode) {
+        mSharingMode = sharingMode;
+    }
+
     int32_t getBufferCapacity() const {
         return mBufferCapacity;
     }
@@ -78,14 +86,15 @@
 
     virtual status_t readFromParcel(const Parcel* parcel) override;
 
-    aaudio_result_t validate();
+    aaudio_result_t validate() const;
 
-    void dump();
+    void dump() const;
 
-protected:
+private:
     int32_t               mDeviceId        = AAUDIO_DEVICE_UNSPECIFIED;
     int32_t               mSampleRate      = AAUDIO_UNSPECIFIED;
     int32_t               mSamplesPerFrame = AAUDIO_UNSPECIFIED;
+    aaudio_sharing_mode_t mSharingMode     = AAUDIO_SHARING_MODE_SHARED;
     aaudio_audio_format_t mAudioFormat     = AAUDIO_FORMAT_UNSPECIFIED;
     int32_t               mBufferCapacity  = AAUDIO_UNSPECIFIED;
 };
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.cpp b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
index 5202b73..b8a0429 100644
--- a/media/libaaudio/src/binding/AAudioStreamRequest.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
@@ -14,6 +14,10 @@
  * limitations under the License.
  */
 
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
 #include <stdint.h>
 
 #include <sys/mman.h>
@@ -39,28 +43,48 @@
 AAudioStreamRequest::~AAudioStreamRequest() {}
 
 status_t AAudioStreamRequest::writeToParcel(Parcel* parcel) const {
-    parcel->writeInt32((int32_t) mUserId);
-    parcel->writeInt32((int32_t) mProcessId);
-    mConfiguration.writeToParcel(parcel);
-    return NO_ERROR; // TODO check for errors above
+    status_t status = parcel->writeInt32((int32_t) mUserId);
+    if (status != NO_ERROR) goto error;
+    status = parcel->writeInt32((int32_t) mProcessId);
+    if (status != NO_ERROR) goto error;
+    status = parcel->writeInt32((int32_t) mDirection);
+    if (status != NO_ERROR) goto error;
+    status = mConfiguration.writeToParcel(parcel);
+    if (status != NO_ERROR) goto error;
+    return NO_ERROR;
+
+error:
+    ALOGE("AAudioStreamRequest.writeToParcel(): write failed = %d", status);
+    return status;
 }
 
 status_t AAudioStreamRequest::readFromParcel(const Parcel* parcel) {
     int32_t temp;
-    parcel->readInt32(&temp);
+    status_t status = parcel->readInt32(&temp);
+    if (status != NO_ERROR) goto error;
     mUserId = (uid_t) temp;
-    parcel->readInt32(&temp);
+    status = parcel->readInt32(&temp);
+    if (status != NO_ERROR) goto error;
     mProcessId = (pid_t) temp;
-    mConfiguration.readFromParcel(parcel);
-    return NO_ERROR; // TODO check for errors above
+    status = parcel->readInt32(&temp);
+    if (status != NO_ERROR) goto error;
+    mDirection = (aaudio_direction_t) temp;
+    status = mConfiguration.readFromParcel(parcel);
+    if (status != NO_ERROR) goto error;
+    return NO_ERROR;
+
+error:
+    ALOGE("AAudioStreamRequest.readFromParcel(): read failed = %d", status);
+    return status;
 }
 
-aaudio_result_t AAudioStreamRequest::validate() {
+aaudio_result_t AAudioStreamRequest::validate() const {
     return mConfiguration.validate();
 }
 
-void AAudioStreamRequest::dump() {
-    ALOGD("AAudioStreamRequest mUserId = %d -----", mUserId);
+void AAudioStreamRequest::dump() const {
+    ALOGD("AAudioStreamRequest mUserId    = %d", mUserId);
     ALOGD("AAudioStreamRequest mProcessId = %d", mProcessId);
+    ALOGD("AAudioStreamRequest mDirection = %d", mDirection);
     mConfiguration.dump();
 }
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.h b/media/libaaudio/src/binding/AAudioStreamRequest.h
index 0fd28ba..6546562 100644
--- a/media/libaaudio/src/binding/AAudioStreamRequest.h
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.h
@@ -52,6 +52,18 @@
         mProcessId = processId;
     }
 
+    aaudio_direction_t getDirection() const {
+        return mDirection;
+    }
+
+    void setDirection(aaudio_direction_t direction) {
+        mDirection = direction;
+    }
+
+    const AAudioStreamConfiguration &getConstantConfiguration() const {
+        return mConfiguration;
+    }
+
     AAudioStreamConfiguration &getConfiguration() {
         return mConfiguration;
     }
@@ -60,14 +72,15 @@
 
     virtual status_t readFromParcel(const Parcel* parcel) override;
 
-    aaudio_result_t validate();
+    aaudio_result_t validate() const;
 
-    void dump();
+    void dump() const;
 
 protected:
     AAudioStreamConfiguration  mConfiguration;
-    uid_t    mUserId;
-    pid_t    mProcessId;
+    uid_t                      mUserId;
+    pid_t                      mProcessId;
+    aaudio_direction_t         mDirection;
 };
 
 } /* namespace aaudio */
diff --git a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
index f40ee02..ee92ee3 100644
--- a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
@@ -14,11 +14,15 @@
  * limitations under the License.
  */
 
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
 #include <stdint.h>
 
-#include <sys/mman.h>
 #include <binder/Parcel.h>
 #include <binder/Parcelable.h>
+#include <utility/AAudioUtilities.h>
 
 #include "binding/AAudioServiceDefinitions.h"
 #include "binding/RingBufferParcelable.h"
@@ -82,13 +86,27 @@
 }
 
 aaudio_result_t AudioEndpointParcelable::resolve(EndpointDescriptor *descriptor) {
-    // TODO error check
-    mUpMessageQueueParcelable.resolve(mSharedMemories, &descriptor->upMessageQueueDescriptor);
-    mDownMessageQueueParcelable.resolve(mSharedMemories,
+    aaudio_result_t result = mUpMessageQueueParcelable.resolve(mSharedMemories,
+                                                           &descriptor->upMessageQueueDescriptor);
+    if (result != AAUDIO_OK) return result;
+    result = mDownMessageQueueParcelable.resolve(mSharedMemories,
                                         &descriptor->downMessageQueueDescriptor);
-    mUpDataQueueParcelable.resolve(mSharedMemories, &descriptor->upDataQueueDescriptor);
-    mDownDataQueueParcelable.resolve(mSharedMemories, &descriptor->downDataQueueDescriptor);
-    return AAUDIO_OK;
+    if (result != AAUDIO_OK) return result;
+
+    result = mUpDataQueueParcelable.resolve(mSharedMemories, &descriptor->upDataQueueDescriptor);
+    if (result != AAUDIO_OK) return result;
+    result = mDownDataQueueParcelable.resolve(mSharedMemories,
+                                              &descriptor->downDataQueueDescriptor);
+    return result;
+}
+
+aaudio_result_t AudioEndpointParcelable::close() {
+    int err = 0;
+    for (int i = 0; i < mNumSharedMemories; i++) {
+        int lastErr = mSharedMemories[i].close();
+        if (lastErr < 0) err = lastErr;
+    }
+    return AAudioConvert_androidToAAudioResult(err);
 }
 
 aaudio_result_t AudioEndpointParcelable::validate() {
@@ -100,6 +118,7 @@
     for (int i = 0; i < mNumSharedMemories; i++) {
         result = mSharedMemories[i].validate();
         if (result != AAUDIO_OK) {
+            ALOGE("AudioEndpointParcelable invalid mSharedMemories[%d] = %d", i, result);
             return result;
         }
     }
diff --git a/media/libaaudio/src/binding/AudioEndpointParcelable.h b/media/libaaudio/src/binding/AudioEndpointParcelable.h
index d4646d0..4a1cb72 100644
--- a/media/libaaudio/src/binding/AudioEndpointParcelable.h
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.h
@@ -57,6 +57,8 @@
 
     aaudio_result_t validate();
 
+    aaudio_result_t close();
+
     void dump();
 
 public: // TODO add getters
diff --git a/media/libaaudio/src/binding/IAAudioService.cpp b/media/libaaudio/src/binding/IAAudioService.cpp
index c21033e..20cbbc8 100644
--- a/media/libaaudio/src/binding/IAAudioService.cpp
+++ b/media/libaaudio/src/binding/IAAudioService.cpp
@@ -40,11 +40,13 @@
     {
     }
 
-    virtual aaudio_handle_t openStream(aaudio::AAudioStreamRequest &request,
-                                     aaudio::AAudioStreamConfiguration &configuration) override {
+    virtual aaudio_handle_t openStream(const aaudio::AAudioStreamRequest &request,
+                                     aaudio::AAudioStreamConfiguration &configurationOutput) override {
         Parcel data, reply;
         // send command
         data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+        ALOGE("BpAAudioService::client openStream request dump --------------------");
+        request.dump();
         request.writeToParcel(&data);
         status_t err = remote()->transact(OPEN_STREAM, data, &reply);
         if (err != NO_ERROR) {
@@ -53,7 +55,12 @@
         // parse reply
         aaudio_handle_t stream;
         reply.readInt32(&stream);
-        configuration.readFromParcel(&reply);
+        err = configurationOutput.readFromParcel(&reply);
+        if (err != NO_ERROR) {
+            ALOGE("BpAAudioService::client openStream readFromParcel failed %d", err);
+            closeStream(stream);
+            return AAudioConvert_androidToAAudioResult(err);
+        }
         return stream;
     }
 
@@ -80,16 +87,30 @@
         data.writeInt32(streamHandle);
         status_t err = remote()->transact(GET_STREAM_DESCRIPTION, data, &reply);
         if (err != NO_ERROR) {
+            ALOGE("BpAAudioService::client transact(GET_STREAM_DESCRIPTION) returns %d", err);
             return AAudioConvert_androidToAAudioResult(err);
         }
         // parse reply
-        parcelable.readFromParcel(&reply);
-        parcelable.dump();
-        aaudio_result_t result = parcelable.validate();
-        if (result != AAUDIO_OK) {
+        aaudio_result_t result;
+        err = reply.readInt32(&result);
+        if (err != NO_ERROR) {
+            ALOGE("BpAAudioService::client transact(GET_STREAM_DESCRIPTION) readInt %d", err);
+            return AAudioConvert_androidToAAudioResult(err);
+        } else if (result != AAUDIO_OK) {
+            ALOGE("BpAAudioService::client GET_STREAM_DESCRIPTION passed result %d", result);
             return result;
         }
-        reply.readInt32(&result);
+        err = parcelable.readFromParcel(&reply);
+        if (err != NO_ERROR) {
+            ALOGE("BpAAudioService::client transact(GET_STREAM_DESCRIPTION) read endpoint %d", err);
+            return AAudioConvert_androidToAAudioResult(err);
+        }
+        //parcelable.dump();
+        result = parcelable.validate();
+        if (result != AAUDIO_OK) {
+            ALOGE("BpAAudioService::client GET_STREAM_DESCRIPTION validation fails %d", result);
+            return result;
+        }
         return result;
     }
 
@@ -139,13 +160,16 @@
         return res;
     }
 
-    virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle, pid_t clientThreadId,
+    virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
+                                              pid_t clientProcessId,
+                                              pid_t clientThreadId,
                                               int64_t periodNanoseconds)
     override {
         Parcel data, reply;
         // send command
         data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
         data.writeInt32(streamHandle);
+        data.writeInt32((int32_t) clientProcessId);
         data.writeInt32((int32_t) clientThreadId);
         data.writeInt64(periodNanoseconds);
         status_t err = remote()->transact(REGISTER_AUDIO_THREAD, data, &reply);
@@ -158,12 +182,15 @@
         return res;
     }
 
-    virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle, pid_t clientThreadId)
+    virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
+                                                  pid_t clientProcessId,
+                                                  pid_t clientThreadId)
     override {
         Parcel data, reply;
         // send command
         data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
         data.writeInt32(streamHandle);
+        data.writeInt32((int32_t) clientProcessId);
         data.writeInt32((int32_t) clientThreadId);
         status_t err = remote()->transact(UNREGISTER_AUDIO_THREAD, data, &reply);
         if (err != NO_ERROR) {
@@ -178,7 +205,7 @@
 };
 
 // Implement an interface to the service.
-// This is here so that you don't have to link with liboboe static library.
+// This is here so that you don't have to link with libaaudio static library.
 IMPLEMENT_META_INTERFACE(AAudioService, "IAAudioService");
 
 // The order of parameters in the Parcels must match with code in BpAAudioService
@@ -189,6 +216,7 @@
     aaudio::AAudioStreamRequest request;
     aaudio::AAudioStreamConfiguration configuration;
     pid_t pid;
+    pid_t tid;
     int64_t nanoseconds;
     aaudio_result_t result;
     ALOGV("BnAAudioService::onTransact(%i) %i", code, flags);
@@ -197,8 +225,12 @@
     switch(code) {
         case OPEN_STREAM: {
             request.readFromParcel(&data);
+
+            ALOGD("BnAAudioService::client openStream request dump --------------------");
+            request.dump();
+
             stream = openStream(request, configuration);
-            ALOGD("BnAAudioService::onTransact OPEN_STREAM server handle = 0x%08X", stream);
+            ALOGV("BnAAudioService::onTransact OPEN_STREAM server handle = 0x%08X", stream);
             reply->writeInt32(stream);
             configuration.writeToParcel(reply);
             return NO_ERROR;
@@ -206,7 +238,7 @@
 
         case CLOSE_STREAM: {
             data.readInt32(&stream);
-            ALOGD("BnAAudioService::onTransact CLOSE_STREAM 0x%08X", stream);
+            ALOGV("BnAAudioService::onTransact CLOSE_STREAM 0x%08X", stream);
             result = closeStream(stream);
             reply->writeInt32(result);
             return NO_ERROR;
@@ -214,26 +246,28 @@
 
         case GET_STREAM_DESCRIPTION: {
             data.readInt32(&stream);
-            ALOGD("BnAAudioService::onTransact GET_STREAM_DESCRIPTION 0x%08X", stream);
+            ALOGI("BnAAudioService::onTransact GET_STREAM_DESCRIPTION 0x%08X", stream);
             aaudio::AudioEndpointParcelable parcelable;
             result = getStreamDescription(stream, parcelable);
+            ALOGI("BnAAudioService::onTransact getStreamDescription() returns %d", result);
             if (result != AAUDIO_OK) {
                 return AAudioConvert_aaudioToAndroidStatus(result);
             }
-            parcelable.dump();
             result = parcelable.validate();
             if (result != AAUDIO_OK) {
+                ALOGE("BnAAudioService::onTransact getStreamDescription() returns %d", result);
+                parcelable.dump();
                 return AAudioConvert_aaudioToAndroidStatus(result);
             }
-            parcelable.writeToParcel(reply);
             reply->writeInt32(result);
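+            // Write the result before the endpoint description so it matches the read order in BpAAudioService.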
+            parcelable.writeToParcel(reply);
             return NO_ERROR;
         } break;
 
         case START_STREAM: {
             data.readInt32(&stream);
             result = startStream(stream);
-            ALOGD("BnAAudioService::onTransact START_STREAM 0x%08X, result = %d",
+            ALOGV("BnAAudioService::onTransact START_STREAM 0x%08X, result = %d",
                     stream, result);
             reply->writeInt32(result);
             return NO_ERROR;
@@ -242,7 +276,7 @@
         case PAUSE_STREAM: {
             data.readInt32(&stream);
             result = pauseStream(stream);
-            ALOGD("BnAAudioService::onTransact PAUSE_STREAM 0x%08X, result = %d",
+            ALOGV("BnAAudioService::onTransact PAUSE_STREAM 0x%08X, result = %d",
                     stream, result);
             reply->writeInt32(result);
             return NO_ERROR;
@@ -251,7 +285,7 @@
         case FLUSH_STREAM: {
             data.readInt32(&stream);
             result = flushStream(stream);
-            ALOGD("BnAAudioService::onTransact FLUSH_STREAM 0x%08X, result = %d",
+            ALOGV("BnAAudioService::onTransact FLUSH_STREAM 0x%08X, result = %d",
                     stream, result);
             reply->writeInt32(result);
             return NO_ERROR;
@@ -260,9 +294,10 @@
         case REGISTER_AUDIO_THREAD: {
             data.readInt32(&stream);
             data.readInt32(&pid);
+            data.readInt32(&tid);
             data.readInt64(&nanoseconds);
-            result = registerAudioThread(stream, pid, nanoseconds);
-            ALOGD("BnAAudioService::onTransact REGISTER_AUDIO_THREAD 0x%08X, result = %d",
+            result = registerAudioThread(stream, pid, tid, nanoseconds);
+            ALOGV("BnAAudioService::onTransact REGISTER_AUDIO_THREAD 0x%08X, result = %d",
                     stream, result);
             reply->writeInt32(result);
             return NO_ERROR;
@@ -271,8 +306,9 @@
         case UNREGISTER_AUDIO_THREAD: {
             data.readInt32(&stream);
             data.readInt32(&pid);
-            result = unregisterAudioThread(stream, pid);
-            ALOGD("BnAAudioService::onTransact UNREGISTER_AUDIO_THREAD 0x%08X, result = %d",
+            data.readInt32(&tid);
+            result = unregisterAudioThread(stream, pid, tid);
+            ALOGV("BnAAudioService::onTransact UNREGISTER_AUDIO_THREAD 0x%08X, result = %d",
                     stream, result);
             reply->writeInt32(result);
             return NO_ERROR;
diff --git a/media/libaaudio/src/binding/IAAudioService.h b/media/libaaudio/src/binding/IAAudioService.h
index 53c3b45..ab7fd1b 100644
--- a/media/libaaudio/src/binding/IAAudioService.h
+++ b/media/libaaudio/src/binding/IAAudioService.h
@@ -28,9 +28,12 @@
 #include "binding/AudioEndpointParcelable.h"
 #include "binding/AAudioStreamRequest.h"
 #include "binding/AAudioStreamConfiguration.h"
+#include "utility/HandleTracker.h"
 
 namespace android {
 
+#define AAUDIO_SERVICE_NAME  "media.aaudio"
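+// Binder name used to look up the AAudio service in the ServiceManager.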
+
 // Interface (our AIDL) - Shared by server and client
 class IAAudioService : public IInterface {
 public:
@@ -42,8 +45,8 @@
      * @param configuration contains information about the created stream
      * @return handle to the stream or a negative error
      */
-    virtual aaudio::aaudio_handle_t openStream(aaudio::AAudioStreamRequest &request,
-                                     aaudio::AAudioStreamConfiguration &configuration) = 0;
+    virtual aaudio_handle_t openStream(const aaudio::AAudioStreamRequest &request,
+                                     aaudio::AAudioStreamConfiguration &configurationOutput) = 0;
 
     virtual aaudio_result_t closeStream(aaudio::aaudio_handle_t streamHandle) = 0;
 
@@ -55,26 +58,33 @@
 
     /**
      * Start the flow of data.
+     * This is asynchronous. When complete, the service will send a STARTED event.
      */
     virtual aaudio_result_t startStream(aaudio::aaudio_handle_t streamHandle) = 0;
 
     /**
      * Stop the flow of data such that start() can resume without loss of data.
+     * This is asynchronous. When complete, the service will send a PAUSED event.
      */
     virtual aaudio_result_t pauseStream(aaudio::aaudio_handle_t streamHandle) = 0;
 
     /**
      *  Discard any data held by the underlying HAL or Service.
+     * This is asynchronous. When complete, the service will send a FLUSHED event.
      */
     virtual aaudio_result_t flushStream(aaudio::aaudio_handle_t streamHandle) = 0;
 
     /**
      * Manage the specified thread as a low latency audio thread.
+     * TODO Consider passing this information as part of the startStream() call.
      */
-    virtual aaudio_result_t registerAudioThread(aaudio::aaudio_handle_t streamHandle, pid_t clientThreadId,
+    virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
+                                              pid_t clientProcessId,
+                                              pid_t clientThreadId,
                                               int64_t periodNanoseconds) = 0;
 
-    virtual aaudio_result_t unregisterAudioThread(aaudio::aaudio_handle_t streamHandle,
+    virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
+                                                pid_t clientProcessId,
                                                 pid_t clientThreadId) = 0;
 };
 
diff --git a/media/libaaudio/src/binding/RingBufferParcelable.cpp b/media/libaaudio/src/binding/RingBufferParcelable.cpp
index 3a92929..05451f9 100644
--- a/media/libaaudio/src/binding/RingBufferParcelable.cpp
+++ b/media/libaaudio/src/binding/RingBufferParcelable.cpp
@@ -14,6 +14,10 @@
  * limitations under the License.
  */
 
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
 #include <stdint.h>
 
 #include <binder/Parcelable.h>
diff --git a/media/libaaudio/src/binding/RingBufferParcelable.h b/media/libaaudio/src/binding/RingBufferParcelable.h
index 3f82c79..5fc5d00 100644
--- a/media/libaaudio/src/binding/RingBufferParcelable.h
+++ b/media/libaaudio/src/binding/RingBufferParcelable.h
@@ -55,6 +55,8 @@
 
     void setCapacityInFrames(int32_t capacityInFrames);
 
+    bool isFileDescriptorSafe(SharedMemoryParcelable *memoryParcels);
+
     /**
      * The read and write must be symmetric.
      */
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
index 1102dec..cfb820f 100644
--- a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
@@ -14,12 +14,18 @@
  * limitations under the License.
  */
 
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
 #include <stdint.h>
+#include <stdio.h>
 
 #include <sys/mman.h>
 #include <aaudio/AAudioDefinitions.h>
 
 #include <binder/Parcelable.h>
+#include <utility/AAudioUtilities.h>
 
 #include "binding/SharedMemoryParcelable.h"
 
@@ -36,28 +42,55 @@
 void SharedMemoryParcelable::setup(int fd, int32_t sizeInBytes) {
     mFd = fd;
     mSizeInBytes = sizeInBytes;
+
 }
 
 status_t SharedMemoryParcelable::writeToParcel(Parcel* parcel) const {
-    parcel->writeInt32(mSizeInBytes);
+    status_t status = parcel->writeInt32(mSizeInBytes);
+    if (status != NO_ERROR) return status;
     if (mSizeInBytes > 0) {
-        parcel->writeDupFileDescriptor(mFd);
+        status = parcel->writeDupFileDescriptor(mFd);
+        ALOGE_IF(status != NO_ERROR, "SharedMemoryParcelable writeDupFileDescriptor failed : %d", status);
     }
-    return NO_ERROR; // TODO check for errors above
+    return status;
 }
 
 status_t SharedMemoryParcelable::readFromParcel(const Parcel* parcel) {
-    parcel->readInt32(&mSizeInBytes);
-    if (mSizeInBytes > 0) {
-        mFd = dup(parcel->readFileDescriptor());
+    status_t status = parcel->readInt32(&mSizeInBytes);
+    if (status != NO_ERROR) {
+        return status;
     }
-    return NO_ERROR; // TODO check for errors above
+    if (mSizeInBytes > 0) {
+// FIXME        mFd = dup(parcel->readFileDescriptor());
+        // Why is the ALSA resource not getting freed?!
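+        // F_DUPFD_CLOEXEC duplicates the fd like dup() but with close-on-exec set, so it is not leaked across exec().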
+        mFd = fcntl(parcel->readFileDescriptor(), F_DUPFD_CLOEXEC, 0);
+        if (mFd == -1) {
+            status = -errno;
+            ALOGE("SharedMemoryParcelable readFileDescriptor fcntl() failed : %d", status);
+        }
+    }
+    return status;
 }
 
-// TODO Add code to unmmap()
+aaudio_result_t SharedMemoryParcelable::close() {
+    if (mResolvedAddress != nullptr) {
+        int err = munmap(mResolvedAddress, mSizeInBytes);
+        if (err < 0) {
+            ALOGE("SharedMemoryParcelable::close() munmap() failed %d", err);
+            return AAudioConvert_androidToAAudioResult(err);
+        }
+        mResolvedAddress = nullptr;
+    }
+    if (mFd != -1) {
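+        // "::close" calls the global POSIX close(), not this close() method.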
+        ::close(mFd);
+        mFd = -1;
+    }
+    return AAUDIO_OK;
+}
 
 aaudio_result_t SharedMemoryParcelable::resolve(int32_t offsetInBytes, int32_t sizeInBytes,
                                               void **regionAddressPtr) {
+
     if (offsetInBytes < 0) {
         ALOGE("SharedMemoryParcelable illegal offsetInBytes = %d", offsetInBytes);
         return AAUDIO_ERROR_OUT_OF_RANGE;
@@ -68,6 +101,11 @@
         return AAUDIO_ERROR_OUT_OF_RANGE;
     }
     if (mResolvedAddress == nullptr) {
+        /* TODO remove
+        int fd = fcntl(mFd, F_DUPFD_CLOEXEC, 0);
+        ALOGE_IF(fd==-1, "cannot dup fd=%d, size=%zd, (%s)",
+                    mFd, mSizeInBytes, strerror(errno));
+        */
         mResolvedAddress = (uint8_t *) mmap(0, mSizeInBytes, PROT_READ|PROT_WRITE,
                                           MAP_SHARED, mFd, 0);
         if (mResolvedAddress == nullptr) {
@@ -76,8 +114,8 @@
         }
     }
     *regionAddressPtr = mResolvedAddress + offsetInBytes;
-    ALOGD("SharedMemoryParcelable mResolvedAddress = %p", mResolvedAddress);
-    ALOGD("SharedMemoryParcelable offset by %d, *regionAddressPtr = %p",
+    ALOGV("SharedMemoryParcelable mResolvedAddress = %p", mResolvedAddress);
+    ALOGV("SharedMemoryParcelable offset by %d, *regionAddressPtr = %p",
           offsetInBytes, *regionAddressPtr);
     return AAUDIO_OK;
 }
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.h b/media/libaaudio/src/binding/SharedMemoryParcelable.h
index 7e0bf1a..22e16f0 100644
--- a/media/libaaudio/src/binding/SharedMemoryParcelable.h
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.h
@@ -49,8 +49,14 @@
 
     virtual status_t readFromParcel(const Parcel* parcel) override;
 
+    // mmap() shared memory
     aaudio_result_t resolve(int32_t offsetInBytes, int32_t sizeInBytes, void **regionAddressPtr);
 
+    // munmap() any mapped memory
+    aaudio_result_t close();
+
+    bool isFileDescriptorSafe();
+
     int32_t getSizeInBytes();
 
     aaudio_result_t validate();
diff --git a/media/libaaudio/src/binding/SharedRegionParcelable.cpp b/media/libaaudio/src/binding/SharedRegionParcelable.cpp
index 8ca0023..8e57832 100644
--- a/media/libaaudio/src/binding/SharedRegionParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedRegionParcelable.cpp
@@ -14,6 +14,10 @@
  * limitations under the License.
  */
 
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
 #include <stdint.h>
 
 #include <sys/mman.h>
diff --git a/media/libaaudio/src/binding/SharedRegionParcelable.h b/media/libaaudio/src/binding/SharedRegionParcelable.h
index d6c2281..5fb2a4c 100644
--- a/media/libaaudio/src/binding/SharedRegionParcelable.h
+++ b/media/libaaudio/src/binding/SharedRegionParcelable.h
@@ -45,6 +45,8 @@
 
     aaudio_result_t resolve(SharedMemoryParcelable *memoryParcels, void **regionAddressPtr);
 
+    bool isFileDescriptorSafe(SharedMemoryParcelable *memoryParcels);
+
     aaudio_result_t validate();
 
     void dump();
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index 47c4774..fe049b2 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -19,7 +19,7 @@
 #include <utils/Log.h>
 
 #include <cassert>
-#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
 
 #include "AudioEndpointParcelable.h"
 #include "AudioEndpoint.h"
@@ -39,11 +39,26 @@
 {
 }
 
-static void AudioEndpoint_validateQueueDescriptor(const char *type,
+static aaudio_result_t AudioEndpoint_validateQueueDescriptor(const char *type,
                                                   const RingBufferDescriptor *descriptor) {
-    assert(descriptor->capacityInFrames > 0);
-    assert(descriptor->bytesPerFrame > 1);
-    assert(descriptor->dataAddress != nullptr);
+    if (descriptor == nullptr) {
+        ALOGE("AudioEndpoint_validateQueueDescriptor() NULL descriptor");
+        return AAUDIO_ERROR_NULL;
+    }
+    if (descriptor->capacityInFrames <= 0) {
+        ALOGE("AudioEndpoint_validateQueueDescriptor() bad capacityInFrames = %d",
+              descriptor->capacityInFrames);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+    if (descriptor->bytesPerFrame <= 1) {
+        ALOGE("AudioEndpoint_validateQueueDescriptor() bad bytesPerFrame = %d",
+              descriptor->bytesPerFrame);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+    if (descriptor->dataAddress == nullptr) {
+        ALOGE("AudioEndpoint_validateQueueDescriptor() NULL dataAddress");
+        return AAUDIO_ERROR_NULL;
+    }
     ALOGD("AudioEndpoint_validateQueueDescriptor %s, dataAddress at %p ====================",
           type,
           descriptor->dataAddress);
@@ -52,11 +67,12 @@
           descriptor->writeCounterAddress);
 
     // Try to READ from the data area.
+    // This code will crash if the mmap failed.
     uint8_t value = descriptor->dataAddress[0];
     ALOGD("AudioEndpoint_validateQueueDescriptor() dataAddress[0] = %d, then try to write",
         (int) value);
     // Try to WRITE to the data area.
-    descriptor->dataAddress[0] = value;
+    descriptor->dataAddress[0] = value * 3;
     ALOGD("AudioEndpoint_validateQueueDescriptor() wrote successfully");
 
     if (descriptor->readCounterAddress) {
@@ -73,17 +89,28 @@
         *descriptor->writeCounterAddress = counter;
         ALOGD("AudioEndpoint_validateQueueDescriptor() wrote writeCounterAddress successfully");
     }
+    return AAUDIO_OK;
 }
 
-void AudioEndpoint_validateDescriptor(const EndpointDescriptor *pEndpointDescriptor) {
-    AudioEndpoint_validateQueueDescriptor("msg", &pEndpointDescriptor->upMessageQueueDescriptor);
-    AudioEndpoint_validateQueueDescriptor("data", &pEndpointDescriptor->downDataQueueDescriptor);
+aaudio_result_t AudioEndpoint_validateDescriptor(const EndpointDescriptor *pEndpointDescriptor) {
+    aaudio_result_t result = AudioEndpoint_validateQueueDescriptor("messages",
+                                    &pEndpointDescriptor->upMessageQueueDescriptor);
+    if (result == AAUDIO_OK) {
+        result = AudioEndpoint_validateQueueDescriptor("data",
+                                                &pEndpointDescriptor->downDataQueueDescriptor);
+    }
+    return result;
 }
 
 aaudio_result_t AudioEndpoint::configure(const EndpointDescriptor *pEndpointDescriptor)
 {
-    aaudio_result_t result = AAUDIO_OK;
-    AudioEndpoint_validateDescriptor(pEndpointDescriptor); // FIXME remove after debugging
+    // TODO maybe remove after debugging
+    aaudio_result_t result = AudioEndpoint_validateDescriptor(pEndpointDescriptor);
+    if (result != AAUDIO_OK) {
+        ALOGD("AudioEndpoint_validateQueueDescriptor returned %d %s",
+              result, AAudio_convertResultToText(result));
+        return result;
+    }
 
     const RingBufferDescriptor *descriptor = &pEndpointDescriptor->upMessageQueueDescriptor;
     assert(descriptor->bytesPerFrame == sizeof(AAudioServiceMessage));
@@ -125,6 +152,7 @@
     int64_t *writeCounterAddress = (descriptor->writeCounterAddress == nullptr)
                                   ? &mDataWriteCounter
                                   : descriptor->writeCounterAddress;
+
     mDownDataQueue = new FifoBuffer(
             descriptor->bytesPerFrame,
             descriptor->capacityInFrames,
@@ -144,9 +172,19 @@
 
 aaudio_result_t AudioEndpoint::writeDataNow(const void *buffer, int32_t numFrames)
 {
+    // TODO Make it easier for the AAudioStreamInternal to scale floats and write shorts
+    // TODO Similar to block adapter write through technique. Add a DataConverter.
     return mDownDataQueue->write(buffer, numFrames);
 }
 
+void AudioEndpoint::getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer) {
+    mDownDataQueue->getEmptyRoomAvailable(wrappingBuffer);
+}
+
+void AudioEndpoint::advanceWriteIndex(int32_t deltaFrames) {
+    mDownDataQueue->getFifoControllerBase()->advanceWriteIndex(deltaFrames);
+}
+
 void AudioEndpoint::setDownDataReadCounter(fifo_counter_t framesRead)
 {
     mDownDataQueue->setReadCounter(framesRead);
diff --git a/media/libaaudio/src/client/AudioEndpoint.h b/media/libaaudio/src/client/AudioEndpoint.h
index caee488..a24a705 100644
--- a/media/libaaudio/src/client/AudioEndpoint.h
+++ b/media/libaaudio/src/client/AudioEndpoint.h
@@ -19,13 +19,13 @@
 
 #include <aaudio/AAudio.h>
 
-#include "AAudioServiceMessage.h"
-#include "AudioEndpointParcelable.h"
+#include "binding/AAudioServiceMessage.h"
+#include "binding/AudioEndpointParcelable.h"
 #include "fifo/FifoBuffer.h"
 
 namespace aaudio {
 
-#define ENDPOINT_DATA_QUEUE_SIZE_MIN   64
+#define ENDPOINT_DATA_QUEUE_SIZE_MIN   48
 
 /**
  * A sink for audio.
@@ -54,15 +54,19 @@
      */
     aaudio_result_t writeDataNow(const void *buffer, int32_t numFrames);
 
+    void getEmptyRoomAvailable(android::WrappingBuffer *wrappingBuffer);
+
+    void advanceWriteIndex(int32_t deltaFrames);
+
     /**
      * Set the read index in the downData queue.
      * This is needed if the reader is not updating the index itself.
      */
-    void setDownDataReadCounter(fifo_counter_t framesRead);
-    fifo_counter_t getDownDataReadCounter();
+    void setDownDataReadCounter(android::fifo_counter_t framesRead);
+    android::fifo_counter_t getDownDataReadCounter();
 
-    void setDownDataWriteCounter(fifo_counter_t framesWritten);
-    fifo_counter_t getDownDataWriteCounter();
+    void setDownDataWriteCounter(android::fifo_counter_t framesWritten);
+    android::fifo_counter_t getDownDataWriteCounter();
 
     /**
      * The result is not valid until after configure() is called.
@@ -80,11 +84,11 @@
     int32_t getFullFramesAvailable();
 
 private:
-    FifoBuffer   * mUpCommandQueue;
-    FifoBuffer   * mDownDataQueue;
-    bool           mOutputFreeRunning;
-    fifo_counter_t mDataReadCounter; // only used if free-running
-    fifo_counter_t mDataWriteCounter; // only used if free-running
+    android::FifoBuffer    *mUpCommandQueue;
+    android::FifoBuffer    *mDownDataQueue;
+    bool                    mOutputFreeRunning;
+    android::fifo_counter_t mDataReadCounter; // only used if free-running
+    android::fifo_counter_t mDataWriteCounter; // only used if free-running
 };
 
 } // namespace aaudio
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 54f4870..7304205 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -22,9 +22,9 @@
 #include <assert.h>
 
 #include <binder/IServiceManager.h>
-#include <utils/Mutex.h>
 
 #include <aaudio/AAudio.h>
+#include <utils/String16.h>
 
 #include "AudioClock.h"
 #include "AudioEndpointParcelable.h"
@@ -32,6 +32,7 @@
 #include "binding/AAudioStreamConfiguration.h"
 #include "binding/IAAudioService.h"
 #include "binding/AAudioServiceMessage.h"
+#include "fifo/FifoBuffer.h"
 
 #include "core/AudioStreamBuilder.h"
 #include "AudioStreamInternal.h"
@@ -43,47 +44,25 @@
 using android::defaultServiceManager;
 using android::interface_cast;
 using android::Mutex;
+using android::WrappingBuffer;
 
 using namespace aaudio;
 
-static android::Mutex gServiceLock;
-static sp<IAAudioService>  gAAudioService;
+#define MIN_TIMEOUT_NANOS        (1000 * AAUDIO_NANOS_PER_MILLISECOND)
 
-#define AAUDIO_SERVICE_NAME   "AAudioService"
+// Wait at least this many times longer than the operation should take.
+#define MIN_TIMEOUT_OPERATIONS    4
 
-// Helper function to get access to the "AAudioService" service.
-// This code was modeled after frameworks/av/media/libaudioclient/AudioSystem.cpp
-static const sp<IAAudioService> getAAudioService() {
-    sp<IBinder> binder;
-    Mutex::Autolock _l(gServiceLock);
-    if (gAAudioService == 0) {
-        sp<IServiceManager> sm = defaultServiceManager();
-        // Try several times to get the service.
-        int retries = 4;
-        do {
-            binder = sm->getService(String16(AAUDIO_SERVICE_NAME)); // This will wait a while.
-            if (binder != 0) {
-                break;
-            }
-        } while (retries-- > 0);
+#define ALOG_CONDITION   (mInService == false)
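+// The verbose ALOGD_IF logging below is only enabled when this code runs in a client process, not inside the service.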
 
-        if (binder != 0) {
-            // TODO Add linkToDeath() like in frameworks/av/media/libaudioclient/AudioSystem.cpp
-            // TODO Create a DeathRecipient that disconnects all active streams.
-            gAAudioService = interface_cast<IAAudioService>(binder);
-        } else {
-            ALOGE("AudioStreamInternal could not get %s", AAUDIO_SERVICE_NAME);
-        }
-    }
-    return gAAudioService;
-}
-
-AudioStreamInternal::AudioStreamInternal()
+AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface  &serviceInterface, bool inService)
         : AudioStream()
         , mClockModel()
         , mAudioEndpoint()
         , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
         , mFramesPerBurst(16)
+        , mServiceInterface(serviceInterface)
+        , mInService(inService)
 {
 }
 
@@ -92,9 +71,6 @@
 
 aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
 
-    const sp<IAAudioService>& service = getAAudioService();
-    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
-
     aaudio_result_t result = AAUDIO_OK;
     AAudioStreamRequest request;
     AAudioStreamConfiguration configuration;
@@ -104,22 +80,31 @@
         return result;
     }
 
+    // We have to do volume scaling. So we prefer FLOAT format.
+    if (getFormat() == AAUDIO_UNSPECIFIED) {
+        setFormat(AAUDIO_FORMAT_PCM_FLOAT);
+    }
+
     // Build the request to send to the server.
     request.setUserId(getuid());
     request.setProcessId(getpid());
+    request.setDirection(getDirection());
+
     request.getConfiguration().setDeviceId(getDeviceId());
     request.getConfiguration().setSampleRate(getSampleRate());
     request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
     request.getConfiguration().setAudioFormat(getFormat());
+    aaudio_sharing_mode_t sharingMode = getSharingMode();
+    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.open(): sharingMode %d", sharingMode);
+    request.getConfiguration().setSharingMode(sharingMode);
     request.getConfiguration().setBufferCapacity(builder.getBufferCapacity());
-    request.dump();
 
-    mServiceStreamHandle = service->openStream(request, configuration);
-    ALOGD("AudioStreamInternal.open(): openStream returned mServiceStreamHandle = 0x%08X",
+    mServiceStreamHandle = mServiceInterface.openStream(request, configuration);
+    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.open(): openStream returned mServiceStreamHandle = 0x%08X",
          (unsigned int)mServiceStreamHandle);
     if (mServiceStreamHandle < 0) {
         result = mServiceStreamHandle;
-        ALOGE("AudioStreamInternal.open(): acquireRealtimeStream aaudio_result_t = 0x%08X", result);
+        ALOGE("AudioStreamInternal.open(): openStream() returned %d", result);
     } else {
         result = configuration.validate();
         if (result != AAUDIO_OK) {
@@ -129,17 +114,27 @@
         // Save results of the open.
         setSampleRate(configuration.getSampleRate());
         setSamplesPerFrame(configuration.getSamplesPerFrame());
-        setFormat(configuration.getAudioFormat());
+        setDeviceId(configuration.getDeviceId());
 
-        aaudio::AudioEndpointParcelable parcelable;
-        result = service->getStreamDescription(mServiceStreamHandle, parcelable);
+        // Save device format so we can do format conversion and volume scaling together.
+        mDeviceFormat = configuration.getAudioFormat();
+
+        result = mServiceInterface.getStreamDescription(mServiceStreamHandle, mEndPointParcelable);
+        ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.open(): getStreamDescriptor(0x%08X) returns %d",
+              mServiceStreamHandle, result);
         if (result != AAUDIO_OK) {
             ALOGE("AudioStreamInternal.open(): getStreamDescriptor returns %d", result);
-            service->closeStream(mServiceStreamHandle);
+            mServiceInterface.closeStream(mServiceStreamHandle);
             return result;
         }
+
         // resolve parcelable into a descriptor
-        parcelable.resolve(&mEndpointDescriptor);
+        result = mEndPointParcelable.resolve(&mEndpointDescriptor);
+        if (result != AAUDIO_OK) {
+            ALOGE("AudioStreamInternal.open(): resolve() returns %d", result);
+            mServiceInterface.closeStream(mServiceStreamHandle);
+            return result;
+        }
 
         // Configure endpoint based on descriptor.
         mAudioEndpoint.configure(&mEndpointDescriptor);
@@ -151,67 +146,186 @@
         mClockModel.setSampleRate(getSampleRate());
         mClockModel.setFramesPerBurst(mFramesPerBurst);
 
+        if (getDataCallbackProc()) {
+            mCallbackFrames = builder.getFramesPerDataCallback();
+            if (mCallbackFrames > getBufferCapacity() / 2) {
+                ALOGE("AudioStreamInternal.open(): framesPerCallback too large");
+                mServiceInterface.closeStream(mServiceStreamHandle);
+                return AAUDIO_ERROR_OUT_OF_RANGE;
+
+            } else if (mCallbackFrames < 0) {
+                ALOGE("AudioStreamInternal.open(): framesPerCallback negative");
+                mServiceInterface.closeStream(mServiceStreamHandle);
+                return AAUDIO_ERROR_OUT_OF_RANGE;
+
+            }
+            if (mCallbackFrames == AAUDIO_UNSPECIFIED) {
+                mCallbackFrames = mFramesPerBurst;
+            }
+
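+            // Allocate a buffer that holds one callback burst of frames in the application's sample format.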
+            int32_t bytesPerFrame = getSamplesPerFrame()
+                                    * AAudioConvert_formatToSizeInBytes(getFormat());
+            int32_t callbackBufferSize = mCallbackFrames * bytesPerFrame;
+            mCallbackBuffer = new uint8_t[callbackBufferSize];
+        }
+
         setState(AAUDIO_STREAM_STATE_OPEN);
     }
     return result;
 }
 
 aaudio_result_t AudioStreamInternal::close() {
-    ALOGD("AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X", mServiceStreamHandle);
+    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X", mServiceStreamHandle);
     if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
         aaudio_handle_t serviceStreamHandle = mServiceStreamHandle;
         mServiceStreamHandle = AAUDIO_HANDLE_INVALID;
-        const sp<IAAudioService>& aaudioService = getAAudioService();
-        if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
-        aaudioService->closeStream(serviceStreamHandle);
-        return AAUDIO_OK;
+
+        mServiceInterface.closeStream(serviceStreamHandle);
+        delete[] mCallbackBuffer;
+        return mEndPointParcelable.close();
     } else {
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
 }
 
+
+// Render audio in the application callback and then write the data to the stream.
+void *AudioStreamInternal::callbackLoop() {
+    aaudio_result_t result = AAUDIO_OK;
+    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
+    AAudioStream_dataCallback appCallback = getDataCallbackProc();
+    if (appCallback == nullptr) return NULL;
+
+    // result might be a frame count
+    while (mCallbackEnabled.load() && isPlaying() && (result >= 0)) {
+        // Call application using the AAudio callback interface.
+        callbackResult = (*appCallback)(
+                (AAudioStream *) this,
+                getDataCallbackUserData(),
+                mCallbackBuffer,
+                mCallbackFrames);
+
+        if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
+            // Write audio data to stream.
+            int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
+
+            // This is a BLOCKING WRITE!
+            result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
+            if (result != mCallbackFrames) {
+                ALOGE("AudioStreamInternal(): callbackLoop: write() returned %d", result);
+                if (result >= 0) {
+                    // Only wrote some of the frames requested. Must have timed out.
+                    result = AAUDIO_ERROR_TIMEOUT;
+                }
+                if (getErrorCallbackProc() != nullptr) {
+                    (*getErrorCallbackProc())(
+                            (AAudioStream *) this,
+                            getErrorCallbackUserData(),
+                            result);
+                }
+                break;
+            }
+        } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
+            ALOGD("AudioStreamInternal(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
+            break;
+        }
+    }
+
+    ALOGD("AudioStreamInternal(): callbackLoop() exiting, result = %d, isPlaying() = %d",
+          result, (int) isPlaying());
+    return NULL; // TODO review
+}
+
+static void *aaudio_callback_thread_proc(void *context)
+{
+    AudioStreamInternal *stream = (AudioStreamInternal *)context;
+    //ALOGD("AudioStreamInternal(): aaudio_callback_thread, stream = %p", stream);
+    if (stream != NULL) {
+        return stream->callbackLoop();
+    } else {
+        return NULL;
+    }
+}
+
 aaudio_result_t AudioStreamInternal::requestStart()
 {
     int64_t startTime;
-    ALOGD("AudioStreamInternal(): start()");
+    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): start()");
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    const sp<IAAudioService>& aaudioService = getAAudioService();
-    if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
+
     startTime = AudioClock::getNanoseconds();
     mClockModel.start(startTime);
     processTimestamp(0, startTime);
     setState(AAUDIO_STREAM_STATE_STARTING);
-    return aaudioService->startStream(mServiceStreamHandle);
+    aaudio_result_t result = mServiceInterface.startStream(mServiceStreamHandle);
+
+    if (result == AAUDIO_OK && getDataCallbackProc() != nullptr) {
+        // Launch the callback loop thread.
+        int64_t periodNanos = mCallbackFrames
+                              * AAUDIO_NANOS_PER_SECOND
+                              / getSampleRate();
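+        // Duration of one callback burst in nanoseconds; passed to createThread() as the thread's period.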
+        mCallbackEnabled.store(true);
+        result = createThread(periodNanos, aaudio_callback_thread_proc, this);
+    }
+    return result;
 }
 
-aaudio_result_t AudioStreamInternal::requestPause()
+int64_t AudioStreamInternal::calculateReasonableTimeout(int32_t framesPerOperation) {
+
+    // Allow enough time for several operations of this size, but never less than MIN_TIMEOUT_NANOS.
+    int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS * framesPerOperation * AAUDIO_NANOS_PER_SECOND)
+                         / getSampleRate();
+    if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) { // arbitrary number of seconds
+        timeoutNanoseconds = MIN_TIMEOUT_NANOS;
+    }
+    return timeoutNanoseconds;
+}
+
+aaudio_result_t AudioStreamInternal::stopCallback()
+{
+    if (isDataCallbackActive()) {
+        mCallbackEnabled.store(false);
+        return joinThread(NULL, calculateReasonableTimeout(mCallbackFrames));
+    } else {
+        return AAUDIO_OK;
+    }
+}
+
+aaudio_result_t AudioStreamInternal::requestPauseInternal()
 {
     ALOGD("AudioStreamInternal(): pause()");
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    const sp<IAAudioService>& aaudioService = getAAudioService();
-    if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
+
     mClockModel.stop(AudioClock::getNanoseconds());
     setState(AAUDIO_STREAM_STATE_PAUSING);
-    return aaudioService->pauseStream(mServiceStreamHandle);
+    return mServiceInterface.pauseStream(mServiceStreamHandle);
+}
+
+aaudio_result_t AudioStreamInternal::requestPause()
+{
+    aaudio_result_t result = stopCallback();
+    if (result != AAUDIO_OK) {
+        return result;
+    }
+    return requestPauseInternal();
 }
 
 aaudio_result_t AudioStreamInternal::requestFlush() {
-    ALOGD("AudioStreamInternal(): flush()");
+    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): flush()");
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    const sp<IAAudioService>& aaudioService = getAAudioService();
-    if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
-setState(AAUDIO_STREAM_STATE_FLUSHING);
-    return aaudioService->flushStream(mServiceStreamHandle);
+
+    setState(AAUDIO_STREAM_STATE_FLUSHING);
+    return mServiceInterface.flushStream(mServiceStreamHandle);
 }
 
 void AudioStreamInternal::onFlushFromServer() {
-    ALOGD("AudioStreamInternal(): onFlushFromServer()");
+    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): onFlushFromServer()");
     int64_t readCounter = mAudioEndpoint.getDownDataReadCounter();
     int64_t writeCounter = mAudioEndpoint.getDownDataWriteCounter();
     // Bump offset so caller does not see the retrograde motion in getFramesRead().
@@ -239,39 +353,38 @@
 }
 
 aaudio_result_t AudioStreamInternal::registerThread() {
-    ALOGD("AudioStreamInternal(): registerThread()");
+    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): registerThread()");
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    const sp<IAAudioService>& aaudioService = getAAudioService();
-    if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
-    return aaudioService->registerAudioThread(mServiceStreamHandle,
-                                         gettid(),
-                                         getPeriodNanoseconds());
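+    // Pass both the client's pid and tid so the service can manage this thread as a low-latency audio thread.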
+    return mServiceInterface.registerAudioThread(mServiceStreamHandle,
+                                              getpid(),
+                                              gettid(),
+                                              getPeriodNanoseconds());
 }
 
 aaudio_result_t AudioStreamInternal::unregisterThread() {
-    ALOGD("AudioStreamInternal(): unregisterThread()");
+    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): unregisterThread()");
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    const sp<IAAudioService>& aaudioService = getAAudioService();
-    if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
-    return aaudioService->unregisterAudioThread(mServiceStreamHandle, gettid());
+    return mServiceInterface.unregisterAudioThread(mServiceStreamHandle, getpid(), gettid());
 }
 
-// TODO use aaudio_clockid_t all the way down to AudioClock
 aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
                            int64_t *framePosition,
                            int64_t *timeNanoseconds) {
-// TODO implement using real HAL
+    // TODO implement using real HAL
     int64_t time = AudioClock::getNanoseconds();
     *framePosition = mClockModel.convertTimeToPosition(time);
     *timeNanoseconds = time + (10 * AAUDIO_NANOS_PER_MILLISECOND); // Fake hardware delay
     return AAUDIO_OK;
 }
 
-aaudio_result_t AudioStreamInternal::updateState() {
+aaudio_result_t AudioStreamInternal::updateStateWhileWaiting() {
+    if (isDataCallbackActive()) {
+        return AAUDIO_OK; // state is getting updated by the callback thread read/write call
+    }
     return processCommands();
 }
 
@@ -281,16 +394,16 @@
     static int64_t oldTime = 0;
     int64_t framePosition = command.timestamp.position;
     int64_t nanoTime = command.timestamp.timestamp;
-    ALOGD("AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu",
+    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu",
          (long long) framePosition,
          (long long) nanoTime);
     int64_t nanosDelta = nanoTime - oldTime;
     if (nanosDelta > 0 && oldTime > 0) {
         int64_t framesDelta = framePosition - oldPosition;
         int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
-        ALOGD("AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
-        ALOGD("AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
-        ALOGD("AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate);
+        ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
+        ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
+        ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate);
     }
     oldPosition = framePosition;
     oldTime = nanoTime;
@@ -309,29 +422,34 @@
 
 aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) {
     aaudio_result_t result = AAUDIO_OK;
-    ALOGD("processCommands() got event %d", message->event.event);
+    ALOGD_IF(ALOG_CONDITION, "processCommands() got event %d", message->event.event);
     switch (message->event.event) {
         case AAUDIO_SERVICE_EVENT_STARTED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
+            ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
             setState(AAUDIO_STREAM_STATE_STARTED);
             break;
         case AAUDIO_SERVICE_EVENT_PAUSED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
+            ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
             setState(AAUDIO_STREAM_STATE_PAUSED);
             break;
         case AAUDIO_SERVICE_EVENT_FLUSHED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
+            ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
             setState(AAUDIO_STREAM_STATE_FLUSHED);
             onFlushFromServer();
             break;
         case AAUDIO_SERVICE_EVENT_CLOSED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
+            ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
             setState(AAUDIO_STREAM_STATE_CLOSED);
             break;
         case AAUDIO_SERVICE_EVENT_DISCONNECTED:
             result = AAUDIO_ERROR_DISCONNECTED;
+            setState(AAUDIO_STREAM_STATE_DISCONNECTED);
             ALOGW("WARNING - processCommands() AAUDIO_SERVICE_EVENT_DISCONNECTED");
             break;
+        case AAUDIO_SERVICE_EVENT_VOLUME:
+            mVolume = message->event.dataDouble;
+            ALOGD_IF(ALOG_CONDITION, "processCommands() AAUDIO_SERVICE_EVENT_VOLUME %f", mVolume);
+            break;
         default:
             ALOGW("WARNING - processCommands() Unrecognized event = %d",
                  (int) message->event.event);
@@ -345,6 +463,7 @@
     aaudio_result_t result = AAUDIO_OK;
 
     while (result == AAUDIO_OK) {
+        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::processCommands() - looping, %d", result);
         AAudioServiceMessage message;
         if (mAudioEndpoint.readUpCommand(&message) != 1) {
             break; // no command this time, no problem
@@ -373,21 +492,26 @@
                                          int64_t timeoutNanoseconds)
 {
     aaudio_result_t result = AAUDIO_OK;
+    int32_t loopCount = 0;
     uint8_t* source = (uint8_t*)buffer;
     int64_t currentTimeNanos = AudioClock::getNanoseconds();
     int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
     int32_t framesLeft = numFrames;
-//    ALOGD("AudioStreamInternal::write(%p, %d) at time %08llu , mState = %d ------------------",
-//         buffer, numFrames, (unsigned long long) currentTimeNanos, mState);
+    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write(%p, %d) at time %08llu , mState = %s",
+    //      buffer, numFrames, (unsigned long long) currentTimeNanos,
+    //      AAudio_convertStreamStateToText(getState()));
 
     // Write until all the data has been written or until a timeout occurs.
     while (framesLeft > 0) {
+        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write() loop: framesLeft = %d, loopCount = %d  =====",
+        //      framesLeft, loopCount++);
         // The call to writeNow() will not block. It will just write as much as it can.
         int64_t wakeTimeNanos = 0;
         aaudio_result_t framesWritten = writeNow(source, framesLeft,
                                                currentTimeNanos, &wakeTimeNanos);
-//        ALOGD("AudioStreamInternal::write() writeNow() framesLeft = %d --> framesWritten = %d", framesLeft, framesWritten);
+        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write() loop: framesWritten = %d", framesWritten);
         if (framesWritten < 0) {
+            ALOGE("AudioStreamInternal::write() loop: writeNow returned %d", framesWritten);
             result = framesWritten;
             break;
         }
@@ -398,18 +522,19 @@
         if (timeoutNanoseconds == 0) {
             break; // don't block
         } else if (framesLeft > 0) {
-            //ALOGD("AudioStreamInternal:: original wakeTimeNanos %lld", (long long) wakeTimeNanos);
+            //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal:: original wakeTimeNanos %lld", (long long) wakeTimeNanos);
             // clip the wake time to something reasonable
             if (wakeTimeNanos < currentTimeNanos) {
                 wakeTimeNanos = currentTimeNanos;
             }
             if (wakeTimeNanos > deadlineNanos) {
                 // If we time out, just return the framesWritten so far.
-                ALOGE("AudioStreamInternal::write(): timed out after %lld nanos", (long long) timeoutNanoseconds);
+                ALOGE("AudioStreamInternal::write(): timed out after %lld nanos",
+                      (long long) timeoutNanoseconds);
                 break;
             }
 
-            //ALOGD("AudioStreamInternal:: sleep until %lld, dur = %lld", (long long) wakeTimeNanos,
+            //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal:: sleep until %lld, dur = %lld", (long long) wakeTimeNanos,
             //        (long long) (wakeTimeNanos - currentTimeNanos));
             AudioClock::sleepForNanos(wakeTimeNanos - currentTimeNanos);
             currentTimeNanos = AudioClock::getNanoseconds();
@@ -417,43 +542,52 @@
     }
 
     // return error or framesWritten
+    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write() result = %d, framesLeft = %d, #%d",
+    //      result, framesLeft, loopCount);
+    (void) loopCount;
     return (result < 0) ? result : numFrames - framesLeft;
 }
 
 // Write as much data as we can without blocking.
 aaudio_result_t AudioStreamInternal::writeNow(const void *buffer, int32_t numFrames,
                                          int64_t currentNanoTime, int64_t *wakeTimePtr) {
+
+    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow(%p) - enter", buffer);
     {
         aaudio_result_t result = processCommands();
+        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - processCommands() returned %d", result);
         if (result != AAUDIO_OK) {
             return result;
         }
     }
 
     if (mAudioEndpoint.isOutputFreeRunning()) {
+        ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - update read counter");
         // Update data queue based on the timing model.
         int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
         mAudioEndpoint.setDownDataReadCounter(estimatedReadCounter);
-        // If the read index passed the write index then consider it an underrun.
-        if (mAudioEndpoint.getFullFramesAvailable() < 0) {
-            mXRunCount++;
-        }
     }
     // TODO else query from endpoint cuz set by actual reader, maybe
 
-    // Write some data to the buffer.
-    int32_t framesWritten = mAudioEndpoint.writeDataNow(buffer, numFrames);
-    if (framesWritten > 0) {
-        incrementFramesWritten(framesWritten);
+    // If the read index passed the write index then consider it an underrun.
+    if (mAudioEndpoint.getFullFramesAvailable() < 0) {
+        mXRunCount++;
     }
-    //ALOGD("AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
+
+    // Write some data to the buffer.
+    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - writeNowWithConversion(%d)", numFrames);
+    int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
+    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
     //    numFrames, framesWritten);
 
     // Calculate an ideal time to wake up.
     if (wakeTimePtr != nullptr && framesWritten >= 0) {
         // By default wake up a few milliseconds from now.  // TODO review
-        int64_t wakeTime = currentNanoTime + (2 * AAUDIO_NANOS_PER_MILLISECOND);
-        switch (getState()) {
+        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
+        aaudio_stream_state_t state = getState();
+        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - wakeTime based on %s",
+        //      AAudio_convertStreamStateToText(state));
+        switch (state) {
             case AAUDIO_STREAM_STATE_OPEN:
             case AAUDIO_STREAM_STATE_STARTING:
                 if (framesWritten != 0) {
@@ -478,50 +612,68 @@
         *wakeTimePtr = wakeTime;
 
     }
-//    ALOGD("AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
+//    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
 //         (unsigned long long)currentNanoTime,
 //         (unsigned long long)mAudioEndpoint.getDownDataReadCounter(),
 //         (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
     return framesWritten;
 }
 
-aaudio_result_t AudioStreamInternal::waitForStateChange(aaudio_stream_state_t currentState,
-                                                      aaudio_stream_state_t *nextState,
-                                                      int64_t timeoutNanoseconds)
 
-{
-    aaudio_result_t result = processCommands();
-//    ALOGD("AudioStreamInternal::waitForStateChange() - processCommands() returned %d", result);
-    if (result != AAUDIO_OK) {
-        return result;
-    }
-    // TODO replace this polling with a timed sleep on a futex on the message queue
-    int32_t durationNanos = 5 * AAUDIO_NANOS_PER_MILLISECOND;
-    aaudio_stream_state_t state = getState();
-//    ALOGD("AudioStreamInternal::waitForStateChange() - state = %d", state);
-    while (state == currentState && timeoutNanoseconds > 0) {
-        // TODO use futex from service message queue
-        if (durationNanos > timeoutNanoseconds) {
-            durationNanos = timeoutNanoseconds;
+// TODO this function needs a major cleanup.
+aaudio_result_t AudioStreamInternal::writeNowWithConversion(const void *buffer,
+                                       int32_t numFrames) {
+    // ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNowWithConversion(%p, %d)", buffer, numFrames);
+    WrappingBuffer wrappingBuffer;
+    uint8_t *source = (uint8_t *) buffer;
+    int32_t framesLeft = numFrames;
+
+    mAudioEndpoint.getEmptyRoomAvailable(&wrappingBuffer);
+
+    // Write the data in one or two parts; the FIFO's empty region may wrap, so it is exposed as up to two contiguous pieces.
+    int partIndex = 0;
+    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
+        int32_t framesToWrite = framesLeft;
+        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
+        if (framesAvailable > 0) {
+            if (framesToWrite > framesAvailable) {
+                framesToWrite = framesAvailable;
+            }
+            int32_t numBytes = framesToWrite * getBytesPerFrame();
+            // TODO handle volume scaling
+            if (getFormat() == mDeviceFormat) {
+                // Copy straight through.
+                memcpy(wrappingBuffer.data[partIndex], source, numBytes);
+            } else if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT
+                    && mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
+                // Data conversion.
+                AAudioConvert_floatToPcm16(
+                        (const float *) source,
+                        framesToWrite * getSamplesPerFrame(),
+                        (int16_t *) wrappingBuffer.data[partIndex]);
+            } else {
+                // TODO handle more conversions
+                ALOGE("AudioStreamInternal::writeNowWithConversion() unsupported formats: %d, %d",
+                      getFormat(), mDeviceFormat);
+                return AAUDIO_ERROR_UNEXPECTED_VALUE;
+            }
+
+            source += numBytes;
+            framesLeft -= framesToWrite;
         }
-        AudioClock::sleepForNanos(durationNanos);
-        timeoutNanoseconds -= durationNanos;
-
-        result = processCommands();
-        if (result != AAUDIO_OK) {
-            return result;
-        }
-
-        state = getState();
-//        ALOGD("AudioStreamInternal::waitForStateChange() - state = %d", state);
+        partIndex++;
     }
-    if (nextState != nullptr) {
-        *nextState = state;
+    int32_t framesWritten = numFrames - framesLeft;
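+    // Publish everything written (in both parts) to the reader by advancing the FIFO write index once.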
+    mAudioEndpoint.advanceWriteIndex(framesWritten);
+
+    if (framesWritten > 0) {
+        incrementFramesWritten(framesWritten);
     }
-    return (state == currentState) ? AAUDIO_ERROR_TIMEOUT : AAUDIO_OK;
+    // ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
+    return framesWritten;
 }
 
-
 void AudioStreamInternal::processTimestamp(uint64_t position, int64_t time) {
     mClockModel.processTimestamp( position, time);
 }
@@ -562,7 +714,7 @@
     } else {
         mLastFramesRead = framesRead;
     }
-    ALOGD("AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
+    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
     return framesRead;
 }
 
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 6f3a7ac..1aa3b0f 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -26,6 +26,8 @@
 #include "client/AudioEndpoint.h"
 #include "core/AudioStream.h"
 
+#include "binding/AAudioServiceInterface.h"
+
 using android::sp;
 using android::IAAudioService;
 
@@ -35,61 +37,66 @@
 class AudioStreamInternal : public AudioStream {
 
 public:
-    AudioStreamInternal();
+    AudioStreamInternal(AAudioServiceInterface  &serviceInterface, bool inService = false);
     virtual ~AudioStreamInternal();
 
     // =========== Begin ABSTRACT methods ===========================
-    virtual aaudio_result_t requestStart() override;
+    aaudio_result_t requestStart() override;
 
-    virtual aaudio_result_t requestPause() override;
+    aaudio_result_t requestPause() override;
 
-    virtual aaudio_result_t requestFlush() override;
+    aaudio_result_t requestFlush() override;
 
-    virtual aaudio_result_t requestStop() override;
+    aaudio_result_t requestStop() override;
 
     // TODO use aaudio_clockid_t all the way down to AudioClock
-    virtual aaudio_result_t getTimestamp(clockid_t clockId,
+    aaudio_result_t getTimestamp(clockid_t clockId,
                                        int64_t *framePosition,
                                        int64_t *timeNanoseconds) override;
 
 
-    virtual aaudio_result_t updateState() override;
+
+    virtual aaudio_result_t updateStateWhileWaiting() override;
+
     // =========== End ABSTRACT methods ===========================
 
-    virtual aaudio_result_t open(const AudioStreamBuilder &builder) override;
+    aaudio_result_t open(const AudioStreamBuilder &builder) override;
 
-    virtual aaudio_result_t close() override;
+    aaudio_result_t close() override;
 
-    virtual aaudio_result_t write(const void *buffer,
+    aaudio_result_t write(const void *buffer,
                              int32_t numFrames,
                              int64_t timeoutNanoseconds) override;
 
-    virtual aaudio_result_t waitForStateChange(aaudio_stream_state_t currentState,
-                                          aaudio_stream_state_t *nextState,
-                                          int64_t timeoutNanoseconds) override;
+    aaudio_result_t setBufferSize(int32_t requestedFrames) override;
 
-    virtual aaudio_result_t setBufferSize(int32_t requestedFrames) override;
+    int32_t getBufferSize() const override;
 
-    virtual int32_t getBufferSize() const override;
+    int32_t getBufferCapacity() const override;
 
-    virtual int32_t getBufferCapacity() const override;
+    int32_t getFramesPerBurst() const override;
 
-    virtual int32_t getFramesPerBurst() const override;
+    int64_t getFramesRead() override;
 
-    virtual int64_t getFramesRead() override;
-
-    virtual int32_t getXRunCount() const override {
+    int32_t getXRunCount() const override {
         return mXRunCount;
     }
 
-    virtual aaudio_result_t registerThread() override;
+    aaudio_result_t registerThread() override;
 
-    virtual aaudio_result_t unregisterThread() override;
+    aaudio_result_t unregisterThread() override;
+
+    // Called internally from 'C'
+    void *callbackLoop();
 
 protected:
 
     aaudio_result_t processCommands();
 
+    aaudio_result_t requestPauseInternal();
+
+    aaudio_result_t stopCallback();
+
 /**
  * Low level write that will not block. It will just write as much as it can.
  *
@@ -97,10 +104,10 @@
  *
  * @return the number of frames written or a negative error code.
  */
-    virtual aaudio_result_t writeNow(const void *buffer,
-                                int32_t numFrames,
-                                int64_t currentTimeNanos,
-                                int64_t *wakeTimePtr);
+    aaudio_result_t writeNow(const void *buffer,
+                                     int32_t numFrames,
+                                     int64_t currentTimeNanos,
+                                     int64_t *wakeTimePtr);
 
     void onFlushFromServer();
 
@@ -108,18 +115,45 @@
 
     aaudio_result_t onTimestampFromServer(AAudioServiceMessage *message);
 
-private:
-    IsochronousClockModel    mClockModel;
-    AudioEndpoint            mAudioEndpoint;
-    aaudio_handle_t          mServiceStreamHandle;
-    EndpointDescriptor       mEndpointDescriptor;
-    // Offset from underlying frame position.
-    int64_t                  mFramesOffsetFromService = 0;
-    int64_t                  mLastFramesRead = 0;
-    int32_t                  mFramesPerBurst;
-    int32_t                  mXRunCount = 0;
+    // Calculate timeout for an operation involving framesPerOperation.
+    int64_t calculateReasonableTimeout(int32_t framesPerOperation);
 
+private:
+    /*
+     * Asynchronous write with data conversion.
+     * @param buffer
+     * @param numFrames
+     * @return frames written or a negative error code
+     */
+    aaudio_result_t writeNowWithConversion(const void *buffer,
+                                     int32_t numFrames);
+
+    // Adjust timing model based on timestamp from service.
     void processTimestamp(uint64_t position, int64_t time);
+
+    IsochronousClockModel    mClockModel;      // timing model for chasing the HAL
+    AudioEndpoint            mAudioEndpoint;   // sink for writes
+    aaudio_handle_t          mServiceStreamHandle; // opaque handle returned from service
+
+    AudioEndpointParcelable  mEndPointParcelable; // description of the buffers filled by service
+    EndpointDescriptor       mEndpointDescriptor; // buffer description with resolved addresses
+
+    aaudio_audio_format_t    mDeviceFormat = AAUDIO_FORMAT_UNSPECIFIED;
+
+    uint8_t                 *mCallbackBuffer = nullptr;
+    int32_t                  mCallbackFrames = 0;
+
+    // Offset from underlying frame position.
+    int64_t                  mFramesOffsetFromService = 0; // offset for timestamps
+    int64_t                  mLastFramesRead = 0; // used to prevent retrograde motion
+    int32_t                  mFramesPerBurst;     // frames per HAL transfer
+    int32_t                  mXRunCount = 0;      // how many underrun events?
+    float                    mVolume = 1.0;       // volume that the server told us to use
+
+    AAudioServiceInterface  &mServiceInterface;   // abstract interface to the service
+
+    // The service uses this for SHARED mode.
+    bool                     mInService = false;  // Are we running in the client or the service?
 };
 
 } /* namespace aaudio */
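
Note on calculateReasonableTimeout() declared above: only its one-line comment appears in this patch. As a rough, non-authoritative sketch of the kind of computation that comment implies (the multiplier and the floor below are assumptions, not taken from this patch):

    // Hedged sketch: convert a frame count to wall-clock time at the sample rate, then pad it.
    static int64_t calculateReasonableTimeout(int32_t framesPerOperation, int32_t sampleRate) {
        int64_t nominalNanos = (static_cast<int64_t>(framesPerOperation) * 1000000000LL) / sampleRate;
        int64_t timeoutNanos = 8 * nominalNanos;          // generous multiplier (assumption)
        const int64_t kMinNanos = 20LL * 1000 * 1000;     // 20 ms floor (assumption)
        return (timeoutNanos < kMinNanos) ? kMinNanos : timeoutNanos;
    }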
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index 4c8aabc..c278c8b 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -19,7 +19,6 @@
 #include <utils/Log.h>
 
 #include <stdint.h>
-#include <aaudio/AAudioDefinitions.h>
 
 #include "utility/AudioClock.h"
 #include "IsochronousClockModel.h"
diff --git a/media/libaaudio/src/client/IsochronousClockModel.h b/media/libaaudio/src/client/IsochronousClockModel.h
index 524c286..205c341 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.h
+++ b/media/libaaudio/src/client/IsochronousClockModel.h
@@ -14,11 +14,10 @@
  * limitations under the License.
  */
 
-#ifndef AAUDIO_ISOCHRONOUSCLOCKMODEL_H
-#define AAUDIO_ISOCHRONOUSCLOCKMODEL_H
+#ifndef AAUDIO_ISOCHRONOUS_CLOCK_MODEL_H
+#define AAUDIO_ISOCHRONOUS_CLOCK_MODEL_H
 
 #include <stdint.h>
-#include <aaudio/AAudio.h>
 
 namespace aaudio {
 
@@ -107,4 +106,4 @@
 
 } /* namespace aaudio */
 
-#endif //AAUDIO_ISOCHRONOUSCLOCKMODEL_H
+#endif //AAUDIO_ISOCHRONOUS_CLOCK_MODEL_H
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 52bad70..f687e7d 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -49,10 +49,13 @@
 AAUDIO_API const char * AAudio_convertResultToText(aaudio_result_t returnCode) {
     switch (returnCode) {
         AAUDIO_CASE_ENUM(AAUDIO_OK);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_DISCONNECTED);
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_ILLEGAL_ARGUMENT);
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_INCOMPATIBLE);
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_INTERNAL);
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_STATE);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_UNEXPECTED_STATE);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_UNEXPECTED_VALUE);
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_HANDLE);
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_QUERY);
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_UNIMPLEMENTED);
@@ -62,9 +65,10 @@
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_NULL);
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_TIMEOUT);
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_WOULD_BLOCK);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_ORDER);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_FORMAT);
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_OUT_OF_RANGE);
         AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_SERVICE);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_RATE);
     }
     return "Unrecognized AAudio error.";
 }
@@ -82,6 +86,7 @@
         AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_FLUSHED);
         AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STOPPING);
         AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STOPPED);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_DISCONNECTED);
         AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_CLOSING);
         AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_CLOSED);
     }
@@ -102,7 +107,6 @@
 
 AAUDIO_API aaudio_result_t AAudio_createStreamBuilder(AAudioStreamBuilder** builder)
 {
-    ALOGD("AAudio_createStreamBuilder(): check sHandleTracker.isInitialized ()");
     AudioStreamBuilder *audioStreamBuilder =  new AudioStreamBuilder();
     if (audioStreamBuilder == nullptr) {
         return AAUDIO_ERROR_NO_MEMORY;
@@ -114,53 +118,79 @@
 AAUDIO_API void AAudioStreamBuilder_setDeviceId(AAudioStreamBuilder* builder,
                                                      int32_t deviceId)
 {
-    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);;
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setDeviceId(deviceId);
 }
 
 AAUDIO_API void AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder* builder,
                                               int32_t sampleRate)
 {
-    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);;
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setSampleRate(sampleRate);
 }
 
 AAUDIO_API void AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder* builder,
                                                    int32_t samplesPerFrame)
 {
-    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);;
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setSamplesPerFrame(samplesPerFrame);
 }
 
 AAUDIO_API void AAudioStreamBuilder_setDirection(AAudioStreamBuilder* builder,
                                              aaudio_direction_t direction)
 {
-    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);;
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setDirection(direction);
 }
 
-
 AAUDIO_API void AAudioStreamBuilder_setFormat(AAudioStreamBuilder* builder,
                                                    aaudio_audio_format_t format)
 {
-    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);;
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setFormat(format);
 }
 
 AAUDIO_API void AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder* builder,
                                                         aaudio_sharing_mode_t sharingMode)
 {
-    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);;
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setSharingMode(sharingMode);
 }
 
 AAUDIO_API void AAudioStreamBuilder_setBufferCapacityInFrames(AAudioStreamBuilder* builder,
                                                         int32_t frames)
 {
-    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);;
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setBufferCapacity(frames);
 }
 
+AAUDIO_API void AAudioStreamBuilder_setDataCallback(AAudioStreamBuilder* builder,
+                                                    AAudioStream_dataCallback callback,
+                                                    void *userData)
+{
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+    ALOGD("AAudioStreamBuilder_setCallback(): userData = %p", userData);
+    streamBuilder->setDataCallbackProc(callback);
+    streamBuilder->setDataCallbackUserData(userData);
+}
+AAUDIO_API void AAudioStreamBuilder_setErrorCallback(AAudioStreamBuilder* builder,
+                                                 AAudioStream_errorCallback callback,
+                                                 void *userData)
+{
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+    ALOGD("AAudioStreamBuilder_setCallback(): userData = %p", userData);
+    streamBuilder->setErrorCallbackProc(callback);
+    streamBuilder->setErrorCallbackUserData(userData);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setFramesPerDataCallback(AAudioStreamBuilder* builder,
+                                                int32_t frames)
+{
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+    ALOGD("%s: frames = %d", __func__, frames);
+    streamBuilder->setFramesPerDataCallback(frames);
+}
+
 static aaudio_result_t  AAudioInternal_openStream(AudioStreamBuilder *streamBuilder,
                                               AAudioStream** streamPtr)
 {
@@ -276,6 +306,13 @@
     if (buffer == nullptr) {
         return AAUDIO_ERROR_NULL;
     }
+
+    // Don't allow writes when playing with a callback.
+    if (audioStream->getDataCallbackProc() != nullptr && audioStream->isPlaying()) {
+        ALOGE("Cannot write to a callback stream when running.");
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+
     if (numFrames < 0) {
         return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
     } else if (numFrames == 0) {
@@ -297,6 +334,9 @@
                                      aaudio_audio_thread_proc_t threadProc, void *arg)
 {
     AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    if (audioStream->getDataCallbackProc() != nullptr) {
+        return AAUDIO_ERROR_INCOMPATIBLE;
+    }
     return audioStream->createThread(periodNanoseconds, threadProc, arg);
 }
 
@@ -361,6 +401,12 @@
     return audioStream->getFramesPerBurst();
 }
 
+AAUDIO_API int32_t AAudioStream_getFramesPerDataCallback(AAudioStream* stream)
+{
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getFramesPerDataCallback();
+}
+
 AAUDIO_API int32_t AAudioStream_getBufferCapacityInFrames(AAudioStream* stream)
 {
     AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
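
For orientation, a minimal client-side sketch of the callback registration added above. Only AAudio_createStreamBuilder() and the three setter calls are taken from this patch; the callback signatures are inferred from how the callbacks are invoked in AudioStreamLegacy.cpp further down, and the stream still has to be opened and started through the existing AAudio entry points:

    #include <cstring>

    // Hypothetical output data callback: deliver numFrames frames of silence.
    static aaudio_data_callback_result_t myDataCallback(AAudioStream * /*stream*/, void *userData,
                                                         void *audioData, int32_t numFrames) {
        int32_t bytesPerFrame = *static_cast<int32_t *>(userData);
        memset(audioData, 0, static_cast<size_t>(numFrames) * bytesPerFrame);
        return AAUDIO_CALLBACK_RESULT_CONTINUE;
    }

    // Hypothetical error callback: invoked when the stream is disconnected.
    static void myErrorCallback(AAudioStream * /*stream*/, void * /*userData*/, aaudio_result_t error) {
        (void) error; // typically: flag the error and reopen the stream from another thread
    }

    static aaudio_result_t registerCallbacks(int32_t *bytesPerFrame) {
        AAudioStreamBuilder *builder = nullptr;
        aaudio_result_t result = AAudio_createStreamBuilder(&builder);
        if (result != AAUDIO_OK) return result;
        AAudioStreamBuilder_setDataCallback(builder, myDataCallback, bytesPerFrame);
        AAudioStreamBuilder_setErrorCallback(builder, myErrorCallback, nullptr);
        AAudioStreamBuilder_setFramesPerDataCallback(builder, 192); // request fixed-size callbacks
        return AAUDIO_OK;
    }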
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index b054d94..7c0b5ae 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -28,7 +28,9 @@
 
 using namespace aaudio;
 
-AudioStream::AudioStream() {
+AudioStream::AudioStream()
+        : mCallbackEnabled(false)
+{
     // mThread is a pthread_t of unknown size so we need memset.
     memset(&mThread, 0, sizeof(mThread));
     setPeriodNanoseconds(0);
@@ -36,13 +38,31 @@
 
 aaudio_result_t AudioStream::open(const AudioStreamBuilder& builder)
 {
-    // TODO validate parameters.
+
     // Copy parameters from the Builder because the Builder may be deleted after this call.
     mSamplesPerFrame = builder.getSamplesPerFrame();
     mSampleRate = builder.getSampleRate();
     mDeviceId = builder.getDeviceId();
     mFormat = builder.getFormat();
+    mDirection = builder.getDirection();
     mSharingMode = builder.getSharingMode();
+
+    // callbacks
+    mFramesPerDataCallback = builder.getFramesPerDataCallback();
+    mDataCallbackProc = builder.getDataCallbackProc();
+    mErrorCallbackProc = builder.getErrorCallbackProc();
+    mDataCallbackUserData = builder.getDataCallbackUserData();
+
+    // TODO validate more parameters.
+    if (mErrorCallbackProc != nullptr && mDataCallbackProc == nullptr) {
+        ALOGE("AudioStream::open(): disconnect callback cannot be used without a data callback.");
+        return AAUDIO_ERROR_UNEXPECTED_VALUE;
+    }
+    if (mDirection != AAUDIO_DIRECTION_INPUT && mDirection != AAUDIO_DIRECTION_OUTPUT) {
+        ALOGE("AudioStream::open(): illegal direction %d", mDirection);
+        return AAUDIO_ERROR_UNEXPECTED_VALUE;
+    }
+
     return AAUDIO_OK;
 }
 
@@ -75,8 +95,13 @@
                                                 aaudio_stream_state_t *nextState,
                                                 int64_t timeoutNanoseconds)
 {
+    aaudio_result_t result = updateStateWhileWaiting();
+    if (result != AAUDIO_OK) {
+        return result;
+    }
+
     // TODO replace this when similar functionality added to AudioTrack.cpp
-    int64_t durationNanos = 20 * AAUDIO_NANOS_PER_MILLISECOND;
+    int64_t durationNanos = 20 * AAUDIO_NANOS_PER_MILLISECOND; // arbitrary
     aaudio_stream_state_t state = getState();
     while (state == currentState && timeoutNanoseconds > 0) {
         if (durationNanos > timeoutNanoseconds) {
@@ -85,7 +110,7 @@
         AudioClock::sleepForNanos(durationNanos);
         timeoutNanoseconds -= durationNanos;
 
-        aaudio_result_t result = updateState();
+        aaudio_result_t result = updateStateWhileWaiting();
         if (result != AAUDIO_OK) {
             return result;
         }
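
As a usage note, a hedged sketch of how a client drives the polling wait above after requesting a pause; AAudioStream_requestPause() and AAudioStream_waitForStateChange() are assumed from the public AAudio C API, while the constants appear elsewhere in this patch:

    static aaudio_result_t pauseAndWait(AAudioStream *stream) {
        aaudio_result_t result = AAudioStream_requestPause(stream);
        if (result != AAUDIO_OK) return result;
        aaudio_stream_state_t nextState = AAUDIO_STREAM_STATE_UNINITIALIZED;
        // waitForStateChange() polls in ~20 ms steps until the state leaves PAUSING or times out.
        return AAudioStream_waitForStateChange(stream,
                                               AAUDIO_STREAM_STATE_PAUSING,
                                               &nextState,
                                               500 * AAUDIO_NANOS_PER_MILLISECOND);
    }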
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 6ac8554..da71906 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -18,12 +18,12 @@
 #define AAUDIO_AUDIOSTREAM_H
 
 #include <atomic>
+#include <mutex>
 #include <stdint.h>
-#include <aaudio/AAudioDefinitions.h>
 #include <aaudio/AAudio.h>
 
-#include "AAudioUtilities.h"
-#include "MonotonicCounter.h"
+#include "utility/AAudioUtilities.h"
+#include "utility/MonotonicCounter.h"
 
 namespace aaudio {
 
@@ -55,14 +55,18 @@
                                        int64_t *timeNanoseconds) = 0;
 
 
-    virtual aaudio_result_t updateState() = 0;
+    /**
+     * Update state while in the middle of waitForStateChange()
+     * @return
+     */
+    virtual aaudio_result_t updateStateWhileWaiting() = 0;
 
 
     // =========== End ABSTRACT methods ===========================
 
     virtual aaudio_result_t waitForStateChange(aaudio_stream_state_t currentState,
-                                          aaudio_stream_state_t *nextState,
-                                          int64_t timeoutNanoseconds);
+                                               aaudio_stream_state_t *nextState,
+                                               int64_t timeoutNanoseconds);
 
     /**
      * Open the stream using the parameters in the builder.
@@ -152,10 +156,16 @@
         return mDirection;
     }
 
+    /**
+     * This is only valid after setSamplesPerFrame() and setFormat() have been called.
+     */
     int32_t getBytesPerFrame() const {
         return mSamplesPerFrame * getBytesPerSample();
     }
 
+    /**
+     * This is only valid after setFormat() has been called.
+     */
     int32_t getBytesPerSample() const {
         return AAudioConvert_formatToSizeInBytes(mFormat);
     }
@@ -168,6 +178,27 @@
         return mFramesRead.get();
     }
 
+    AAudioStream_dataCallback getDataCallbackProc() const {
+        return mDataCallbackProc;
+    }
+    AAudioStream_errorCallback getErrorCallbackProc() const {
+        return mErrorCallbackProc;
+    }
+
+    void *getDataCallbackUserData() const {
+        return mDataCallbackUserData;
+    }
+    void *getErrorCallbackUserData() const {
+        return mErrorCallbackUserData;
+    }
+
+    int32_t getFramesPerDataCallback() const {
+        return mFramesPerDataCallback;
+    }
+
+    bool isDataCallbackActive() {
+        return (mDataCallbackProc != nullptr) && isPlaying();
+    }
 
     // ============== I/O ===========================
     // A Stream will only implement read() or write() depending on its direction.
@@ -235,7 +266,13 @@
         mState = state;
     }
 
+    void setDeviceId(int32_t deviceId) {
+        mDeviceId = deviceId;
+    }
 
+    std::mutex           mStreamMutex;
+
+    std::atomic<bool>    mCallbackEnabled;
 
 protected:
     MonotonicCounter     mFramesWritten;
@@ -259,6 +296,15 @@
     aaudio_direction_t     mDirection = AAUDIO_DIRECTION_OUTPUT;
     aaudio_stream_state_t  mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
 
+    // callback ----------------------------------
+
+    AAudioStream_dataCallback   mDataCallbackProc = nullptr;  // external callback functions
+    void                       *mDataCallbackUserData = nullptr;
+    int32_t                     mFramesPerDataCallback = AAUDIO_UNSPECIFIED; // frames
+
+    AAudioStream_errorCallback  mErrorCallbackProc = nullptr;
+    void                       *mErrorCallbackUserData = nullptr;
+
     // background thread ----------------------------------
     bool                   mHasThread = false;
     pthread_t              mThread; // initialized in constructor
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index 5a54e62..c0b59bb 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -24,12 +24,18 @@
 #include <aaudio/AAudioDefinitions.h>
 #include <aaudio/AAudio.h>
 
+#include "binding/AAudioBinderClient.h"
 #include "client/AudioStreamInternal.h"
 #include "core/AudioStream.h"
 #include "core/AudioStreamBuilder.h"
 #include "legacy/AudioStreamRecord.h"
 #include "legacy/AudioStreamTrack.h"
 
+// Enable a mixer in AAudio service that will mix stream to an ALSA MMAP buffer.
+#define MMAP_SHARED_ENABLED      0
+// Enable AAUDIO_SHARING_MODE_EXCLUSIVE that uses an ALSA MMAP buffer.
+#define MMAP_EXCLUSIVE_ENABLED   1
+
 using namespace aaudio;
 
 /*
@@ -43,8 +49,11 @@
 
 aaudio_result_t AudioStreamBuilder::build(AudioStream** streamPtr) {
     AudioStream* audioStream = nullptr;
+    AAudioBinderClient *aaudioClient = nullptr;
     const aaudio_sharing_mode_t sharingMode = getSharingMode();
+    ALOGD("AudioStreamBuilder.build() sharingMode = %d", sharingMode);
     switch (getDirection()) {
+
     case AAUDIO_DIRECTION_INPUT:
         switch (sharingMode) {
             case AAUDIO_SHARING_MODE_SHARED:
@@ -56,26 +65,37 @@
                 break;
         }
         break;
+
     case AAUDIO_DIRECTION_OUTPUT:
         switch (sharingMode) {
             case AAUDIO_SHARING_MODE_SHARED:
+#if MMAP_SHARED_ENABLED
+                aaudioClient = new AAudioBinderClient();
+                audioStream = new(std::nothrow) AudioStreamInternal(*aaudioClient, false);
+#else
                 audioStream = new(std::nothrow) AudioStreamTrack();
+#endif
                 break;
+#if MMAP_EXCLUSIVE_ENABLED
             case AAUDIO_SHARING_MODE_EXCLUSIVE:
-                audioStream = new(std::nothrow) AudioStreamInternal();
+                aaudioClient = new AAudioBinderClient();
+                audioStream = new(std::nothrow) AudioStreamInternal(*aaudioClient, false);
                 break;
+#endif
             default:
                 ALOGE("AudioStreamBuilder(): bad sharing mode = %d", sharingMode);
                 return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
                 break;
         }
         break;
+
     default:
         ALOGE("AudioStreamBuilder(): bad direction = %d", getDirection());
         return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
         break;
     }
     if (audioStream == nullptr) {
+        delete aaudioClient;
         return AAUDIO_ERROR_NO_MEMORY;
     }
     ALOGD("AudioStreamBuilder(): created audioStream = %p", audioStream);
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.h b/media/libaaudio/src/core/AudioStreamBuilder.h
index 7b5f35c..93ca7f5 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.h
+++ b/media/libaaudio/src/core/AudioStreamBuilder.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef AAUDIO_AUDIOSTREAMBUILDER_H
-#define AAUDIO_AUDIOSTREAMBUILDER_H
+#ifndef AAUDIO_AUDIO_STREAM_BUILDER_H
+#define AAUDIO_AUDIO_STREAM_BUILDER_H
 
 #include <stdint.h>
 
@@ -101,6 +101,52 @@
         return this;
     }
 
+    AAudioStream_dataCallback getDataCallbackProc() const {
+        return mDataCallbackProc;
+    }
+
+    AudioStreamBuilder* setDataCallbackProc(AAudioStream_dataCallback proc) {
+        mDataCallbackProc = proc;
+        return this;
+    }
+
+
+    void *getDataCallbackUserData() const {
+        return mDataCallbackUserData;
+    }
+
+    AudioStreamBuilder* setDataCallbackUserData(void *userData) {
+        mDataCallbackUserData = userData;
+        return this;
+    }
+
+    AAudioStream_errorCallback getErrorCallbackProc() const {
+        return mErrorCallbackProc;
+    }
+
+    AudioStreamBuilder* setErrorCallbackProc(AAudioStream_errorCallback proc) {
+        mErrorCallbackProc = proc;
+        return this;
+    }
+
+    AudioStreamBuilder* setErrorCallbackUserData(void *userData) {
+        mErrorCallbackUserData = userData;
+        return this;
+    }
+
+    void *getErrorCallbackUserData() const {
+        return mErrorCallbackUserData;
+    }
+
+    int32_t getFramesPerDataCallback() const {
+        return mFramesPerDataCallback;
+    }
+
+    AudioStreamBuilder* setFramesPerDataCallback(int32_t sizeInFrames) {
+        mFramesPerDataCallback = sizeInFrames;
+        return this;
+    }
+
     aaudio_result_t build(AudioStream **streamPtr);
 
 private:
@@ -111,8 +157,15 @@
     aaudio_audio_format_t  mFormat = AAUDIO_FORMAT_UNSPECIFIED;
     aaudio_direction_t     mDirection = AAUDIO_DIRECTION_OUTPUT;
     int32_t                mBufferCapacity = AAUDIO_UNSPECIFIED;
+
+    AAudioStream_dataCallback  mDataCallbackProc = nullptr;  // external callback functions
+    void                      *mDataCallbackUserData = nullptr;
+    int32_t                    mFramesPerDataCallback = AAUDIO_UNSPECIFIED; // frames
+
+    AAudioStream_errorCallback mErrorCallbackProc = nullptr;
+    void                      *mErrorCallbackUserData = nullptr;
 };
 
 } /* namespace aaudio */
 
-#endif /* AAUDIO_AUDIOSTREAMBUILDER_H */
+#endif //AAUDIO_AUDIO_STREAM_BUILDER_H
diff --git a/media/libaaudio/src/fifo/FifoBuffer.cpp b/media/libaaudio/src/fifo/FifoBuffer.cpp
index c5489f1..857780c 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.cpp
+++ b/media/libaaudio/src/fifo/FifoBuffer.cpp
@@ -17,6 +17,7 @@
 #include <cstring>
 #include <unistd.h>
 
+
 #define LOG_TAG "FifoBuffer"
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
@@ -26,6 +27,8 @@
 #include "FifoControllerIndirect.h"
 #include "FifoBuffer.h"
 
+using namespace android; // TODO just import names needed
+
 FifoBuffer::FifoBuffer(int32_t bytesPerFrame, fifo_frames_t capacityInFrames)
         : mFrameCapacity(capacityInFrames)
         , mBytesPerFrame(bytesPerFrame)
@@ -79,80 +82,102 @@
     return frames * mBytesPerFrame;
 }
 
-fifo_frames_t FifoBuffer::read(void *buffer, fifo_frames_t numFrames) {
-    size_t numBytes;
-    fifo_frames_t framesAvailable = mFifo->getFullFramesAvailable();
-    fifo_frames_t framesToRead = numFrames;
-    // Is there enough data in the FIFO
-    if (framesToRead > framesAvailable) {
-        framesToRead = framesAvailable;
-    }
-    if (framesToRead == 0) {
-        return 0;
-    }
+void FifoBuffer::fillWrappingBuffer(WrappingBuffer *wrappingBuffer,
+                                    int32_t framesAvailable,
+                                    int32_t startIndex) {
+    wrappingBuffer->data[1] = nullptr;
+    wrappingBuffer->numFrames[1] = 0;
+    if (framesAvailable > 0) {
 
-    fifo_frames_t readIndex = mFifo->getReadIndex();
-    uint8_t *destination = (uint8_t *) buffer;
-    uint8_t *source = &mStorage[convertFramesToBytes(readIndex)];
-    if ((readIndex + framesToRead) > mFrameCapacity) {
-        // read in two parts, first part here
-        fifo_frames_t frames1 = mFrameCapacity - readIndex;
-        int32_t numBytes = convertFramesToBytes(frames1);
-        memcpy(destination, source, numBytes);
-        destination += numBytes;
-        // read second part
-        source = &mStorage[0];
-        fifo_frames_t frames2 = framesToRead - frames1;
-        numBytes = convertFramesToBytes(frames2);
-        memcpy(destination, source, numBytes);
+        uint8_t *source = &mStorage[convertFramesToBytes(startIndex)];
+        // Does the available data cross the end of the FIFO?
+        if ((startIndex + framesAvailable) > mFrameCapacity) {
+            wrappingBuffer->data[0] = source;
+            wrappingBuffer->numFrames[0] = mFrameCapacity - startIndex;
+            wrappingBuffer->data[1] = &mStorage[0];
+            wrappingBuffer->numFrames[1] = framesAvailable - wrappingBuffer->numFrames[0];
+
+        } else {
+            wrappingBuffer->data[0] = source;
+            wrappingBuffer->numFrames[0] = framesAvailable;
+        }
     } else {
-        // just read in one shot
-        numBytes = convertFramesToBytes(framesToRead);
-        memcpy(destination, source, numBytes);
+        wrappingBuffer->data[0] = nullptr;
+        wrappingBuffer->numFrames[0] = 0;
     }
-    mFifo->advanceReadIndex(framesToRead);
 
-    return framesToRead;
 }
 
-fifo_frames_t FifoBuffer::write(const void *buffer, fifo_frames_t framesToWrite) {
+void FifoBuffer::getFullDataAvailable(WrappingBuffer *wrappingBuffer) {
+    fifo_frames_t framesAvailable = mFifo->getFullFramesAvailable();
+    fifo_frames_t startIndex = mFifo->getReadIndex();
+    fillWrappingBuffer(wrappingBuffer, framesAvailable, startIndex);
+}
+
+void FifoBuffer::getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer) {
     fifo_frames_t framesAvailable = mFifo->getEmptyFramesAvailable();
-//    ALOGD("FifoBuffer::write() framesToWrite = %d, framesAvailable = %d",
-//         framesToWrite, framesAvailable);
-    if (framesToWrite > framesAvailable) {
-        framesToWrite = framesAvailable;
-    }
-    if (framesToWrite <= 0) {
-        return 0;
-    }
+    fifo_frames_t startIndex = mFifo->getWriteIndex();
+    fillWrappingBuffer(wrappingBuffer, framesAvailable, startIndex);
+}
 
-    size_t numBytes;
-    fifo_frames_t writeIndex = mFifo->getWriteIndex();
-    int byteIndex = convertFramesToBytes(writeIndex);
-    const uint8_t *source = (const uint8_t *) buffer;
-    uint8_t *destination = &mStorage[byteIndex];
-    if ((writeIndex + framesToWrite) > mFrameCapacity) {
-        // write in two parts, first part here
-        fifo_frames_t frames1 = mFrameCapacity - writeIndex;
-        numBytes = convertFramesToBytes(frames1);
-        memcpy(destination, source, numBytes);
-//        ALOGD("FifoBuffer::write(%p to %p, numBytes = %d", source, destination, numBytes);
-        // read second part
-        source += convertFramesToBytes(frames1);
-        destination = &mStorage[0];
-        fifo_frames_t framesLeft = framesToWrite - frames1;
-        numBytes = convertFramesToBytes(framesLeft);
-//        ALOGD("FifoBuffer::write(%p to %p, numBytes = %d", source, destination, numBytes);
-        memcpy(destination, source, numBytes);
-    } else {
-        // just write in one shot
-        numBytes = convertFramesToBytes(framesToWrite);
-//        ALOGD("FifoBuffer::write(%p to %p, numBytes = %d", source, destination, numBytes);
-        memcpy(destination, source, numBytes);
-    }
-    mFifo->advanceWriteIndex(framesToWrite);
+fifo_frames_t FifoBuffer::read(void *buffer, fifo_frames_t numFrames) {
+    WrappingBuffer wrappingBuffer;
+    uint8_t *destination = (uint8_t *) buffer;
+    fifo_frames_t framesLeft = numFrames;
 
-    return framesToWrite;
+    getFullDataAvailable(&wrappingBuffer);
+
+    // Read data in one or two parts.
+    int partIndex = 0;
+    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
+        fifo_frames_t framesToRead = framesLeft;
+        fifo_frames_t framesAvailable = wrappingBuffer.numFrames[partIndex];
+        //ALOGD("FifoProcessor::read() framesAvailable = %d, partIndex = %d",
+        //      framesAvailable, partIndex);
+        if (framesAvailable > 0) {
+            if (framesToRead > framesAvailable) {
+                framesToRead = framesAvailable;
+            }
+            int32_t numBytes = convertFramesToBytes(framesToRead);
+            memcpy(destination, wrappingBuffer.data[partIndex], numBytes);
+
+            destination += numBytes;
+            framesLeft -= framesToRead;
+        }
+        partIndex++;
+    }
+    fifo_frames_t framesRead = numFrames - framesLeft;
+    mFifo->advanceReadIndex(framesRead);
+    return framesRead;
+}
+
+fifo_frames_t FifoBuffer::write(const void *buffer, fifo_frames_t numFrames) {
+    WrappingBuffer wrappingBuffer;
+    const uint8_t *source = (const uint8_t *) buffer;
+    fifo_frames_t framesLeft = numFrames;
+
+    getEmptyRoomAvailable(&wrappingBuffer);
+
+    // Write data in one or two parts.
+    int partIndex = 0;
+    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
+        fifo_frames_t framesToWrite = framesLeft;
+        fifo_frames_t framesAvailable = wrappingBuffer.numFrames[partIndex];
+        if (framesAvailable > 0) {
+            if (framesToWrite > framesAvailable) {
+                framesToWrite = framesAvailable;
+            }
+            int32_t numBytes = convertFramesToBytes(framesToWrite);
+            memcpy(wrappingBuffer.data[partIndex], source, numBytes);
+
+            source += numBytes;
+            framesLeft -= framesToWrite;
+        }
+        partIndex++;
+    }
+    fifo_frames_t framesWritten = numFrames - framesLeft;
+    mFifo->advanceWriteIndex(framesWritten);
+    return framesWritten;
 }
 
 fifo_frames_t FifoBuffer::readNow(void *buffer, fifo_frames_t numFrames) {
diff --git a/media/libaaudio/src/fifo/FifoBuffer.h b/media/libaaudio/src/fifo/FifoBuffer.h
index faa9ae2..2b262a1 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.h
+++ b/media/libaaudio/src/fifo/FifoBuffer.h
@@ -21,15 +21,29 @@
 
 #include "FifoControllerBase.h"
 
+namespace android {
+
+/**
+ * Structure that represents a region in a circular buffer that might be at the
+ * end of the array and split in two.
+ */
+struct WrappingBuffer {
+    enum {
+        SIZE = 2
+    };
+    void *data[SIZE];
+    int32_t numFrames[SIZE];
+};
+
 class FifoBuffer {
 public:
     FifoBuffer(int32_t bytesPerFrame, fifo_frames_t capacityInFrames);
 
-    FifoBuffer(int32_t   bytesPerFrame,
-               fifo_frames_t   capacityInFrames,
-               fifo_counter_t * readCounterAddress,
-               fifo_counter_t * writeCounterAddress,
-               void * dataStorageAddress);
+    FifoBuffer(int32_t bytesPerFrame,
+               fifo_frames_t capacityInFrames,
+               fifo_counter_t *readCounterAddress,
+               fifo_counter_t *writeCounterAddress,
+               void *dataStorageAddress);
 
     ~FifoBuffer();
 
@@ -40,10 +54,33 @@
     fifo_frames_t write(const void *source, fifo_frames_t framesToWrite);
 
     fifo_frames_t getThreshold();
+
     void setThreshold(fifo_frames_t threshold);
 
     fifo_frames_t getBufferCapacityInFrames();
 
+    /**
+     * Return a pointer to the available full frames in data[0] and set the size in numFrames[0].
+     * If the data is split across the end of the FIFO then also fill in data[1] and numFrames[1].
+     * Otherwise set them to null.
+     * @param wrappingBuffer
+     */
+    void getFullDataAvailable(WrappingBuffer *wrappingBuffer);
+
+    /**
+     * Return a pointer to the available empty frames in data[0] and set the size in numFrames[0].
+     * If the room is split across the end of the FIFO then also fill in data[1] and numFrames[1].
+     * Otherwise set them to null.
+     * @param wrappingBuffer
+     */
+    void getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer);
+
+    /**
+     * Copy data from the FIFO into the buffer.
+     * @param buffer
+     * @param numFrames
+     * @return number of frames actually copied
+     */
     fifo_frames_t readNow(void *buffer, fifo_frames_t numFrames);
 
     int64_t getNextReadTime(int32_t frameRate);
@@ -73,15 +110,21 @@
     }
 
 private:
+
+    void fillWrappingBuffer(WrappingBuffer *wrappingBuffer,
+                            int32_t framesAvailable, int32_t startIndex);
+
     const fifo_frames_t mFrameCapacity;
-    const int32_t       mBytesPerFrame;
-    uint8_t *           mStorage;
-    bool                mStorageOwned; // did this object allocate the storage?
+    const int32_t mBytesPerFrame;
+    uint8_t *mStorage;
+    bool mStorageOwned; // did this object allocate the storage?
     FifoControllerBase *mFifo;
-    fifo_counter_t      mFramesReadCount;
-    fifo_counter_t      mFramesUnderrunCount;
-    int32_t             mUnderrunCount; // need? just use frames
-    int32_t             mLastReadSize;
+    fifo_counter_t mFramesReadCount;
+    fifo_counter_t mFramesUnderrunCount;
+    int32_t mUnderrunCount; // need? just use frames
+    int32_t mLastReadSize;
 };
 
+}  // android
+
 #endif //FIFO_FIFO_BUFFER_H
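
To make the wrapping behavior documented above concrete, here is a small self-contained sketch of the same split computation used by fillWrappingBuffer(): a run of framesAvailable frames starting at startIndex inside a circular buffer of frameCapacity frames is described as at most two contiguous pieces:

    #include <cstdint>

    struct Region { int32_t start; int32_t numFrames; };

    // Returns the number of pieces (0, 1 or 2) written into parts[].
    static int splitWrappedRegion(int32_t startIndex, int32_t framesAvailable,
                                  int32_t frameCapacity, Region parts[2]) {
        if (framesAvailable <= 0) return 0;
        if (startIndex + framesAvailable <= frameCapacity) {
            parts[0] = { startIndex, framesAvailable };            // one contiguous piece
            return 1;
        }
        parts[0] = { startIndex, frameCapacity - startIndex };     // run up to the end of the array
        parts[1] = { 0, framesAvailable - parts[0].numFrames };    // remainder wraps to the front
        return 2;
    }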
diff --git a/media/libaaudio/src/fifo/FifoController.h b/media/libaaudio/src/fifo/FifoController.h
index 7434634..79d98a1 100644
--- a/media/libaaudio/src/fifo/FifoController.h
+++ b/media/libaaudio/src/fifo/FifoController.h
@@ -22,6 +22,8 @@
 
 #include "FifoControllerBase.h"
 
+namespace android {
+
 /**
  * A FIFO with counters contained in the class.
  */
@@ -55,5 +57,6 @@
     std::atomic<fifo_counter_t> mWriteCounter;
 };
 
+}  // android
 
 #endif //FIFO_FIFO_CONTROLLER_H
diff --git a/media/libaaudio/src/fifo/FifoControllerBase.cpp b/media/libaaudio/src/fifo/FifoControllerBase.cpp
index 33a253e..14a2be1 100644
--- a/media/libaaudio/src/fifo/FifoControllerBase.cpp
+++ b/media/libaaudio/src/fifo/FifoControllerBase.cpp
@@ -21,6 +21,8 @@
 #include <stdint.h>
 #include "FifoControllerBase.h"
 
+using namespace android;  // TODO just import names needed
+
 FifoControllerBase::FifoControllerBase(fifo_frames_t capacity, fifo_frames_t threshold)
         : mCapacity(capacity)
         , mThreshold(threshold)
diff --git a/media/libaaudio/src/fifo/FifoControllerBase.h b/media/libaaudio/src/fifo/FifoControllerBase.h
index c543519..64af777 100644
--- a/media/libaaudio/src/fifo/FifoControllerBase.h
+++ b/media/libaaudio/src/fifo/FifoControllerBase.h
@@ -19,6 +19,8 @@
 
 #include <stdint.h>
 
+namespace android {
+
 typedef int64_t fifo_counter_t;
 typedef int32_t fifo_frames_t;
 
@@ -118,4 +120,6 @@
     fifo_frames_t mThreshold;
 };
 
+}  // android
+
 #endif // FIFO_FIFO_CONTROLLER_BASE_H
diff --git a/media/libaaudio/src/fifo/FifoControllerIndirect.h b/media/libaaudio/src/fifo/FifoControllerIndirect.h
index 1aaf9ea..5832d9c 100644
--- a/media/libaaudio/src/fifo/FifoControllerIndirect.h
+++ b/media/libaaudio/src/fifo/FifoControllerIndirect.h
@@ -22,6 +22,8 @@
 
 #include "FifoControllerBase.h"
 
+namespace android {
+
 /**
  * A FifoControllerBase with counters external to the class.
  *
@@ -66,4 +68,6 @@
     std::atomic<fifo_counter_t> * mWriteCounterAddress;
 };
 
+}  // android
+
 #endif //FIFO_FIFO_CONTROLLER_INDIRECT_H
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
new file mode 100644
index 0000000..baa24c9
--- /dev/null
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioStreamLegacy"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <utils/String16.h>
+#include <media/AudioTrack.h>
+#include <aaudio/AAudio.h>
+
+#include "core/AudioStream.h"
+#include "legacy/AudioStreamLegacy.h"
+
+using namespace android;
+using namespace aaudio;
+
+AudioStreamLegacy::AudioStreamLegacy()
+        : AudioStream() {
+}
+
+AudioStreamLegacy::~AudioStreamLegacy() {
+}
+
+// Called from AudioTrack.cpp or AudioRecord.cpp
+static void AudioStreamLegacy_callback(int event, void* userData, void *info) {
+    AudioStreamLegacy *streamLegacy = (AudioStreamLegacy *) userData;
+    streamLegacy->processCallback(event, info);
+}
+
+aaudio_legacy_callback_t AudioStreamLegacy::getLegacyCallback() {
+    return AudioStreamLegacy_callback;
+}
+
+// Implement FixedBlockProcessor
+int32_t AudioStreamLegacy::onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) {
+    int32_t frameCount = numBytes / getBytesPerFrame();
+    // Call using the AAudio callback interface.
+    AAudioStream_dataCallback appCallback = getDataCallbackProc();
+    return (*appCallback)(
+            (AAudioStream *) this,
+            getDataCallbackUserData(),
+            buffer,
+            frameCount);
+}
+
+void AudioStreamLegacy::processCallbackCommon(aaudio_callback_operation_t opcode, void *info) {
+    aaudio_data_callback_result_t callbackResult;
+    switch (opcode) {
+        case AAUDIO_CALLBACK_OPERATION_PROCESS_DATA: {
+            // Note that this code assumes an AudioTrack::Buffer is the same as AudioRecord::Buffer
+            // TODO define our own AudioBuffer and pass it from the subclasses.
+            AudioTrack::Buffer *audioBuffer = static_cast<AudioTrack::Buffer *>(info);
+            if (audioBuffer->frameCount == 0) return;
+
+            // If the caller specified an exact size then use a block size adapter.
+            if (mBlockAdapter != nullptr) {
+                int32_t byteCount = audioBuffer->frameCount * getBytesPerFrame();
+                callbackResult = mBlockAdapter->processVariableBlock((uint8_t *) audioBuffer->raw,
+                                                                     byteCount);
+            } else {
+                // Call using the AAudio callback interface.
+                callbackResult = (*getDataCallbackProc())(
+                        (AAudioStream *) this,
+                        getDataCallbackUserData(),
+                        audioBuffer->raw,
+                        audioBuffer->frameCount
+                        );
+            }
+            if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
+                audioBuffer->size = audioBuffer->frameCount * getBytesPerFrame();
+            } else {
+                audioBuffer->size = 0;
+            }
+        }
+            break;
+
+            // Stream got rerouted so we disconnect.
+        case AAUDIO_CALLBACK_OPERATION_DISCONNECTED: {
+            ALOGD("AudioStreamAAudio(): callbackLoop() stream disconnected");
+            if (getErrorCallbackProc() != nullptr) {
+                (*getErrorCallbackProc())(
+                        (AAudioStream *) this,
+                        getErrorCallbackUserData(),
+                        AAUDIO_OK
+                        );
+            }
+            mCallbackEnabled.store(false);
+        }
+            break;
+
+        default:
+            break;
+    }
+}
+
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.h b/media/libaaudio/src/legacy/AudioStreamLegacy.h
new file mode 100644
index 0000000..c109ee7
--- /dev/null
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LEGACY_AUDIO_STREAM_LEGACY_H
+#define LEGACY_AUDIO_STREAM_LEGACY_H
+
+
+#include <aaudio/AAudio.h>
+
+#include "AudioStream.h"
+#include "AAudioLegacy.h"
+#include "utility/FixedBlockAdapter.h"
+
+namespace aaudio {
+
+
+typedef void (*aaudio_legacy_callback_t)(int event, void* user, void *info);
+
+enum {
+    /**
+     * Request that the callback function should fill the data buffer of an output stream,
+     * or process the data of an input stream.
+     * The address parameter passed to the callback function will point to a data buffer.
+     * For an input stream, the data is read-only.
+     * The value1 parameter will be the number of frames.
+     * The value2 parameter is reserved and will be set to zero.
+     * The callback should return AAUDIO_CALLBACK_RESULT_CONTINUE or AAUDIO_CALLBACK_RESULT_STOP.
+     */
+            AAUDIO_CALLBACK_OPERATION_PROCESS_DATA,
+
+    /**
+     * Inform the callback function that the stream was disconnected.
+     * The address parameter passed to the callback function will be NULL.
+     * The value1 will be an error code or AAUDIO_OK.
+     * The value2 parameter is reserved and will be set to zero.
+     * The callback return value will be ignored.
+     */
+            AAUDIO_CALLBACK_OPERATION_DISCONNECTED,
+};
+typedef int32_t aaudio_callback_operation_t;
+
+
+class AudioStreamLegacy : public AudioStream, public FixedBlockProcessor {
+public:
+    AudioStreamLegacy();
+
+    virtual ~AudioStreamLegacy();
+
+    aaudio_legacy_callback_t getLegacyCallback();
+
+    // This is public so it can be called from the C callback function.
+    // This is called from the AudioTrack/AudioRecord client.
+    virtual void processCallback(int event, void *info) = 0;
+
+    void processCallbackCommon(aaudio_callback_operation_t opcode, void *info);
+
+    // Implement FixedBlockProcessor
+    int32_t onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) override;
+
+protected:
+    FixedBlockAdapter         *mBlockAdapter = nullptr;
+    aaudio_wrapping_frames_t   mPositionWhenStarting = 0;
+    int32_t                    mCallbackBufferSize = 0;
+};
+
+} /* namespace aaudio */
+
+#endif //LEGACY_AUDIO_STREAM_LEGACY_H
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index d380eb8..f0a6ceb 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -24,14 +24,16 @@
 #include <aaudio/AAudio.h>
 
 #include "AudioClock.h"
-#include "AudioStreamRecord.h"
-#include "utility/AAudioUtilities.h"
+#include "legacy/AudioStreamLegacy.h"
+#include "legacy/AudioStreamRecord.h"
+#include "utility/FixedBlockWriter.h"
 
 using namespace android;
 using namespace aaudio;
 
 AudioStreamRecord::AudioStreamRecord()
-    : AudioStream()
+    : AudioStreamLegacy()
+    , mFixedBlockWriter(*this)
 {
 }
 
@@ -58,7 +60,6 @@
                               ? 2 : getSamplesPerFrame();
     audio_channel_mask_t channelMask = audio_channel_in_mask_from_count(samplesPerFrame);
 
-    AudioRecord::callback_t callback = nullptr;
     audio_input_flags_t flags = (audio_input_flags_t) AUDIO_INPUT_FLAG_NONE;
 
     size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
@@ -68,6 +69,17 @@
             ? AUDIO_FORMAT_PCM_FLOAT
             : AAudioConvert_aaudioToAndroidDataFormat(getFormat());
 
+    // Setup the callback if there is one.
+    AudioRecord::callback_t callback = nullptr;
+    void *callbackData = nullptr;
+    AudioRecord::transfer_type streamTransferType = AudioRecord::transfer_type::TRANSFER_SYNC;
+    if (builder.getDataCallbackProc() != nullptr) {
+        streamTransferType = AudioRecord::transfer_type::TRANSFER_CALLBACK;
+        callback = getLegacyCallback();
+        callbackData = this;
+    }
+    mCallbackBufferSize = builder.getFramesPerDataCallback();
+
     mAudioRecord = new AudioRecord(
             AUDIO_SOURCE_DEFAULT,
             getSampleRate(),
@@ -76,10 +88,10 @@
             mOpPackageName, // const String16& opPackageName TODO does not compile
             frameCount,
             callback,
-            nullptr, //    void* user = nullptr,
+            callbackData,
             0,    //    uint32_t notificationFrames = 0,
             AUDIO_SESSION_ALLOCATE,
-            AudioRecord::TRANSFER_DEFAULT,
+            streamTransferType,
             flags
             //   int uid = -1,
             //   pid_t pid = -1,
@@ -99,6 +111,15 @@
     setSamplesPerFrame(mAudioRecord->channelCount());
     setFormat(AAudioConvert_androidToAAudioDataFormat(mAudioRecord->format()));
 
+    // We may need to pass the data through a block size adapter to guarantee constant size.
+    if (mCallbackBufferSize != AAUDIO_UNSPECIFIED) {
+        int callbackSizeBytes = getBytesPerFrame() * mCallbackBufferSize;
+        mFixedBlockWriter.open(callbackSizeBytes);
+        mBlockAdapter = &mFixedBlockWriter;
+    } else {
+        mBlockAdapter = nullptr;
+    }
+
     setState(AAUDIO_STREAM_STATE_OPEN);
 
     return AAUDIO_OK;
@@ -111,9 +132,29 @@
         mAudioRecord.clear();
         setState(AAUDIO_STREAM_STATE_CLOSED);
     }
+    mFixedBlockWriter.close();
     return AAUDIO_OK;
 }
 
+void AudioStreamRecord::processCallback(int event, void *info) {
+
+    ALOGD("AudioStreamRecord::processCallback(), event %d", event);
+    switch (event) {
+        case AudioRecord::EVENT_MORE_DATA:
+            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
+            break;
+
+            // Stream got rerouted so we disconnect.
+        case AudioRecord::EVENT_NEW_IAUDIORECORD:
+            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
+            break;
+
+        default:
+            break;
+    }
+    return;
+}
+
 aaudio_result_t AudioStreamRecord::requestStart()
 {
     if (mAudioRecord.get() == nullptr) {
@@ -124,6 +165,7 @@
     if (err != OK) {
         return AAudioConvert_androidToAAudioResult(err);
     }
+
     err = mAudioRecord->start();
     if (err != OK) {
         return AAudioConvert_androidToAAudioResult(err);
@@ -151,7 +193,7 @@
     return AAUDIO_OK;
 }
 
-aaudio_result_t AudioStreamRecord::updateState()
+aaudio_result_t AudioStreamRecord::updateStateWhileWaiting()
 {
     aaudio_result_t result = AAUDIO_OK;
     aaudio_wrapping_frames_t position;
@@ -222,7 +264,7 @@
 
 int32_t AudioStreamRecord::getFramesPerBurst() const
 {
-    return 192; // TODO add query to AudioRecord.cpp
+    return static_cast<int32_t>(mAudioRecord->getNotificationPeriodInFrames());
 }
 
 aaudio_result_t AudioStreamRecord::getTimestamp(clockid_t clockId,
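
The block size adapter referenced above (FixedBlockWriter here, FixedBlockReader on the playback side) is not included in this patch. Purely as a hypothetical illustration of the idea, an adapter of this kind stages the variable-sized buffers delivered by AudioRecord/AudioTrack and hands them to the app in constant-size blocks of framesPerDataCallback frames:

    #include <cstdint>
    #include <functional>
    #include <vector>

    // Hypothetical sketch only; the real FixedBlockWriter/FixedBlockReader may differ.
    class FixedBlockStager {
    public:
        FixedBlockStager(size_t fixedBytes, std::function<void(const uint8_t *, size_t)> sink)
                : mFixedBytes(fixedBytes), mSink(std::move(sink)) {}

        // Accept a variable-sized block and emit zero or more fixed-size blocks.
        void processVariableBlock(const uint8_t *data, size_t numBytes) {
            while (numBytes > 0) {
                size_t needed = mFixedBytes - mStaged.size();
                size_t toCopy = (numBytes < needed) ? numBytes : needed;
                mStaged.insert(mStaged.end(), data, data + toCopy);
                data += toCopy;
                numBytes -= toCopy;
                if (mStaged.size() == mFixedBytes) {   // a full fixed-size block is ready
                    mSink(mStaged.data(), mStaged.size());
                    mStaged.clear();
                }
            }
        }

    private:
        const size_t mFixedBytes;
        std::function<void(const uint8_t *, size_t)> mSink;
        std::vector<uint8_t> mStaged;
    };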
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.h b/media/libaaudio/src/legacy/AudioStreamRecord.h
index 4667f05..897a5b3 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.h
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.h
@@ -23,51 +23,58 @@
 #include "AudioStreamBuilder.h"
 #include "AudioStream.h"
 #include "AAudioLegacy.h"
+#include "legacy/AudioStreamLegacy.h"
+#include "utility/FixedBlockWriter.h"
 
 namespace aaudio {
 
 /**
  * Internal stream that uses the legacy AudioTrack path.
  */
-class AudioStreamRecord : public AudioStream {
+class AudioStreamRecord : public AudioStreamLegacy {
 public:
     AudioStreamRecord();
 
     virtual ~AudioStreamRecord();
 
-    virtual aaudio_result_t open(const AudioStreamBuilder & builder) override;
-    virtual aaudio_result_t close() override;
+    aaudio_result_t open(const AudioStreamBuilder & builder) override;
+    aaudio_result_t close() override;
 
-    virtual aaudio_result_t requestStart() override;
-    virtual aaudio_result_t requestPause() override;
-    virtual aaudio_result_t requestFlush() override;
-    virtual aaudio_result_t requestStop() override;
+    aaudio_result_t requestStart() override;
+    aaudio_result_t requestPause() override;
+    aaudio_result_t requestFlush() override;
+    aaudio_result_t requestStop() override;
 
     virtual aaudio_result_t getTimestamp(clockid_t clockId,
                                          int64_t *framePosition,
                                          int64_t *timeNanoseconds) override;
 
-    virtual aaudio_result_t read(void *buffer,
+    aaudio_result_t read(void *buffer,
                              int32_t numFrames,
                              int64_t timeoutNanoseconds) override;
 
-    virtual aaudio_result_t setBufferSize(int32_t requestedFrames) override;
+    aaudio_result_t setBufferSize(int32_t requestedFrames) override;
 
-    virtual int32_t getBufferSize() const override;
+    int32_t getBufferSize() const override;
 
-    virtual int32_t getBufferCapacity() const override;
+    int32_t getBufferCapacity() const override;
 
-    virtual int32_t getXRunCount() const override;
+    int32_t getXRunCount() const override;
 
-    virtual int32_t getFramesPerBurst() const override;
+    int32_t getFramesPerBurst() const override;
 
-    virtual aaudio_result_t updateState() override;
+    aaudio_result_t updateStateWhileWaiting() override;
+
+    // This is public so it can be called from the C callback function.
+    void processCallback(int event, void *info) override;
 
 private:
     android::sp<android::AudioRecord> mAudioRecord;
+    // adapts between variable sized blocks and fixed size blocks
+    FixedBlockWriter                 mFixedBlockWriter;
+
     // TODO add 64-bit position reporting to AudioRecord and use it.
-    aaudio_wrapping_frames_t   mPositionWhenStarting = 0;
-    android::String16          mOpPackageName;
+    android::String16                mOpPackageName;
 };
 
 } /* namespace aaudio */
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 8bb6aee..1bb9e53 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -20,20 +20,25 @@
 
 #include <stdint.h>
 #include <media/AudioTrack.h>
-#include <aaudio/AAudio.h>
 
+#include <aaudio/AAudio.h>
 #include "utility/AudioClock.h"
-#include "AudioStreamTrack.h"
-#include "utility/AAudioUtilities.h"
+#include "legacy/AudioStreamLegacy.h"
+#include "legacy/AudioStreamTrack.h"
+#include "utility/FixedBlockReader.h"
 
 using namespace android;
 using namespace aaudio;
 
+// Arbitrary and somewhat generous number of bursts.
+#define DEFAULT_BURSTS_PER_BUFFER_CAPACITY     8
+
 /*
  * Create a stream that uses the AudioTrack.
  */
 AudioStreamTrack::AudioStreamTrack()
-    : AudioStream()
+    : AudioStreamLegacy()
+    , mFixedBlockReader(*this)
 {
 }
 
@@ -53,6 +58,8 @@
         return result;
     }
 
+    ALOGD("AudioStreamTrack::open = %p", this);
+
     // Try to create an AudioTrack
     // TODO Support UNSPECIFIED in AudioTrack. For now, use stereo if unspecified.
     int32_t samplesPerFrame = (getSamplesPerFrame() == AAUDIO_UNSPECIFIED)
@@ -61,16 +68,40 @@
     ALOGD("AudioStreamTrack::open(), samplesPerFrame = %d, channelMask = 0x%08x",
             samplesPerFrame, channelMask);
 
-    AudioTrack::callback_t callback = nullptr;
     // TODO add more performance options
     audio_output_flags_t flags = (audio_output_flags_t) AUDIO_OUTPUT_FLAG_FAST;
-    size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
-                        : builder.getBufferCapacity();
+
+    int32_t frameCount = builder.getBufferCapacity();
+    ALOGD("AudioStreamTrack::open(), requested buffer capacity %d", frameCount);
+
+    int32_t notificationFrames = 0;
+
     // TODO implement an unspecified AudioTrack format then use that.
-    audio_format_t format = (getFormat() == AAUDIO_UNSPECIFIED)
+    audio_format_t format = (getFormat() == AAUDIO_FORMAT_UNSPECIFIED)
             ? AUDIO_FORMAT_PCM_FLOAT
             : AAudioConvert_aaudioToAndroidDataFormat(getFormat());
 
+    // Setup the callback if there is one.
+    AudioTrack::callback_t callback = nullptr;
+    void *callbackData = nullptr;
+    // Note that TRANSFER_SYNC does not allow FAST track
+    AudioTrack::transfer_type streamTransferType = AudioTrack::transfer_type::TRANSFER_SYNC;
+    if (builder.getDataCallbackProc() != nullptr) {
+        streamTransferType = AudioTrack::transfer_type::TRANSFER_CALLBACK;
+        callback = getLegacyCallback();
+        callbackData = this;
+
+        notificationFrames = builder.getFramesPerDataCallback();
+        // If the total buffer size is unspecified then base the size on the burst size.
+        if (frameCount == AAUDIO_UNSPECIFIED) {
+            // Take advantage of a special trick that allows us to create a buffer
+            // that is some multiple of the burst size.
+            notificationFrames = 0 - DEFAULT_BURSTS_PER_BUFFER_CAPACITY;
+        }
+    }
+    mCallbackBufferSize = builder.getFramesPerDataCallback();
+
+    ALOGD("AudioStreamTrack::open(), notificationFrames = %d", notificationFrames);
     mAudioTrack = new AudioTrack(
             (audio_stream_type_t) AUDIO_STREAM_MUSIC,
             getSampleRate(),
@@ -79,10 +110,10 @@
             frameCount,
             flags,
             callback,
-            nullptr,    // user callback data
-            0,          // notificationFrames
+            callbackData,
+            notificationFrames,
             AUDIO_SESSION_ALLOCATE,
-            AudioTrack::transfer_type::TRANSFER_SYNC // TODO - this does not allow FAST
+            streamTransferType
             );
 
     // Did we get a valid track?
@@ -97,9 +128,21 @@
     // Get the actual values from the AudioTrack.
     setSamplesPerFrame(mAudioTrack->channelCount());
     setSampleRate(mAudioTrack->getSampleRate());
-    setFormat(AAudioConvert_androidToAAudioDataFormat(mAudioTrack->format()));
+    aaudio_audio_format_t aaudioFormat =
+            AAudioConvert_androidToAAudioDataFormat(mAudioTrack->format());
+    setFormat(aaudioFormat);
+
+    // We may need to pass the data through a block size adapter to guarantee constant size.
+    if (mCallbackBufferSize != AAUDIO_UNSPECIFIED) {
+        int callbackSizeBytes = getBytesPerFrame() * mCallbackBufferSize;
+        mFixedBlockReader.open(callbackSizeBytes);
+        mBlockAdapter = &mFixedBlockReader;
+    } else {
+        mBlockAdapter = nullptr;
+    }
 
     setState(AAUDIO_STREAM_STATE_OPEN);
+    setDeviceId(mAudioTrack->getRoutedDeviceId());
 
     return AAUDIO_OK;
 }
@@ -111,11 +154,32 @@
         mAudioTrack.clear(); // TODO is this right?
         setState(AAUDIO_STREAM_STATE_CLOSED);
     }
+    mFixedBlockReader.close();
     return AAUDIO_OK;
 }
 
+void AudioStreamTrack::processCallback(int event, void *info) {
+
+    switch (event) {
+        case AudioTrack::EVENT_MORE_DATA:
+            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
+            break;
+
+            // Stream got rerouted so we disconnect.
+        case AudioTrack::EVENT_NEW_IAUDIOTRACK:
+            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
+            break;
+
+        default:
+            break;
+    }
+    return;
+}
+
 aaudio_result_t AudioStreamTrack::requestStart()
 {
+    std::lock_guard<std::mutex> lock(mStreamMutex);
+
     if (mAudioTrack.get() == nullptr) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
@@ -124,6 +188,7 @@
     if (err != OK) {
         return AAudioConvert_androidToAAudioResult(err);
     }
+
     err = mAudioTrack->start();
     if (err != OK) {
         return AAudioConvert_androidToAAudioResult(err);
@@ -135,11 +200,14 @@
 
 aaudio_result_t AudioStreamTrack::requestPause()
 {
+    std::lock_guard<std::mutex> lock(mStreamMutex);
+
     if (mAudioTrack.get() == nullptr) {
         return AAUDIO_ERROR_INVALID_STATE;
     } else if (getState() != AAUDIO_STREAM_STATE_STARTING
             && getState() != AAUDIO_STREAM_STATE_STARTED) {
-        ALOGE("requestPause(), called when state is %s", AAudio_convertStreamStateToText(getState()));
+        ALOGE("requestPause(), called when state is %s",
+              AAudio_convertStreamStateToText(getState()));
         return AAUDIO_ERROR_INVALID_STATE;
     }
     setState(AAUDIO_STREAM_STATE_PAUSING);
@@ -152,6 +220,8 @@
 }
 
 aaudio_result_t AudioStreamTrack::requestFlush() {
+    std::lock_guard<std::mutex> lock(mStreamMutex);
+
     if (mAudioTrack.get() == nullptr) {
         return AAUDIO_ERROR_INVALID_STATE;
     } else if (getState() != AAUDIO_STREAM_STATE_PAUSED) {
@@ -165,6 +235,8 @@
 }
 
 aaudio_result_t AudioStreamTrack::requestStop() {
+    std::lock_guard<std::mutex> lock(mStreamMutex);
+
     if (mAudioTrack.get() == nullptr) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
@@ -175,7 +247,7 @@
     return AAUDIO_OK;
 }
 
-aaudio_result_t AudioStreamTrack::updateState()
+aaudio_result_t AudioStreamTrack::updateStateWhileWaiting()
 {
     status_t err;
     aaudio_wrapping_frames_t position;
@@ -272,7 +344,7 @@
 
 int32_t AudioStreamTrack::getFramesPerBurst() const
 {
-    return 192; // TODO add query to AudioTrack.cpp
+    return static_cast<int32_t>(mAudioTrack->getNotificationPeriodInFrames());
 }
 
 int64_t AudioStreamTrack::getFramesRead() {
@@ -303,7 +375,7 @@
     }
     // TODO Merge common code into AudioStreamLegacy after rebasing.
     int timebase;
-    switch(clockId) {
+    switch (clockId) {
         case CLOCK_BOOTTIME:
             timebase = ExtendedTimestamp::TIMEBASE_BOOTTIME;
             break;
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.h b/media/libaaudio/src/legacy/AudioStreamTrack.h
index 7a53022..29f5d15 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.h
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.h
@@ -17,54 +17,63 @@
 #ifndef LEGACY_AUDIO_STREAM_TRACK_H
 #define LEGACY_AUDIO_STREAM_TRACK_H
 
+#include <math.h>
 #include <media/AudioTrack.h>
 #include <aaudio/AAudio.h>
 
 #include "AudioStreamBuilder.h"
 #include "AudioStream.h"
-#include "AAudioLegacy.h"
+#include "legacy/AAudioLegacy.h"
+#include "legacy/AudioStreamLegacy.h"
+#include "utility/FixedBlockReader.h"
 
 namespace aaudio {
 
-
 /**
  * Internal stream that uses the legacy AudioTrack path.
  */
-class AudioStreamTrack : public AudioStream {
+class AudioStreamTrack : public AudioStreamLegacy {
 public:
     AudioStreamTrack();
 
     virtual ~AudioStreamTrack();
 
 
-    virtual aaudio_result_t open(const AudioStreamBuilder & builder) override;
-    virtual aaudio_result_t close() override;
+    aaudio_result_t open(const AudioStreamBuilder & builder) override;
+    aaudio_result_t close() override;
 
-    virtual aaudio_result_t requestStart() override;
-    virtual aaudio_result_t requestPause() override;
-    virtual aaudio_result_t requestFlush() override;
-    virtual aaudio_result_t requestStop() override;
+    aaudio_result_t requestStart() override;
+    aaudio_result_t requestPause() override;
+    aaudio_result_t requestFlush() override;
+    aaudio_result_t requestStop() override;
 
-    virtual aaudio_result_t getTimestamp(clockid_t clockId,
+    aaudio_result_t getTimestamp(clockid_t clockId,
                                        int64_t *framePosition,
                                        int64_t *timeNanoseconds) override;
 
-    virtual aaudio_result_t write(const void *buffer,
+    aaudio_result_t write(const void *buffer,
                              int32_t numFrames,
                              int64_t timeoutNanoseconds) override;
 
-    virtual aaudio_result_t setBufferSize(int32_t requestedFrames) override;
-    virtual int32_t getBufferSize() const override;
-    virtual int32_t getBufferCapacity() const override;
-    virtual int32_t getFramesPerBurst()const  override;
-    virtual int32_t getXRunCount() const override;
+    aaudio_result_t setBufferSize(int32_t requestedFrames) override;
+    int32_t getBufferSize() const override;
+    int32_t getBufferCapacity() const override;
+    int32_t getFramesPerBurst() const override;
+    int32_t getXRunCount() const override;
 
-    virtual int64_t getFramesRead() override;
+    int64_t getFramesRead() override;
 
-    virtual aaudio_result_t updateState() override;
+    aaudio_result_t updateStateWhileWaiting() override;
+
+    // This is public so it can be called from the C callback function.
+    void processCallback(int event, void *info) override;
 
 private:
+
     android::sp<android::AudioTrack> mAudioTrack;
+    // adapts between variable sized blocks and fixed size blocks
+    FixedBlockReader                 mFixedBlockReader;
+
     // TODO add 64-bit position reporting to AudioRecord and use it.
     aaudio_wrapping_frames_t         mPositionWhenStarting = 0;
     aaudio_wrapping_frames_t         mPositionWhenPausing = 0;
diff --git a/media/libaaudio/src/utility/FixedBlockAdapter.cpp b/media/libaaudio/src/utility/FixedBlockAdapter.cpp
new file mode 100644
index 0000000..f4666af
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockAdapter.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include "FixedBlockAdapter.h"
+
+FixedBlockAdapter::~FixedBlockAdapter() {
+    close();
+}
+
+int32_t FixedBlockAdapter::open(int32_t bytesPerFixedBlock)
+{
+    mSize = bytesPerFixedBlock;
+    mStorage = new uint8_t[bytesPerFixedBlock]; // TODO use std::nothrow
+    mPosition = 0;
+    return 0;
+}
+
+int32_t FixedBlockAdapter::close()
+{
+    delete[] mStorage;
+    mStorage = nullptr;
+    mSize = 0;
+    mPosition = 0;
+    return 0;
+}
diff --git a/media/libaaudio/src/utility/FixedBlockAdapter.h b/media/libaaudio/src/utility/FixedBlockAdapter.h
new file mode 100644
index 0000000..7008b25
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockAdapter.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_FIXED_BLOCK_ADAPTER_H
+#define AAUDIO_FIXED_BLOCK_ADAPTER_H
+
+#include <stdio.h>
+
+/**
+ * Interface for a class that needs fixed-size blocks.
+ */
+class FixedBlockProcessor {
+public:
+    virtual ~FixedBlockProcessor() = default;
+    virtual int32_t onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) = 0;
+};
+
+/**
+ * Base class for a variable-to-fixed-size block adapter.
+ */
+class FixedBlockAdapter
+{
+public:
+    FixedBlockAdapter(FixedBlockProcessor &fixedBlockProcessor)
+    : mFixedBlockProcessor(fixedBlockProcessor) {}
+
+    virtual ~FixedBlockAdapter();
+
+    /**
+     * Allocate internal resources needed for buffering data.
+     */
+    virtual int32_t open(int32_t bytesPerFixedBlock);
+
+    /**
+     * Process a variable-sized block, assembling or splitting fixed-size
+     * blocks through the internal storage as needed.
+     * Note that if the fixed-sized blocks must be aligned, then the variable-sized blocks
+     * must have the same alignment.
+     * For example, if the fixed-size blocks must be a multiple of 8, then the variable-sized
+     * blocks must also be a multiple of 8.
+     *
+     * @param buffer data to be processed
+     * @param numBytes number of valid bytes in buffer
+     * @return zero if OK or a non-zero error code
+     */
+    virtual int32_t processVariableBlock(uint8_t *buffer, int32_t numBytes) = 0;
+
+    /**
+     * Free internal resources.
+     */
+    int32_t close();
+
+protected:
+    FixedBlockProcessor  &mFixedBlockProcessor;
+    uint8_t              *mStorage = nullptr;    // Store data here while assembling buffers.
+    int32_t               mSize = 0;             // Size in bytes of the fixed size buffer.
+    int32_t               mPosition = 0;         // Offset of the last byte read or written.
+};
+
+#endif /* AAUDIO_FIXED_BLOCK_ADAPTER_H */
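Note on the classes introduced above: the adapter base class owns a single fixed-size scratch buffer (mStorage) plus a reference to a FixedBlockProcessor callback; the FixedBlockReader and FixedBlockWriter added below specialize it for pull-style (output) and push-style (input) data flows respectively, and the scratch buffer is only touched when a variable-sized request does not line up with the fixed block size. Illustrative usage sketches follow the reader and writer headers below.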
diff --git a/media/libaaudio/src/utility/FixedBlockReader.cpp b/media/libaaudio/src/utility/FixedBlockReader.cpp
new file mode 100644
index 0000000..21ea70e
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockReader.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <memory.h>
+
+#include "FixedBlockAdapter.h"
+
+#include "FixedBlockReader.h"
+
+
+FixedBlockReader::FixedBlockReader(FixedBlockProcessor &fixedBlockProcessor)
+    : FixedBlockAdapter(fixedBlockProcessor) {
+    mPosition = mSize;
+}
+
+int32_t FixedBlockReader::open(int32_t bytesPerFixedBlock) {
+    int32_t result = FixedBlockAdapter::open(bytesPerFixedBlock);
+    mPosition = mSize; // Indicate no data in storage.
+    return result;
+}
+
+int32_t FixedBlockReader::readFromStorage(uint8_t *buffer, int32_t numBytes) {
+    int32_t bytesToRead = numBytes;
+    int32_t dataAvailable = mSize - mPosition;
+    if (bytesToRead > dataAvailable) {
+        bytesToRead = dataAvailable;
+    }
+    memcpy(buffer, mStorage + mPosition, bytesToRead);
+    mPosition += bytesToRead;
+    return bytesToRead;
+}
+
+int32_t FixedBlockReader::processVariableBlock(uint8_t *buffer, int32_t numBytes) {
+    int32_t result = 0;
+    int32_t bytesLeft = numBytes;
+    while (bytesLeft > 0 && result == 0) {
+        if (mPosition < mSize) {
+            // Use up bytes currently in storage.
+            int32_t bytesRead = readFromStorage(buffer, bytesLeft);
+            buffer += bytesRead;
+            bytesLeft -= bytesRead;
+        } else if (bytesLeft >= mSize) {
+            // Read through if enough for a complete block.
+            result = mFixedBlockProcessor.onProcessFixedBlock(buffer, mSize);
+            buffer += mSize;
+            bytesLeft -= mSize;
+        } else {
+            // Just need a partial block so we have to use storage.
+            result = mFixedBlockProcessor.onProcessFixedBlock(mStorage, mSize);
+            mPosition = 0;
+        }
+    }
+    return result;
+}
+
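As a worked trace of the read path above: with a 47-byte fixed block and empty internal storage, a request for 103 bytes is satisfied by reading two full blocks (94 bytes) straight through into the caller's buffer, then rendering a third fixed block into storage and copying the remaining 9 bytes out of it; the other 38 bytes stay in storage and are consumed first on the next call.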
diff --git a/media/libaaudio/src/utility/FixedBlockReader.h b/media/libaaudio/src/utility/FixedBlockReader.h
new file mode 100644
index 0000000..128dd52
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockReader.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_FIXED_BLOCK_READER_H
+#define AAUDIO_FIXED_BLOCK_READER_H
+
+#include <stdint.h>
+
+#include "FixedBlockAdapter.h"
+
+/**
+ * Read from a fixed-size block to a variable sized block.
+ *
+ * This can be used to convert a pull data flow from fixed sized buffers to variable sized buffers.
+ * An example would be an audio output callback that reads from the app.
+ */
+class FixedBlockReader : public FixedBlockAdapter
+{
+public:
+    FixedBlockReader(FixedBlockProcessor &fixedBlockProcessor);
+
+    virtual ~FixedBlockReader() = default;
+
+    int32_t open(int32_t bytesPerFixedBlock) override;
+
+    int32_t readFromStorage(uint8_t *buffer, int32_t numBytes);
+
+    /**
+     * Read into a variable sized block.
+     */
+    int32_t processVariableBlock(uint8_t *buffer, int32_t numBytes) override;
+};
+
+
+#endif /* AAUDIO_FIXED_BLOCK_READER_H */
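A minimal usage sketch for FixedBlockReader (illustrative only, not part of this change; the class name, the 192-byte block size and the include paths are assumptions, mirroring how the test added below and AudioStreamTrack use the adapter):

    #include <cstdint>
    #include <cstring>

    #include "utility/FixedBlockAdapter.h"
    #include "utility/FixedBlockReader.h"

    // Hypothetical source that always fills a fixed-size block, standing in
    // for the app-supplied AAudio data callback.
    class FixedSizeSource : public FixedBlockProcessor {
    public:
        int32_t onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) override {
            memset(buffer, 0, numBytes);  // silence as a stand-in for real audio
            return 0;                     // zero means success, keep going
        }
    };

    int main() {
        constexpr int32_t kFixedBytes = 192;   // assumed callback size in bytes
        FixedSizeSource source;
        FixedBlockReader reader(source);
        reader.open(kFixedBytes);

        // A variable-sized request, as an AudioTrack EVENT_MORE_DATA callback
        // might make; the reader assembles it from fixed-size blocks.
        uint8_t out[300];
        int32_t result = reader.processVariableBlock(out, sizeof(out));
        reader.close();
        return result;
    }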
diff --git a/media/libaaudio/src/utility/FixedBlockWriter.cpp b/media/libaaudio/src/utility/FixedBlockWriter.cpp
new file mode 100644
index 0000000..2ce8046
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockWriter.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <memory.h>
+
+#include "FixedBlockAdapter.h"
+#include "FixedBlockWriter.h"
+
+FixedBlockWriter::FixedBlockWriter(FixedBlockProcessor &fixedBlockProcessor)
+        : FixedBlockAdapter(fixedBlockProcessor) {}
+
+
+int32_t FixedBlockWriter::writeToStorage(uint8_t *buffer, int32_t numBytes) {
+    int32_t bytesToStore = numBytes;
+    int32_t roomAvailable = mSize - mPosition;
+    if (bytesToStore > roomAvailable) {
+        bytesToStore = roomAvailable;
+    }
+    memcpy(mStorage + mPosition, buffer, bytesToStore);
+    mPosition += bytesToStore;
+    return bytesToStore;
+}
+
+int32_t FixedBlockWriter::processVariableBlock(uint8_t *buffer, int32_t numBytes) {
+    int32_t result = 0;
+    int32_t bytesLeft = numBytes;
+
+    // If we already have data in storage then add to it.
+    if (mPosition > 0) {
+        int32_t bytesWritten = writeToStorage(buffer, bytesLeft);
+        buffer += bytesWritten;
+        bytesLeft -= bytesWritten;
+        // If storage full then flush it out
+        if (mPosition == mSize) {
+            result = mFixedBlockProcessor.onProcessFixedBlock(mStorage, mSize);
+            mPosition = 0;
+        }
+    }
+
+    // Write through if enough for a complete block.
+    while (bytesLeft > mSize && result == 0) {
+        result = mFixedBlockProcessor.onProcessFixedBlock(buffer, mSize);
+        buffer += mSize;
+        bytesLeft -= mSize;
+    }
+
+    // Save any remaining partial block for next time.
+    if (bytesLeft > 0) {
+        writeToStorage(buffer, bytesLeft);
+    }
+
+    return result;
+}
diff --git a/media/libaaudio/src/utility/FixedBlockWriter.h b/media/libaaudio/src/utility/FixedBlockWriter.h
new file mode 100644
index 0000000..f1d917c
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockWriter.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_FIXED_BLOCK_WRITER_H
+#define AAUDIO_FIXED_BLOCK_WRITER_H
+
+#include <stdint.h>
+
+#include "FixedBlockAdapter.h"
+
+/**
+ * This can be used to convert a push data flow from variable sized buffers to fixed sized buffers.
+ * An example would be an audio input callback.
+ */
+class FixedBlockWriter : public FixedBlockAdapter
+{
+public:
+    FixedBlockWriter(FixedBlockProcessor &fixedBlockProcessor);
+
+    virtual ~FixedBlockWriter() = default;
+
+    int32_t writeToStorage(uint8_t *buffer, int32_t numBytes);
+
+    /**
+     * Write from a variable sized block.
+     */
+    int32_t processVariableBlock(uint8_t *buffer, int32_t numBytes) override;
+};
+
+#endif /* AAUDIO_FIXED_BLOCK_WRITER_H */
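A complementary sketch for the input (record) direction using FixedBlockWriter (again illustrative and not part of this change; names and sizes are assumptions):

    #include <cstdint>

    #include "utility/FixedBlockAdapter.h"
    #include "utility/FixedBlockWriter.h"

    // Hypothetical sink that consumes exactly one fixed-size block per call,
    // standing in for an app-supplied input (record) callback.
    class FixedSizeSink : public FixedBlockProcessor {
    public:
        int32_t onProcessFixedBlock(uint8_t * /*buffer*/, int32_t /*numBytes*/) override {
            return 0;  // consume the block; zero means success
        }
    };

    int main() {
        constexpr int32_t kFixedBytes = 192;  // assumed callback size in bytes
        FixedSizeSink sink;
        FixedBlockWriter writer(sink);
        writer.open(kFixedBytes);

        // Variable-sized capture delivered by the driver; bytes that do not
        // fill a whole block are buffered inside the writer for the next call.
        uint8_t captured[123] = {0};
        int32_t result = writer.processVariableBlock(captured, sizeof(captured));
        writer.close();
        return result;
    }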
diff --git a/media/libaaudio/tests/Android.mk b/media/libaaudio/tests/Android.mk
index 7899cf5..06c9364 100644
--- a/media/libaaudio/tests/Android.mk
+++ b/media/libaaudio/tests/Android.mk
@@ -4,8 +4,7 @@
 LOCAL_C_INCLUDES := \
     $(call include-path-for, audio-utils) \
     frameworks/av/media/libaaudio/include \
-    frameworks/av/media/libaaudio/src/core \
-    frameworks/av/media/libaaudio/src/utility
+    frameworks/av/media/libaaudio/src
 LOCAL_SRC_FILES:= test_handle_tracker.cpp
 LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
                           libcutils liblog libmedia libutils
@@ -17,13 +16,22 @@
 LOCAL_C_INCLUDES := \
     $(call include-path-for, audio-utils) \
     frameworks/av/media/libaaudio/include \
-    frameworks/av/media/libaaudio/src \
-    frameworks/av/media/libaaudio/src/core \
-    frameworks/av/media/libaaudio/src/fifo \
-    frameworks/av/media/libaaudio/src/utility
+    frameworks/av/media/libaaudio/src
 LOCAL_SRC_FILES:= test_marshalling.cpp
 LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
                           libcutils liblog libmedia libutils
 LOCAL_STATIC_LIBRARIES := libaaudio
-LOCAL_MODULE := test_marshalling
+LOCAL_MODULE := test_aaudio_marshalling
+include $(BUILD_NATIVE_TEST)
+
+include $(CLEAR_VARS)
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/libaaudio/include \
+    frameworks/av/media/libaaudio/src
+LOCAL_SRC_FILES:= test_block_adapter.cpp
+LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
+                          libcutils liblog libmedia libutils
+LOCAL_STATIC_LIBRARIES := libaaudio
+LOCAL_MODULE := test_block_adapter
 include $(BUILD_NATIVE_TEST)
diff --git a/media/libaaudio/tests/test_block_adapter.cpp b/media/libaaudio/tests/test_block_adapter.cpp
new file mode 100644
index 0000000..a22abb9
--- /dev/null
+++ b/media/libaaudio/tests/test_block_adapter.cpp
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+
+#include <gtest/gtest.h>
+
+#include "utility/FixedBlockAdapter.h"
+#include "utility/FixedBlockWriter.h"
+#include "utility/FixedBlockReader.h"
+
+#define FIXED_BLOCK_SIZE   47
+#define TEST_BUFFER_SIZE   103
+
+// Pass varying sized blocks.
+// Frames contain a sequential index, which are easily checked.
+class TestBlockAdapter {
+public:
+    TestBlockAdapter()
+            : mTestIndex(0), mLastIndex(0) {
+    }
+
+    ~TestBlockAdapter() = default;
+
+    void fillSequence(int32_t *indexBuffer, int32_t frameCount) {
+        ASSERT_LE(frameCount, TEST_BUFFER_SIZE);
+        for (int i = 0; i < frameCount; i++) {
+            indexBuffer[i] = mLastIndex++;
+        }
+    }
+
+    int checkSequence(const int32_t *indexBuffer, int32_t frameCount) {
+        // This is equivalent to calling an output callback.
+        for (int i = 0; i < frameCount; i++) {
+            int32_t expected = mTestIndex++;
+            int32_t actual = indexBuffer[i];
+            EXPECT_EQ(expected, actual);
+            if (actual != expected) {
+                return -1;
+            }
+        }
+        return 0;
+    }
+
+    int32_t            mTestBuffer[TEST_BUFFER_SIZE];
+    int32_t            mTestIndex;
+    int32_t            mLastIndex;
+};
+
+class TestBlockWriter : public TestBlockAdapter, FixedBlockProcessor {
+public:
+    TestBlockWriter()
+            : mFixedBlockWriter(*this) {
+        mFixedBlockWriter.open(sizeof(int32_t) * FIXED_BLOCK_SIZE);
+    }
+
+    ~TestBlockWriter() {
+        mFixedBlockWriter.close();
+    }
+
+    int32_t onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) override {
+        int32_t frameCount = numBytes / sizeof(int32_t);
+        return checkSequence((int32_t *) buffer, frameCount);
+    }
+
+    // Simulate audio input from a variable sized callback.
+    int32_t testInputWrite(int32_t variableCount) {
+        fillSequence(mTestBuffer, variableCount);
+        int32_t sizeBytes = variableCount * sizeof(int32_t);
+        return mFixedBlockWriter.processVariableBlock((uint8_t *) mTestBuffer, sizeBytes);
+    }
+
+private:
+    FixedBlockWriter mFixedBlockWriter;
+};
+
+class TestBlockReader : public TestBlockAdapter, FixedBlockProcessor {
+public:
+    TestBlockReader()
+            : mFixedBlockReader(*this) {
+        mFixedBlockReader.open(sizeof(int32_t) * FIXED_BLOCK_SIZE);
+    }
+
+    ~TestBlockReader() {
+        mFixedBlockReader.close();
+    }
+
+    int32_t onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) override {
+        int32_t frameCount = numBytes / sizeof(int32_t);
+        fillSequence((int32_t *) buffer, frameCount);
+        return 0;
+    }
+
+    // Simulate audio output from a variable sized callback.
+    int32_t testOutputRead(int32_t variableCount) {
+        int32_t sizeBytes = variableCount * sizeof(int32_t);
+        int32_t result = mFixedBlockReader.processVariableBlock((uint8_t *) mTestBuffer, sizeBytes);
+        if (result >= 0) {
+            result = checkSequence((int32_t *)mTestBuffer, variableCount);
+        }
+        return result;
+    }
+
+private:
+    FixedBlockReader   mFixedBlockReader;
+};
+
+
+TEST(test_block_adapter, block_adapter_write) {
+    TestBlockWriter tester;
+    int result = 0;
+    const int numLoops = 1000;
+
+    for (int i = 0; i < numLoops && result == 0; i++) {
+        long r = random();
+        int32_t size = (r % TEST_BUFFER_SIZE);
+        ASSERT_LE(size, TEST_BUFFER_SIZE);
+        ASSERT_GE(size, 0);
+        result = tester.testInputWrite(size);
+    }
+    ASSERT_EQ(0, result);
+}
+
+TEST(test_block_adapter, block_adapter_read) {
+    TestBlockReader tester;
+    int result = 0;
+    const int numLoops = 1000;
+
+    for (int i = 0; i < numLoops && result == 0; i++) {
+        long r = random();
+        int32_t size = (r % TEST_BUFFER_SIZE);
+        ASSERT_LE(size, TEST_BUFFER_SIZE);
+        ASSERT_GE(size, 0);
+        result = tester.testOutputRead(size);
+    }
+    ASSERT_EQ(0, result);
+}
+
diff --git a/media/libaaudio/tests/test_handle_tracker.cpp b/media/libaaudio/tests/test_handle_tracker.cpp
index e51c39c..e1cb676 100644
--- a/media/libaaudio/tests/test_handle_tracker.cpp
+++ b/media/libaaudio/tests/test_handle_tracker.cpp
@@ -22,7 +22,7 @@
 #include <gtest/gtest.h>
 
 #include <aaudio/AAudioDefinitions.h>
-#include "HandleTracker.h"
+#include "utility/HandleTracker.h"
 
 // Test adding one address.
 TEST(test_handle_tracker, aaudio_handle_tracker) {
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 6c7cdde..5c54bb2 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -645,10 +645,10 @@
     mAwaitBoost = false;
     if (mFlags & AUDIO_INPUT_FLAG_FAST) {
         if (flags & AUDIO_INPUT_FLAG_FAST) {
-            ALOGI("AUDIO_INPUT_FLAG_FAST successful; frameCount %zu", frameCount);
+            ALOGI("AUDIO_INPUT_FLAG_FAST successful; frameCount %zu -> %zu", frameCount, temp);
             mAwaitBoost = true;
         } else {
-            ALOGW("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
+            ALOGW("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %zu -> %zu", frameCount, temp);
             mFlags = (audio_input_flags_t) (mFlags & ~(AUDIO_INPUT_FLAG_FAST |
                     AUDIO_INPUT_FLAG_RAW));
             continue;   // retry
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index d590cb7..3a0ce5e 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -1479,12 +1479,13 @@
     mAwaitBoost = false;
     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
         if (flags & AUDIO_OUTPUT_FLAG_FAST) {
-            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
+            ALOGI("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu -> %zu", frameCount, temp);
             if (!mThreadCanCallJava) {
                 mAwaitBoost = true;
             }
         } else {
-            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
+            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu", frameCount,
+                    temp);
         }
     }
     mFlags = flags;
diff --git a/media/libaudioclient/AudioTrackShared.cpp b/media/libaudioclient/AudioTrackShared.cpp
index 846f8b8..2ce6c63 100644
--- a/media/libaudioclient/AudioTrackShared.cpp
+++ b/media/libaudioclient/AudioTrackShared.cpp
@@ -696,7 +696,8 @@
     ssize_t filled = rear - front;
     // pipe should not already be overfull
     if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
-        ALOGE("Shared memory control block is corrupt (filled=%zd); shutting down", filled);
+        ALOGE("Shared memory control block is corrupt (filled=%zd, mFrameCount=%zu); shutting down",
+                filled, mFrameCount);
         mIsShutdown = true;
     }
     if (mIsShutdown) {
@@ -820,7 +821,8 @@
     ssize_t filled = rear - cblk->u.mStreaming.mFront;
     // pipe should not already be overfull
     if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
-        ALOGE("Shared memory control block is corrupt (filled=%zd); shutting down", filled);
+        ALOGE("Shared memory control block is corrupt (filled=%zd, mFrameCount=%zu); shutting down",
+                filled, mFrameCount);
         mIsShutdown = true;
         return 0;
     }
diff --git a/media/libaudioclient/include/AudioRecord.h b/media/libaudioclient/include/AudioRecord.h
index 1c8746f..1b034b5 100644
--- a/media/libaudioclient/include/AudioRecord.h
+++ b/media/libaudioclient/include/AudioRecord.h
@@ -243,6 +243,13 @@
             size_t      frameSize() const   { return mFrameSize; }
             audio_source_t inputSource() const  { return mAttributes.source; }
 
+    /*
+     * Return the period of the notification callback in frames.
+     * This value is set when the AudioRecord is constructed.
+     * It can be modified if the AudioRecord is rerouted.
+     */
+            uint32_t    getNotificationPeriodInFrames() const { return mNotificationFramesAct; }
+
     /* After it's created the track is not active. Call start() to
      * make it active. If set, the callback will start being called.
      * If event is not AudioSystem::SYNC_EVENT_NONE, the capture start will be delayed until
diff --git a/media/libaudioclient/include/AudioTrack.h b/media/libaudioclient/include/AudioTrack.h
index 0358363..16eb225 100644
--- a/media/libaudioclient/include/AudioTrack.h
+++ b/media/libaudioclient/include/AudioTrack.h
@@ -348,7 +348,12 @@
             uint32_t    channelCount() const { return mChannelCount; }
             size_t      frameCount() const  { return mFrameCount; }
 
-    // TODO consider notificationFrames() if needed
+    /*
+     * Return the period of the notification callback in frames.
+     * This value is set when the AudioTrack is constructed.
+     * It can be modified if the AudioTrack is rerouted.
+     */
+            uint32_t    getNotificationPeriodInFrames() const { return mNotificationFramesAct; }
 
     /* Return effective size of audio buffer that an application writes to
      * or a negative error if the track is uninitialized.
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index 223ca6b..43130eb 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -549,11 +549,6 @@
     virtual status_t dispatchMessage(const omx_message &msg) {
         return mBase->dispatchMessage(msg);
     }
-
-    // TODO: this is temporary, will be removed when quirks move to OMX side
-    virtual status_t setQuirks(OMX_U32 quirks) {
-        return mBase->setQuirks(quirks);
-    }
 };
 
 IMPLEMENT_META_INTERFACE(OMX, "android.hardware.IOMX");
@@ -975,17 +970,6 @@
             return NO_ERROR;
         }
 
-        case SET_QUIRKS:
-        {
-            CHECK_OMX_INTERFACE(IOMXNode, data, reply);
-
-            OMX_U32 quirks = data.readInt32();
-
-            reply->writeInt32(setQuirks(quirks));
-
-            return NO_ERROR;
-        }
-
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/include/IOMX.h b/media/libmedia/include/IOMX.h
index 62067c7..9a0ada1 100644
--- a/media/libmedia/include/IOMX.h
+++ b/media/libmedia/include/IOMX.h
@@ -170,9 +170,6 @@
             OMX_INDEXTYPE *index) = 0;
 
     virtual status_t dispatchMessage(const omx_message &msg) = 0;
-
-    // TODO: this is temporary, will be removed when quirks move to OMX side
-    virtual status_t setQuirks(OMX_U32 quirks) = 0;
 };
 
 struct omx_message {
diff --git a/media/libmedia/omx/1.0/WOmxNode.cpp b/media/libmedia/omx/1.0/WOmxNode.cpp
index 6c92b52..194378c 100644
--- a/media/libmedia/omx/1.0/WOmxNode.cpp
+++ b/media/libmedia/omx/1.0/WOmxNode.cpp
@@ -242,11 +242,6 @@
     return status;
 }
 
-// TODO: this is temporary, will be removed when quirks move to OMX side.
-status_t LWOmxNode::setQuirks(OMX_U32 quirks) {
-    return toStatusT(mBase->setQuirks(static_cast<uint32_t>(quirks)));;
-}
-
 // TWOmxNode
 TWOmxNode::TWOmxNode(sp<IOMXNode> const& base) : mBase(base) {
 }
@@ -424,11 +419,6 @@
     return toStatus(mBase->dispatchMessage(lMsg));
 }
 
-Return<void> TWOmxNode::setQuirks(uint32_t quirks) {
-    mBase->setQuirks(static_cast<OMX_U32>(quirks));
-    return Void();
-}
-
 }  // namespace utils
 }  // namespace V1_0
 }  // namespace omx
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index d9a5c26..d048777 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -1159,7 +1159,8 @@
                 }
                 if (mSource != nullptr) {
                     if (audio) {
-                        if (mVideoDecoderError || mSource->getFormat(false /* audio */) == NULL) {
+                        if (mVideoDecoderError || mSource->getFormat(false /* audio */) == NULL
+                                || mSurface == NULL) {
                             // When both audio and video have error, or this stream has only audio
                             // which has error, notify client of error.
                             notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
@@ -1169,7 +1170,8 @@
                         }
                         mAudioDecoderError = true;
                     } else {
-                        if (mAudioDecoderError || mSource->getFormat(true /* audio */) == NULL) {
+                        if (mAudioDecoderError || mSource->getFormat(true /* audio */) == NULL
+                                || mAudioSink == NULL) {
                             // When both audio and video have error, or this stream has only video
                             // which has error, notify client of error.
                             notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 72645ab..63b9571 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -6307,7 +6307,6 @@
     AString mime;
 
     AString componentName;
-    uint32_t quirks = 0;
     int32_t encoder = false;
     if (msg->findString("componentName", &componentName)) {
         sp<IMediaCodecList> list = MediaCodecList::getInstance();
@@ -6335,7 +6334,6 @@
     for (size_t matchIndex = 0; matchIndex < matchingCodecs.size();
             ++matchIndex) {
         componentName = matchingCodecs[matchIndex];
-        quirks = MediaCodecList::getQuirksFor(componentName.c_str());
 
         pid_t tid = gettid();
         int prevPriority = androidGetThreadPriority(tid);
@@ -6392,7 +6390,6 @@
         mCodec->mFlags |= kFlagPushBlankBuffersToNativeWindowOnShutdown;
     }
 
-    omxNode->setQuirks(quirks);
     mCodec->mOMX = omx;
     mCodec->mOMXNode = omxNode;
     mCodec->mCallback->onComponentAllocated(mCodec->mComponentName.c_str());
diff --git a/media/libstagefright/ACodecBufferChannel.cpp b/media/libstagefright/ACodecBufferChannel.cpp
index 40ac986..0d9696f 100644
--- a/media/libstagefright/ACodecBufferChannel.cpp
+++ b/media/libstagefright/ACodecBufferChannel.cpp
@@ -300,8 +300,10 @@
                 });
         size_t destinationBufferSize = maxSize;
         size_t heapSize = totalSize + destinationBufferSize;
-        mDealer = makeMemoryDealer(heapSize);
-        mDecryptDestination = mDealer->allocate(destinationBufferSize);
+        if (heapSize > 0) {
+            mDealer = makeMemoryDealer(heapSize);
+            mDecryptDestination = mDealer->allocate(destinationBufferSize);
+        }
     }
     std::vector<const BufferInfo> inputBuffers;
     for (const BufferAndId &elem : array) {
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index bdc37a5..61b8f9d 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -121,6 +121,7 @@
         android.hidl.allocator@1.0 \
         android.hidl.memory@1.0 \
         android.hardware.media.omx@1.0 \
+        libstagefright_xmlparser@1.0 \
 
 LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libmedia
 
diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/include/OMX.h
index 5b22a2f..4af3d39 100644
--- a/media/libstagefright/include/OMX.h
+++ b/media/libstagefright/include/OMX.h
@@ -20,7 +20,7 @@
 #include <media/IOMX.h>
 #include <utils/threads.h>
 #include <utils/KeyedVector.h>
-
+#include <media/vndk/xmlparser/1.0/MediaCodecsXmlParser.h>
 #include "OmxNodeOwner.h"
 
 namespace android {
@@ -54,6 +54,7 @@
 private:
     Mutex mLock;
     OMXMaster *mMaster;
+    MediaCodecsXmlParser mParser;
 
     KeyedVector<wp<IBinder>, sp<OMXNodeInstance> > mLiveNodes;
 
diff --git a/media/libstagefright/omx/1.0/Omx.cpp b/media/libstagefright/omx/1.0/Omx.cpp
index e5b89da..365ea5d 100644
--- a/media/libstagefright/omx/1.0/Omx.cpp
+++ b/media/libstagefright/omx/1.0/Omx.cpp
@@ -43,7 +43,9 @@
 
 constexpr size_t kMaxNodeInstances = (1 << 16);
 
-Omx::Omx() : mMaster(new OMXMaster()) {
+Omx::Omx() :
+    mMaster(new OMXMaster()),
+    mParser() {
 }
 
 Omx::~Omx() {
@@ -111,6 +113,19 @@
         return Void();
     }
     instance->setHandle(handle);
+    std::vector<AString> quirkVector;
+    if (mParser.getQuirks(name.c_str(), &quirkVector) == OK) {
+        uint32_t quirks = 0;
+        for (const AString& quirk : quirkVector) {
+            if (quirk == "requires-allocate-on-input-ports") {
+                quirks |= kRequiresAllocateBufferOnInputPorts;
+            }
+            if (quirk == "requires-allocate-on-output-ports") {
+                quirks |= kRequiresAllocateBufferOnOutputPorts;
+            }
+        }
+        instance->setQuirks(quirks);
+    }
 
     mLiveNodes.add(observer.get(), instance);
     observer->linkToDeath(this, 0);
diff --git a/media/libstagefright/omx/1.0/Omx.h b/media/libstagefright/omx/1.0/Omx.h
index 001e8cb..23784aa 100644
--- a/media/libstagefright/omx/1.0/Omx.h
+++ b/media/libstagefright/omx/1.0/Omx.h
@@ -23,6 +23,7 @@
 #include "../../include/OMXNodeInstance.h"
 
 #include <android/hardware/media/omx/1.0/IOmx.h>
+#include <media/vndk/xmlparser/1.0/MediaCodecsXmlParser.h>
 
 namespace android {
 
@@ -76,6 +77,7 @@
     Mutex mLock;
     KeyedVector<wp<IBase>, sp<OMXNodeInstance> > mLiveNodes;
     KeyedVector<OMXNodeInstance*, wp<IBase> > mNode2Observer;
+    MediaCodecsXmlParser mParser;
 };
 
 extern "C" IOmx* HIDL_FETCH_IOmx(const char* name);
diff --git a/media/libstagefright/omx/1.0/WOmxNode.cpp b/media/libstagefright/omx/1.0/WOmxNode.cpp
index ea9fb35..1a61007 100644
--- a/media/libstagefright/omx/1.0/WOmxNode.cpp
+++ b/media/libstagefright/omx/1.0/WOmxNode.cpp
@@ -245,11 +245,6 @@
     return status;
 }
 
-// TODO: this is temporary, will be removed when quirks move to OMX side.
-status_t LWOmxNode::setQuirks(OMX_U32 quirks) {
-    return toStatusT(mBase->setQuirks(static_cast<uint32_t>(quirks)));;
-}
-
 // TWOmxNode
 TWOmxNode::TWOmxNode(sp<IOMXNode> const& base) : mBase(base) {
 }
@@ -427,11 +422,6 @@
     return toStatus(mBase->dispatchMessage(lMsg));
 }
 
-Return<void> TWOmxNode::setQuirks(uint32_t quirks) {
-    mBase->setQuirks(static_cast<OMX_U32>(quirks));
-    return Void();
-}
-
 }  // namespace implementation
 }  // namespace V1_0
 }  // namespace omx
diff --git a/media/libstagefright/omx/1.0/WOmxNode.h b/media/libstagefright/omx/1.0/WOmxNode.h
index 8ca3e67..d715374 100644
--- a/media/libstagefright/omx/1.0/WOmxNode.h
+++ b/media/libstagefright/omx/1.0/WOmxNode.h
@@ -103,9 +103,6 @@
             const char *parameter_name,
             OMX_INDEXTYPE *index) override;
     status_t dispatchMessage(const omx_message &msg) override;
-
-    // TODO: this is temporary, will be removed when quirks move to OMX side.
-    status_t setQuirks(OMX_U32 quirks) override;
 };
 
 struct TWOmxNode : public IOmxNode {
@@ -154,7 +151,6 @@
             hidl_string const& parameterName,
             getExtensionIndex_cb _hidl_cb) override;
     Return<Status> dispatchMessage(Message const& msg) override;
-    Return<void> setQuirks(uint32_t quirks) override;
 };
 
 }  // namespace implementation
diff --git a/media/libstagefright/omx/Android.mk b/media/libstagefright/omx/Android.mk
index b1508dc..90333ef 100644
--- a/media/libstagefright/omx/Android.mk
+++ b/media/libstagefright/omx/Android.mk
@@ -41,6 +41,7 @@
         libdl                                     \
         libhidlbase                               \
         libhidlmemory                             \
+        libstagefright_xmlparser@1.0              \
         android.hidl.base@1.0                     \
         android.hidl.memory@1.0                   \
         android.hardware.media@1.0                \
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index bf1418f..8c1141d 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -37,8 +37,7 @@
 // node ids are created by concatenating the pid with a 16-bit counter
 static size_t kMaxNodeInstances = (1 << 16);
 
-OMX::OMX()
-    : mMaster(new OMXMaster) {
+OMX::OMX() : mMaster(new OMXMaster), mParser() {
 }
 
 OMX::~OMX() {
@@ -119,6 +118,19 @@
         return StatusFromOMXError(err);
     }
     instance->setHandle(handle);
+    std::vector<AString> quirkVector;
+    if (mParser.getQuirks(name, &quirkVector) == OK) {
+        uint32_t quirks = 0;
+        for (const AString& quirk : quirkVector) {
+            if (quirk == "requires-allocate-on-input-ports") {
+                quirks |= kRequiresAllocateBufferOnInputPorts;
+            }
+            if (quirk == "requires-allocate-on-output-ports") {
+                quirks |= kRequiresAllocateBufferOnOutputPorts;
+            }
+        }
+        instance->setQuirks(quirks);
+    }
 
     mLiveNodes.add(IInterface::asBinder(observer), instance);
     IInterface::asBinder(observer)->linkToDeath(this);
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 807c2ea..d0f64ca 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -41,21 +41,13 @@
 #include <utils/misc.h>
 #include <utils/NativeHandle.h>
 #include <media/OMXBuffer.h>
+#include <media/vndk/xmlparser/1.0/MediaCodecsXmlParser.h>
 
 #include <hidlmemory/mapping.h>
 
 static const OMX_U32 kPortIndexInput = 0;
 static const OMX_U32 kPortIndexOutput = 1;
 
-// Quirk still supported, even though deprecated
-enum Quirks {
-    kRequiresAllocateBufferOnInputPorts   = 1,
-    kRequiresAllocateBufferOnOutputPorts  = 2,
-
-    kQuirksMask = kRequiresAllocateBufferOnInputPorts
-                | kRequiresAllocateBufferOnOutputPorts,
-};
-
 #define CLOGW(fmt, ...) ALOGW("[%p:%s] " fmt, mHandle, mName, ##__VA_ARGS__)
 
 #define CLOG_ERROR_IF(cond, fn, err, fmt, ...) \
@@ -1033,6 +1025,11 @@
     }
 
     Mutex::Autolock autoLock(mLock);
+    if (!mSailed) {
+        ALOGE("b/35467458");
+        android_errorWriteLog(0x534e4554, "35467458");
+        return BAD_VALUE;
+    }
 
     switch (omxBuffer.mBufferType) {
         case OMXBuffer::kBufferTypePreset:
@@ -1470,6 +1467,11 @@
 
     Mutex::Autolock autoLock(mLock);
 
+    if (!mSailed) {
+        ALOGE("b/35467458");
+        android_errorWriteLog(0x534e4554, "35467458");
+        return BAD_VALUE;
+    }
     BufferMeta *buffer_meta = new BufferMeta(portIndex);
 
     OMX_BUFFERHEADERTYPE *header;
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index e4e3d8f..824872f 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -17,7 +17,7 @@
 // frameworks/av/include.
 
 ndk_library {
-    name: "libmediandk.ndk",
+    name: "libmediandk",
     symbol_file: "libmediandk.map.txt",
     first_version: "21",
     unversioned_until: "current",
diff --git a/media/vndk/Android.bp b/media/vndk/Android.bp
new file mode 100644
index 0000000..a233d6c
--- /dev/null
+++ b/media/vndk/Android.bp
@@ -0,0 +1,4 @@
+subdirs = [
+    "*",
+]
+
diff --git a/media/vndk/xmlparser/1.0/Android.bp b/media/vndk/xmlparser/1.0/Android.bp
new file mode 100644
index 0000000..c48703c
--- /dev/null
+++ b/media/vndk/xmlparser/1.0/Android.bp
@@ -0,0 +1,37 @@
+cc_library_shared {
+
+    name: "libstagefright_xmlparser@1.0",
+
+    srcs: [
+        "MediaCodecsXmlParser.cpp",
+    ],
+
+    include_dirs: [
+        "frameworks/av/media/libstagefright",
+        "frameworks/av/include",
+    ],
+
+    shared_libs: [
+        "libexpat",
+        "libutils",
+        "liblog",
+        "libcutils",
+        "libstagefright_foundation",
+    ],
+
+    cflags: [
+        "-Werror",
+        "-Wall",
+    ],
+
+    clang: true,
+
+    sanitize: {
+        misc_undefined: [
+            "unsigned-integer-overflow",
+            "signed-integer-overflow",
+        ],
+    },
+
+}
+
diff --git a/media/vndk/xmlparser/1.0/MediaCodecsXmlParser.cpp b/media/vndk/xmlparser/1.0/MediaCodecsXmlParser.cpp
new file mode 100644
index 0000000..84e5514
--- /dev/null
+++ b/media/vndk/xmlparser/1.0/MediaCodecsXmlParser.cpp
@@ -0,0 +1,862 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaCodecsXmlParser"
+#include <utils/Log.h>
+
+#include <media/vndk/xmlparser/1.0/MediaCodecsXmlParser.h>
+
+#include <media/MediaCodecInfo.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/MediaErrors.h>
+
+#include <sys/stat.h>
+
+#include <expat.h>
+#include <string>
+
+#define MEDIA_CODECS_CONFIG_FILE_PATH_MAX_LENGTH 256
+
+namespace android {
+
+namespace { // Local variables and functions
+
+const char *kProfilingResults =
+        "/data/misc/media/media_codecs_profiling_results.xml";
+
+// Treblized media codec list will be located in /odm/etc or /vendor/etc.
+const char *kConfigLocationList[] =
+        {"/odm/etc", "/vendor/etc", "/etc"};
+constexpr int kConfigLocationListSize =
+        (sizeof(kConfigLocationList) / sizeof(kConfigLocationList[0]));
+
+bool findMediaCodecListFileFullPath(
+        const char *file_name, std::string *out_path) {
+    for (int i = 0; i < kConfigLocationListSize; i++) {
+        *out_path = std::string(kConfigLocationList[i]) + "/" + file_name;
+        struct stat file_stat;
+        if (stat(out_path->c_str(), &file_stat) == 0 &&
+                S_ISREG(file_stat.st_mode)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+// Find TypeInfo by name.
+std::vector<TypeInfo>::iterator findTypeInfo(
+        CodecInfo &codecInfo, const AString &typeName) {
+    return std::find_if(
+            codecInfo.mTypes.begin(), codecInfo.mTypes.end(),
+            [typeName](const auto &typeInfo) {
+                return typeInfo.mName == typeName;
+            });
+}
+
+// Convert a string into a boolean value.
+bool ParseBoolean(const char *s) {
+    if (!strcasecmp(s, "true") || !strcasecmp(s, "yes") || !strcasecmp(s, "y")) {
+        return true;
+    }
+    char *end;
+    unsigned long res = strtoul(s, &end, 10);
+    return *s != '\0' && *end == '\0' && res > 0;
+}
+
+} // unnamed namespace
+
+MediaCodecsXmlParser::MediaCodecsXmlParser() :
+    mInitCheck(NO_INIT),
+    mUpdate(false) {
+    std::string config_file_path;
+    if (findMediaCodecListFileFullPath(
+            "media_codecs.xml", &config_file_path)) {
+        parseTopLevelXMLFile(config_file_path.c_str(), false);
+    } else {
+        mInitCheck = NAME_NOT_FOUND;
+    }
+    if (findMediaCodecListFileFullPath(
+            "media_codecs_performance.xml", &config_file_path)) {
+        parseTopLevelXMLFile(config_file_path.c_str(), true);
+    }
+    parseTopLevelXMLFile(kProfilingResults, true);
+}
+
+void MediaCodecsXmlParser::parseTopLevelXMLFile(
+        const char *codecs_xml, bool ignore_errors) {
+    // get href_base
+    const char *href_base_end = strrchr(codecs_xml, '/');
+    if (href_base_end != NULL) {
+        mHrefBase = AString(codecs_xml, href_base_end - codecs_xml + 1);
+    }
+
+    mInitCheck = OK; // keeping this here for safety
+    mCurrentSection = SECTION_TOPLEVEL;
+    mDepth = 0;
+
+    parseXMLFile(codecs_xml);
+
+    if (mInitCheck != OK) {
+        if (ignore_errors) {
+            mInitCheck = OK;
+            return;
+        }
+        mCodecInfos.clear();
+        return;
+    }
+}
+
+MediaCodecsXmlParser::~MediaCodecsXmlParser() {
+}
+
+status_t MediaCodecsXmlParser::initCheck() const {
+    return mInitCheck;
+}
+
+void MediaCodecsXmlParser::parseXMLFile(const char *path) {
+    FILE *file = fopen(path, "r");
+
+    if (file == NULL) {
+        ALOGW("unable to open media codecs configuration xml file: %s", path);
+        mInitCheck = NAME_NOT_FOUND;
+        return;
+    }
+
+    ALOGV("Start parsing %s", path);
+    XML_Parser parser = ::XML_ParserCreate(NULL);
+    CHECK(parser != NULL);
+
+    ::XML_SetUserData(parser, this);
+    ::XML_SetElementHandler(
+            parser, StartElementHandlerWrapper, EndElementHandlerWrapper);
+
+    const int BUFF_SIZE = 512;
+    while (mInitCheck == OK) {
+        void *buff = ::XML_GetBuffer(parser, BUFF_SIZE);
+        if (buff == NULL) {
+            ALOGE("failed in call to XML_GetBuffer()");
+            mInitCheck = UNKNOWN_ERROR;
+            break;
+        }
+
+        int bytes_read = ::fread(buff, 1, BUFF_SIZE, file);
+        if (bytes_read < 0) {
+            ALOGE("failed in call to read");
+            mInitCheck = ERROR_IO;
+            break;
+        }
+
+        XML_Status status = ::XML_ParseBuffer(parser, bytes_read, bytes_read == 0);
+        if (status != XML_STATUS_OK) {
+            ALOGE("malformed (%s)", ::XML_ErrorString(::XML_GetErrorCode(parser)));
+            mInitCheck = ERROR_MALFORMED;
+            break;
+        }
+
+        if (bytes_read == 0) {
+            break;
+        }
+    }
+
+    ::XML_ParserFree(parser);
+
+    fclose(file);
+    file = NULL;
+}
+
+// static
+void MediaCodecsXmlParser::StartElementHandlerWrapper(
+        void *me, const char *name, const char **attrs) {
+    static_cast<MediaCodecsXmlParser *>(me)->startElementHandler(name, attrs);
+}
+
+// static
+void MediaCodecsXmlParser::EndElementHandlerWrapper(void *me, const char *name) {
+    static_cast<MediaCodecsXmlParser *>(me)->endElementHandler(name);
+}
+
+status_t MediaCodecsXmlParser::includeXMLFile(const char **attrs) {
+    const char *href = NULL;
+    size_t i = 0;
+    while (attrs[i] != NULL) {
+        if (!strcmp(attrs[i], "href")) {
+            if (attrs[i + 1] == NULL) {
+                return -EINVAL;
+            }
+            href = attrs[i + 1];
+            ++i;
+        } else {
+            ALOGE("includeXMLFile: unrecognized attribute: %s", attrs[i]);
+            return -EINVAL;
+        }
+        ++i;
+    }
+
+    // For security reasons and for simplicity, file names can only contain
+    // [a-zA-Z0-9_.] and must start with media_codecs_ and end with .xml
+    for (i = 0; href[i] != '\0'; i++) {
+        if (href[i] == '.' || href[i] == '_' ||
+                (href[i] >= '0' && href[i] <= '9') ||
+                (href[i] >= 'A' && href[i] <= 'Z') ||
+                (href[i] >= 'a' && href[i] <= 'z')) {
+            continue;
+        }
+        ALOGE("invalid include file name: %s", href);
+        return -EINVAL;
+    }
+
+    AString filename = href;
+    if (!filename.startsWith("media_codecs_") ||
+        !filename.endsWith(".xml")) {
+        ALOGE("invalid include file name: %s", href);
+        return -EINVAL;
+    }
+    filename.insert(mHrefBase, 0);
+
+    parseXMLFile(filename.c_str());
+    return mInitCheck;
+}
+
+void MediaCodecsXmlParser::startElementHandler(
+        const char *name, const char **attrs) {
+    if (mInitCheck != OK) {
+        return;
+    }
+
+    bool inType = true;
+
+    if (!strcmp(name, "Include")) {
+        mInitCheck = includeXMLFile(attrs);
+        if (mInitCheck == OK) {
+            mPastSections.push(mCurrentSection);
+            mCurrentSection = SECTION_INCLUDE;
+        }
+        ++mDepth;
+        return;
+    }
+
+    switch (mCurrentSection) {
+        case SECTION_TOPLEVEL:
+        {
+            if (!strcmp(name, "Decoders")) {
+                mCurrentSection = SECTION_DECODERS;
+            } else if (!strcmp(name, "Encoders")) {
+                mCurrentSection = SECTION_ENCODERS;
+            } else if (!strcmp(name, "Settings")) {
+                mCurrentSection = SECTION_SETTINGS;
+            }
+            break;
+        }
+
+        case SECTION_SETTINGS:
+        {
+            if (!strcmp(name, "Setting")) {
+                mInitCheck = addSettingFromAttributes(attrs);
+            }
+            break;
+        }
+
+        case SECTION_DECODERS:
+        {
+            if (!strcmp(name, "MediaCodec")) {
+                mInitCheck =
+                    addMediaCodecFromAttributes(false /* encoder */, attrs);
+
+                mCurrentSection = SECTION_DECODER;
+            }
+            break;
+        }
+
+        case SECTION_ENCODERS:
+        {
+            if (!strcmp(name, "MediaCodec")) {
+                mInitCheck =
+                    addMediaCodecFromAttributes(true /* encoder */, attrs);
+
+                mCurrentSection = SECTION_ENCODER;
+            }
+            break;
+        }
+
+        case SECTION_DECODER:
+        case SECTION_ENCODER:
+        {
+            if (!strcmp(name, "Quirk")) {
+                mInitCheck = addQuirk(attrs);
+            } else if (!strcmp(name, "Type")) {
+                mInitCheck = addTypeFromAttributes(attrs, (mCurrentSection == SECTION_ENCODER));
+                mCurrentSection =
+                    (mCurrentSection == SECTION_DECODER
+                            ? SECTION_DECODER_TYPE : SECTION_ENCODER_TYPE);
+            }
+        }
+        inType = false;
+        // fall through
+
+        case SECTION_DECODER_TYPE:
+        case SECTION_ENCODER_TYPE:
+        {
+            // ignore limits and features specified outside of type
+            bool outside = !inType && mCurrentType == mCodecInfos[mCurrentName].mTypes.end();
+            if (outside && (!strcmp(name, "Limit") || !strcmp(name, "Feature"))) {
+                ALOGW("ignoring %s specified outside of a Type", name);
+            } else if (!strcmp(name, "Limit")) {
+                mInitCheck = addLimit(attrs);
+            } else if (!strcmp(name, "Feature")) {
+                mInitCheck = addFeature(attrs);
+            }
+            break;
+        }
+
+        default:
+            break;
+    }
+
+    ++mDepth;
+}
+
+void MediaCodecsXmlParser::endElementHandler(const char *name) {
+    if (mInitCheck != OK) {
+        return;
+    }
+
+    switch (mCurrentSection) {
+        case SECTION_SETTINGS:
+        {
+            if (!strcmp(name, "Settings")) {
+                mCurrentSection = SECTION_TOPLEVEL;
+            }
+            break;
+        }
+
+        case SECTION_DECODERS:
+        {
+            if (!strcmp(name, "Decoders")) {
+                mCurrentSection = SECTION_TOPLEVEL;
+            }
+            break;
+        }
+
+        case SECTION_ENCODERS:
+        {
+            if (!strcmp(name, "Encoders")) {
+                mCurrentSection = SECTION_TOPLEVEL;
+            }
+            break;
+        }
+
+        case SECTION_DECODER_TYPE:
+        case SECTION_ENCODER_TYPE:
+        {
+            if (!strcmp(name, "Type")) {
+                mCurrentSection =
+                    (mCurrentSection == SECTION_DECODER_TYPE
+                            ? SECTION_DECODER : SECTION_ENCODER);
+
+                mCurrentType = mCodecInfos[mCurrentName].mTypes.end();
+            }
+            break;
+        }
+
+        case SECTION_DECODER:
+        {
+            if (!strcmp(name, "MediaCodec")) {
+                mCurrentSection = SECTION_DECODERS;
+                mCurrentName.clear();
+            }
+            break;
+        }
+
+        case SECTION_ENCODER:
+        {
+            if (!strcmp(name, "MediaCodec")) {
+                mCurrentSection = SECTION_ENCODERS;
+                mCurrentName.clear();
+            }
+            break;
+        }
+
+        case SECTION_INCLUDE:
+        {
+            if (!strcmp(name, "Include") && mPastSections.size() > 0) {
+                mCurrentSection = mPastSections.top();
+                mPastSections.pop();
+            }
+            break;
+        }
+
+        default:
+            break;
+    }
+
+    --mDepth;
+}
+
+status_t MediaCodecsXmlParser::addSettingFromAttributes(const char **attrs) {
+    const char *name = NULL;
+    const char *value = NULL;
+    const char *update = NULL;
+
+    size_t i = 0;
+    while (attrs[i] != NULL) {
+        if (!strcmp(attrs[i], "name")) {
+            if (attrs[i + 1] == NULL) {
+                ALOGE("addSettingFromAttributes: name is null");
+                return -EINVAL;
+            }
+            name = attrs[i + 1];
+            ++i;
+        } else if (!strcmp(attrs[i], "value")) {
+            if (attrs[i + 1] == NULL) {
+                ALOGE("addSettingFromAttributes: value is null");
+                return -EINVAL;
+            }
+            value = attrs[i + 1];
+            ++i;
+        } else if (!strcmp(attrs[i], "update")) {
+            if (attrs[i + 1] == NULL) {
+                ALOGE("addSettingFromAttributes: update is null");
+                return -EINVAL;
+            }
+            update = attrs[i + 1];
+            ++i;
+        } else {
+            ALOGE("addSettingFromAttributes: unrecognized attribute: %s", attrs[i]);
+            return -EINVAL;
+        }
+
+        ++i;
+    }
+
+    if (name == NULL || value == NULL) {
+        ALOGE("addSettingFromAttributes: name or value unspecified");
+        return -EINVAL;
+    }
+
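+    // Updating requires that the setting already exists; defining a new setting requires that it does not.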
+    mUpdate = (update != NULL) && ParseBoolean(update);
+    if (mUpdate != (mGlobalSettings.count(name) > 0)) {
+        ALOGE("addSettingFromAttributes: updating non-existing setting");
+        return -EINVAL;
+    }
+    mGlobalSettings[name] = value;
+
+    return OK;
+}
+
+status_t MediaCodecsXmlParser::addMediaCodecFromAttributes(
+        bool encoder, const char **attrs) {
+    const char *name = NULL;
+    const char *type = NULL;
+    const char *update = NULL;
+
+    size_t i = 0;
+    while (attrs[i] != NULL) {
+        if (!strcmp(attrs[i], "name")) {
+            if (attrs[i + 1] == NULL) {
+                ALOGE("addMediaCodecFromAttributes: name is null");
+                return -EINVAL;
+            }
+            name = attrs[i + 1];
+            ++i;
+        } else if (!strcmp(attrs[i], "type")) {
+            if (attrs[i + 1] == NULL) {
+                ALOGE("addMediaCodecFromAttributes: type is null");
+                return -EINVAL;
+            }
+            type = attrs[i + 1];
+            ++i;
+        } else if (!strcmp(attrs[i], "update")) {
+            if (attrs[i + 1] == NULL) {
+                ALOGE("addMediaCodecFromAttributes: update is null");
+                return -EINVAL;
+            }
+            update = attrs[i + 1];
+            ++i;
+        } else {
+            ALOGE("addMediaCodecFromAttributes: unrecognized attribute: %s", attrs[i]);
+            return -EINVAL;
+        }
+
+        ++i;
+    }
+
+    if (name == NULL) {
+        ALOGE("addMediaCodecFromAttributes: name not found");
+        return -EINVAL;
+    }
+
+    mUpdate = (update != NULL) && ParseBoolean(update);
+    if (mUpdate != (mCodecInfos.count(name) > 0)) {
+        ALOGE("addMediaCodecFromAttributes: updating non-existing codec or vice versa");
+        return -EINVAL;
+    }
+
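+    // mCodecInfos[name] creates an empty entry for a new codec (map operator[]).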
+    CodecInfo *info = &mCodecInfos[name];
+    if (mUpdate) {
+        // existing codec
+        mCurrentName = name;
+        mCurrentType = info->mTypes.begin();
+        if (type != NULL) {
+            // existing type
+            mCurrentType = findTypeInfo(*info, type);
+            if (mCurrentType == info->mTypes.end()) {
+                ALOGE("addMediaCodecFromAttributes: updating non-existing type");
+                return -EINVAL;
+            }
+        }
+    } else {
+        // new codec
+        mCurrentName = name;
+        mQuirks[name].clear();
+        info->mTypes.clear();
+        info->mTypes.emplace_back();
+        mCurrentType = --info->mTypes.end();
+        mCurrentType->mName = type;
+        info->mIsEncoder = encoder;
+    }
+
+    return OK;
+}
+
+status_t MediaCodecsXmlParser::addQuirk(const char **attrs) {
+    const char *name = NULL;
+
+    size_t i = 0;
+    while (attrs[i] != NULL) {
+        if (!strcmp(attrs[i], "name")) {
+            if (attrs[i + 1] == NULL) {
+                ALOGE("addQuirk: name is null");
+                return -EINVAL;
+            }
+            name = attrs[i + 1];
+            ++i;
+        } else {
+            ALOGE("addQuirk: unrecognized attribute: %s", attrs[i]);
+            return -EINVAL;
+        }
+
+        ++i;
+    }
+
+    if (name == NULL) {
+        ALOGE("addQuirk: name not found");
+        return -EINVAL;
+    }
+
+    mQuirks[mCurrentName].emplace_back(name);
+    return OK;
+}
+
+status_t MediaCodecsXmlParser::addTypeFromAttributes(const char **attrs, bool encoder) {
+    const char *name = NULL;
+    const char *update = NULL;
+
+    size_t i = 0;
+    while (attrs[i] != NULL) {
+        if (!strcmp(attrs[i], "name")) {
+            if (attrs[i + 1] == NULL) {
+                ALOGE("addTypeFromAttributes: name is null");
+                return -EINVAL;
+            }
+            name = attrs[i + 1];
+            ++i;
+        } else if (!strcmp(attrs[i], "update")) {
+            if (attrs[i + 1] == NULL) {
+                ALOGE("addTypeFromAttributes: update is null");
+                return -EINVAL;
+            }
+            update = attrs[i + 1];
+            ++i;
+        } else {
+            ALOGE("addTypeFromAttributes: unrecognized attribute: %s", attrs[i]);
+            return -EINVAL;
+        }
+
+        ++i;
+    }
+
+    if (name == NULL) {
+        ALOGE("addTypeFromAttributes: name not found");
+        return -EINVAL;
+    }
+
+    CodecInfo *info = &mCodecInfos[mCurrentName];
+    info->mIsEncoder = encoder;
+    mCurrentType = findTypeInfo(*info, name);
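+    // Without 'update', the Type must be new; with 'update', it must already exist.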
+    if (!mUpdate) {
+        if (mCurrentType != info->mTypes.end()) {
+            ALOGE("addTypeFromAttributes: re-defining existing type without update");
+            return -EINVAL;
+        }
+        info->mTypes.emplace_back();
+        mCurrentType = --info->mTypes.end();
+    } else if (mCurrentType == info->mTypes.end()) {
+        ALOGE("addTypeFromAttributes: updating non-existing type");
+        return -EINVAL;
+    }
+
+    return OK;
+}
+
+static status_t limitFoundMissingAttr(const AString &name, const char *attr, bool found = true) {
+    ALOGE("limit '%s' with %s'%s' attribute", name.c_str(),
+            (found ? "" : "no "), attr);
+    return -EINVAL;
+}
+
+static status_t limitError(const AString &name, const char *msg) {
+    ALOGE("limit '%s' %s", name.c_str(), msg);
+    return -EINVAL;
+}
+
+static status_t limitInvalidAttr(const AString &name, const char *attr, const AString &value) {
+    ALOGE("limit '%s' with invalid '%s' attribute (%s)", name.c_str(),
+            attr, value.c_str());
+    return -EINVAL;
+}
+
+status_t MediaCodecsXmlParser::addLimit(const char **attrs) {
+    sp<AMessage> msg = new AMessage();
+
+    size_t i = 0;
+    while (attrs[i] != NULL) {
+        if (attrs[i + 1] == NULL) {
+            ALOGE("addLimit: limit is not given");
+            return -EINVAL;
+        }
+
+        // attributes with values
+        if (!strcmp(attrs[i], "name")
+                || !strcmp(attrs[i], "default")
+                || !strcmp(attrs[i], "in")
+                || !strcmp(attrs[i], "max")
+                || !strcmp(attrs[i], "min")
+                || !strcmp(attrs[i], "range")
+                || !strcmp(attrs[i], "ranges")
+                || !strcmp(attrs[i], "scale")
+                || !strcmp(attrs[i], "value")) {
+            msg->setString(attrs[i], attrs[i + 1]);
+            ++i;
+        } else {
+            ALOGE("addLimit: unrecognized limit: %s", attrs[i]);
+            return -EINVAL;
+        }
+        ++i;
+    }
+
+    AString name;
+    if (!msg->findString("name", &name)) {
+        ALOGE("limit with no 'name' attribute");
+        return -EINVAL;
+    }
+
+    // size, block-count, bitrate, frame-rate, blocks-per-second, aspect-ratio,
+    // measured-frame-rate, measured-blocks-per-second: range
+    // quality: range + default + [scale]
+    // complexity: range + default
+    bool found;
+    if (mCurrentType == mCodecInfos[mCurrentName].mTypes.end()) {
+        ALOGW("ignoring null type");
+        return OK;
+    }
+
+    if (name == "aspect-ratio" || name == "bitrate" || name == "block-count"
+            || name == "blocks-per-second" || name == "complexity"
+            || name == "frame-rate" || name == "quality" || name == "size"
+            || name == "measured-blocks-per-second" || name.startsWith("measured-frame-rate-")) {
+        AString min, max;
+        if (msg->findString("min", &min) && msg->findString("max", &max)) {
+            min.append("-");
+            min.append(max);
+            if (msg->contains("range") || msg->contains("value")) {
+                return limitError(name, "has 'min' and 'max' as well as 'range' or "
+                        "'value' attributes");
+            }
+            msg->setString("range", min);
+        } else if (msg->contains("min") || msg->contains("max")) {
+            return limitError(name, "has only 'min' or 'max' attribute");
+        } else if (msg->findString("value", &max)) {
+            min = max;
+            min.append("-");
+            min.append(max);
+            if (msg->contains("range")) {
+                return limitError(name, "has both 'range' and 'value' attributes");
+            }
+            msg->setString("range", min);
+        }
+
+        AString range, scale = "linear", def, in_;
+        if (!msg->findString("range", &range)) {
+            return limitError(name, "with no 'range', 'value' or 'min'/'max' attributes");
+        }
+
+        if ((name == "quality" || name == "complexity") ^
+                (found = msg->findString("default", &def))) {
+            return limitFoundMissingAttr(name, "default", found);
+        }
+        if (name != "quality" && msg->findString("scale", &scale)) {
+            return limitFoundMissingAttr(name, "scale");
+        }
+        if ((name == "aspect-ratio") ^ (found = msg->findString("in", &in_))) {
+            return limitFoundMissingAttr(name, "in", found);
+        }
+
+        if (name == "aspect-ratio") {
+            if (!(in_ == "pixels") && !(in_ == "blocks")) {
+                return limitInvalidAttr(name, "in", in_);
+            }
+            in_.erase(5, 1); // (pixel|block)-aspect-ratio
+            in_.append("-");
+            in_.append(name);
+            name = in_;
+        }
+        if (name == "quality") {
+            mCurrentType->mDetails["quality-scale"] = scale;
+        }
+        if (name == "quality" || name == "complexity") {
+            AString tag = name;
+            tag.append("-default");
+            mCurrentType->mDetails[tag] = def;
+        }
+        AString tag = name;
+        tag.append("-range");
+        mCurrentType->mDetails[tag] = range;
+    } else {
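+        // Limits expressed with a single 'max', 'value' or 'ranges' attribute
+        // (e.g. channel-count, alignment, block-size, sample-rate).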
+        AString max, value, ranges;
+        if (msg->contains("default")) {
+            return limitFoundMissingAttr(name, "default");
+        } else if (msg->contains("in")) {
+            return limitFoundMissingAttr(name, "in");
+        } else if ((name == "channel-count" || name == "concurrent-instances") ^
+                (found = msg->findString("max", &max))) {
+            return limitFoundMissingAttr(name, "max", found);
+        } else if (msg->contains("min")) {
+            return limitFoundMissingAttr(name, "min");
+        } else if (msg->contains("range")) {
+            return limitFoundMissingAttr(name, "range");
+        } else if ((name == "sample-rate") ^
+                (found = msg->findString("ranges", &ranges))) {
+            return limitFoundMissingAttr(name, "ranges", found);
+        } else if (msg->contains("scale")) {
+            return limitFoundMissingAttr(name, "scale");
+        } else if ((name == "alignment" || name == "block-size") ^
+                (found = msg->findString("value", &value))) {
+            return limitFoundMissingAttr(name, "value", found);
+        }
+
+        if (max.size()) {
+            AString tag = "max-";
+            tag.append(name);
+            mCurrentType->mDetails[tag] = max;
+        } else if (value.size()) {
+            mCurrentType->mDetails[name] = value;
+        } else if (ranges.size()) {
+            AString tag = name;
+            tag.append("-ranges");
+            mCurrentType->mDetails[tag] = ranges;
+        } else {
+            ALOGW("Ignoring unrecognized limit '%s'", name.c_str());
+        }
+    }
+
+    return OK;
+}
+
+status_t MediaCodecsXmlParser::addFeature(const char **attrs) {
+    size_t i = 0;
+    const char *name = NULL;
+    int32_t optional = -1;
+    int32_t required = -1;
+    const char *value = NULL;
+
+    while (attrs[i] != NULL) {
+        if (attrs[i + 1] == NULL) {
+            ALOGE("addFeature: feature is not given");
+            return -EINVAL;
+        }
+
+        // attributes with values
+        if (!strcmp(attrs[i], "name")) {
+            name = attrs[i + 1];
+            ++i;
+        } else if (!strcmp(attrs[i], "optional") || !strcmp(attrs[i], "required")) {
+            // Use a distinct name to avoid shadowing the outer 'value' attribute pointer.
+            int boolValue = (int)ParseBoolean(attrs[i + 1]);
+            if (!strcmp(attrs[i], "optional")) {
+                optional = boolValue;
+            } else {
+                required = boolValue;
+            }
+            ++i;
+        } else if (!strcmp(attrs[i], "value")) {
+            value = attrs[i + 1];
+            ++i;
+        } else {
+            ALOGE("addFeature: unrecognized attribute: %s", attrs[i]);
+            return -EINVAL;
+        }
+        ++i;
+    }
+    if (name == NULL) {
+        ALOGE("feature with no 'name' attribute");
+        return -EINVAL;
+    }
+
+    if (optional == required && optional != -1) {
+        ALOGE("feature '%s' is both/neither optional and required", name);
+        return -EINVAL;
+    }
+
+    if (mCurrentType == mCodecInfos[mCurrentName].mTypes.end()) {
+        ALOGW("ignoring null type");
+        return OK;
+    }
+    if (value != NULL) {
+        mCurrentType->mStringFeatures[name] = value;
+    } else {
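+        // A feature is mandatory when marked required, or when explicitly marked not optional.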
+        mCurrentType->mBoolFeatures[name] = (required == 1) || (optional == 0);
+    }
+    return OK;
+}
+
+void MediaCodecsXmlParser::getGlobalSettings(
+        std::map<AString, AString> *settings) const {
+    settings->clear();
+    settings->insert(mGlobalSettings.begin(), mGlobalSettings.end());
+}
+
+status_t MediaCodecsXmlParser::getCodecInfo(const char *name, CodecInfo *info) const {
+    if (mCodecInfos.count(name) == 0) {
+        ALOGE("Codec not found with name '%s'", name);
+        return NAME_NOT_FOUND;
+    }
+    *info = mCodecInfos.at(name);
+    return OK;
+}
+
+status_t MediaCodecsXmlParser::getQuirks(const char *name, std::vector<AString> *quirks) const {
+    if (mQuirks.count(name) == 0) {
+        ALOGE("Codec not found with name '%s'", name);
+        return NAME_NOT_FOUND;
+    }
+    quirks->clear();
+    quirks->insert(quirks->end(), mQuirks.at(name).begin(), mQuirks.at(name).end());
+    return OK;
+}
+
+}  // namespace android
diff --git a/media/vndk/xmlparser/Android.bp b/media/vndk/xmlparser/Android.bp
new file mode 100644
index 0000000..a233d6c
--- /dev/null
+++ b/media/vndk/xmlparser/Android.bp
@@ -0,0 +1,4 @@
+subdirs = [
+    "*",
+]
+
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 7eb179a..42e9c6b 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -117,6 +117,22 @@
 Mutex gLock;
 wp<AudioFlinger> gAudioFlinger;
 
+// Keep a strong reference to media.log service around forever.
+// The service is within our parent process so it can never die in a way that we could observe.
+// These two variables are const after initialization.
+static sp<IBinder> sMediaLogServiceAsBinder;
+static sp<IMediaLogService> sMediaLogService;
+
+static pthread_once_t sMediaLogOnce = PTHREAD_ONCE_INIT;
+
+static void sMediaLogInit()
+{
+    sMediaLogServiceAsBinder = defaultServiceManager()->getService(String16("media.log"));
+    if (sMediaLogServiceAsBinder != 0) {
+        sMediaLogService = interface_cast<IMediaLogService>(sMediaLogServiceAsBinder);
+    }
+}
+
 // ----------------------------------------------------------------------------
 
 std::string formatToString(audio_format_t format) {
@@ -154,6 +170,7 @@
     if (doLog) {
         mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters",
                 MemoryHeapBase::READ_ONLY);
+        (void) pthread_once(&sMediaLogOnce, sMediaLogInit);
     }
 
     // reset battery stats.
@@ -230,15 +247,11 @@
     }
 
     // Tell media.log service about any old writers that still need to be unregistered
-    if (mLogMemoryDealer != 0) {
-        sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log"));
-        if (binder != 0) {
-            sp<IMediaLogService> mediaLogService(interface_cast<IMediaLogService>(binder));
-            for (size_t count = mUnregisteredWriters.size(); count > 0; count--) {
-                sp<IMemory> iMemory(mUnregisteredWriters.top()->getIMemory());
-                mUnregisteredWriters.pop();
-                mediaLogService->unregisterWriter(iMemory);
-            }
+    if (sMediaLogService != 0) {
+        for (size_t count = mUnregisteredWriters.size(); count > 0; count--) {
+            sp<IMemory> iMemory(mUnregisteredWriters.top()->getIMemory());
+            mUnregisteredWriters.pop();
+            sMediaLogService->unregisterWriter(iMemory);
         }
     }
 }
@@ -519,13 +532,10 @@
 
         // append a copy of media.log here by forwarding fd to it, but don't attempt
         // to lookup the service if it's not running, as it will block for a second
-        if (mLogMemoryDealer != 0) {
-            sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log"));
-            if (binder != 0) {
-                dprintf(fd, "\nmedia.log:\n");
-                Vector<String16> args;
-                binder->dump(fd, args);
-            }
+        if (sMediaLogServiceAsBinder != 0) {
+            dprintf(fd, "\nmedia.log:\n");
+            Vector<String16> args;
+            sMediaLogServiceAsBinder->dump(fd, args);
         }
 
         // check for optional arguments
@@ -570,16 +580,11 @@
 
 sp<NBLog::Writer> AudioFlinger::newWriter_l(size_t size, const char *name)
 {
-    // If there is no memory allocated for logs, return a dummy writer that does nothing
-    if (mLogMemoryDealer == 0) {
+    // If there is no memory allocated for logs, return a dummy writer that does nothing.
+    // Similarly if we can't contact the media.log service, also return a dummy writer.
+    if (mLogMemoryDealer == 0 || sMediaLogService == 0) {
         return new NBLog::Writer();
     }
-    sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log"));
-    // Similarly if we can't contact the media.log service, also return a dummy writer
-    if (binder == 0) {
-        return new NBLog::Writer();
-    }
-    sp<IMediaLogService> mediaLogService(interface_cast<IMediaLogService>(binder));
     sp<IMemory> shared = mLogMemoryDealer->allocate(NBLog::Timeline::sharedSize(size));
     // If allocation fails, consult the vector of previously unregistered writers
     // and garbage-collect one or more them until an allocation succeeds
@@ -590,7 +595,7 @@
                 // Pick the oldest stale writer to garbage-collect
                 sp<IMemory> iMemory(mUnregisteredWriters[0]->getIMemory());
                 mUnregisteredWriters.removeAt(0);
-                mediaLogService->unregisterWriter(iMemory);
+                sMediaLogService->unregisterWriter(iMemory);
                 // Now the media.log remote reference to IMemory is gone.  When our last local
                 // reference to IMemory also drops to zero at end of this block,
                 // the IMemory destructor will deallocate the region from mLogMemoryDealer.
@@ -609,7 +614,7 @@
     NBLog::Shared *sharedRawPtr = (NBLog::Shared *) shared->pointer();
     new((void *) sharedRawPtr) NBLog::Shared(); // placement new here, but the corresponding
                                                 // explicit destructor not needed since it is POD
-    mediaLogService->registerWriter(shared, size, name);
+    sMediaLogService->registerWriter(shared, size, name);
     return new NBLog::Writer(shared, size);
 }
 
@@ -1544,6 +1549,10 @@
 }
 
 bool AudioFlinger::MediaLogNotifier::threadLoop() {
+    // Should already have been checked, but just in case
+    if (sMediaLogService == 0) {
+        return false;
+    }
     // Wait until there are pending requests
     {
         AutoMutex _l(mMutex);
@@ -1555,11 +1564,7 @@
         mPendingRequests = false;
     }
     // Execute the actual MediaLogService binder call and ignore extra requests for a while
-    sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log"));
-    if (binder != 0) {
-        sp<IMediaLogService> mediaLogService(interface_cast<IMediaLogService>(binder));
-        mediaLogService->requestMergeWakeup();
-    }
+    sMediaLogService->requestMergeWakeup();
     usleep(kPostTriggerSleepPeriod);
     return true;
 }
@@ -2044,7 +2049,7 @@
 
             // the first primary output opened designates the primary hw device
             if ((mPrimaryHardwareDev == NULL) && (flags & AUDIO_OUTPUT_FLAG_PRIMARY)) {
-                ALOGI("Using module %d has the primary audio interface", module);
+                ALOGI("Using module %d as the primary audio interface", module);
                 mPrimaryHardwareDev = playbackThread->getOutput()->audioHwDev;
 
                 AutoMutex lock(mHardwareLock);
@@ -2133,7 +2138,7 @@
                 return BAD_VALUE;
             }
             mMmapThreads.removeItem(output);
-            ALOGV("closing mmapThread %p", mmapThread.get());
+            ALOGD("closing mmapThread %p", mmapThread.get());
         }
         const sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
         ioDesc->mIoHandle = output;
@@ -2148,7 +2153,7 @@
             closeOutputFinish(playbackThread);
         }
     } else if (mmapThread != 0) {
-        ALOGV("mmapThread exit()");
+        ALOGD("mmapThread exit()");
         mmapThread->exit();
         AudioStreamOut *out = mmapThread->clearOutput();
         ALOG_ASSERT(out != NULL, "out shouldn't be NULL");
diff --git a/services/audioflinger/FastThread.cpp b/services/audioflinger/FastThread.cpp
index caf7905..cf9fce3 100644
--- a/services/audioflinger/FastThread.cpp
+++ b/services/audioflinger/FastThread.cpp
@@ -170,7 +170,7 @@
                 }
                 int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK;
                 if (!(policy == SCHED_FIFO || policy == SCHED_RR)) {
-                    ALOGE("did not receive expected priority boost");
+                    ALOGE("did not receive expected priority boost on time");
                 }
                 // This may be overly conservative; there could be times that the normal mixer
                 // requests such a brief cold idle that it doesn't require resetting this flag.
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 3b1edec..3665875 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -447,6 +447,8 @@
         return "RECORD";
     case OFFLOAD:
         return "OFFLOAD";
+    case MMAP:
+        return "MMAP";
     default:
         return "unknown";
     }
@@ -533,7 +535,7 @@
 {
     status_t status = initCheck();
     if (status == NO_ERROR) {
-        ALOGI("AudioFlinger's thread %p ready to run", this);
+        ALOGI("AudioFlinger's thread %p tid=%d ready to run", this, getTid());
     } else {
         ALOGE("No working audio driver found.");
     }
@@ -809,14 +811,15 @@
     char buffer[SIZE];
     String8 result;
 
+    dprintf(fd, "\n%s thread %p, name %s, tid %d, type %d (%s):\n", isOutput() ? "Output" : "Input",
+            this, mThreadName, getTid(), type(), threadTypeToString(type()));
+
     bool locked = AudioFlinger::dumpTryLock(mLock);
     if (!locked) {
-        dprintf(fd, "thread %p may be deadlocked\n", this);
+        dprintf(fd, "  Thread may be deadlocked\n");
     }
 
-    dprintf(fd, "  Thread name: %s\n", mThreadName);
     dprintf(fd, "  I/O handle: %d\n", mId);
-    dprintf(fd, "  TID: %d\n", getTid());
     dprintf(fd, "  Standby: %s\n", mStandby ? "yes" : "no");
     dprintf(fd, "  Sample rate: %u Hz\n", mSampleRate);
     dprintf(fd, "  HAL frame count: %zu\n", mFrameCount);
@@ -1778,8 +1781,6 @@
 
 void AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
 {
-    dprintf(fd, "\nOutput thread %p type %d (%s):\n", this, type(), threadTypeToString(type()));
-
     dumpBase(fd, args);
 
     dprintf(fd, "  Normal frame count: %zu\n", mNormalFrameCount);
@@ -4714,34 +4715,42 @@
     dprintf(fd, "  AudioMixer tracks: 0x%08x\n", mAudioMixer->trackNames());
     dprintf(fd, "  Master mono: %s\n", mMasterMono ? "on" : "off");
 
-    // Make a non-atomic copy of fast mixer dump state so it won't change underneath us
-    // while we are dumping it.  It may be inconsistent, but it won't mutate!
-    // This is a large object so we place it on the heap.
-    // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
-    const FastMixerDumpState *copy = new FastMixerDumpState(mFastMixerDumpState);
-    copy->dump(fd);
-    delete copy;
+    if (hasFastMixer()) {
+        dprintf(fd, "  FastMixer thread %p tid=%d", mFastMixer.get(), mFastMixer->getTid());
+
+        // Make a non-atomic copy of fast mixer dump state so it won't change underneath us
+        // while we are dumping it.  It may be inconsistent, but it won't mutate!
+        // This is a large object so we place it on the heap.
+        // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
+        const FastMixerDumpState *copy = new FastMixerDumpState(mFastMixerDumpState);
+        copy->dump(fd);
+        delete copy;
 
 #ifdef STATE_QUEUE_DUMP
-    // Similar for state queue
-    StateQueueObserverDump observerCopy = mStateQueueObserverDump;
-    observerCopy.dump(fd);
-    StateQueueMutatorDump mutatorCopy = mStateQueueMutatorDump;
-    mutatorCopy.dump(fd);
+        // Similar for state queue
+        StateQueueObserverDump observerCopy = mStateQueueObserverDump;
+        observerCopy.dump(fd);
+        StateQueueMutatorDump mutatorCopy = mStateQueueMutatorDump;
+        mutatorCopy.dump(fd);
 #endif
 
+#ifdef AUDIO_WATCHDOG
+        if (mAudioWatchdog != 0) {
+            // Make a non-atomic copy of audio watchdog dump so it won't change underneath us
+            AudioWatchdogDump wdCopy = mAudioWatchdogDump;
+            wdCopy.dump(fd);
+        }
+#endif
+
+    } else {
+        dprintf(fd, "  No FastMixer\n");
+    }
+
 #ifdef TEE_SINK
     // Write the tee output to a .wav file
     dumpTee(fd, mTeeSource, mId);
 #endif
 
-#ifdef AUDIO_WATCHDOG
-    if (mAudioWatchdog != 0) {
-        // Make a non-atomic copy of audio watchdog dump so it won't change underneath us
-        AudioWatchdogDump wdCopy = mAudioWatchdogDump;
-        wdCopy.dump(fd);
-    }
-#endif
 }
 
 uint32_t AudioFlinger::MixerThread::idleSleepTimeUs() const
@@ -6791,7 +6800,7 @@
 bool AudioFlinger::RecordThread::stop(RecordThread::RecordTrack* recordTrack) {
     ALOGV("RecordThread::stop");
     AutoMutex _l(mLock);
-    if (mActiveTracks.indexOf(recordTrack) != 0 || recordTrack->mState == TrackBase::PAUSING) {
+    if (mActiveTracks.indexOf(recordTrack) < 0 || recordTrack->mState == TrackBase::PAUSING) {
         return false;
     }
     // note that threadLoop may still be processing the track at this point [without lock]
@@ -6805,7 +6814,7 @@
     // FIXME incorrect usage of wait: no explicit predicate or loop
     mStartStopCond.wait(mLock);
     // if we have been restarted, recordTrack is in mActiveTracks here
-    if (exitPending() || mActiveTracks.indexOf(recordTrack) != 0) {
+    if (exitPending() || mActiveTracks.indexOf(recordTrack) < 0) {
         ALOGV("Record stopped OK");
         return true;
     }
@@ -6872,8 +6881,6 @@
 
 void AudioFlinger::RecordThread::dumpInternals(int fd, const Vector<String16>& args)
 {
-    dprintf(fd, "\nInput thread %p:\n", this);
-
     dumpBase(fd, args);
 
     AudioStreamIn *input = mInput;
@@ -7585,7 +7592,11 @@
 
     if (mActiveTracks.size() == 0) {
         // for the first track, reuse portId and session allocated when the stream was opened
-        mHalStream->start();
+        ret = mHalStream->start();
+        if (ret != NO_ERROR) {
+            ALOGE("%s: error mHalStream->start() = %d for first track", __FUNCTION__, ret);
+            return ret;
+        }
         portId = mPortId;
         sessionId = mSessionId;
         mStandby = false;
@@ -7640,6 +7651,7 @@
 
     // abort if start is rejected by audio policy manager
     if (ret != NO_ERROR) {
+        ALOGE("%s: error start rejected by AudioPolicyManager = %d", __FUNCTION__, ret);
         if (mActiveTracks.size() != 0) {
             if (isOutput()) {
                 AudioSystem::releaseOutput(mId, streamType(), sessionId);
@@ -7935,15 +7947,17 @@
     if (isOutput() && mPrevOutDevice != mOutDevice) {
         mPrevOutDevice = type;
         sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
-        if (mCallback != 0) {
-            mCallback->onRoutingChanged(deviceId);
+        sp<MmapStreamCallback> callback = mCallback.promote();
+        if (callback != 0) {
+            callback->onRoutingChanged(deviceId);
         }
     }
     if (!isOutput() && mPrevInDevice != mInDevice) {
         mPrevInDevice = type;
         sendIoConfigEvent_l(AUDIO_INPUT_CONFIG_CHANGED);
-        if (mCallback != 0) {
-            mCallback->onRoutingChanged(deviceId);
+        sp<MmapStreamCallback> callback = mCallback.promote();
+        if (callback != 0) {
+            callback->onRoutingChanged(deviceId);
         }
     }
     return status;
@@ -8058,8 +8072,9 @@
 
 void AudioFlinger::MmapThread::threadLoop_exit()
 {
-    if (mCallback != 0) {
-        mCallback->onTearDown();
+    sp<MmapStreamCallback> callback = mCallback.promote();
+    if (callback != 0) {
+        callback->onTearDown();
     }
 }
 
@@ -8107,8 +8122,9 @@
 {
     for (const sp<MmapTrack> &track : mActiveTracks) {
         if (track->isInvalid()) {
-            if (mCallback != 0) {
-                mCallback->onTearDown();
+            sp<MmapStreamCallback> callback = mCallback.promote();
+            if (callback != 0) {
+                callback->onTearDown();
             }
             break;
         }
@@ -8124,8 +8140,6 @@
 
 void AudioFlinger::MmapThread::dumpInternals(int fd, const Vector<String16>& args)
 {
-    dprintf(fd, "\nMmap thread %p:\n", this);
-
     dumpBase(fd, args);
 
     dprintf(fd, "  Attributes: content type %d usage %d source %d\n",
@@ -8285,7 +8299,8 @@
 
         mOutput->stream->setVolume(volume, volume);
 
-        if (mCallback != 0) {
+        sp<MmapStreamCallback> callback = mCallback.promote();
+        if (callback != 0) {
             int channelCount;
             if (isOutput()) {
                 channelCount = audio_channel_count_from_out_mask(mChannelMask);
@@ -8296,7 +8311,7 @@
             for (int i = 0; i < channelCount; i++) {
                 values.add(volume);
             }
-            mCallback->onVolumeChanged(mChannelMask, values);
+            callback->onVolumeChanged(mChannelMask, values);
         }
     }
 }
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 8270e74..7469710 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -31,6 +31,7 @@
         RECORD,             // Thread class is RecordThread
         OFFLOAD,            // Thread class is OffloadThread
         MMAP                // control thread for MMAP stream
+        // If you add any values here, also update ThreadBase::threadTypeToString()
     };
 
     static const char *threadTypeToString(type_t type);
@@ -1527,7 +1528,7 @@
                 audio_session_t         mSessionId;
                 audio_port_handle_t     mPortId;
 
-                sp<MmapStreamCallback>  mCallback;
+                wp<MmapStreamCallback>  mCallback;
                 sp<StreamHalInterface>  mHalStream;
                 sp<DeviceHalInterface>  mHalDevice;
                 AudioHwDevice* const    mAudioHwDev;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
index dbdcca7..bea9f4f 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
@@ -91,8 +91,10 @@
                 AUDIO_CONFIG_BASE_INITIALIZER;
         const audio_patch_handle_t patchHandle = (provider != NULL) ? provider->getPatchHandle() :
                 AUDIO_PATCH_HANDLE_NONE;
-        mClientInterface->onRecordingConfigurationUpdate(event, mSession, mInputSource,
-                &mConfig, &deviceConfig, patchHandle);
+        if (patchHandle != AUDIO_PATCH_HANDLE_NONE) {
+            mClientInterface->onRecordingConfigurationUpdate(event, mSession, mInputSource,
+                    &mConfig, &deviceConfig, patchHandle);
+        }
     }
 
     return mActiveCount;
@@ -126,9 +128,11 @@
                 AUDIO_CONFIG_BASE_INITIALIZER;
         const audio_patch_handle_t patchHandle = (provider != NULL) ? provider->getPatchHandle() :
                 AUDIO_PATCH_HANDLE_NONE;
-        mClientInterface->onRecordingConfigurationUpdate(RECORD_CONFIG_EVENT_START,
-                mSession, mInputSource,
-                &mConfig, &deviceConfig, patchHandle);
+        if (patchHandle != AUDIO_PATCH_HANDLE_NONE) {
+            mClientInterface->onRecordingConfigurationUpdate(RECORD_CONFIG_EVENT_START,
+                    mSession, mInputSource,
+                    &mConfig, &deviceConfig, patchHandle);
+        }
     }
 }
 
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/PolicyConfigurableDomains.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/PolicyConfigurableDomains.xml
index aa2af0f..b43f83b 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/PolicyConfigurableDomains.xml
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/PolicyConfigurableDomains.xml
@@ -3062,7 +3062,7 @@
         <CompoundRule Type="All">
           <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
           <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
-          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="Is" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dp"/>
         </CompoundRule>
       </Configuration>
@@ -3070,7 +3070,7 @@
         <CompoundRule Type="All">
           <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
           <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
-          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="Is" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpHeadphones"/>
         </CompoundRule>
       </Configuration>
@@ -3078,7 +3078,7 @@
         <CompoundRule Type="All">
           <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
           <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
-          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="Is" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpSpeaker"/>
         </CompoundRule>
       </Configuration>
@@ -6472,7 +6472,7 @@
               <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
               <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
               <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="UsbAccessory"/>
-              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceSpeaker"/>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
             </CompoundRule>
           </CompoundRule>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbDevice"/>
@@ -8416,6 +8416,7 @@
       <ConfigurableElement Path="/Policy/policy/usages/assistance_navigation_guidance/applicable_strategy/strategy"/>
       <ConfigurableElement Path="/Policy/policy/usages/assistance_sonification/applicable_strategy/strategy"/>
       <ConfigurableElement Path="/Policy/policy/usages/game/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/usages/assistant/applicable_strategy/strategy"/>
     </ConfigurableElements>
     <Settings>
       <Configuration Name="Calibration">
@@ -8461,6 +8462,9 @@
         <ConfigurableElement Path="/Policy/policy/usages/game/applicable_strategy/strategy">
           <EnumParameter Name="strategy">media</EnumParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/usages/assistant/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">media</EnumParameter>
+        </ConfigurableElement>
       </Configuration>
     </Settings>
   </ConfigurableDomain>
@@ -8738,6 +8742,7 @@
       <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/loopback"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/ip"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/bus"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/stub"/>
     </ConfigurableElements>
     <Settings>
       <Configuration Name="Calibration">
@@ -9428,6 +9433,9 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/bus">
           <BitParameter Name="bus">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/stub">
+          <BitParameter Name="stub">0</BitParameter>
+        </ConfigurableElement>
       </Configuration>
     </Settings>
   </ConfigurableDomain>
@@ -9758,7 +9766,7 @@
       </Configuration>
     </Settings>
   </ConfigurableDomain>
-  <ConfigurableDomain Name="DeviceForInputSource.VoiceRecognitionAndHotword" SequenceAware="false">
+  <ConfigurableDomain Name="DeviceForInputSource.VoiceRecognitionAndUnprocessedAndHotword" SequenceAware="false">
     <Configurations>
       <Configuration Name="ScoHeadset">
         <CompoundRule Type="All">
@@ -9790,6 +9798,10 @@
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/wired_headset"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/usb_device"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/wired_headset"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/usb_device"/>
@@ -9809,6 +9821,18 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
           <BitParameter Name="builtin_mic">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
           <BitParameter Name="bluetooth_sco_headset">1</BitParameter>
         </ConfigurableElement>
@@ -9835,6 +9859,18 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
           <BitParameter Name="builtin_mic">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
           <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
         </ConfigurableElement>
@@ -9861,6 +9897,18 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
           <BitParameter Name="builtin_mic">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
           <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
         </ConfigurableElement>
@@ -9887,6 +9935,18 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
           <BitParameter Name="builtin_mic">1</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">1</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
           <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
         </ConfigurableElement>
@@ -9913,6 +9973,18 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
           <BitParameter Name="builtin_mic">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
           <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
         </ConfigurableElement>
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
index ecd56b0..eb11980 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
@@ -375,7 +375,7 @@
 						TelephonyMode IsNot InCall
 						TelephonyMode IsNot InCommunication
 						AvailableOutputDevices Excludes UsbAccessory
-						ForceUseForCommunication Is ForceSpeaker
+						ForceUseForMedia IsNot ForceSpeaker
 				AvailableOutputDevices Includes UsbDevice
 
 				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw
index b30aa4c..cee7cd1 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw
@@ -78,7 +78,7 @@
 				#
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
-				ForceUseForMedia Is ForceNoBtA2dp
+				ForceUseForMedia IsNot ForceNoBtA2dp
 				AvailableOutputDevices Includes BluetoothA2dp
 
 				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
@@ -105,7 +105,7 @@
 				#
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
-				ForceUseForMedia Is ForceNoBtA2dp
+				ForceUseForMedia IsNot ForceNoBtA2dp
 				AvailableOutputDevices Includes BluetoothA2dpHeadphones
 
 				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
@@ -132,7 +132,7 @@
 				#
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
-				ForceUseForMedia Is ForceNoBtA2dp
+				ForceUseForMedia IsNot ForceNoBtA2dp
 				AvailableOutputDevices Includes BluetoothA2dpSpeaker
 
 				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_usage.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_usage.pfw
index 3f5da13..b3115e7 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_usage.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_usage.pfw
@@ -16,6 +16,7 @@
 			/Policy/policy/usages/assistance_navigation_guidance/applicable_strategy/strategy = media
 			/Policy/policy/usages/assistance_sonification/applicable_strategy/strategy = media
 			/Policy/policy/usages/game/applicable_strategy/strategy = media
+			/Policy/policy/usages/assistant/applicable_strategy/strategy = media
 
 	domain: AssistanceAccessibility
 		conf: Sonification
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml
index 71b2b62..ad9c356 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml
@@ -68,7 +68,7 @@
 
         <!--#################### USAGE BEGIN ####################-->
 
-        <ComponentType Name="Usages" Description="associated to audio_stream_type_t definition,
+        <ComponentType Name="Usages" Description="associated to audio_usage_t definition,
                              identifier mapping must match the value of the enum">
             <Component Name="unknown" Type="Usage" Mapping="Amend1:Unknown,Identifier:0"/>
             <Component Name="media" Type="Usage" Mapping="Amend1:Media,Identifier:1"/>
@@ -97,6 +97,7 @@
             <Component Name="game" Type="Usage" Mapping="Amend1:BluetoothSco,Identifier:14"/>
             <Component Name="virtual_source" Type="Usage"
                                              Mapping="Amend1:VirtualSource,Identifier:15"/>
+            <Component Name="assistant" Type="Usage" Mapping="Amend1:Assistant,Identifier:16"/>
         </ComponentType>
 
         <!--#################### USAGE END ####################-->
diff --git a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
index b18df5f..c1d6e85 100644
--- a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
+++ b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
@@ -352,12 +352,12 @@
     metadataCopy.update(ANDROID_CONTROL_SCENE_MODE, &sceneMode, 1);
     // However appending from same vendor tag provider should be fine
     ASSERT_EQ(OK, metadata.append(secondMetadata));
-    // Append from a metadata without vendor tag provider should not be supported
+    // Append from a metadata without vendor tag provider should be supported
     CameraMetadata regularMetadata(10, 20);
     uint8_t controlMode = ANDROID_CONTROL_MODE_AUTO;
     regularMetadata.update(ANDROID_CONTROL_MODE, &controlMode, 1);
-    ASSERT_NE(OK, secondMetadata.append(regularMetadata));
-    ASSERT_EQ(1u, secondMetadata.entryCount());
+    ASSERT_EQ(OK, secondMetadata.append(regularMetadata));
+    ASSERT_EQ(2u, secondMetadata.entryCount());
     ASSERT_EQ(2u, metadata.entryCount());
 
     // Dump
diff --git a/services/oboeservice/AAudioEndpointManager.cpp b/services/oboeservice/AAudioEndpointManager.cpp
new file mode 100644
index 0000000..84fa227
--- /dev/null
+++ b/services/oboeservice/AAudioEndpointManager.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <map>
+#include <mutex>
+#include <utils/Singleton.h>
+
+#include "AAudioEndpointManager.h"
+#include "AAudioServiceEndpoint.h"
+
+using namespace android;
+using namespace aaudio;
+
+ANDROID_SINGLETON_STATIC_INSTANCE(AAudioEndpointManager);
+
+AAudioEndpointManager::AAudioEndpointManager()
+        : Singleton<AAudioEndpointManager>() {
+}
+
+AAudioServiceEndpoint *AAudioEndpointManager::findEndpoint(AAudioService &audioService, int32_t deviceId,
+                                                           aaudio_direction_t direction) {
+    AAudioServiceEndpoint *endpoint = nullptr;
+    std::lock_guard<std::mutex> lock(mLock);
+    switch (direction) {
+        case AAUDIO_DIRECTION_INPUT:
+            endpoint = mInputs[deviceId];
+            break;
+        case AAUDIO_DIRECTION_OUTPUT:
+            endpoint = mOutputs[deviceId];
+            break;
+        default:
+            assert(false); // There are only two possible directions.
+            break;
+    }
+
+    ALOGD("AAudioEndpointManager::findEndpoint(), found %p", endpoint);
+    // If we can't find an existing endpoint then open a new one.
+    if (endpoint == nullptr) {
+        endpoint = new AAudioServiceEndpoint(audioService);
+        if (endpoint->open(deviceId, direction) != AAUDIO_OK) {
+            ALOGD("AAudioEndpointManager::findEndpoint(), open failed");
+            delete endpoint;
+            endpoint = nullptr;
+        } else {
+            switch(direction) {
+                case AAUDIO_DIRECTION_INPUT:
+                    mInputs[deviceId] = endpoint;
+                    break;
+                case AAUDIO_DIRECTION_OUTPUT:
+                    mOutputs[deviceId] = endpoint;
+                    break;
+            }
+        }
+    }
+    return endpoint;
+}
+
+// FIXME add a reference count for serviceEndpoints so each one is removed on last use.
+
+void AAudioEndpointManager::removeEndpoint(AAudioServiceEndpoint *serviceEndpoint) {
+    aaudio_direction_t direction = serviceEndpoint->getDirection();
+    int32_t deviceId = serviceEndpoint->getDeviceId();
+
+    std::lock_guard<std::mutex> lock(mLock);
+    switch(direction) {
+        case AAUDIO_DIRECTION_INPUT:
+            mInputs.erase(deviceId);
+            break;
+        case AAUDIO_DIRECTION_OUTPUT:
+            mOutputs.erase(deviceId);
+            break;
+    }
+}
\ No newline at end of file
diff --git a/services/oboeservice/AAudioEndpointManager.h b/services/oboeservice/AAudioEndpointManager.h
new file mode 100644
index 0000000..48b27f0
--- /dev/null
+++ b/services/oboeservice/AAudioEndpointManager.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_ENDPOINT_MANAGER_H
+#define AAUDIO_AAUDIO_ENDPOINT_MANAGER_H
+
+#include <map>
+#include <mutex>
+#include <utils/Singleton.h>
+
+#include "binding/AAudioServiceMessage.h"
+#include "AAudioServiceEndpoint.h"
+
+namespace aaudio {
+
+class AAudioEndpointManager : public android::Singleton<AAudioEndpointManager> {
+public:
+    AAudioEndpointManager();
+    ~AAudioEndpointManager() = default;
+
+    /**
+     * Find a service endpoint for the given deviceId and direction.
+     * If an endpoint does not already exist then it will try to create one.
+     *
+     * @param audioService the AAudio service that will own the endpoint
+     * @param deviceId the audio device to match, or 0 if unspecified
+     * @param direction AAUDIO_DIRECTION_INPUT or AAUDIO_DIRECTION_OUTPUT
+     * @return endpoint or nullptr if the device could not be opened
+     */
+    AAudioServiceEndpoint *findEndpoint(android::AAudioService &audioService,
+                                        int32_t deviceId,
+                                        aaudio_direction_t direction);
+
+    void removeEndpoint(AAudioServiceEndpoint *serviceEndpoint);
+
+private:
+
+    std::mutex    mLock;
+
+    // We need separate inputs and outputs because they may both have device==0.
+    // TODO review
+    std::map<int32_t, AAudioServiceEndpoint *> mInputs;
+    std::map<int32_t, AAudioServiceEndpoint *> mOutputs;
+
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_AAUDIO_ENDPOINT_MANAGER_H
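AAudioEndpointManager is an android::Singleton, so service code reaches it through getInstance(). Below is a minimal sketch of how a shared stream might acquire an endpoint; the helper name attachToEndpoint and the choice of error code are illustrative only and are not part of this patch.

    // Illustrative sketch, not part of the patch: acquire an output endpoint
    // from the singleton manager and hand it back to the caller.
    #include "AAudioEndpointManager.h"

    aaudio_result_t attachToEndpoint(android::AAudioService &service,
                                     int32_t deviceId,
                                     aaudio::AAudioServiceEndpoint **endpointOut) {
        aaudio::AAudioEndpointManager &manager = aaudio::AAudioEndpointManager::getInstance();
        aaudio::AAudioServiceEndpoint *endpoint =
                manager.findEndpoint(service, deviceId, AAUDIO_DIRECTION_OUTPUT);
        if (endpoint == nullptr) {
            return AAUDIO_ERROR_INTERNAL; // findEndpoint() failed to open the device
        }
        *endpointOut = endpoint;
        return AAUDIO_OK;
    }
    // When the last stream detaches, the owner would call
    // manager.removeEndpoint(endpoint) and then endpoint->close().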
diff --git a/services/oboeservice/AAudioMixer.cpp b/services/oboeservice/AAudioMixer.cpp
new file mode 100644
index 0000000..70da339
--- /dev/null
+++ b/services/oboeservice/AAudioMixer.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <cstring>
+#include "AAudioMixer.h"
+
+using android::WrappingBuffer;
+using android::FifoBuffer;
+using android::fifo_frames_t;
+
+AAudioMixer::~AAudioMixer() {
+    delete[] mOutputBuffer;
+}
+
+void AAudioMixer::allocate(int32_t samplesPerFrame, int32_t framesPerBurst) {
+    mSamplesPerFrame = samplesPerFrame;
+    mFramesPerBurst = framesPerBurst;
+    int32_t samplesPerBuffer = samplesPerFrame * framesPerBurst;
+    mOutputBuffer = new float[samplesPerBuffer];
+    mBufferSizeInBytes = samplesPerBuffer * sizeof(float);
+}
+
+void AAudioMixer::clear() {
+    memset(mOutputBuffer, 0, mBufferSizeInBytes);
+}
+
+void AAudioMixer::mix(FifoBuffer *fifo, float volume) {
+    WrappingBuffer wrappingBuffer;
+    float *destination = mOutputBuffer;
+    fifo_frames_t framesLeft = mFramesPerBurst;
+
+    // Gather the data from the client. May be in two parts.
+    fifo->getFullDataAvailable(&wrappingBuffer);
+
+    // Mix data in one or two parts.
+    int partIndex = 0;
+    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
+        fifo_frames_t framesToMix = framesLeft;
+        fifo_frames_t framesAvailable = wrappingBuffer.numFrames[partIndex];
+        if (framesAvailable > 0) {
+            if (framesToMix > framesAvailable) {
+                framesToMix = framesAvailable;
+            }
+            mixPart(destination, (float *)wrappingBuffer.data[partIndex], framesToMix, volume);
+
+            destination += framesToMix * mSamplesPerFrame;
+            framesLeft -= framesToMix;
+        }
+        partIndex++;
+    }
+    fifo->getFifoControllerBase()->advanceReadIndex(mFramesPerBurst - framesLeft);
+    if (framesLeft > 0) {
+        ALOGW("AAudioMixer::mix() UNDERFLOW by %d / %d frames ----- UNDERFLOW !!!!!!!!!!",
+              framesLeft, mFramesPerBurst);
+    }
+}
+
+void AAudioMixer::mixPart(float *destination, float *source, int32_t numFrames, float volume) {
+    int32_t numSamples = numFrames * mSamplesPerFrame;
+    // TODO maybe optimize using SIMD
+    for (int sampleIndex = 0; sampleIndex < numSamples; sampleIndex++) {
+        *destination++ += *source++ * volume;
+    }
+}
+
+float *AAudioMixer::getOutputBuffer() {
+    return mOutputBuffer;
+}
diff --git a/services/oboeservice/AAudioMixer.h b/services/oboeservice/AAudioMixer.h
new file mode 100644
index 0000000..2191183
--- /dev/null
+++ b/services/oboeservice/AAudioMixer.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_MIXER_H
+#define AAUDIO_AAUDIO_MIXER_H
+
+#include <stdint.h>
+
+#include <aaudio/AAudio.h>
+#include <fifo/FifoBuffer.h>
+
+class AAudioMixer {
+public:
+    AAudioMixer() {}
+    ~AAudioMixer();
+
+    void allocate(int32_t samplesPerFrame, int32_t framesPerBurst);
+
+    void clear();
+
+    void mix(android::FifoBuffer *fifo, float volume);
+
+    void mixPart(float *destination, float *source, int32_t numFrames, float volume);
+
+    float *getOutputBuffer();
+
+private:
+    float   *mOutputBuffer = nullptr;
+    int32_t  mSamplesPerFrame = 0;
+    int32_t  mFramesPerBurst = 0;
+    int32_t  mBufferSizeInBytes = 0;
+};
+
+
+#endif //AAUDIO_AAUDIO_MIXER_H
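For context, AAudioMixer accumulates each client FIFO into a single float burst buffer that the endpoint then writes to the shared stream. A minimal sketch of one mix cycle follows; mixOneBurst and the 0.5f volume are illustrative, and allocate() is assumed to have been called once with the stream's samplesPerFrame and framesPerBurst.

    // Illustrative sketch, not part of the patch: one mix cycle over a set of client FIFOs.
    #include <vector>
    #include "AAudioMixer.h"

    void mixOneBurst(AAudioMixer &mixer, const std::vector<android::FifoBuffer *> &fifos) {
        mixer.clear();                              // zero the accumulation buffer
        for (android::FifoBuffer *fifo : fifos) {
            mixer.mix(fifo, 0.5f);                  // read, scale and accumulate one client burst
        }
        float *burst = mixer.getOutputBuffer();     // samplesPerFrame * framesPerBurst floats
        (void) burst; // a real caller writes this burst to the shared MMAP stream
    }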
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 99b0b4d..e4fa1c5 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -18,28 +18,29 @@
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
-#include <time.h>
-#include <pthread.h>
+//#include <time.h>
+//#include <pthread.h>
 
 #include <aaudio/AAudioDefinitions.h>
+#include <mediautils/SchedulingPolicyService.h>
+#include <utils/String16.h>
 
-#include "HandleTracker.h"
-#include "IAAudioService.h"
-#include "AAudioServiceDefinitions.h"
+#include "binding/AAudioServiceMessage.h"
 #include "AAudioService.h"
-#include "AAudioServiceStreamFakeHal.h"
+#include "AAudioServiceStreamMMAP.h"
+#include "AAudioServiceStreamShared.h"
+#include "AAudioServiceStreamMMAP.h"
+#include "binding/IAAudioService.h"
+#include "utility/HandleTracker.h"
 
 using namespace android;
 using namespace aaudio;
 
 typedef enum
 {
-    AAUDIO_HANDLE_TYPE_DUMMY1, // TODO remove DUMMYs
-    AAUDIO_HANDLE_TYPE_DUMMY2, // make server handles different than client
-    AAUDIO_HANDLE_TYPE_STREAM,
-    AAUDIO_HANDLE_TYPE_COUNT
+    AAUDIO_HANDLE_TYPE_STREAM
 } aaudio_service_handle_type_t;
-static_assert(AAUDIO_HANDLE_TYPE_COUNT <= HANDLE_TRACKER_MAX_TYPES, "Too many handle types.");
+static_assert(AAUDIO_HANDLE_TYPE_STREAM < HANDLE_TRACKER_MAX_TYPES, "Too many handle types.");
 
 android::AAudioService::AAudioService()
     : BnAAudioService() {
@@ -48,18 +49,50 @@
 AAudioService::~AAudioService() {
 }
 
-aaudio_handle_t AAudioService::openStream(aaudio::AAudioStreamRequest &request,
-                                                aaudio::AAudioStreamConfiguration &configuration) {
-    AAudioServiceStreamBase *serviceStream =  new AAudioServiceStreamFakeHal();
-    ALOGD("AAudioService::openStream(): created serviceStream = %p", serviceStream);
-    aaudio_result_t result = serviceStream->open(request, configuration);
-    if (result < 0) {
-        ALOGE("AAudioService::openStream(): open returned %d", result);
+aaudio_handle_t AAudioService::openStream(const aaudio::AAudioStreamRequest &request,
+                                          aaudio::AAudioStreamConfiguration &configurationOutput) {
+    aaudio_result_t result = AAUDIO_OK;
+    AAudioServiceStreamBase *serviceStream = nullptr;
+    const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
+    aaudio_sharing_mode_t sharingMode = configurationInput.getSharingMode();
+    ALOGE("AAudioService::openStream(): sharingMode = %d", sharingMode);
+
+    if (sharingMode != AAUDIO_SHARING_MODE_EXCLUSIVE && sharingMode != AAUDIO_SHARING_MODE_SHARED) {
+        ALOGE("AAudioService::openStream(): unrecognized sharing mode = %d", sharingMode);
+        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+    }
+
+    if (sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE) {
+        ALOGD("AAudioService::openStream(), sharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE");
+        serviceStream = new AAudioServiceStreamMMAP();
+        result = serviceStream->open(request, configurationOutput);
+        if (result != AAUDIO_OK) {
+            // fall back to using a shared stream
+            ALOGD("AAudioService::openStream(), EXCLUSIVE mode failed");
+            delete serviceStream;
+            serviceStream = nullptr;
+        } else {
+            configurationOutput.setSharingMode(AAUDIO_SHARING_MODE_EXCLUSIVE);
+        }
+    }
+
+    // if SHARED requested or if EXCLUSIVE failed
+    if (serviceStream == nullptr) {
+        ALOGD("AAudioService::openStream(), sharingMode = AAUDIO_SHARING_MODE_SHARED");
+        serviceStream =  new AAudioServiceStreamShared(*this);
+        result = serviceStream->open(request, configurationOutput);
+        configurationOutput.setSharingMode(AAUDIO_SHARING_MODE_SHARED);
+    }
+
+    if (result != AAUDIO_OK) {
+        delete serviceStream;
+        ALOGE("AAudioService::openStream(): failed, return %d", result);
         return result;
     } else {
         aaudio_handle_t handle = mHandleTracker.put(AAUDIO_HANDLE_TYPE_STREAM, serviceStream);
         ALOGD("AAudioService::openStream(): handle = 0x%08X", handle);
         if (handle < 0) {
+            ALOGE("AAudioService::openStream(): handle table full");
             delete serviceStream;
         }
         return handle;
@@ -72,7 +105,7 @@
                                   streamHandle);
     ALOGD("AAudioService.closeStream(0x%08X)", streamHandle);
     if (serviceStream != nullptr) {
-        ALOGD("AAudioService::closeStream(): deleting serviceStream = %p", serviceStream);
+        serviceStream->close();
         delete serviceStream;
         return AAUDIO_OK;
     }
@@ -89,27 +122,32 @@
                 aaudio_handle_t streamHandle,
                 aaudio::AudioEndpointParcelable &parcelable) {
     AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
-    ALOGD("AAudioService::getStreamDescription(), serviceStream = %p", serviceStream);
     if (serviceStream == nullptr) {
+        ALOGE("AAudioService::getStreamDescription(), illegal stream handle = 0x%0x", streamHandle);
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
-    return serviceStream->getDescription(parcelable);
+    ALOGD("AAudioService::getStreamDescription(), handle = 0x%08x", streamHandle);
+    aaudio_result_t result = serviceStream->getDescription(parcelable);
+    ALOGD("AAudioService::getStreamDescription(), result = %d", result);
+    // parcelable.dump();
+    return result;
 }
 
 aaudio_result_t AAudioService::startStream(aaudio_handle_t streamHandle) {
     AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
-    ALOGD("AAudioService::startStream(), serviceStream = %p", serviceStream);
     if (serviceStream == nullptr) {
+        ALOGE("AAudioService::startStream(), illegal stream handle = 0x%0x", streamHandle);
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
     aaudio_result_t result = serviceStream->start();
+    ALOGD("AAudioService::startStream(), serviceStream->start() returned %d", result);
     return result;
 }
 
 aaudio_result_t AAudioService::pauseStream(aaudio_handle_t streamHandle) {
     AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
-    ALOGD("AAudioService::pauseStream(), serviceStream = %p", serviceStream);
     if (serviceStream == nullptr) {
+        ALOGE("AAudioService::pauseStream(), illegal stream handle = 0x%0x", streamHandle);
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
     aaudio_result_t result = serviceStream->pause();
@@ -118,35 +156,33 @@
 
 aaudio_result_t AAudioService::flushStream(aaudio_handle_t streamHandle) {
     AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
-    ALOGD("AAudioService::flushStream(), serviceStream = %p", serviceStream);
     if (serviceStream == nullptr) {
+        ALOGE("AAudioService::flushStream(), illegal stream handle = 0x%0x", streamHandle);
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
     return serviceStream->flush();
 }
 
 aaudio_result_t AAudioService::registerAudioThread(aaudio_handle_t streamHandle,
+                                                         pid_t clientProcessId,
                                                          pid_t clientThreadId,
                                                          int64_t periodNanoseconds) {
     AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
     ALOGD("AAudioService::registerAudioThread(), serviceStream = %p", serviceStream);
     if (serviceStream == nullptr) {
-        ALOGE("AAudioService::registerAudioThread(), serviceStream == nullptr");
+        ALOGE("AAudioService::registerAudioThread(), illegal stream handle = 0x%0x", streamHandle);
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
     if (serviceStream->getRegisteredThread() != AAudioServiceStreamBase::ILLEGAL_THREAD_ID) {
         ALOGE("AAudioService::registerAudioThread(), thread already registered");
-        return AAUDIO_ERROR_INVALID_ORDER;
+        return AAUDIO_ERROR_INVALID_STATE;
     }
     serviceStream->setRegisteredThread(clientThreadId);
-    // Boost client thread to SCHED_FIFO
-    struct sched_param sp;
-    memset(&sp, 0, sizeof(sp));
-    sp.sched_priority = 2; // TODO use 'requestPriority' function from frameworks/av/media/utils
-    int err = sched_setscheduler(clientThreadId, SCHED_FIFO, &sp);
+    int err = android::requestPriority(clientProcessId, clientThreadId,
+                                       DEFAULT_AUDIO_PRIORITY, true /* isForApp */);
     if (err != 0){
-        ALOGE("AAudioService::sched_setscheduler() failed, errno = %d, priority = %d",
-              errno, sp.sched_priority);
+        ALOGE("AAudioService::registerAudioThread() failed, errno = %d, priority = %d",
+              errno, DEFAULT_AUDIO_PRIORITY);
         return AAUDIO_ERROR_INTERNAL;
     } else {
         return AAUDIO_OK;
@@ -154,11 +190,13 @@
 }
 
 aaudio_result_t AAudioService::unregisterAudioThread(aaudio_handle_t streamHandle,
-                                                           pid_t clientThreadId) {
+                                                     pid_t clientProcessId,
+                                                     pid_t clientThreadId) {
     AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
     ALOGI("AAudioService::unregisterAudioThread(), serviceStream = %p", serviceStream);
     if (serviceStream == nullptr) {
-        ALOGE("AAudioService::unregisterAudioThread(), serviceStream == nullptr");
+        ALOGE("AAudioService::unregisterAudioThread(), illegal stream handle = 0x%0x",
+              streamHandle);
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
     if (serviceStream->getRegisteredThread() != clientThreadId) {
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
index a520d7a..5a7a2b6 100644
--- a/services/oboeservice/AAudioService.h
+++ b/services/oboeservice/AAudioService.h
@@ -22,17 +22,19 @@
 
 #include <binder/BinderService.h>
 
-#include <aaudio/AAudioDefinitions.h>
 #include <aaudio/AAudio.h>
 #include "utility/HandleTracker.h"
-#include "IAAudioService.h"
+#include "binding/IAAudioService.h"
+#include "binding/AAudioServiceInterface.h"
+
 #include "AAudioServiceStreamBase.h"
 
 namespace android {
 
 class AAudioService :
     public BinderService<AAudioService>,
-    public BnAAudioService
+    public BnAAudioService,
+    public aaudio::AAudioServiceInterface
 {
     friend class BinderService<AAudioService>;
 
@@ -40,9 +42,9 @@
     AAudioService();
     virtual ~AAudioService();
 
-    static const char* getServiceName() { return "media.audio_aaudio"; }
+    static const char* getServiceName() { return AAUDIO_SERVICE_NAME; }
 
-    virtual aaudio_handle_t openStream(aaudio::AAudioStreamRequest &request,
+    virtual aaudio_handle_t openStream(const aaudio::AAudioStreamRequest &request,
                                      aaudio::AAudioStreamConfiguration &configuration);
 
     virtual aaudio_result_t closeStream(aaudio_handle_t streamHandle);
@@ -58,9 +60,11 @@
     virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle);
 
     virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
-                                              pid_t pid, int64_t periodNanoseconds) ;
+                                              pid_t pid, pid_t tid,
+                                              int64_t periodNanoseconds) ;
 
-    virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle, pid_t pid);
+    virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
+                                                  pid_t pid, pid_t tid);
 
 private:
 
@@ -68,6 +72,9 @@
 
     HandleTracker mHandleTracker;
 
+    enum constants {
+        DEFAULT_AUDIO_PRIORITY = 2
+    };
 };
 
 } /* namespace android */
diff --git a/services/oboeservice/AAudioServiceDefinitions.h b/services/oboeservice/AAudioServiceDefinitions.h
deleted file mode 100644
index f98acbf..0000000
--- a/services/oboeservice/AAudioServiceDefinitions.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef AAUDIO_AAUDIO_SERVICE_DEFINITIONS_H
-#define AAUDIO_AAUDIO_SERVICE_DEFINITIONS_H
-
-#include <stdint.h>
-
-#include <aaudio/AAudio.h>
-
-#include "binding/RingBufferParcelable.h"
-
-namespace aaudio {
-
-// TODO move this an "include" folder for the service.
-
-struct AAudioMessageTimestamp {
-    int64_t position;
-    int64_t                deviceOffset; // add to client position to get device position
-    int64_t     timestamp;
-};
-
-typedef enum aaudio_service_event_e : uint32_t {
-    AAUDIO_SERVICE_EVENT_STARTED,
-    AAUDIO_SERVICE_EVENT_PAUSED,
-    AAUDIO_SERVICE_EVENT_FLUSHED,
-    AAUDIO_SERVICE_EVENT_CLOSED,
-    AAUDIO_SERVICE_EVENT_DISCONNECTED
-} aaudio_service_event_t;
-
-struct AAudioMessageEvent {
-    aaudio_service_event_t event;
-    int32_t data1;
-    int64_t data2;
-};
-
-typedef struct AAudioServiceMessage_s {
-    enum class code : uint32_t {
-        NOTHING,
-        TIMESTAMP,
-        EVENT,
-    };
-
-    code what;
-    union {
-        AAudioMessageTimestamp timestamp;
-        AAudioMessageEvent event;
-    };
-} AAudioServiceMessage;
-
-} /* namespace aaudio */
-
-#endif //AAUDIO_AAUDIO_SERVICE_DEFINITIONS_H
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
new file mode 100644
index 0000000..80551c9
--- /dev/null
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <mutex>
+#include <vector>
+
+#include "core/AudioStreamBuilder.h"
+#include "AAudioServiceEndpoint.h"
+#include "AAudioServiceStreamShared.h"
+
+using namespace android;  // TODO just import names needed
+using namespace aaudio;   // TODO just import names needed
+
+#define MIN_TIMEOUT_NANOS        (1000 * AAUDIO_NANOS_PER_MILLISECOND)
+
+// Wait at least this many times longer than the operation should take.
+#define MIN_TIMEOUT_OPERATIONS    4
+
+// The mStreamInternal will use a service interface that does not go through Binder.
+AAudioServiceEndpoint::AAudioServiceEndpoint(AAudioService &audioService)
+        : mStreamInternal(audioService, true)
+        {
+}
+
+AAudioServiceEndpoint::~AAudioServiceEndpoint() {
+}
+
+// Set up an EXCLUSIVE MMAP stream that will be shared.
+aaudio_result_t AAudioServiceEndpoint::open(int32_t deviceId, aaudio_direction_t direction) {
+    AudioStreamBuilder builder;
+    builder.setSharingMode(AAUDIO_SHARING_MODE_EXCLUSIVE);
+    builder.setDeviceId(deviceId);
+    builder.setDirection(direction);
+    aaudio_result_t result = mStreamInternal.open(builder);
+    if (result == AAUDIO_OK) {
+        mMixer.allocate(mStreamInternal.getSamplesPerFrame(), mStreamInternal.getFramesPerBurst());
+    }
+    return result;
+}
+
+aaudio_result_t AAudioServiceEndpoint::close() {
+    return mStreamInternal.close();
+}
+
+// TODO, maybe use an interface to reduce exposure
+aaudio_result_t AAudioServiceEndpoint::registerStream(AAudioServiceStreamShared *sharedStream) {
+    ALOGD("AAudioServiceEndpoint::registerStream(%p)", sharedStream);
+    // TODO use real-time technique to avoid mutex, eg. atomic command FIFO
+    std::lock_guard<std::mutex> lock(mLockStreams);
+    mRegisteredStreams.push_back(sharedStream);
+    return AAUDIO_OK;
+}
+
+aaudio_result_t AAudioServiceEndpoint::unregisterStream(AAudioServiceStreamShared *sharedStream) {
+    ALOGD("AAudioServiceEndpoint::unregisterStream(%p)", sharedStream);
+    std::lock_guard<std::mutex> lock(mLockStreams);
+    mRegisteredStreams.erase(std::remove(mRegisteredStreams.begin(), mRegisteredStreams.end(), sharedStream),
+              mRegisteredStreams.end());
+    return AAUDIO_OK;
+}
+
+aaudio_result_t AAudioServiceEndpoint::startStream(AAudioServiceStreamShared *sharedStream) {
+    // TODO use real-time technique to avoid mutex, eg. atomic command FIFO
+    ALOGD("AAudioServiceEndpoint(): startStream() entering");
+    std::lock_guard<std::mutex> lock(mLockStreams);
+    mRunningStreams.push_back(sharedStream);
+    if (mRunningStreams.size() == 1) {
+        startMixer_l();
+    }
+    return AAUDIO_OK;
+}
+
+aaudio_result_t AAudioServiceEndpoint::stopStream(AAudioServiceStreamShared *sharedStream) {
+    std::lock_guard<std::mutex> lock(mLockStreams);
+    mRunningStreams.erase(std::remove(mRunningStreams.begin(), mRunningStreams.end(), sharedStream),
+              mRunningStreams.end());
+    if (mRunningStreams.size() == 0) {
+        stopMixer_l();
+    }
+    return AAUDIO_OK;
+}
+
+static void *aaudio_mixer_thread_proc(void *context) {
+    AAudioServiceEndpoint *stream = (AAudioServiceEndpoint *) context;
+    //LOGD("AudioStreamAAudio(): oboe_callback_thread, stream = %p", stream);
+    if (stream != NULL) {
+        return stream->callbackLoop();
+    } else {
+        return NULL;
+    }
+}
+
+// Render audio in the application callback and then write the data to the stream.
+void *AAudioServiceEndpoint::callbackLoop() {
+    aaudio_result_t result = AAUDIO_OK;
+
+    ALOGD("AAudioServiceEndpoint(): callbackLoop() entering");
+
+    result = mStreamInternal.requestStart();
+    ALOGD("AAudioServiceEndpoint(): callbackLoop() after requestStart()  %d, isPlaying() = %d",
+          result, (int) mStreamInternal.isPlaying());
+
+    // result might be a frame count
+    while (mCallbackEnabled.load() && mStreamInternal.isPlaying() && (result >= 0)) {
+        // Mix data from each active stream.
+        {
+            mMixer.clear();
+            std::lock_guard<std::mutex> lock(mLockStreams);
+            for(AAudioServiceStreamShared *sharedStream : mRunningStreams) {
+                FifoBuffer *fifo = sharedStream->getDataFifoBuffer();
+                float volume = 0.5; // TODO get from system
+                mMixer.mix(fifo, volume);
+            }
+        }
+
+        // Write audio data to stream using a blocking write.
+        ALOGD("AAudioServiceEndpoint(): callbackLoop() write(%d)", getFramesPerBurst());
+        int64_t timeoutNanos = calculateReasonableTimeout(mStreamInternal.getFramesPerBurst());
+        result = mStreamInternal.write(mMixer.getOutputBuffer(), getFramesPerBurst(), timeoutNanos);
+        if (result == AAUDIO_ERROR_DISCONNECTED) {
+            disconnectRegisteredStreams();
+            break;
+        } else if (result != getFramesPerBurst()) {
+            ALOGW("AAudioServiceEndpoint(): callbackLoop() wrote %d / %d",
+                  result, getFramesPerBurst());
+            break;
+        }
+    }
+
+    ALOGD("AAudioServiceEndpoint(): callbackLoop() exiting, result = %d, isPlaying() = %d",
+          result, (int) mStreamInternal.isPlaying());
+
+    result = mStreamInternal.requestStop();
+
+    return NULL; // TODO review
+}
+
+aaudio_result_t AAudioServiceEndpoint::startMixer_l() {
+    // Launch the callback loop thread.
+    int64_t periodNanos = mStreamInternal.getFramesPerBurst()
+                          * AAUDIO_NANOS_PER_SECOND
+                          / getSampleRate();
+    mCallbackEnabled.store(true);
+    return mStreamInternal.createThread(periodNanos, aaudio_mixer_thread_proc, this);
+}
+
+aaudio_result_t AAudioServiceEndpoint::stopMixer_l() {
+    mCallbackEnabled.store(false);
+    return mStreamInternal.joinThread(NULL, calculateReasonableTimeout(mStreamInternal.getFramesPerBurst()));
+}
+
+// TODO Call method in AudioStreamInternal when that callback CL is merged.
+int64_t AAudioServiceEndpoint::calculateReasonableTimeout(int32_t framesPerOperation) {
+
+    // Wait for several burst periods, but no less than one second, for the operation to complete.
+    int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS * framesPerOperation * AAUDIO_NANOS_PER_SECOND)
+                                 / getSampleRate();
+    if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) { // enforce a one-second floor
+        timeoutNanoseconds = MIN_TIMEOUT_NANOS;
+    }
+    return timeoutNanoseconds;
+}
+
+void AAudioServiceEndpoint::disconnectRegisteredStreams() {
+    std::lock_guard<std::mutex> lock(mLockStreams);
+    for(AAudioServiceStreamShared *sharedStream : mRunningStreams) {
+        sharedStream->onStop();
+    }
+    mRunningStreams.clear();
+    for(AAudioServiceStreamShared *sharedStream : mRegisteredStreams) {
+        sharedStream->onDisconnect();
+    }
+    mRegisteredStreams.clear();
+}
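For a sense of scale in calculateReasonableTimeout(): assuming, for illustration, a 48 kHz stream with 192-frame bursts, MIN_TIMEOUT_OPERATIONS (4) burst periods come to 4 * 192 / 48000 s = 16 ms, which is well below MIN_TIMEOUT_NANOS (one second), so the function returns one second; only very large bursts or very low sample rates push the timeout above that floor.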
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
new file mode 100644
index 0000000..020d38a
--- /dev/null
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_SERVICE_ENDPOINT_H
+#define AAUDIO_SERVICE_ENDPOINT_H
+
+#include <atomic>
+#include <functional>
+#include <mutex>
+#include <vector>
+
+#include "client/AudioStreamInternal.h"
+#include "binding/AAudioServiceMessage.h"
+#include "AAudioServiceStreamShared.h"
+#include "AAudioServiceStreamMMAP.h"
+#include "AAudioMixer.h"
+#include "AAudioService.h"
+
+namespace aaudio {
+
+class AAudioServiceEndpoint {
+public:
+    explicit AAudioServiceEndpoint(android::AAudioService &audioService);
+    virtual ~AAudioServiceEndpoint();
+
+    aaudio_result_t open(int32_t deviceId, aaudio_direction_t direction);
+
+    int32_t getSampleRate() const { return mStreamInternal.getSampleRate(); }
+    int32_t getSamplesPerFrame() const { return mStreamInternal.getSamplesPerFrame();  }
+    int32_t getFramesPerBurst() const { return mStreamInternal.getFramesPerBurst();  }
+
+    aaudio_result_t registerStream(AAudioServiceStreamShared *sharedStream);
+    aaudio_result_t unregisterStream(AAudioServiceStreamShared *sharedStream);
+    aaudio_result_t startStream(AAudioServiceStreamShared *sharedStream);
+    aaudio_result_t stopStream(AAudioServiceStreamShared *sharedStream);
+    aaudio_result_t close();
+
+    int32_t getDeviceId() const { return mStreamInternal.getDeviceId(); }
+
+    aaudio_direction_t getDirection() const { return mStreamInternal.getDirection(); }
+
+    void disconnectRegisteredStreams();
+
+    void *callbackLoop();
+
+private:
+    aaudio_result_t startMixer_l();
+    aaudio_result_t stopMixer_l();
+
+    int64_t calculateReasonableTimeout(int32_t framesPerOperation);
+
+    AudioStreamInternal      mStreamInternal;
+    AAudioMixer              mMixer;
+    AAudioServiceStreamMMAP  mStreamMMAP;
+
+    std::atomic<bool>        mCallbackEnabled;
+
+    std::mutex               mLockStreams;
+    std::vector<AAudioServiceStreamShared *> mRegisteredStreams;
+    std::vector<AAudioServiceStreamShared *> mRunningStreams;
+};
+
+} /* namespace aaudio */
+
+
+#endif //AAUDIO_SERVICE_ENDPOINT_H
diff --git a/services/oboeservice/AAudioServiceMain.cpp b/services/oboeservice/AAudioServiceMain.cpp
deleted file mode 100644
index aa89180..0000000
--- a/services/oboeservice/AAudioServiceMain.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "AAudioService"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include <stdint.h>
-#include <math.h>
-
-#include <utils/RefBase.h>
-#include <binder/TextOutput.h>
-
-#include <binder/IInterface.h>
-#include <binder/IBinder.h>
-#include <binder/ProcessState.h>
-#include <binder/IServiceManager.h>
-#include <binder/IPCThreadState.h>
-
-#include <cutils/ashmem.h>
-#include <sys/mman.h>
-
-#include "AAudioServiceDefinitions.h"
-#include "IAAudioService.h"
-#include "AAudioService.h"
-
-using namespace android;
-using namespace aaudio;
-
-/**
- * This is used to test the AAudioService as a standalone application.
- * It is not used when the AAudioService is integrated with AudioFlinger.
- */
-int main(int argc, char **argv) {
-    printf("Test AAudioService %s\n", argv[1]);
-    ALOGD("This is the AAudioService");
-
-    defaultServiceManager()->addService(String16("AAudioService"), new AAudioService());
-    android::ProcessState::self()->startThreadPool();
-    printf("AAudioService service is now ready\n");
-    IPCThreadState::self()->joinThreadPool();
-    printf("AAudioService service thread joined\n");
-
-    return 0;
-}
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index a7938dc..b15043d 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -18,43 +18,138 @@
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
-#include "IAAudioService.h"
-#include "AAudioServiceDefinitions.h"
-#include "AAudioServiceStreamBase.h"
-#include "AudioEndpointParcelable.h"
+#include <mutex>
 
-using namespace android;
-using namespace aaudio;
+#include "binding/IAAudioService.h"
+#include "binding/AAudioServiceMessage.h"
+#include "utility/AudioClock.h"
+
+#include "AAudioServiceStreamBase.h"
+#include "TimestampScheduler.h"
+
+using namespace android;  // TODO just import names needed
+using namespace aaudio;   // TODO just import names needed
 
 /**
- * Construct the AudioCommandQueues and the AudioDataQueue
- * and fill in the endpoint parcelable.
+ * Base class for streams in the service.
+ * It manages the upward message queue and the timestamp thread.
  */
 
 AAudioServiceStreamBase::AAudioServiceStreamBase()
         : mUpMessageQueue(nullptr)
-{
-    // TODO could fail so move out of constructor
-    mUpMessageQueue = new SharedRingBuffer();
-    mUpMessageQueue->allocate(sizeof(AAudioServiceMessage), QUEUE_UP_CAPACITY_COMMANDS);
+        , mAAudioThread() {
 }
 
 AAudioServiceStreamBase::~AAudioServiceStreamBase() {
-    Mutex::Autolock _l(mLockUpMessageQueue);
-    delete mUpMessageQueue;
+    close();
 }
 
-void AAudioServiceStreamBase::sendServiceEvent(aaudio_service_event_t event,
-                              int32_t data1,
-                              int64_t data2) {
+aaudio_result_t AAudioServiceStreamBase::open(const aaudio::AAudioStreamRequest &request,
+                     aaudio::AAudioStreamConfiguration &configurationOutput) {
+    std::lock_guard<std::mutex> lock(mLockUpMessageQueue);
+    if (mUpMessageQueue != nullptr) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    } else {
+        mUpMessageQueue = new SharedRingBuffer();
+        return mUpMessageQueue->allocate(sizeof(AAudioServiceMessage), QUEUE_UP_CAPACITY_COMMANDS);
+    }
+}
 
-    Mutex::Autolock _l(mLockUpMessageQueue);
+aaudio_result_t AAudioServiceStreamBase::close() {
+    std::lock_guard<std::mutex> lock(mLockUpMessageQueue);
+    delete mUpMessageQueue;
+    mUpMessageQueue = nullptr;
+    return AAUDIO_OK;
+}
+
+aaudio_result_t AAudioServiceStreamBase::start() {
+    sendServiceEvent(AAUDIO_SERVICE_EVENT_STARTED);
+    mState = AAUDIO_STREAM_STATE_STARTED;
+    mThreadEnabled.store(true);
+    return mAAudioThread.start(this);
+}
+
+aaudio_result_t AAudioServiceStreamBase::pause() {
+
+    sendCurrentTimestamp();
+    mThreadEnabled.store(false);
+    aaudio_result_t result = mAAudioThread.stop();
+    if (result != AAUDIO_OK) {
+        processError();
+        return result;
+    }
+    sendServiceEvent(AAUDIO_SERVICE_EVENT_PAUSED);
+    mState = AAUDIO_STREAM_STATE_PAUSED;
+    return result;
+}
+
+// implement Runnable
+void AAudioServiceStreamBase::run() {
+    ALOGD("AAudioServiceStreamMMAP::run() entering ----------------");
+    TimestampScheduler timestampScheduler;
+    timestampScheduler.setBurstPeriod(mFramesPerBurst, mSampleRate);
+    timestampScheduler.start(AudioClock::getNanoseconds());
+    int64_t nextTime = timestampScheduler.nextAbsoluteTime();
+    while(mThreadEnabled.load()) {
+        if (AudioClock::getNanoseconds() >= nextTime) {
+            aaudio_result_t result = sendCurrentTimestamp();
+            if (result != AAUDIO_OK) {
+                break;
+            }
+            nextTime = timestampScheduler.nextAbsoluteTime();
+        } else  {
+            // Sleep until it is time to send the next timestamp.
+            AudioClock::sleepUntilNanoTime(nextTime);
+        }
+    }
+    ALOGD("AAudioServiceStreamMMAP::run() exiting ----------------");
+}
+
+void AAudioServiceStreamBase::processError() {
+    sendServiceEvent(AAUDIO_SERVICE_EVENT_DISCONNECTED);
+}
+
+aaudio_result_t AAudioServiceStreamBase::sendServiceEvent(aaudio_service_event_t event,
+                                               double  dataDouble,
+                                               int64_t dataLong) {
     AAudioServiceMessage command;
     command.what = AAudioServiceMessage::code::EVENT;
     command.event.event = event;
-    command.event.data1 = data1;
-    command.event.data2 = data2;
-    mUpMessageQueue->getFifoBuffer()->write(&command, 1);
+    command.event.dataDouble = dataDouble;
+    command.event.dataLong = dataLong;
+    return writeUpMessageQueue(&command);
+}
+
+aaudio_result_t AAudioServiceStreamBase::writeUpMessageQueue(AAudioServiceMessage *command) {
+    std::lock_guard<std::mutex> lock(mLockUpMessageQueue);
+    int32_t count = mUpMessageQueue->getFifoBuffer()->write(command, 1);
+    if (count != 1) {
+        ALOGE("writeUpMessageQueue(): Queue full. Did client die?");
+        return AAUDIO_ERROR_WOULD_BLOCK;
+    } else {
+        return AAUDIO_OK;
+    }
+}
+
+aaudio_result_t AAudioServiceStreamBase::sendCurrentTimestamp() {
+    AAudioServiceMessage command;
+    aaudio_result_t result = getFreeRunningPosition(&command.timestamp.position,
+                                                    &command.timestamp.timestamp);
+    if (result == AAUDIO_OK) {
+        command.what = AAudioServiceMessage::code::TIMESTAMP;
+        result = writeUpMessageQueue(&command);
+    }
+    return result;
 }
 
 
+/**
+ * Get an immutable description of the in-memory queues
+ * used to communicate with the underlying HAL or Service.
+ */
+aaudio_result_t AAudioServiceStreamBase::getDescription(AudioEndpointParcelable &parcelable) {
+    // Gather information on the message queue.
+    mUpMessageQueue->fillParcelable(parcelable,
+                                    parcelable.mUpMessageQueueParcelable);
+    return getDownDataDescription(parcelable);
+}
\ No newline at end of file
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 7a812f9..91eec35 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -17,13 +17,15 @@
 #ifndef AAUDIO_AAUDIO_SERVICE_STREAM_BASE_H
 #define AAUDIO_AAUDIO_SERVICE_STREAM_BASE_H
 
-#include <utils/Mutex.h>
+#include <mutex>
 
-#include "IAAudioService.h"
-#include "AAudioServiceDefinitions.h"
 #include "fifo/FifoBuffer.h"
+#include "binding/IAAudioService.h"
+#include "binding/AudioEndpointParcelable.h"
+#include "binding/AAudioServiceMessage.h"
+#include "utility/AAudioUtilities.h"
+
 #include "SharedRingBuffer.h"
-#include "AudioEndpointParcelable.h"
 #include "AAudioThread.h"
 
 namespace aaudio {
@@ -32,7 +34,11 @@
 // This should be way more than we need.
 #define QUEUE_UP_CAPACITY_COMMANDS (128)
 
-class AAudioServiceStreamBase {
+/**
+ * Base class for a stream in the AAudio service.
+ */
+class AAudioServiceStreamBase
+    : public Runnable  {
 
 public:
     AAudioServiceStreamBase();
@@ -42,16 +48,14 @@
         ILLEGAL_THREAD_ID = 0
     };
 
-    /**
-     * Fill in a parcelable description of stream.
-     */
-    virtual aaudio_result_t getDescription(aaudio::AudioEndpointParcelable &parcelable) = 0;
-
+    // -------------------------------------------------------------------
     /**
      * Open the device.
      */
-    virtual aaudio_result_t open(aaudio::AAudioStreamRequest &request,
-                               aaudio::AAudioStreamConfiguration &configuration) = 0;
+    virtual aaudio_result_t open(const aaudio::AAudioStreamRequest &request,
+                                 aaudio::AAudioStreamConfiguration &configurationOutput) = 0;
+
+    virtual aaudio_result_t close();
 
     /**
      * Start the flow of data.
@@ -68,39 +72,69 @@
      */
     virtual aaudio_result_t flush() = 0;
 
-    virtual aaudio_result_t close() = 0;
+    // -------------------------------------------------------------------
 
-    virtual void sendCurrentTimestamp() = 0;
+    /**
+     * Send a message to the client.
+     */
+    aaudio_result_t sendServiceEvent(aaudio_service_event_t event,
+                                     double  dataDouble = 0.0,
+                                     int64_t dataLong = 0);
 
-    int32_t getFramesPerBurst() {
-        return mFramesPerBurst;
-    }
+    /**
+     * Fill in a parcelable description of stream.
+     */
+    aaudio_result_t getDescription(AudioEndpointParcelable &parcelable);
 
-    virtual void sendServiceEvent(aaudio_service_event_t event,
-                                  int32_t data1 = 0,
-                                  int64_t data2 = 0);
 
-    virtual void setRegisteredThread(pid_t pid) {
+    void setRegisteredThread(pid_t pid) {
         mRegisteredClientThread = pid;
     }
 
-    virtual pid_t getRegisteredThread() {
+    pid_t getRegisteredThread() const {
         return mRegisteredClientThread;
     }
 
+    int32_t getFramesPerBurst() const {
+        return mFramesPerBurst;
+    }
+
+    int32_t calculateBytesPerFrame() const {
+        return mSamplesPerFrame * AAudioConvert_formatToSizeInBytes(mAudioFormat);
+    }
+
+    void run() override; // to implement Runnable
+
+    void processError();
+
 protected:
+    aaudio_result_t writeUpMessageQueue(AAudioServiceMessage *command);
+
+    aaudio_result_t sendCurrentTimestamp();
+
+    virtual aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) = 0;
+
+    virtual aaudio_result_t getDownDataDescription(AudioEndpointParcelable &parcelable) = 0;
+
+    aaudio_stream_state_t               mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
 
     pid_t              mRegisteredClientThread = ILLEGAL_THREAD_ID;
 
     SharedRingBuffer*  mUpMessageQueue;
+    std::mutex         mLockUpMessageQueue;
 
-    int32_t            mSampleRate = 0;
-    int32_t            mBytesPerFrame = 0;
+    AAudioThread        mAAudioThread;
+    // This is used by one thread to tell another thread to exit. So it must be atomic.
+    std::atomic<bool>   mThreadEnabled;
+
+
+    int                mAudioDataFileDescriptor = -1;
+
+    aaudio_audio_format_t mAudioFormat = AAUDIO_FORMAT_UNSPECIFIED;
     int32_t            mFramesPerBurst = 0;
-    int32_t            mCapacityInFrames = 0;
-    int32_t            mCapacityInBytes = 0;
-
-    android::Mutex     mLockUpMessageQueue;
+    int32_t            mSamplesPerFrame = AAUDIO_UNSPECIFIED;
+    int32_t            mSampleRate = AAUDIO_UNSPECIFIED;
+    int32_t            mCapacityInFrames = AAUDIO_UNSPECIFIED;
 };
 
 } /* namespace aaudio */
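As a worked example of calculateBytesPerFrame(): assuming a stereo stream in AAUDIO_FORMAT_PCM_I16, mSamplesPerFrame is 2 and AAudioConvert_formatToSizeInBytes() returns 2, so each frame occupies 2 * 2 = 4 bytes.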
diff --git a/services/oboeservice/AAudioServiceStreamExclusive.h b/services/oboeservice/AAudioServiceStreamExclusive.h
new file mode 100644
index 0000000..db382a3
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamExclusive.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_SERVICE_STREAM_EXCLUSIVE_H
+#define AAUDIO_AAUDIO_SERVICE_STREAM_EXCLUSIVE_H
+
+#include "AAudioServiceStreamMMAP.h"
+
+namespace aaudio {
+
+/**
+ * Exclusive mode stream in the AAudio service.
+ *
+ * This is currently a stub.
+ * We may move code from AAudioServiceStreamMMAP into this class.
+ * If not, then it will be removed.
+ */
+class AAudioServiceStreamExclusive : public AAudioServiceStreamMMAP {
+
+public:
+    AAudioServiceStreamExclusive() {};
+    virtual ~AAudioServiceStreamExclusive() = default;
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_AAUDIO_SERVICE_STREAM_EXCLUSIVE_H
diff --git a/services/oboeservice/AAudioServiceStreamFakeHal.cpp b/services/oboeservice/AAudioServiceStreamFakeHal.cpp
deleted file mode 100644
index 71d3542..0000000
--- a/services/oboeservice/AAudioServiceStreamFakeHal.cpp
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "AAudioService"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include <atomic>
-
-#include "AudioClock.h"
-#include "AudioEndpointParcelable.h"
-
-#include "AAudioServiceStreamBase.h"
-#include "AAudioServiceStreamFakeHal.h"
-
-#include "FakeAudioHal.h"
-
-using namespace android;
-using namespace aaudio;
-
-// HACK values for Marlin
-#define CARD_ID              0
-#define DEVICE_ID            19
-
-/**
- * Construct the audio message queuues and message queues.
- */
-
-AAudioServiceStreamFakeHal::AAudioServiceStreamFakeHal()
-        : AAudioServiceStreamBase()
-        , mStreamId(nullptr)
-        , mPreviousFrameCounter(0)
-        , mAAudioThread()
-{
-}
-
-AAudioServiceStreamFakeHal::~AAudioServiceStreamFakeHal() {
-    ALOGD("AAudioServiceStreamFakeHal::~AAudioServiceStreamFakeHal() call close()");
-    close();
-}
-
-aaudio_result_t AAudioServiceStreamFakeHal::open(aaudio::AAudioStreamRequest &request,
-                                       aaudio::AAudioStreamConfiguration &configurationOutput) {
-    // Open stream on HAL and pass information about the ring buffer to the client.
-    mmap_buffer_info mmapInfo;
-    aaudio_result_t error;
-
-    // Open HAL
-    int bufferCapacity = request.getConfiguration().getBufferCapacity();
-    error = fake_hal_open(CARD_ID, DEVICE_ID, bufferCapacity, &mStreamId);
-    if(error < 0) {
-        ALOGE("Could not open card %d, device %d", CARD_ID, DEVICE_ID);
-        return error;
-    }
-
-    // Get information about the shared audio buffer.
-    error = fake_hal_get_mmap_info(mStreamId, &mmapInfo);
-    if (error < 0) {
-        ALOGE("fake_hal_get_mmap_info returned %d", error);
-        fake_hal_close(mStreamId);
-        mStreamId = nullptr;
-        return error;
-    }
-    mHalFileDescriptor = mmapInfo.fd;
-    mFramesPerBurst = mmapInfo.burst_size_in_frames;
-    mCapacityInFrames = mmapInfo.buffer_capacity_in_frames;
-    mCapacityInBytes = mmapInfo.buffer_capacity_in_bytes;
-    mSampleRate = mmapInfo.sample_rate;
-    mBytesPerFrame = mmapInfo.channel_count * sizeof(int16_t); // FIXME based on data format
-    ALOGD("AAudioServiceStreamFakeHal::open() mmapInfo.burst_size_in_frames = %d",
-         mmapInfo.burst_size_in_frames);
-    ALOGD("AAudioServiceStreamFakeHal::open() mmapInfo.buffer_capacity_in_frames = %d",
-         mmapInfo.buffer_capacity_in_frames);
-    ALOGD("AAudioServiceStreamFakeHal::open() mmapInfo.buffer_capacity_in_bytes = %d",
-         mmapInfo.buffer_capacity_in_bytes);
-
-    // Fill in AAudioStreamConfiguration
-    configurationOutput.setSampleRate(mSampleRate);
-    configurationOutput.setSamplesPerFrame(mmapInfo.channel_count);
-    configurationOutput.setAudioFormat(AAUDIO_FORMAT_PCM_I16);
-
-    return AAUDIO_OK;
-}
-
-/**
- * Get an immutable description of the in-memory queues
- * used to communicate with the underlying HAL or Service.
- */
-aaudio_result_t AAudioServiceStreamFakeHal::getDescription(AudioEndpointParcelable &parcelable) {
-    // Gather information on the message queue.
-    mUpMessageQueue->fillParcelable(parcelable,
-                                    parcelable.mUpMessageQueueParcelable);
-
-    // Gather information on the data queue.
-    // TODO refactor into a SharedRingBuffer?
-    int fdIndex = parcelable.addFileDescriptor(mHalFileDescriptor, mCapacityInBytes);
-    parcelable.mDownDataQueueParcelable.setupMemory(fdIndex, 0, mCapacityInBytes);
-    parcelable.mDownDataQueueParcelable.setBytesPerFrame(mBytesPerFrame);
-    parcelable.mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
-    parcelable.mDownDataQueueParcelable.setCapacityInFrames(mCapacityInFrames);
-    return AAUDIO_OK;
-}
-
-/**
- * Start the flow of data.
- */
-aaudio_result_t AAudioServiceStreamFakeHal::start() {
-    if (mStreamId == nullptr) return AAUDIO_ERROR_NULL;
-    aaudio_result_t result = fake_hal_start(mStreamId);
-    sendServiceEvent(AAUDIO_SERVICE_EVENT_STARTED);
-    mState = AAUDIO_STREAM_STATE_STARTED;
-    if (result == AAUDIO_OK) {
-        mThreadEnabled.store(true);
-        result = mAAudioThread.start(this);
-    }
-    return result;
-}
-
-/**
- * Stop the flow of data such that start() can resume with loss of data.
- */
-aaudio_result_t AAudioServiceStreamFakeHal::pause() {
-    if (mStreamId == nullptr) return AAUDIO_ERROR_NULL;
-    sendCurrentTimestamp();
-    aaudio_result_t result = fake_hal_pause(mStreamId);
-    sendServiceEvent(AAUDIO_SERVICE_EVENT_PAUSED);
-    mState = AAUDIO_STREAM_STATE_PAUSED;
-    mFramesRead.reset32();
-    ALOGD("AAudioServiceStreamFakeHal::pause() sent AAUDIO_SERVICE_EVENT_PAUSED");
-    mThreadEnabled.store(false);
-    result = mAAudioThread.stop();
-    return result;
-}
-
-/**
- *  Discard any data held by the underlying HAL or Service.
- */
-aaudio_result_t AAudioServiceStreamFakeHal::flush() {
-    if (mStreamId == nullptr) return AAUDIO_ERROR_NULL;
-    // TODO how do we flush an MMAP/NOIRQ buffer? sync pointers?
-    ALOGD("AAudioServiceStreamFakeHal::pause() send AAUDIO_SERVICE_EVENT_FLUSHED");
-    sendServiceEvent(AAUDIO_SERVICE_EVENT_FLUSHED);
-    mState = AAUDIO_STREAM_STATE_FLUSHED;
-    return AAUDIO_OK;
-}
-
-aaudio_result_t AAudioServiceStreamFakeHal::close() {
-    aaudio_result_t result = AAUDIO_OK;
-    if (mStreamId != nullptr) {
-        result = fake_hal_close(mStreamId);
-        mStreamId = nullptr;
-    }
-    return result;
-}
-
-void AAudioServiceStreamFakeHal::sendCurrentTimestamp() {
-    int frameCounter = 0;
-    int error = fake_hal_get_frame_counter(mStreamId, &frameCounter);
-    if (error < 0) {
-        ALOGE("AAudioServiceStreamFakeHal::sendCurrentTimestamp() error %d",
-                error);
-    } else if (frameCounter != mPreviousFrameCounter) {
-        AAudioServiceMessage command;
-        command.what = AAudioServiceMessage::code::TIMESTAMP;
-        mFramesRead.update32(frameCounter);
-        command.timestamp.position = mFramesRead.get();
-        ALOGD("AAudioServiceStreamFakeHal::sendCurrentTimestamp() HAL frames = %d, pos = %d",
-                frameCounter, (int)mFramesRead.get());
-        command.timestamp.timestamp = AudioClock::getNanoseconds();
-        mUpMessageQueue->getFifoBuffer()->write(&command, 1);
-        mPreviousFrameCounter = frameCounter;
-    }
-}
-
-// implement Runnable
-void AAudioServiceStreamFakeHal::run() {
-    TimestampScheduler timestampScheduler;
-    timestampScheduler.setBurstPeriod(mFramesPerBurst, mSampleRate);
-    timestampScheduler.start(AudioClock::getNanoseconds());
-    while(mThreadEnabled.load()) {
-        int64_t nextTime = timestampScheduler.nextAbsoluteTime();
-        if (AudioClock::getNanoseconds() >= nextTime) {
-            sendCurrentTimestamp();
-        } else  {
-            // Sleep until it is time to send the next timestamp.
-            AudioClock::sleepUntilNanoTime(nextTime);
-        }
-    }
-}
-
diff --git a/services/oboeservice/AAudioServiceStreamFakeHal.h b/services/oboeservice/AAudioServiceStreamFakeHal.h
deleted file mode 100644
index e9480fb..0000000
--- a/services/oboeservice/AAudioServiceStreamFakeHal.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef AAUDIO_AAUDIO_SERVICE_STREAM_FAKE_HAL_H
-#define AAUDIO_AAUDIO_SERVICE_STREAM_FAKE_HAL_H
-
-#include "AAudioServiceDefinitions.h"
-#include "AAudioServiceStreamBase.h"
-#include "FakeAudioHal.h"
-#include "MonotonicCounter.h"
-#include "AudioEndpointParcelable.h"
-#include "TimestampScheduler.h"
-
-namespace aaudio {
-
-class AAudioServiceStreamFakeHal
-    : public AAudioServiceStreamBase
-    , public Runnable {
-
-public:
-    AAudioServiceStreamFakeHal();
-    virtual ~AAudioServiceStreamFakeHal();
-
-    virtual aaudio_result_t getDescription(AudioEndpointParcelable &parcelable) override;
-
-    virtual aaudio_result_t open(aaudio::AAudioStreamRequest &request,
-                                 aaudio::AAudioStreamConfiguration &configurationOutput) override;
-
-    /**
-     * Start the flow of data.
-     */
-    virtual aaudio_result_t start() override;
-
-    /**
-     * Stop the flow of data such that start() can resume with loss of data.
-     */
-    virtual aaudio_result_t pause() override;
-
-    /**
-     *  Discard any data held by the underlying HAL or Service.
-     */
-    virtual aaudio_result_t flush() override;
-
-    virtual aaudio_result_t close() override;
-
-    void sendCurrentTimestamp();
-
-    virtual void run() override; // to implement Runnable
-
-private:
-    fake_hal_stream_ptr    mStreamId; // Move to HAL
-
-    MonotonicCounter       mFramesWritten;
-    MonotonicCounter       mFramesRead;
-    int                    mHalFileDescriptor = -1;
-    int                    mPreviousFrameCounter = 0;   // from HAL
-
-    aaudio_stream_state_t    mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
-
-    AAudioThread             mAAudioThread;
-    std::atomic<bool>      mThreadEnabled;
-};
-
-} // namespace aaudio
-
-#endif //AAUDIO_AAUDIO_SERVICE_STREAM_FAKE_HAL_H
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
new file mode 100644
index 0000000..b70c625
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <atomic>
+#include <stdint.h>
+
+#include <utils/String16.h>
+#include <media/nbaio/AudioStreamOutSink.h>
+#include <media/MmapStreamInterface.h>
+
+#include "AAudioServiceStreamBase.h"
+#include "AAudioServiceStreamMMAP.h"
+#include "binding/AudioEndpointParcelable.h"
+#include "SharedMemoryProxy.h"
+#include "utility/AAudioUtilities.h"
+
+using namespace android;
+using namespace aaudio;
+
+#define AAUDIO_BUFFER_CAPACITY_MIN    (4 * 512)
+#define AAUDIO_SAMPLE_RATE_DEFAULT    48000
+
+/**
+ * Stream that uses an MMAP buffer.
+ */
+
+AAudioServiceStreamMMAP::AAudioServiceStreamMMAP()
+        : AAudioServiceStreamBase()
+        , mMmapStreamCallback(new MyMmapStreamCallback(*this))
+        , mPreviousFrameCounter(0)
+        , mMmapStream(nullptr) {
+}
+
+AAudioServiceStreamMMAP::~AAudioServiceStreamMMAP() {
+    close();
+}
+
+aaudio_result_t AAudioServiceStreamMMAP::close() {
+    ALOGD("AAudioServiceStreamMMAP::close() called, %p", mMmapStream.get());
+    mMmapStream.clear(); // TODO review. Is that all we have to do?
+    return AAudioServiceStreamBase::close();
+}
+
+// Open stream on HAL and pass information about the shared memory buffer back to the client.
+aaudio_result_t AAudioServiceStreamMMAP::open(const aaudio::AAudioStreamRequest &request,
+                                       aaudio::AAudioStreamConfiguration &configurationOutput) {
+    const audio_attributes_t attributes = {
+        .content_type = AUDIO_CONTENT_TYPE_MUSIC,
+        .usage = AUDIO_USAGE_MEDIA,
+        .source = AUDIO_SOURCE_DEFAULT,
+        .flags = AUDIO_FLAG_LOW_LATENCY,
+        .tags = ""
+    };
+    audio_config_base_t config;
+
+    aaudio_result_t result = AAudioServiceStreamBase::open(request, configurationOutput);
+    if (result != AAUDIO_OK) {
+        ALOGE("AAudioServiceStreamBase open returned %d", result);
+        return result;
+    }
+
+    const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
+    audio_port_handle_t deviceId = configurationInput.getDeviceId();
+
+    ALOGI("open request dump()");
+    request.dump();
+
+    mMmapClient.clientUid = request.getUserId();
+    mMmapClient.clientPid = request.getProcessId();
+    aaudio_direction_t direction = request.getDirection();
+
+    // Fill in config
+    aaudio_audio_format_t aaudioFormat = configurationInput.getAudioFormat();
+    if (aaudioFormat == AAUDIO_UNSPECIFIED || aaudioFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+        ALOGI("open forcing use of AAUDIO_FORMAT_PCM_I16");
+        aaudioFormat = AAUDIO_FORMAT_PCM_I16;
+    }
+    config.format = AAudioConvert_aaudioToAndroidDataFormat(aaudioFormat);
+
+    int32_t aaudioSampleRate = configurationInput.getSampleRate();
+    if (aaudioSampleRate == AAUDIO_UNSPECIFIED) {
+        aaudioSampleRate = AAUDIO_SAMPLE_RATE_DEFAULT;
+    }
+    config.sample_rate = aaudioSampleRate;
+
+    int32_t aaudioSamplesPerFrame = configurationInput.getSamplesPerFrame();
+
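+    // Derive the Android channel mask from the requested channel count,
+    // defaulting to stereo when the count is unspecified.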
+    if (direction == AAUDIO_DIRECTION_OUTPUT) {
+        config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
+                            ? AUDIO_CHANNEL_OUT_STEREO
+                            : audio_channel_out_mask_from_count(aaudioSamplesPerFrame);
+    } else if (direction == AAUDIO_DIRECTION_INPUT) {
+        config.channel_mask =  (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
+                            ? AUDIO_CHANNEL_IN_STEREO
+                            : audio_channel_in_mask_from_count(aaudioSamplesPerFrame);
+    } else {
+        ALOGE("openMmapStream - invalid direction = %d", direction);
+        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+    }
+
+    mMmapClient.packageName.setTo(String16("aaudio_service")); // FIXME what should we do here?
+
+    MmapStreamInterface::stream_direction_t streamDirection = (direction == AAUDIO_DIRECTION_OUTPUT)
+        ? MmapStreamInterface::DIRECTION_OUTPUT : MmapStreamInterface::DIRECTION_INPUT;
+
+    // Open HAL stream.
+    status_t status = MmapStreamInterface::openMmapStream(streamDirection,
+                                                          &attributes,
+                                                          &config,
+                                                          mMmapClient,
+                                                          &deviceId,
+                                                          mMmapStreamCallback,
+                                                          mMmapStream);
+    if (status != OK) {
+        ALOGE("openMmapStream returned status %d", status);
+        return AAUDIO_ERROR_UNAVAILABLE;
+    }
+
+    // Create MMAP/NOIRQ buffer.
+    int32_t minSizeFrames = configurationInput.getBufferCapacity();
+    if (minSizeFrames == 0) { // zero will get rejected
+        minSizeFrames = AAUDIO_BUFFER_CAPACITY_MIN;
+    }
+    status = mMmapStream->createMmapBuffer(minSizeFrames, &mMmapBufferinfo);
+    if (status != OK) {
+        ALOGE("%s: createMmapBuffer() returned status %d, return AAUDIO_ERROR_UNAVAILABLE",
+              __FILE__, status);
+        return AAUDIO_ERROR_UNAVAILABLE;
+    } else {
+        ALOGD("createMmapBuffer status %d shared_address = %p buffer_size %d burst_size %d",
+              status, mMmapBufferinfo.shared_memory_address,
+              mMmapBufferinfo.buffer_size_frames,
+              mMmapBufferinfo.burst_size_frames);
+    }
+
+    // Get information about the stream and pass it back to the caller.
+    mSamplesPerFrame = (direction == AAUDIO_DIRECTION_OUTPUT)
+                           ? audio_channel_count_from_out_mask(config.channel_mask)
+                           : audio_channel_count_from_in_mask(config.channel_mask);
+
+    mAudioDataFileDescriptor = mMmapBufferinfo.shared_memory_fd;
+    mFramesPerBurst = mMmapBufferinfo.burst_size_frames;
+    mCapacityInFrames = mMmapBufferinfo.buffer_size_frames;
+    mAudioFormat = AAudioConvert_androidToAAudioDataFormat(config.format);
+    mSampleRate = config.sample_rate;
+
+    // Fill in AAudioStreamConfiguration
+    configurationOutput.setSampleRate(mSampleRate);
+    configurationOutput.setSamplesPerFrame(mSamplesPerFrame);
+    configurationOutput.setAudioFormat(mAudioFormat);
+    configurationOutput.setDeviceId(deviceId);
+
+    return AAUDIO_OK;
+}
+
+
+/**
+ * Start the flow of data.
+ */
+aaudio_result_t AAudioServiceStreamMMAP::start() {
+    if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
+    aaudio_result_t result = mMmapStream->start(mMmapClient, &mPortHandle);
+    if (result != AAUDIO_OK) {
+        ALOGE("AAudioServiceStreamMMAP::start() mMmapStream->start() returned %d", result);
+        processError();
+    } else {
+        result = AAudioServiceStreamBase::start();
+    }
+    return result;
+}
+
+/**
+ * Stop the flow of data such that start() can resume without loss of data.
+ */
+aaudio_result_t AAudioServiceStreamMMAP::pause() {
+    if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
+
+    aaudio_result_t result1 = AAudioServiceStreamBase::pause();
+    aaudio_result_t result2 = mMmapStream->stop(mPortHandle);
+    mFramesRead.reset32();
+    return (result1 != AAUDIO_OK) ? result1 : result2;
+}
+
+/**
+ *  Discard any data held by the underlying HAL or Service.
+ */
+aaudio_result_t AAudioServiceStreamMMAP::flush() {
+    if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
+    // TODO how do we flush an MMAP/NOIRQ buffer? sync pointers?
+    ALOGD("AAudioServiceStreamMMAP::pause() send AAUDIO_SERVICE_EVENT_FLUSHED");
+    sendServiceEvent(AAUDIO_SERVICE_EVENT_FLUSHED);
+    mState = AAUDIO_STREAM_STATE_FLUSHED;
+    return AAUDIO_OK;
+}
+
+
+aaudio_result_t AAudioServiceStreamMMAP::getFreeRunningPosition(int64_t *positionFrames,
+                                                                int64_t *timeNanos) {
+    struct audio_mmap_position position;
+    if (mMmapStream == nullptr) {
+        processError();
+        return AAUDIO_ERROR_NULL;
+    }
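+    // MonotonicCounter extends the HAL's 32-bit frame counter to 64 bits so the
+    // reported position keeps increasing across counter wraparound.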
+    status_t status = mMmapStream->getMmapPosition(&position);
+    if (status != OK) {
+        ALOGE("sendCurrentTimestamp(): getMmapPosition() returned %d", status);
+        processError();
+        return AAudioConvert_androidToAAudioResult(status);
+    } else {
+        mFramesRead.update32(position.position_frames);
+        *positionFrames = mFramesRead.get();
+        *timeNanos = position.time_nanoseconds;
+    }
+    return AAUDIO_OK;
+}
+
+void AAudioServiceStreamMMAP::onTearDown() {
+    ALOGD("AAudioServiceStreamMMAP::onTearDown() called - TODO");
+}
+
+void AAudioServiceStreamMMAP::onVolumeChanged(audio_channel_mask_t channels,
+                                              android::Vector<float> values) {
+    // TODO do we really need a different volume for each channel?
+    float volume = values[0];
+    ALOGD("AAudioServiceStreamMMAP::onVolumeChanged() volume[0] = %f", volume);
+    sendServiceEvent(AAUDIO_SERVICE_EVENT_VOLUME, volume);
+}
+
+void AAudioServiceStreamMMAP::onRoutingChanged(audio_port_handle_t deviceId) {
+    ALOGD("AAudioServiceStreamMMAP::onRoutingChanged() called with %d, old = %d",
+          deviceId, mPortHandle);
+    if (mPortHandle > 0 && mPortHandle != deviceId) {
+        sendServiceEvent(AAUDIO_SERVICE_EVENT_DISCONNECTED);
+    }
+    mPortHandle = deviceId;
+}
+
+/**
+ * Get an immutable description of the data queue from the HAL.
+ */
+aaudio_result_t AAudioServiceStreamMMAP::getDownDataDescription(AudioEndpointParcelable &parcelable)
+{
+    // Gather information on the data queue based on HAL info.
+    int32_t bytesPerFrame = calculateBytesPerFrame();
+    int32_t capacityInBytes = mCapacityInFrames * bytesPerFrame;
+    int fdIndex = parcelable.addFileDescriptor(mAudioDataFileDescriptor, capacityInBytes);
+    parcelable.mDownDataQueueParcelable.setupMemory(fdIndex, 0, capacityInBytes);
+    parcelable.mDownDataQueueParcelable.setBytesPerFrame(bytesPerFrame);
+    parcelable.mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
+    parcelable.mDownDataQueueParcelable.setCapacityInFrames(mCapacityInFrames);
+    return AAUDIO_OK;
+}
\ No newline at end of file
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.h b/services/oboeservice/AAudioServiceStreamMMAP.h
new file mode 100644
index 0000000..f121c5c
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamMMAP.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_SERVICE_STREAM_MMAP_H
+#define AAUDIO_AAUDIO_SERVICE_STREAM_MMAP_H
+
+#include <atomic>
+
+#include <media/audiohal/StreamHalInterface.h>
+#include <media/MmapStreamCallback.h>
+#include <media/MmapStreamInterface.h>
+#include <utils/RefBase.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+
+#include "binding/AAudioServiceMessage.h"
+#include "AAudioServiceStreamBase.h"
+#include "binding/AudioEndpointParcelable.h"
+#include "SharedMemoryProxy.h"
+#include "TimestampScheduler.h"
+#include "utility/MonotonicCounter.h"
+
+namespace aaudio {
+
+/**
+ * Manage one memory mapped buffer that originated from a HAL.
+ */
+class AAudioServiceStreamMMAP
+    : public AAudioServiceStreamBase
+    , public android::MmapStreamCallback {
+
+public:
+    AAudioServiceStreamMMAP();
+    virtual ~AAudioServiceStreamMMAP();
+
+
+    aaudio_result_t open(const aaudio::AAudioStreamRequest &request,
+                                 aaudio::AAudioStreamConfiguration &configurationOutput) override;
+
+    /**
+     * Start the flow of audio data.
+     *
+     * This is not guaranteed to be synchronous but it currently is.
+     * An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
+     */
+    aaudio_result_t start() override;
+
+    /**
+     * Stop the flow of data so that start() can resume without loss of data.
+     *
+     * This is not guaranteed to be synchronous but it currently is.
+     * An AAUDIO_SERVICE_EVENT_PAUSED will be sent to the client when complete.
+     */
+    aaudio_result_t pause() override;
+
+    /**
+     *  Discard any data held by the underlying HAL or Service.
+     *
+     * This is not guaranteed to be synchronous but it currently is.
+     * An AAUDIO_SERVICE_EVENT_FLUSHED will be sent to the client when complete.
+     */
+    aaudio_result_t flush() override;
+
+    aaudio_result_t close() override;
+
+    /**
+     * Send a MMAP/NOIRQ buffer timestamp to the client.
+     */
+    aaudio_result_t sendCurrentTimestamp();
+
+    // -------------- Callback functions ---------------------
+    void onTearDown() override;
+
+    void onVolumeChanged(audio_channel_mask_t channels,
+                         android::Vector<float> values) override;
+
+    void onRoutingChanged(audio_port_handle_t deviceId) override;
+
+protected:
+
+    aaudio_result_t getDownDataDescription(AudioEndpointParcelable &parcelable) override;
+
+    aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
+
+private:
+    // This proxy class was needed to prevent a crash in AudioFlinger
+    // when the stream was closed.
+    class MyMmapStreamCallback : public android::MmapStreamCallback {
+    public:
+        explicit MyMmapStreamCallback(android::MmapStreamCallback &serviceCallback)
+            : mServiceCallback(serviceCallback){}
+        virtual ~MyMmapStreamCallback() = default;
+
+        void onTearDown() override {
+            mServiceCallback.onTearDown();
+        };
+
+        void onVolumeChanged(audio_channel_mask_t channels, android::Vector<float> values) override
+        {
+            mServiceCallback.onVolumeChanged(channels, values);
+        };
+
+        void onRoutingChanged(audio_port_handle_t deviceId) override {
+            mServiceCallback.onRoutingChanged(deviceId);
+        };
+
+    private:
+        android::MmapStreamCallback &mServiceCallback;
+    };
+
+    android::sp<MyMmapStreamCallback>   mMmapStreamCallback;
+    MonotonicCounter                    mFramesWritten;
+    MonotonicCounter                    mFramesRead;
+    int32_t                             mPreviousFrameCounter = 0;   // from HAL
+
+    // Interface to the AudioFlinger MMAP support.
+    android::sp<android::MmapStreamInterface> mMmapStream;
+    struct audio_mmap_buffer_info             mMmapBufferinfo;
+    android::MmapStreamInterface::Client      mMmapClient;
+    audio_port_handle_t                       mPortHandle = -1; // TODO review best default
+};
+
+} // namespace aaudio
+
+#endif //AAUDIO_AAUDIO_SERVICE_STREAM_MMAP_H
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
new file mode 100644
index 0000000..cd9336b
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <mutex>
+
+#include <aaudio/AAudio.h>
+
+#include "binding/IAAudioService.h"
+
+#include "binding/AAudioServiceMessage.h"
+#include "AAudioServiceStreamBase.h"
+#include "AAudioServiceStreamShared.h"
+#include "AAudioEndpointManager.h"
+#include "AAudioService.h"
+#include "AAudioServiceEndpoint.h"
+
+using namespace android;
+using namespace aaudio;
+
+#define MIN_BURSTS_PER_BUFFER   2
+#define MAX_BURSTS_PER_BUFFER   32
+
+AAudioServiceStreamShared::AAudioServiceStreamShared(AAudioService &audioService)
+    : mAudioService(audioService) {
+}
+
+AAudioServiceStreamShared::~AAudioServiceStreamShared() {
+    close();
+}
+
+aaudio_result_t AAudioServiceStreamShared::open(const aaudio::AAudioStreamRequest &request,
+                     aaudio::AAudioStreamConfiguration &configurationOutput)  {
+
+    aaudio_result_t result = AAudioServiceStreamBase::open(request, configurationOutput);
+    if (result != AAUDIO_OK) {
+        ALOGE("AAudioServiceStreamBase open returned %d", result);
+        return result;
+    }
+
+    const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
+    int32_t deviceId = configurationInput.getDeviceId();
+    aaudio_direction_t direction = request.getDirection();
+
+    ALOGD("AAudioServiceStreamShared::open(), direction = %d", direction);
+    AAudioEndpointManager &endpointManager = AAudioEndpointManager::getInstance();
+    mServiceEndpoint = endpointManager.findEndpoint(mAudioService, deviceId, direction);
+    ALOGD("AAudioServiceStreamShared::open(), mServiceEndpoint = %p", mServiceEndpoint);
+    if (mServiceEndpoint == nullptr) {
+        return AAUDIO_ERROR_UNAVAILABLE;
+    }
+
+    // Is the request compatible with the shared endpoint?
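+    // The shared path currently mixes in PCM float only, so reject other formats.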
+    mAudioFormat = configurationInput.getAudioFormat();
+    if (mAudioFormat == AAUDIO_FORMAT_UNSPECIFIED) {
+        mAudioFormat = AAUDIO_FORMAT_PCM_FLOAT;
+    } else if (mAudioFormat != AAUDIO_FORMAT_PCM_FLOAT) {
+        return AAUDIO_ERROR_INVALID_FORMAT;
+    }
+
+    mSampleRate = configurationInput.getSampleRate();
+    if (mSampleRate == AAUDIO_UNSPECIFIED) {
+        mSampleRate = mServiceEndpoint->getSampleRate();
+    } else if (mSampleRate != mServiceEndpoint->getSampleRate()) {
+        return AAUDIO_ERROR_INVALID_RATE;
+    }
+
+    mSamplesPerFrame = configurationInput.getSamplesPerFrame();
+    if (mSamplesPerFrame == AAUDIO_UNSPECIFIED) {
+        mSamplesPerFrame = mServiceEndpoint->getSamplesPerFrame();
+    } else if (mSamplesPerFrame != mServiceEndpoint->getSamplesPerFrame()) {
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+
+    // Determine this stream's shared memory buffer capacity.
+    mFramesPerBurst = mServiceEndpoint->getFramesPerBurst();
+    int32_t minCapacityFrames = configurationInput.getBufferCapacity();
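+    // Round the requested capacity up to a whole number of bursts,
+    // then clamp it to a reasonable range.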
+    int32_t numBursts = (minCapacityFrames + mFramesPerBurst - 1) / mFramesPerBurst;
+    if (numBursts < MIN_BURSTS_PER_BUFFER) {
+        numBursts = MIN_BURSTS_PER_BUFFER;
+    } else if (numBursts > MAX_BURSTS_PER_BUFFER) {
+        numBursts = MAX_BURSTS_PER_BUFFER;
+    }
+    mCapacityInFrames = numBursts * mFramesPerBurst;
+    ALOGD("AAudioServiceStreamShared::open(), mCapacityInFrames = %d", mCapacityInFrames);
+
+    // Create audio data shared memory buffer for client.
+    mAudioDataQueue = new SharedRingBuffer();
+    mAudioDataQueue->allocate(calculateBytesPerFrame(), mCapacityInFrames);
+
+    // Fill in configuration for client.
+    configurationOutput.setSampleRate(mSampleRate);
+    configurationOutput.setSamplesPerFrame(mSamplesPerFrame);
+    configurationOutput.setAudioFormat(mAudioFormat);
+    configurationOutput.setDeviceId(deviceId);
+
+    mServiceEndpoint->registerStream(this);
+
+    return AAUDIO_OK;
+}
+
+/**
+ * Start the flow of audio data.
+ *
+ * An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
+ */
+aaudio_result_t AAudioServiceStreamShared::start()  {
+    // Add this stream to the mixer.
+    aaudio_result_t result = mServiceEndpoint->startStream(this);
+    if (result != AAUDIO_OK) {
+        ALOGE("AAudioServiceStreamShared::start() mServiceEndpoint returned %d", result);
+        processError();
+    } else {
+        result = AAudioServiceStreamBase::start();
+    }
+    return result;
+}
+
+/**
+ * Stop the flow of data so that start() can resume without loss of data.
+ *
+ * An AAUDIO_SERVICE_EVENT_PAUSED will be sent to the client when complete.
+ */
+aaudio_result_t AAudioServiceStreamShared::pause()  {
+    // Remove this stream from the mixer.
+    aaudio_result_t result = mServiceEndpoint->stopStream(this);
+    if (result != AAUDIO_OK) {
+        ALOGE("AAudioServiceStreamShared::pause() mServiceEndpoint returned %d", result);
+        processError();
+    } else {
+        result = AAudioServiceStreamBase::pause();
+    }
+    return result;
+}
+
+/**
+ *  Discard any data held by the underlying HAL or Service.
+ *
+ * An AAUDIO_SERVICE_EVENT_FLUSHED will be sent to the client when complete.
+ */
+aaudio_result_t AAudioServiceStreamShared::flush()  {
+    // TODO make sure we are paused
+    return AAUDIO_OK;
+}
+
+aaudio_result_t AAudioServiceStreamShared::close()  {
+    if (mServiceEndpoint != nullptr) {
+        pause();
+        // TODO wait for pause() to synchronize
+        mServiceEndpoint->unregisterStream(this);
+        mServiceEndpoint->close();
+        mServiceEndpoint = nullptr;
+    }
+    return AAudioServiceStreamBase::close();
+}
+
+/**
+ * Get an immutable description of the data queue created by this service.
+ */
+aaudio_result_t AAudioServiceStreamShared::getDownDataDescription(AudioEndpointParcelable &parcelable)
+{
+    // Gather information on the data queue.
+    mAudioDataQueue->fillParcelable(parcelable,
+                                    parcelable.mDownDataQueueParcelable);
+    parcelable.mDownDataQueueParcelable.setFramesPerBurst(getFramesPerBurst());
+    return AAUDIO_OK;
+}
+
+void AAudioServiceStreamShared::onStop() {
+}
+
+void AAudioServiceStreamShared::onDisconnect() {
+    mServiceEndpoint->close();
+    mServiceEndpoint = nullptr;
+}
+
+
+aaudio_result_t AAudioServiceStreamShared::getFreeRunningPosition(int64_t *positionFrames,
+                                                                int64_t *timeNanos) {
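+    // For a shared stream, the mixer's read counter on the shared FIFO is the
+    // effective playback position; pair it with the current time from AudioClock.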
+    *positionFrames = mAudioDataQueue->getFifoBuffer()->getReadCounter();
+    *timeNanos = AudioClock::getNanoseconds();
+    return AAUDIO_OK;
+}
diff --git a/services/oboeservice/AAudioServiceStreamShared.h b/services/oboeservice/AAudioServiceStreamShared.h
new file mode 100644
index 0000000..f6df7ce
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamShared.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_SERVICE_STREAM_SHARED_H
+#define AAUDIO_AAUDIO_SERVICE_STREAM_SHARED_H
+
+#include "fifo/FifoBuffer.h"
+#include "binding/AAudioServiceMessage.h"
+#include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioStreamConfiguration.h"
+
+#include "AAudioService.h"
+#include "AAudioServiceStreamBase.h"
+
+namespace aaudio {
+
+// We expect the queue to only have a few commands.
+// This should be way more than we need.
+#define QUEUE_UP_CAPACITY_COMMANDS (128)
+
+class AAudioEndpointManager;
+class AAudioServiceEndpoint;
+class SharedRingBuffer;
+
+/**
+ * One of these is created for every MODE_SHARED stream in the AAudioService.
+ *
+ * Each Shared stream will register itself with an AAudioServiceEndpoint when it is opened.
+ */
+class AAudioServiceStreamShared : public AAudioServiceStreamBase {
+
+public:
+    explicit AAudioServiceStreamShared(android::AAudioService &aAudioService);
+    virtual ~AAudioServiceStreamShared();
+
+    aaudio_result_t open(const aaudio::AAudioStreamRequest &request,
+                         aaudio::AAudioStreamConfiguration &configurationOutput) override;
+
+    /**
+     * Start the flow of audio data.
+     *
+     * This is not guaranteed to be synchronous but it currently is.
+     * An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
+     */
+    aaudio_result_t start() override;
+
+    /**
+     * Stop the flow of data so that start() can resume without loss of data.
+     *
+     * This is not guaranteed to be synchronous but it currently is.
+     * An AAUDIO_SERVICE_EVENT_PAUSED will be sent to the client when complete.
+     */
+    aaudio_result_t pause() override;
+
+    /**
+     *  Discard any data held by the underlying HAL or Service.
+     *
+     * This is not guaranteed to be synchronous but it currently is.
+     * An AAUDIO_SERVICE_EVENT_FLUSHED will be sent to the client when complete.
+     */
+    aaudio_result_t flush() override;
+
+    aaudio_result_t close() override;
+
+    android::FifoBuffer *getDataFifoBuffer() { return mAudioDataQueue->getFifoBuffer(); }
+
+    void onStop();
+
+    void onDisconnect();
+
+protected:
+
+    aaudio_result_t getDownDataDescription(AudioEndpointParcelable &parcelable) override;
+
+    aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
+
+private:
+    android::AAudioService  &mAudioService;
+    AAudioServiceEndpoint   *mServiceEndpoint = nullptr;
+    SharedRingBuffer        *mAudioDataQueue = nullptr;
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_AAUDIO_SERVICE_STREAM_SHARED_H
diff --git a/services/oboeservice/AAudioThread.cpp b/services/oboeservice/AAudioThread.cpp
index f5e5784..b1b563d 100644
--- a/services/oboeservice/AAudioThread.cpp
+++ b/services/oboeservice/AAudioThread.cpp
@@ -21,14 +21,17 @@
 #include <pthread.h>
 
 #include <aaudio/AAudioDefinitions.h>
+#include <utility/AAudioUtilities.h>
 
 #include "AAudioThread.h"
 
 using namespace aaudio;
 
 
-AAudioThread::AAudioThread() {
-    // mThread is a pthread_t of unknown size so we need memset.
+AAudioThread::AAudioThread()
+    : mRunnable(nullptr)
+    , mHasThread(false) {
+    // mThread is a pthread_t of unknown size so we need memset().
     memset(&mThread, 0, sizeof(mThread));
 }
 
@@ -50,14 +53,16 @@
 
 aaudio_result_t AAudioThread::start(Runnable *runnable) {
     if (mHasThread) {
+        ALOGE("AAudioThread::start() - mHasThread.load() already true");
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    mRunnable = runnable; // TODO use atomic?
+    // mRunnable will be read by the new thread when it starts.
+    // pthread_create() forces a memory synchronization so mRunnable does not need to be atomic.
+    mRunnable = runnable;
     int err = pthread_create(&mThread, nullptr, AAudioThread_internalThreadProc, this);
     if (err != 0) {
-        ALOGE("AAudioThread::pthread_create() returned %d", err);
-        // TODO convert errno to aaudio_result_t
-        return AAUDIO_ERROR_INTERNAL;
+        ALOGE("AAudioThread::start() - pthread_create() returned %d %s", err, strerror(err));
+        return AAudioConvert_androidToAAudioResult(-err);
     } else {
         mHasThread = true;
         return AAUDIO_OK;
@@ -70,7 +75,11 @@
     }
     int err = pthread_join(mThread, nullptr);
     mHasThread = false;
-    // TODO convert errno to aaudio_result_t
-    return err ? AAUDIO_ERROR_INTERNAL : AAUDIO_OK;
+    if (err != 0) {
+        ALOGE("AAudioThread::stop() - pthread_join() returned %d %s", err, strerror(err));
+        return AAudioConvert_androidToAAudioResult(-err);
+    } else {
+        return AAUDIO_OK;
+    }
 }
 
diff --git a/services/oboeservice/AAudioThread.h b/services/oboeservice/AAudioThread.h
index a5d43a4..dd9f640 100644
--- a/services/oboeservice/AAudioThread.h
+++ b/services/oboeservice/AAudioThread.h
@@ -24,16 +24,20 @@
 
 namespace aaudio {
 
+/**
+ * Abstract class similar to Java Runnable.
+ */
 class Runnable {
 public:
     Runnable() {};
     virtual ~Runnable() = default;
 
-    virtual void run() {}
+    virtual void run() = 0;
 };
 
 /**
- * Abstraction for a host thread.
+ * Abstraction for a host dependent thread.
+ * TODO Consider using Android "Thread" class or std::thread instead.
  */
 class AAudioThread
 {
@@ -62,9 +66,9 @@
     void dispatch(); // called internally from 'C' thread wrapper
 
 private:
-    Runnable*          mRunnable = nullptr; // TODO make atomic with memory barrier?
-    bool               mHasThread = false;
-    pthread_t          mThread; // initialized in constructor
+    Runnable    *mRunnable;
+    bool         mHasThread;
+    pthread_t    mThread; // initialized in constructor
 
 };
 
diff --git a/services/oboeservice/Android.mk b/services/oboeservice/Android.mk
index 5cd9121..a9c80ae 100644
--- a/services/oboeservice/Android.mk
+++ b/services/oboeservice/Android.mk
@@ -3,52 +3,54 @@
 # AAudio Service
 include $(CLEAR_VARS)
 
-LOCAL_MODULE := aaudioservice
+LOCAL_MODULE := libaaudioservice
 LOCAL_MODULE_TAGS := optional
 
 LIBAAUDIO_DIR := ../../media/libaaudio
 LIBAAUDIO_SRC_DIR := $(LIBAAUDIO_DIR)/src
 
 LOCAL_C_INCLUDES := \
+    $(TOPDIR)frameworks/av/services/audioflinger \
     $(call include-path-for, audio-utils) \
     frameworks/native/include \
     system/core/base/include \
     $(TOP)/frameworks/native/media/libaaudio/include/include \
     $(TOP)/frameworks/av/media/libaaudio/include \
+    $(TOP)/frameworks/av/media/utils/include \
     frameworks/native/include \
     $(TOP)/external/tinyalsa/include \
-    $(TOP)/frameworks/av/media/libaaudio/src \
-    $(TOP)/frameworks/av/media/libaaudio/src/binding \
-    $(TOP)/frameworks/av/media/libaaudio/src/client \
-    $(TOP)/frameworks/av/media/libaaudio/src/core \
-    $(TOP)/frameworks/av/media/libaaudio/src/fifo \
-    $(TOP)/frameworks/av/media/libaaudio/src/utility
+    $(TOP)/frameworks/av/media/libaaudio/src
 
-# TODO These could be in a libaaudio_common library
 LOCAL_SRC_FILES += \
     $(LIBAAUDIO_SRC_DIR)/utility/HandleTracker.cpp \
-    $(LIBAAUDIO_SRC_DIR)/utility/AAudioUtilities.cpp \
-    $(LIBAAUDIO_SRC_DIR)/fifo/FifoBuffer.cpp \
-    $(LIBAAUDIO_SRC_DIR)/fifo/FifoControllerBase.cpp \
-    $(LIBAAUDIO_SRC_DIR)/binding/SharedMemoryParcelable.cpp \
-    $(LIBAAUDIO_SRC_DIR)/binding/SharedRegionParcelable.cpp \
-    $(LIBAAUDIO_SRC_DIR)/binding/RingBufferParcelable.cpp \
-    $(LIBAAUDIO_SRC_DIR)/binding/AudioEndpointParcelable.cpp \
-    $(LIBAAUDIO_SRC_DIR)/binding/AAudioStreamRequest.cpp \
-    $(LIBAAUDIO_SRC_DIR)/binding/AAudioStreamConfiguration.cpp \
-    $(LIBAAUDIO_SRC_DIR)/binding/IAAudioService.cpp \
+    SharedMemoryProxy.cpp \
     SharedRingBuffer.cpp \
-    FakeAudioHal.cpp \
+    AAudioEndpointManager.cpp \
+    AAudioMixer.cpp \
     AAudioService.cpp \
+    AAudioServiceEndpoint.cpp \
     AAudioServiceStreamBase.cpp \
-    AAudioServiceStreamFakeHal.cpp \
+    AAudioServiceStreamMMAP.cpp \
+    AAudioServiceStreamShared.cpp \
     TimestampScheduler.cpp \
-    AAudioServiceMain.cpp \
     AAudioThread.cpp
 
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
+# LOCAL_CFLAGS += -fvisibility=hidden
 LOCAL_CFLAGS += -Wno-unused-parameter
 LOCAL_CFLAGS += -Wall -Werror
 
-LOCAL_SHARED_LIBRARIES :=  libbinder libcutils libutils liblog libtinyalsa
+LOCAL_SHARED_LIBRARIES :=  \
+    libaaudio \
+    libaudioflinger \
+    libbinder \
+    libcutils \
+    libmediautils \
+    libutils \
+    liblog \
+    libtinyalsa
 
-include $(BUILD_EXECUTABLE)
+include $(BUILD_SHARED_LIBRARY)
+
+
diff --git a/services/oboeservice/FakeAudioHal.cpp b/services/oboeservice/FakeAudioHal.cpp
deleted file mode 100644
index 122671e..0000000
--- a/services/oboeservice/FakeAudioHal.cpp
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Simple fake HAL that supports ALSA MMAP/NOIRQ mode.
- */
-
-#include <iostream>
-#include <math.h>
-#include <limits>
-#include <string.h>
-#include <unistd.h>
-
-#include <sound/asound.h>
-
-#include "tinyalsa/asoundlib.h"
-
-#include "FakeAudioHal.h"
-
-//using namespace aaudio;
-
-using sample_t = int16_t;
-using std::cout;
-using std::endl;
-
-#undef SNDRV_PCM_IOCTL_SYNC_PTR
-#define SNDRV_PCM_IOCTL_SYNC_PTR 0xc0884123
-#define PCM_ERROR_MAX 128
-
-const int SAMPLE_RATE = 48000;       // Hz
-const int CHANNEL_COUNT = 2;
-
-struct pcm {
-    int fd;
-    unsigned int flags;
-    int running:1;
-    int prepared:1;
-    int underruns;
-    unsigned int buffer_size;
-    unsigned int boundary;
-    char error[PCM_ERROR_MAX];
-    struct pcm_config config;
-    struct snd_pcm_mmap_status *mmap_status;
-    struct snd_pcm_mmap_control *mmap_control;
-    struct snd_pcm_sync_ptr *sync_ptr;
-    void *mmap_buffer;
-    unsigned int noirq_frames_per_msec;
-    int wait_for_avail_min;
-};
-
-static int pcm_sync_ptr(struct pcm *pcm, int flags) {
-    if (pcm->sync_ptr) {
-        pcm->sync_ptr->flags = flags;
-        if (ioctl(pcm->fd, SNDRV_PCM_IOCTL_SYNC_PTR, pcm->sync_ptr) < 0)
-            return -1;
-    }
-    return 0;
-}
-
-int pcm_get_hw_ptr(struct pcm* pcm, unsigned int* hw_ptr) {
-    if (!hw_ptr || !pcm) return -EINVAL;
-
-    int result = pcm_sync_ptr(pcm, SNDRV_PCM_SYNC_PTR_HWSYNC);
-    if (!result) {
-        *hw_ptr = pcm->sync_ptr->s.status.hw_ptr;
-    }
-
-    return result;
-}
-
-typedef struct stream_tracker {
-    struct pcm * pcm;
-    int          framesPerBurst;
-    sample_t   * hwBuffer;
-    int32_t      capacityInFrames;
-    int32_t      capacityInBytes;
-} stream_tracker_t;
-
-#define FRAMES_PER_BURST_QUALCOMM 192
-#define FRAMES_PER_BURST_NVIDIA   128
-
-int fake_hal_open(int card_id, int device_id,
-                  int frameCapacity,
-                  fake_hal_stream_ptr *streamPP) {
-    int framesPerBurst = FRAMES_PER_BURST_QUALCOMM; // TODO update as needed
-    int periodCountRequested = frameCapacity / framesPerBurst;
-    int periodCount = 32;
-    unsigned int offset1;
-    unsigned int frames1;
-    void *area = nullptr;
-    int mmapAvail = 0;
-
-    // Try to match requested size with a power of 2.
-    while (periodCount < periodCountRequested && periodCount < 1024) {
-        periodCount *= 2;
-    }
-    std::cout << "fake_hal_open() requested frameCapacity = " << frameCapacity << std::endl;
-    std::cout << "fake_hal_open() periodCountRequested = " << periodCountRequested << std::endl;
-    std::cout << "fake_hal_open() periodCount = " << periodCount << std::endl;
-
-    // Configuration for an ALSA stream.
-    pcm_config cfg;
-    memset(&cfg, 0, sizeof(cfg));
-    cfg.channels = CHANNEL_COUNT;
-    cfg.format = PCM_FORMAT_S16_LE;
-    cfg.rate = SAMPLE_RATE;
-    cfg.period_count = periodCount;
-    cfg.period_size = framesPerBurst;
-    cfg.start_threshold = 0; // for NOIRQ, should just start, was     framesPerBurst;
-    cfg.stop_threshold = INT32_MAX;
-    cfg.silence_size = 0;
-    cfg.silence_threshold = 0;
-    cfg.avail_min = framesPerBurst;
-
-    stream_tracker_t *streamTracker = (stream_tracker_t *) malloc(sizeof(stream_tracker_t));
-    if (streamTracker == nullptr) {
-        return -1;
-    }
-    memset(streamTracker, 0, sizeof(stream_tracker_t));
-
-    streamTracker->pcm = pcm_open(card_id, device_id, PCM_OUT | PCM_MMAP | PCM_NOIRQ, &cfg);
-    if (streamTracker->pcm == nullptr) {
-        cout << "Could not open device." << endl;
-        free(streamTracker);
-        return -1;
-    }
-
-    streamTracker->framesPerBurst = cfg.period_size; // Get from ALSA
-    streamTracker->capacityInFrames = pcm_get_buffer_size(streamTracker->pcm);
-    streamTracker->capacityInBytes = pcm_frames_to_bytes(streamTracker->pcm, streamTracker->capacityInFrames);
-    std::cout << "fake_hal_open() streamTracker->framesPerBurst = " << streamTracker->framesPerBurst << std::endl;
-    std::cout << "fake_hal_open() streamTracker->capacityInFrames = " << streamTracker->capacityInFrames << std::endl;
-
-    if (pcm_is_ready(streamTracker->pcm) < 0) {
-        cout << "Device is not ready." << endl;
-        goto error;
-    }
-
-    if (pcm_prepare(streamTracker->pcm) < 0) {
-        cout << "Device could not be prepared." << endl;
-        cout << "For Marlin, please enter:" << endl;
-        cout << "   adb shell" << endl;
-        cout << "   tinymix \"QUAT_MI2S_RX Audio Mixer MultiMedia8\" 1" << endl;
-        goto error;
-    }
-    mmapAvail = pcm_mmap_avail(streamTracker->pcm);
-    if (mmapAvail <= 0) {
-        cout << "fake_hal_open() mmap_avail is <=0" << endl;
-        goto error;
-    }
-    cout << "fake_hal_open() mmap_avail = " << mmapAvail << endl;
-
-    // Where is the memory mapped area?
-    if (pcm_mmap_begin(streamTracker->pcm, &area, &offset1, &frames1) < 0)  {
-        cout << "fake_hal_open() pcm_mmap_begin failed" << endl;
-        goto error;
-    }
-
-    // Clear the buffer.
-    memset((sample_t*) area, 0, streamTracker->capacityInBytes);
-    streamTracker->hwBuffer = (sample_t*) area;
-    streamTracker->hwBuffer[0] = 32000; // impulse
-
-    // Prime the buffer so it can start.
-    if (pcm_mmap_commit(streamTracker->pcm, 0, framesPerBurst) < 0) {
-        cout << "fake_hal_open() pcm_mmap_commit failed" << endl;
-        goto error;
-    }
-
-    *streamPP = streamTracker;
-    return 1;
-
-error:
-    fake_hal_close(streamTracker);
-    return -1;
-}
-
-int fake_hal_get_mmap_info(fake_hal_stream_ptr stream, mmap_buffer_info *info) {
-    stream_tracker_t *streamTracker = (stream_tracker_t *) stream;
-    info->fd = streamTracker->pcm->fd; // TODO use tinyalsa function
-    info->hw_buffer = streamTracker->hwBuffer;
-    info->burst_size_in_frames = streamTracker->framesPerBurst;
-    info->buffer_capacity_in_frames = streamTracker->capacityInFrames;
-    info->buffer_capacity_in_bytes = streamTracker->capacityInBytes;
-    info->sample_rate = SAMPLE_RATE;
-    info->channel_count = CHANNEL_COUNT;
-    return 0;
-}
-
-int fake_hal_start(fake_hal_stream_ptr stream) {
-    stream_tracker_t *streamTracker = (stream_tracker_t *) stream;
-    if (pcm_start(streamTracker->pcm) < 0) {
-        cout << "fake_hal_start failed" << endl;
-        return -1;
-    }
-    return 0;
-}
-
-int fake_hal_pause(fake_hal_stream_ptr stream) {
-    stream_tracker_t *streamTracker = (stream_tracker_t *) stream;
-    if (pcm_stop(streamTracker->pcm) < 0) {
-        cout << "fake_hal_stop failed" << endl;
-        return -1;
-    }
-    return 0;
-}
-
-int fake_hal_get_frame_counter(fake_hal_stream_ptr stream, int *frame_counter) {
-    stream_tracker_t *streamTracker = (stream_tracker_t *) stream;
-    if (pcm_get_hw_ptr(streamTracker->pcm, (unsigned int *)frame_counter) < 0) {
-        cout << "fake_hal_get_frame_counter failed" << endl;
-        return -1;
-    }
-    return 0;
-}
-
-int fake_hal_close(fake_hal_stream_ptr stream) {
-    stream_tracker_t *streamTracker = (stream_tracker_t *) stream;
-    pcm_close(streamTracker->pcm);
-    free(streamTracker);
-    return 0;
-}
-
diff --git a/services/oboeservice/FakeAudioHal.h b/services/oboeservice/FakeAudioHal.h
deleted file mode 100644
index d3aa4e8..0000000
--- a/services/oboeservice/FakeAudioHal.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Simple fake HAL that supports ALSA MMAP/NOIRQ mode.
- */
-
-#ifndef FAKE_AUDIO_HAL_H
-#define FAKE_AUDIO_HAL_H
-
-//namespace aaudio {
-
-using sample_t = int16_t;
-struct mmap_buffer_info {
-    int       fd;
-    int32_t   burst_size_in_frames;
-    int32_t   buffer_capacity_in_frames;
-    int32_t   buffer_capacity_in_bytes;
-    int32_t   sample_rate;
-    int32_t   channel_count;
-    sample_t *hw_buffer;
-};
-
-typedef void *fake_hal_stream_ptr;
-
-//extern "C"
-//{
-
-int fake_hal_open(int card_id, int device_id,
-                  int frameCapacity,
-                  fake_hal_stream_ptr *stream_pp);
-
-int fake_hal_get_mmap_info(fake_hal_stream_ptr stream, mmap_buffer_info *info);
-
-int fake_hal_start(fake_hal_stream_ptr stream);
-
-int fake_hal_pause(fake_hal_stream_ptr stream);
-
-int fake_hal_get_frame_counter(fake_hal_stream_ptr stream, int *frame_counter);
-
-int fake_hal_close(fake_hal_stream_ptr stream);
-
-//} /* "C" */
-
-//} /* namespace aaudio */
-
-#endif // FAKE_AUDIO_HAL_H
diff --git a/services/oboeservice/SharedMemoryProxy.cpp b/services/oboeservice/SharedMemoryProxy.cpp
new file mode 100644
index 0000000..83ae1d4
--- /dev/null
+++ b/services/oboeservice/SharedMemoryProxy.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <aaudio/AAudioDefinitions.h>
+#include "SharedMemoryProxy.h"
+
+using namespace android;
+using namespace aaudio;
+
+SharedMemoryProxy::~SharedMemoryProxy()
+{
+    if (mOriginalSharedMemory != nullptr) {
+        munmap(mOriginalSharedMemory, mSharedMemorySizeInBytes);
+        mOriginalSharedMemory = nullptr;
+    }
+    if (mProxySharedMemory != nullptr) {
+        munmap(mProxySharedMemory, mSharedMemorySizeInBytes);
+        close(mProxyFileDescriptor);
+        mProxySharedMemory = nullptr;
+    }
+}
+
+aaudio_result_t SharedMemoryProxy::open(int originalFD, int32_t capacityInBytes) {
+    mOriginalFileDescriptor = originalFD;
+    mSharedMemorySizeInBytes = capacityInBytes;
+
+    mProxyFileDescriptor = ashmem_create_region("AAudioProxyDataBuffer", mSharedMemorySizeInBytes);
+    if (mProxyFileDescriptor < 0) {
+        ALOGE("SharedMemoryProxy::open() ashmem_create_region() failed %d", errno);
+        return AAUDIO_ERROR_INTERNAL;
+    }
+    int err = ashmem_set_prot_region(mProxyFileDescriptor, PROT_READ|PROT_WRITE);
+    if (err < 0) {
+        ALOGE("SharedMemoryProxy::open() ashmem_set_prot_region() failed %d", errno);
+        close(mProxyFileDescriptor);
+        mProxyFileDescriptor = -1;
+        return AAUDIO_ERROR_INTERNAL; // TODO convert errno to a better AAUDIO_ERROR;
+    }
+
+    // Get original memory address.
+    mOriginalSharedMemory = (uint8_t *) mmap(0, mSharedMemorySizeInBytes,
+                         PROT_READ|PROT_WRITE,
+                         MAP_SHARED,
+                         mOriginalFileDescriptor, 0);
+    if (mOriginalSharedMemory == MAP_FAILED) {
+        ALOGE("SharedMemoryProxy::open() original mmap(%d) failed %d (%s)",
+                mOriginalFileDescriptor, errno, strerror(errno));
+        mOriginalSharedMemory = nullptr;
+        close(mProxyFileDescriptor);
+        mProxyFileDescriptor = -1;
+        return AAUDIO_ERROR_INTERNAL; // TODO convert errno to a better AAUDIO_ERROR;
+    }
+
+    // Map the fd to the same memory addresses.
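+    // The original address is passed only as a hint (no MAP_FIXED), so if the kernel
+    // places the proxy mapping anywhere else, the mismatch check below fails the open.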
+    mProxySharedMemory = (uint8_t *) mmap(mOriginalSharedMemory, mSharedMemorySizeInBytes,
+                         PROT_READ|PROT_WRITE,
+                         MAP_SHARED,
+                         mProxyFileDescriptor, 0);
+    if (mProxySharedMemory != mOriginalSharedMemory) {
+        ALOGE("SharedMemoryProxy::open() proxy mmap(%d) failed %d", mProxyFileDescriptor, errno);
+        munmap(mOriginalSharedMemory, mSharedMemorySizeInBytes);
+        mOriginalSharedMemory = nullptr;
+        close(mProxyFileDescriptor);
+        mProxyFileDescriptor = -1;
+        return AAUDIO_ERROR_INTERNAL; // TODO convert errno to a better AAUDIO_ERROR;
+    }
+
+    return AAUDIO_OK;
+}
diff --git a/services/oboeservice/SharedMemoryProxy.h b/services/oboeservice/SharedMemoryProxy.h
new file mode 100644
index 0000000..99bfdea
--- /dev/null
+++ b/services/oboeservice/SharedMemoryProxy.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_SHARED_MEMORY_PROXY_H
+#define AAUDIO_SHARED_MEMORY_PROXY_H
+
+#include <stdint.h>
+#include <cutils/ashmem.h>
+#include <sys/mman.h>
+
+#include <aaudio/AAudioDefinitions.h>
+
+namespace aaudio {
+
+/**
+ * Proxy for sharing memory between two file descriptors.
+ */
+class SharedMemoryProxy {
+public:
+    SharedMemoryProxy() {}
+
+    ~SharedMemoryProxy();
+
+    aaudio_result_t open(int fd, int32_t capacityInBytes);
+
+    int getFileDescriptor() const {
+        return mProxyFileDescriptor;
+    }
+
+private:
+    int            mOriginalFileDescriptor = -1;
+    int            mProxyFileDescriptor = -1;
+    uint8_t       *mOriginalSharedMemory = nullptr;
+    uint8_t       *mProxySharedMemory = nullptr;
+    int32_t        mSharedMemorySizeInBytes = 0;
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_SHARED_MEMORY_PROXY_H
diff --git a/services/oboeservice/SharedRingBuffer.cpp b/services/oboeservice/SharedRingBuffer.cpp
index 9ac8fdf..efcc9d6 100644
--- a/services/oboeservice/SharedRingBuffer.cpp
+++ b/services/oboeservice/SharedRingBuffer.cpp
@@ -18,11 +18,8 @@
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
-#include "AudioClock.h"
-#include "AudioEndpointParcelable.h"
-
-//#include "AAudioServiceStreamBase.h"
-//#include "AAudioServiceStreamFakeHal.h"
+#include "binding/RingBufferParcelable.h"
+#include "binding/AudioEndpointParcelable.h"
 
 #include "SharedRingBuffer.h"
 
diff --git a/services/oboeservice/SharedRingBuffer.h b/services/oboeservice/SharedRingBuffer.h
index 75f138b..a2c3766 100644
--- a/services/oboeservice/SharedRingBuffer.h
+++ b/services/oboeservice/SharedRingBuffer.h
@@ -22,8 +22,8 @@
 #include <sys/mman.h>
 
 #include "fifo/FifoBuffer.h"
-#include "RingBufferParcelable.h"
-#include "AudioEndpointParcelable.h"
+#include "binding/RingBufferParcelable.h"
+#include "binding/AudioEndpointParcelable.h"
 
 namespace aaudio {
 
@@ -41,22 +41,22 @@
 
     virtual ~SharedRingBuffer();
 
-    aaudio_result_t allocate(fifo_frames_t bytesPerFrame, fifo_frames_t capacityInFrames);
+    aaudio_result_t allocate(android::fifo_frames_t bytesPerFrame, android::fifo_frames_t capacityInFrames);
 
     void fillParcelable(AudioEndpointParcelable &endpointParcelable,
                         RingBufferParcelable &ringBufferParcelable);
 
-    FifoBuffer * getFifoBuffer() {
+    android::FifoBuffer * getFifoBuffer() {
         return mFifoBuffer;
     }
 
 private:
-    int            mFileDescriptor = -1;
-    FifoBuffer   * mFifoBuffer = nullptr;
-    uint8_t      * mSharedMemory = nullptr;
-    int32_t        mSharedMemorySizeInBytes = 0;
-    int32_t        mDataMemorySizeInBytes = 0;
-    fifo_frames_t  mCapacityInFrames = 0;
+    int                    mFileDescriptor = -1;
+    android::FifoBuffer   *mFifoBuffer = nullptr;
+    uint8_t               *mSharedMemory = nullptr;
+    int32_t                mSharedMemorySizeInBytes = 0;
+    int32_t                mDataMemorySizeInBytes = 0;
+    android::fifo_frames_t mCapacityInFrames = 0;
 };
 
 } /* namespace aaudio */
diff --git a/services/oboeservice/TimestampScheduler.h b/services/oboeservice/TimestampScheduler.h
index 91a2477..325bee4 100644
--- a/services/oboeservice/TimestampScheduler.h
+++ b/services/oboeservice/TimestampScheduler.h
@@ -17,15 +17,8 @@
 #ifndef AAUDIO_TIMESTAMP_SCHEDULER_H
 #define AAUDIO_TIMESTAMP_SCHEDULER_H
 
-
-
-#include "IAAudioService.h"
-#include "AAudioServiceDefinitions.h"
-#include "AudioStream.h"
-#include "fifo/FifoBuffer.h"
-#include "SharedRingBuffer.h"
-#include "AudioEndpointParcelable.h"
-#include "utility/AudioClock.h"
+#include <aaudio/AAudioDefinitions.h>
+#include <utility/AudioClock.h>
 
 namespace aaudio {
 
@@ -47,8 +40,7 @@
     void start(int64_t startTime);
 
     /**
-     * Calculate the next time that the read position should be
-     * measured.
+     * Calculate the next time that the read position should be measured.
      */
     int64_t nextAbsoluteTime();
 
@@ -68,8 +60,8 @@
 private:
     // Start with an arbitrary default so we do not divide by zero.
     int64_t mBurstPeriod = AAUDIO_NANOS_PER_MILLISECOND;
-    int64_t mStartTime;
-    int64_t mLastTime;
+    int64_t mStartTime = 0;
+    int64_t mLastTime = 0;
 };
 
 } /* namespace aaudio */