Merge "Added process name to PID log"
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
index 9a236fc..a98a207 100644
--- a/cmds/stagefright/Android.mk
+++ b/cmds/stagefright/Android.mk
@@ -3,19 +3,21 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:=       \
-	stagefright.cpp \
-	jpeg.cpp	\
-	SineSource.cpp
+        stagefright.cpp \
+        jpeg.cpp        \
+        SineSource.cpp
 
 LOCAL_SHARED_LIBRARIES := \
-	libstagefright libmedia libutils libbinder libstagefright_foundation \
-	libjpeg libgui libcutils liblog
+        libstagefright libmedia libutils libbinder libstagefright_foundation \
+        libjpeg libgui libcutils liblog \
+        android.hardware.media.omx@1.0 \
+        android.hardware.media.omx@1.0-utils
 
 LOCAL_C_INCLUDES:= \
-	frameworks/av/media/libstagefright \
-	frameworks/av/media/libstagefright/include \
-	$(TOP)/frameworks/native/include/media/openmax \
-	external/jpeg \
+        frameworks/av/media/libstagefright \
+        frameworks/av/media/libstagefright/include \
+        $(TOP)/frameworks/native/include/media/openmax \
+        external/jpeg \
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
 
@@ -30,16 +32,16 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:=         \
-	SineSource.cpp    \
-	record.cpp
+        SineSource.cpp    \
+        record.cpp
 
 LOCAL_SHARED_LIBRARIES := \
-	libstagefright libmedia liblog libutils libbinder libstagefright_foundation
+        libstagefright libmedia liblog libutils libbinder libstagefright_foundation
 
 LOCAL_C_INCLUDES:= \
-	frameworks/av/media/libstagefright \
-	$(TOP)/frameworks/native/include/media/openmax \
-	$(TOP)/frameworks/native/include/media/hardware
+        frameworks/av/media/libstagefright \
+        $(TOP)/frameworks/native/include/media/openmax \
+        $(TOP)/frameworks/native/include/media/hardware
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
 
@@ -54,16 +56,16 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:=         \
-	SineSource.cpp    \
-	recordvideo.cpp
+        SineSource.cpp    \
+        recordvideo.cpp
 
 LOCAL_SHARED_LIBRARIES := \
-	libstagefright libmedia liblog libutils libbinder libstagefright_foundation
+        libstagefright libmedia liblog libutils libbinder libstagefright_foundation
 
 LOCAL_C_INCLUDES:= \
-	frameworks/av/media/libstagefright \
-	$(TOP)/frameworks/native/include/media/openmax \
-	$(TOP)/frameworks/native/include/media/hardware
+        frameworks/av/media/libstagefright \
+        $(TOP)/frameworks/native/include/media/openmax \
+        $(TOP)/frameworks/native/include/media/hardware
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
 
@@ -79,15 +81,15 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:=         \
-	SineSource.cpp    \
-	audioloop.cpp
+        SineSource.cpp    \
+        audioloop.cpp
 
 LOCAL_SHARED_LIBRARIES := \
-	libstagefright libmedia liblog libutils libbinder libstagefright_foundation
+        libstagefright libmedia liblog libutils libbinder libstagefright_foundation
 
 LOCAL_C_INCLUDES:= \
-	frameworks/av/media/libstagefright \
-	$(TOP)/frameworks/native/include/media/openmax
+        frameworks/av/media/libstagefright \
+        $(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
 
@@ -105,12 +107,12 @@
         stream.cpp    \
 
 LOCAL_SHARED_LIBRARIES := \
-	libstagefright liblog libutils libbinder libgui \
-	libstagefright_foundation libmedia libcutils
+        libstagefright liblog libutils libbinder libgui \
+        libstagefright_foundation libmedia libcutils
 
 LOCAL_C_INCLUDES:= \
-	frameworks/av/media/libstagefright \
-	$(TOP)/frameworks/native/include/media/openmax
+        frameworks/av/media/libstagefright \
+        $(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
 
@@ -125,16 +127,16 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:=               \
-	codec.cpp               \
-	SimplePlayer.cpp        \
+        codec.cpp               \
+        SimplePlayer.cpp        \
 
 LOCAL_SHARED_LIBRARIES := \
-	libstagefright liblog libutils libbinder libstagefright_foundation \
-	libmedia libaudioclient libgui libcutils
+        libstagefright liblog libutils libbinder libstagefright_foundation \
+        libmedia libaudioclient libgui libcutils
 
 LOCAL_C_INCLUDES:= \
-	frameworks/av/media/libstagefright \
-	$(TOP)/frameworks/native/include/media/openmax
+        frameworks/av/media/libstagefright \
+        $(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
 
@@ -149,33 +151,33 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:= \
-	filters/argbtorgba.rs \
-	filters/nightvision.rs \
-	filters/saturation.rs \
-	mediafilter.cpp \
+        filters/argbtorgba.rs \
+        filters/nightvision.rs \
+        filters/saturation.rs \
+        mediafilter.cpp \
 
 LOCAL_SHARED_LIBRARIES := \
-	libstagefright \
-	liblog \
-	libutils \
-	libbinder \
-	libstagefright_foundation \
-	libmedia \
-	libgui \
-	libcutils \
-	libRScpp \
+        libstagefright \
+        liblog \
+        libutils \
+        libbinder \
+        libstagefright_foundation \
+        libmedia \
+        libgui \
+        libcutils \
+        libRScpp \
 
 LOCAL_C_INCLUDES:= \
-	$(TOP)/frameworks/av/media/libstagefright \
-	$(TOP)/frameworks/native/include/media/openmax \
-	$(TOP)/frameworks/rs/cpp \
-	$(TOP)/frameworks/rs \
+        $(TOP)/frameworks/av/media/libstagefright \
+        $(TOP)/frameworks/native/include/media/openmax \
+        $(TOP)/frameworks/rs/cpp \
+        $(TOP)/frameworks/rs \
 
 intermediates := $(call intermediates-dir-for,STATIC_LIBRARIES,libRS,TARGET,)
 LOCAL_C_INCLUDES += $(intermediates)
 
 LOCAL_STATIC_LIBRARIES:= \
-	libstagefright_mediafilter
+        libstagefright_mediafilter
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
 
@@ -190,15 +192,15 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:=               \
-	muxer.cpp            \
+        muxer.cpp            \
 
 LOCAL_SHARED_LIBRARIES := \
-	libstagefright liblog libutils libbinder libstagefright_foundation \
-	libcutils libc
+        libstagefright liblog libutils libbinder libstagefright_foundation \
+        libcutils libc
 
 LOCAL_C_INCLUDES:= \
-	frameworks/av/media/libstagefright \
-	$(TOP)/frameworks/native/include/media/openmax
+        frameworks/av/media/libstagefright \
+        $(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
 
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index 5e3a859..ffa09eb 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -64,6 +64,9 @@
 #include <gui/Surface.h>
 #include <gui/SurfaceComposerClient.h>
 
+#include <android/hardware/media/omx/1.0/IOmx.h>
+#include <omx/hal/1.0/utils/WOmx.h>
+
 using namespace android;
 
 static long gNumRepetitions;
@@ -904,13 +907,25 @@
     }
 
     if (listComponents) {
-        sp<IServiceManager> sm = defaultServiceManager();
-        sp<IBinder> binder = sm->getService(String16("media.codec"));
-        sp<IMediaCodecService> service = interface_cast<IMediaCodecService>(binder);
+        sp<IOMX> omx;
+        int32_t trebleOmx = property_get_int32("persist.media.treble_omx", -1);
+        if ((trebleOmx == 1) || ((trebleOmx == -1) &&
+                property_get_bool("persist.hal.binderization", 0))) {
+            using namespace ::android::hardware::media::omx::V1_0;
+            sp<IOmx> tOmx = IOmx::getService();
 
-        CHECK(service.get() != NULL);
+            CHECK(tOmx.get() != NULL);
 
-        sp<IOMX> omx = service->getOMX();
+            omx = new utils::LWOmx(tOmx);
+        } else {
+            sp<IServiceManager> sm = defaultServiceManager();
+            sp<IBinder> binder = sm->getService(String16("media.codec"));
+            sp<IMediaCodecService> service = interface_cast<IMediaCodecService>(binder);
+
+            CHECK(service.get() != NULL);
+
+            omx = service->getOMX();
+        }
         CHECK(omx.get() != NULL);
 
         List<IOMX::ComponentInfo> list;
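For readers following the listComponents change above: the Treble-vs-legacy selection it introduces can be read as a standalone helper. A minimal sketch, assuming the same headers this patch adds (IOmx.h, WOmx.h) and the persist.media.treble_omx / persist.hal.binderization properties it consults:

    // Sketch only: mirrors the fallback order added to stagefright.cpp above.
    static sp<IOMX> obtainOmx() {
        int32_t trebleOmx = property_get_int32("persist.media.treble_omx", -1);
        if ((trebleOmx == 1) || ((trebleOmx == -1) &&
                property_get_bool("persist.hal.binderization", 0))) {
            // Treble path: fetch the HIDL IOmx service and wrap it behind the
            // legacy IOMX interface.
            using namespace ::android::hardware::media::omx::V1_0;
            sp<IOmx> tOmx = IOmx::getService();
            CHECK(tOmx.get() != NULL);
            return new utils::LWOmx(tOmx);
        }
        // Legacy path: resolve media.codec through the binder service manager.
        sp<IBinder> binder = defaultServiceManager()->getService(String16("media.codec"));
        sp<IMediaCodecService> service = interface_cast<IMediaCodecService>(binder);
        CHECK(service.get() != NULL);
        return service->getOMX();
    }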
diff --git a/drm/libmediadrm/DrmHal.cpp b/drm/libmediadrm/DrmHal.cpp
index 73dc93b..8200d55 100644
--- a/drm/libmediadrm/DrmHal.cpp
+++ b/drm/libmediadrm/DrmHal.cpp
@@ -349,6 +349,7 @@
     bool result = false;
 
     if (mFactory != NULL && mFactory->isCryptoSchemeSupported(uuid)) {
+        result = true;
         if (mimeType != "") {
             result = mFactory->isContentTypeSupported(mimeType.string());
         }
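The one-line DrmHal change above fixes a query that previously returned false for a supported UUID whenever no MIME type was supplied; setting result to true first lets the scheme-only query succeed while the content-type check still decides when a MIME type is given. A sketch of the resulting logic, for reference:

    // Sketch of the corrected flow in DrmHal::isCryptoSchemeSupported().
    bool supported = false;
    if (mFactory != NULL && mFactory->isCryptoSchemeSupported(uuid)) {
        supported = true;                                   // UUID alone is enough
        if (mimeType != "") {
            supported = mFactory->isContentTypeSupported(mimeType.string());
        }
    }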
diff --git a/include/media/IMediaPlayer.h b/include/media/IMediaPlayer.h
index a4cc152..e5a98dd 100644
--- a/include/media/IMediaPlayer.h
+++ b/include/media/IMediaPlayer.h
@@ -24,7 +24,6 @@
 #include <system/audio.h>
 
 #include <media/IMediaSource.h>
-#include <media/drm/DrmAPI.h>   // for DrmPlugin::* enum
 #include <media/VolumeShaper.h>
 
 // Fwd decl to make sure everyone agrees that the scope of struct sockaddr_in is
@@ -97,22 +96,10 @@
                                     const sp<VolumeShaper::Operation>& operation) = 0;
     virtual sp<VolumeShaper::State> getVolumeShaperState(int id) = 0;
 
-    // ModDrm
-    virtual status_t        prepareDrm(const uint8_t uuid[16], const int mode) = 0;
+    // Modular DRM
+    virtual status_t        prepareDrm(const uint8_t uuid[16],
+                                    const Vector<uint8_t>& drmSessionId) = 0;
     virtual status_t        releaseDrm() = 0;
-    virtual status_t        getKeyRequest(Vector<uint8_t> const& scope,
-                                    String8 const &mimeType,
-                                    DrmPlugin::KeyType keyType,
-                                    KeyedVector<String8, String8>& optionalParameters,
-                                    Vector<uint8_t>& request,
-                                    String8& defaultUrl,
-                                    DrmPlugin::KeyRequestType& keyRequestType) = 0;
-    virtual status_t        provideKeyResponse(Vector<uint8_t>& releaseKeySetId,
-                                    Vector<uint8_t>& response,
-                                    Vector<uint8_t>& keySetId) = 0;
-    virtual status_t        restoreKeys(Vector<uint8_t> const& keySetId) = 0;
-    virtual status_t        getDrmPropertyString(String8 const& name, String8& value) = 0;
-    virtual status_t        setDrmPropertyString(String8 const& name, String8 const& value) = 0;
 
     // Invoke a generic method on the player by using opaque parcels
     // for the request and reply.
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index b3e53fc..a01f7f2 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -285,33 +285,13 @@
         return INVALID_OPERATION;
     }
 
-    // ModDrm
-    virtual status_t prepareDrm(const uint8_t uuid[16], const int mode) {
+    // Modular DRM
+    virtual status_t prepareDrm(const uint8_t uuid[16], const Vector<uint8_t>& drmSessionId) {
         return INVALID_OPERATION;
     }
     virtual status_t releaseDrm() {
         return INVALID_OPERATION;
     }
-    virtual status_t getKeyRequest(Vector<uint8_t> const& scope, String8 const& mimeType,
-                             DrmPlugin::KeyType keyType,
-                             KeyedVector<String8, String8>& optionalParameters,
-                             Vector<uint8_t>& request, String8& defaultUrl,
-                             DrmPlugin::KeyRequestType& keyRequestType) {
-        return INVALID_OPERATION;
-    }
-    virtual status_t provideKeyResponse(Vector<uint8_t>& releaseKeySetId,
-                             Vector<uint8_t>& response, Vector<uint8_t>& keySetId) {
-        return INVALID_OPERATION;
-    }
-    virtual status_t restoreKeys(Vector<uint8_t> const& keySetId) {
-        return INVALID_OPERATION;
-    }
-    virtual status_t getDrmPropertyString(String8 const& name, String8& value) {
-        return INVALID_OPERATION;
-    }
-    virtual status_t setDrmPropertyString(String8 const& name, String8 const& value) {
-        return INVALID_OPERATION;
-    }
 
 private:
     friend class MediaPlayerService;
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index fbe3926..18d69a7 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -266,19 +266,9 @@
                                     const sp<VolumeShaper::Configuration>& configuration,
                                     const sp<VolumeShaper::Operation>& operation);
             sp<VolumeShaper::State> getVolumeShaperState(int id);
-            // ModDrm
-            status_t        prepareDrm(const uint8_t uuid[16], const int mode);
+            // Modular DRM
+            status_t        prepareDrm(const uint8_t uuid[16], const Vector<uint8_t>& drmSessionId);
             status_t        releaseDrm();
-            status_t        getKeyRequest(Vector<uint8_t> const& scope, String8 const& mimeType,
-                                    DrmPlugin::KeyType keyType,
-                                    KeyedVector<String8, String8>& optionalParameters,
-                                    Vector<uint8_t>& request, String8& defaultUrl,
-                                    DrmPlugin::KeyRequestType& keyRequestType);
-            status_t        provideKeyResponse(Vector<uint8_t>& releaseKeySetId,
-                                    Vector<uint8_t>& response, Vector<uint8_t>& keySetId);
-            status_t        restoreKeys(Vector<uint8_t> const& keySetId);
-            status_t        getDrmPropertyString(String8 const& name, String8& value);
-            status_t        setDrmPropertyString(String8 const& name, String8 const& value);
 
 private:
             void            clear_l();
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index 814a643..90d9942 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -94,9 +94,8 @@
 
     static status_t getOMXChannelMapping(size_t numChannels, OMX_AUDIO_CHANNELTYPE map[]);
 
-    // Read the flag from "media.use_treble_omx", save it locally, and return
-    // it.
-    bool updateTrebleFlag();
+    // Save the flag.
+    void setTrebleFlag(bool trebleFlag);
     // Return the saved flag.
     bool getTrebleFlag() const;
 
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index 699ae48..20b26e2 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -91,6 +91,8 @@
             const sp<ICrypto> &crypto,
             uint32_t flags);
 
+    status_t releaseCrypto();
+
     status_t setCallback(const sp<AMessage> &callback);
 
     status_t setOnFrameRenderedNotification(const sp<AMessage> &notify);
@@ -239,6 +241,7 @@
         kWhatSetParameters                  = 'setP',
         kWhatSetCallback                    = 'setC',
         kWhatSetNotification                = 'setN',
+        kWhatDrmReleaseCrypto               = 'rDrm',
     };
 
     enum {
@@ -416,6 +419,8 @@
         mStickyError = err;
     }
 
+    void onReleaseCrypto(const sp<AMessage>& msg);
+
     DISALLOW_EVIL_CONSTRUCTORS(MediaCodec);
 };
 
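A hedged usage sketch for the new MediaCodec::releaseCrypto() declared above; everything besides releaseCrypto() itself uses pre-existing MediaCodec API, and the surrounding setup (looper, format, surface, crypto) is assumed to exist:

    // Illustrative only: detach the ICrypto from a configured codec without
    // releasing the codec itself.
    sp<MediaCodec> codec = MediaCodec::CreateByType(looper, "video/avc", false /* encoder */);
    codec->configure(format, surface, crypto, 0 /* flags */);
    // ... secure playback ...
    status_t err = codec->releaseCrypto();  // new in this change
    if (err != OK) {
        ALOGW("releaseCrypto failed: %d", err);
    }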
diff --git a/include/media/stagefright/MediaCodecSource.h b/include/media/stagefright/MediaCodecSource.h
index f9a46a9..5e99b78 100644
--- a/include/media/stagefright/MediaCodecSource.h
+++ b/include/media/stagefright/MediaCodecSource.h
@@ -54,7 +54,7 @@
     // MediaSource
     virtual status_t start(MetaData *params = NULL);
     virtual status_t stop();
-    virtual status_t pause();
+    virtual status_t pause(MetaData *params = NULL);
     virtual sp<MetaData> getFormat();
     virtual status_t read(
             MediaBuffer **buffer,
@@ -66,6 +66,12 @@
     // for AHandlerReflector
     void onMessageReceived(const sp<AMessage> &msg);
 
+    // Set the GraphicBufferSource stop time. GraphicBufferSource will stop
+    // after receiving a buffer with a timestamp larger than or equal to
+    // stopTimeUs. All buffers with timestamps at or after stopTimeUs will be
+    // discarded. stopTimeUs uses the SYSTEM_TIME_MONOTONIC time base.
+    status_t setStopStimeUs(int64_t stopTimeUs);
+
 protected:
     virtual ~MediaCodecSource();
 
@@ -79,6 +85,7 @@
         kWhatStop,
         kWhatPause,
         kWhatSetInputBufferTimeOffset,
+        kWhatSetStopTimeOffset,
         kWhatGetFirstSampleSystemTimeUs,
         kWhatStopStalled,
     };
@@ -91,13 +98,23 @@
             uint32_t flags = 0);
 
     status_t onStart(MetaData *params);
-    void onPause();
+
+    // Pause the source at pauseStartTimeUs. For non-surface input,
+    // buffers will be dropped immediately. For surface input, buffers
+    // with timestamps smaller than pauseStartTimeUs will still be encoded.
+    // Buffers with timestamps larger than or equal to pauseStartTimeUs will
+    // be dropped. pauseStartTimeUs uses the SYSTEM_TIME_MONOTONIC time base.
+    void onPause(int64_t pauseStartTimeUs);
+
     status_t init();
     status_t initEncoder();
     void releaseEncoder();
     status_t feedEncoderInputBuffers();
-    void suspend();
-    void resume(int64_t skipFramesBeforeUs = -1ll);
+    // Resume GraphicBufferSource at resumeStartTimeUs. Buffers
+    // from GraphicBufferSource with timestamps larger than or equal to
+    // resumeStartTimeUs will be encoded. resumeStartTimeUs uses the
+    // SYSTEM_TIME_MONOTONIC time base.
+    void resume(int64_t resumeStartTimeUs = -1ll);
     void signalEOS(status_t err = ERROR_END_OF_STREAM);
     bool reachedEOS();
     status_t postSynchronouslyAndReturnError(const sp<AMessage> &msg);
diff --git a/include/media/stagefright/OMXClient.h b/include/media/stagefright/OMXClient.h
index 6b86cbf..315f19b 100644
--- a/include/media/stagefright/OMXClient.h
+++ b/include/media/stagefright/OMXClient.h
@@ -26,7 +26,8 @@
 public:
     OMXClient();
 
-    status_t connect();
+    status_t connect(bool* trebleFlag = nullptr);
+    status_t connectLegacy();
     status_t connectTreble();
     void disconnect();
 
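The reworked OMXClient::connect() above takes an optional out-parameter reporting whether the Treble (HIDL) OMX was chosen; connectLegacy() and connectTreble() force one path explicitly. A minimal usage sketch, assuming OMXClient's existing interface() accessor:

    // Sketch: let connect() pick the OMX flavor and report which one it used.
    OMXClient client;
    bool trebleUsed = false;
    if (client.connect(&trebleUsed) == OK) {
        sp<IOMX> omx = client.interface();
        ALOGI("connected to %s OMX", trebleUsed ? "Treble" : "legacy");
        // ... use omx ...
        client.disconnect();
    }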
diff --git a/include/media/stagefright/SurfaceUtils.h b/include/media/stagefright/SurfaceUtils.h
index 13d580c..a7747c7 100644
--- a/include/media/stagefright/SurfaceUtils.h
+++ b/include/media/stagefright/SurfaceUtils.h
@@ -33,6 +33,8 @@
         ANativeWindow *nativeWindow /* nonnull */,
         int width, int height, int format, int rotation, int usage, bool reconnect);
 status_t pushBlankBuffersToNativeWindow(ANativeWindow *nativeWindow /* nonnull */);
+status_t nativeWindowConnect(ANativeWindow *surface, const char *reason);
+status_t nativeWindowDisconnect(ANativeWindow *surface, const char *reason);
 
 } // namespace android
 
diff --git a/media/audioserver/audioserver.rc b/media/audioserver/audioserver.rc
index 4b0f6a2..9d42bce 100644
--- a/media/audioserver/audioserver.rc
+++ b/media/audioserver/audioserver.rc
@@ -5,4 +5,9 @@
     group audio camera drmrpc inet media mediadrm net_bt net_bt_admin net_bw_acct
     ioprio rt 4
     writepid /dev/cpuset/foreground/tasks /dev/stune/foreground/tasks
-    onrestart restart audio-hal-2-0
\ No newline at end of file
+    onrestart restart audio-hal-2-0
+
+on property:vts.native_server.on=1
+    stop audioserver
+on property:vts.native_server.on=0
+    start audioserver
diff --git a/media/libaaudio/examples/write_sine/src/write_sine.cpp b/media/libaaudio/examples/write_sine/src/write_sine.cpp
index a16dfdc..9e6268a 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine.cpp
@@ -25,6 +25,8 @@
 
 #define SAMPLE_RATE   48000
 #define NUM_SECONDS   10
+#define NANOS_PER_MICROSECOND ((int64_t)1000)
+#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
 
 static const char *getSharingModeText(aaudio_sharing_mode_t mode) {
     const char *modeText = "unknown";
@@ -51,18 +53,18 @@
     int actualSamplesPerFrame = 0;
     const int requestedSampleRate = SAMPLE_RATE;
     int actualSampleRate = 0;
-    const aaudio_audio_format_t requestedDataFormat = AAUDIO_FORMAT_PCM16;
-    aaudio_audio_format_t actualDataFormat = AAUDIO_FORMAT_PCM16;
+    const aaudio_audio_format_t requestedDataFormat = AAUDIO_FORMAT_PCM_I16;
+    aaudio_audio_format_t actualDataFormat = AAUDIO_FORMAT_PCM_I16;
 
     const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
     aaudio_sharing_mode_t actualSharingMode = AAUDIO_SHARING_MODE_SHARED;
 
-    AAudioStreamBuilder aaudioBuilder = AAUDIO_STREAM_BUILDER_NONE;
-    AAudioStream aaudioStream = AAUDIO_STREAM_NONE;
+    AAudioStreamBuilder aaudioBuilder = nullptr;
+    AAudioStream aaudioStream = nullptr;
     aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNINITIALIZED;
-    aaudio_size_frames_t framesPerBurst = 0;
-    aaudio_size_frames_t framesToPlay = 0;
-    aaudio_size_frames_t framesLeft = 0;
+    int32_t framesPerBurst = 0;
+    int32_t framesToPlay = 0;
+    int32_t framesLeft = 0;
     int32_t xRunCount = 0;
     int16_t *data = nullptr;
 
@@ -82,57 +84,42 @@
     }
 
     // Request stream properties.
-    result = AAudioStreamBuilder_setSampleRate(aaudioBuilder, requestedSampleRate);
-    if (result != AAUDIO_OK) {
-        goto finish;
-    }
-    result = AAudioStreamBuilder_setSamplesPerFrame(aaudioBuilder, requestedSamplesPerFrame);
-    if (result != AAUDIO_OK) {
-        goto finish;
-    }
-    result = AAudioStreamBuilder_setFormat(aaudioBuilder, requestedDataFormat);
-    if (result != AAUDIO_OK) {
-        goto finish;
-    }
-    result = AAudioStreamBuilder_setSharingMode(aaudioBuilder, requestedSharingMode);
-    if (result != AAUDIO_OK) {
-        goto finish;
-    }
+    AAudioStreamBuilder_setSampleRate(aaudioBuilder, requestedSampleRate);
+    AAudioStreamBuilder_setSamplesPerFrame(aaudioBuilder, requestedSamplesPerFrame);
+    AAudioStreamBuilder_setFormat(aaudioBuilder, requestedDataFormat);
+    AAudioStreamBuilder_setSharingMode(aaudioBuilder, requestedSharingMode);
+
 
     // Create an AAudioStream using the Builder.
     result = AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream);
-    printf("aaudioStream 0x%08x\n", aaudioStream);
     if (result != AAUDIO_OK) {
         goto finish;
     }
 
-    result = AAudioStream_getState(aaudioStream, &state);
+    state = AAudioStream_getState(aaudioStream);
     printf("after open, state = %s\n", AAudio_convertStreamStateToText(state));
 
     // Check to see what kind of stream we actually got.
-    result = AAudioStream_getSampleRate(aaudioStream, &actualSampleRate);
+    actualSampleRate = AAudioStream_getSampleRate(aaudioStream);
     printf("SampleRate: requested = %d, actual = %d\n", requestedSampleRate, actualSampleRate);
 
     sineOsc1.setup(440.0, actualSampleRate);
     sineOsc2.setup(660.0, actualSampleRate);
 
-    result = AAudioStream_getSamplesPerFrame(aaudioStream, &actualSamplesPerFrame);
+    actualSamplesPerFrame = AAudioStream_getSamplesPerFrame(aaudioStream);
     printf("SamplesPerFrame: requested = %d, actual = %d\n",
             requestedSamplesPerFrame, actualSamplesPerFrame);
 
-    result = AAudioStream_getSharingMode(aaudioStream, &actualSharingMode);
+    actualSharingMode = AAudioStream_getSharingMode(aaudioStream);
     printf("SharingMode: requested = %s, actual = %s\n",
             getSharingModeText(requestedSharingMode),
             getSharingModeText(actualSharingMode));
 
     // This is the number of frames that are read in one chunk by a DMA controller
     // or a DSP or a mixer.
-    result = AAudioStream_getFramesPerBurst(aaudioStream, &framesPerBurst);
+    framesPerBurst = AAudioStream_getFramesPerBurst(aaudioStream);
     printf("DataFormat: original framesPerBurst = %d\n",framesPerBurst);
-    if (result != AAUDIO_OK) {
-        fprintf(stderr, "ERROR - AAudioStream_getFramesPerBurst() returned %d\n", result);
-        goto finish;
-    }
+
     // Some DMA might use very short bursts of 16 frames. We don't need to write such small
     // buffers. But it helps to use a multiple of the burst size for predictable scheduling.
     while (framesPerBurst < 48) {
@@ -140,7 +127,7 @@
     }
     printf("DataFormat: final framesPerBurst = %d\n",framesPerBurst);
 
-    AAudioStream_getFormat(aaudioStream, &actualDataFormat);
+    actualDataFormat = AAudioStream_getFormat(aaudioStream);
     printf("DataFormat: requested = %d, actual = %d\n", requestedDataFormat, actualDataFormat);
     // TODO handle other data formats
 
@@ -160,7 +147,7 @@
         goto finish;
     }
 
-    result = AAudioStream_getState(aaudioStream, &state);
+    state = AAudioStream_getState(aaudioStream);
     printf("after start, state = %s\n", AAudio_convertStreamStateToText(state));
 
     // Play for a while.
@@ -174,7 +161,7 @@
         }
 
         // Write audio data to the stream.
-        aaudio_nanoseconds_t timeoutNanos = 100 * AAUDIO_NANOS_PER_MILLISECOND;
+        int64_t timeoutNanos = 100 * NANOS_PER_MILLISECOND;
         int minFrames = (framesToPlay < framesPerBurst) ? framesToPlay : framesPerBurst;
         int actual = AAudioStream_write(aaudioStream, data, minFrames, timeoutNanos);
         if (actual < 0) {
@@ -187,7 +174,7 @@
         framesLeft -= actual;
     }
 
-    result = AAudioStream_getXRunCount(aaudioStream, &xRunCount);
+    xRunCount = AAudioStream_getXRunCount(aaudioStream);
     printf("AAudioStream_getXRunCount %d\n", xRunCount);
 
 finish:
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp b/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp
index 7cb14f9..cc7ba5a 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp
@@ -26,14 +26,18 @@
 #include <aaudio/AAudio.h>
 #include "SineGenerator.h"
 
-#define NUM_SECONDS   10
+#define NUM_SECONDS           10
+#define NANOS_PER_MICROSECOND ((int64_t)1000)
+#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
+#define MILLIS_PER_SECOND     1000
+#define NANOS_PER_SECOND      (NANOS_PER_MILLISECOND * MILLIS_PER_SECOND)
 
-#define SHARING_MODE  AAUDIO_SHARING_MODE_EXCLUSIVE
-//#define SHARING_MODE  AAUDIO_SHARING_MODE_SHARED
+//#define SHARING_MODE  AAUDIO_SHARING_MODE_EXCLUSIVE
+#define SHARING_MODE  AAUDIO_SHARING_MODE_SHARED
 
 // Prototype for a callback.
 typedef int audio_callback_proc_t(float *outputBuffer,
-                                     aaudio_size_frames_t numFrames,
+                                     int32_t numFrames,
                                      void *userContext);
 
 static void *SimpleAAudioPlayerThreadProc(void *arg);
@@ -75,33 +79,27 @@
         result = AAudio_createStreamBuilder(&mBuilder);
         if (result != AAUDIO_OK) return result;
 
-        result = AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
-        if (result != AAUDIO_OK) goto finish1;
+        AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
 
         // Open an AAudioStream using the Builder.
         result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
         if (result != AAUDIO_OK) goto finish1;
 
         // Check to see what kind of stream we actually got.
-        result = AAudioStream_getSampleRate(mStream, &mFramesPerSecond);
+        mFramesPerSecond = AAudioStream_getSampleRate(mStream);
         printf("open() mFramesPerSecond = %d\n", mFramesPerSecond);
-        if (result != AAUDIO_OK) goto finish2;
 
-        result = AAudioStream_getSamplesPerFrame(mStream, &mSamplesPerFrame);
+        mSamplesPerFrame = AAudioStream_getSamplesPerFrame(mStream);
         printf("open() mSamplesPerFrame = %d\n", mSamplesPerFrame);
-        if (result != AAUDIO_OK) goto finish2;
 
         {
-            aaudio_size_frames_t bufferCapacity;
-            result = AAudioStream_getBufferCapacity(mStream, &bufferCapacity);
-            if (result != AAUDIO_OK) goto finish2;
+            int32_t bufferCapacity = AAudioStream_getBufferCapacityInFrames(mStream);
             printf("open() got bufferCapacity = %d\n", bufferCapacity);
         }
 
         // This is the number of frames that are read in one chunk by a DMA controller
         // or a DSP or a mixer.
-        result = AAudioStream_getFramesPerBurst(mStream, &mFramesPerBurst);
-        if (result != AAUDIO_OK) goto finish2;
+        mFramesPerBurst = AAudioStream_getFramesPerBurst(mStream);
         // Some DMA might use very short bursts. We don't need to write such small
         // buffers. But it helps to use a multiple of the burst size for predictable scheduling.
         while (mFramesPerBurst < 48) {
@@ -109,11 +107,7 @@
         }
         printf("DataFormat: final framesPerBurst = %d\n",mFramesPerBurst);
 
-        result = AAudioStream_getFormat(mStream, &mDataFormat);
-        if (result != AAUDIO_OK) {
-            fprintf(stderr, "ERROR - AAudioStream_getFormat() returned %d\n", result);
-            goto finish2;
-        }
+        mDataFormat = AAudioStream_getFormat(mStream);
 
         // Allocate a buffer for the audio data.
         mOutputBuffer = new float[mFramesPerBurst * mSamplesPerFrame];
@@ -123,7 +117,7 @@
         }
 
         // If needed allocate a buffer for converting float to int16_t.
-        if (mDataFormat == AAUDIO_FORMAT_PCM16) {
+        if (mDataFormat == AAUDIO_FORMAT_PCM_I16) {
             mConversionBuffer = new int16_t[mFramesPerBurst * mSamplesPerFrame];
             if (mConversionBuffer == nullptr) {
                 fprintf(stderr, "ERROR - could not allocate conversion buffer\n");
@@ -132,23 +126,20 @@
         }
         return result;
 
-     finish2:
-        AAudioStream_close(mStream);
-        mStream = AAUDIO_HANDLE_INVALID;
      finish1:
         AAudioStreamBuilder_delete(mBuilder);
-        mBuilder = AAUDIO_HANDLE_INVALID;
+        mBuilder = nullptr;
         return result;
     }
 
     aaudio_result_t close() {
-        if (mStream != AAUDIO_HANDLE_INVALID) {
+        if (mStream != nullptr) {
             stop();
-            printf("call AAudioStream_close(0x%08x)\n", mStream);  fflush(stdout);
+            printf("call AAudioStream_close(%p)\n", mStream);  fflush(stdout);
             AAudioStream_close(mStream);
-            mStream = AAUDIO_HANDLE_INVALID;
+            mStream = nullptr;
             AAudioStreamBuilder_delete(mBuilder);
-            mBuilder = AAUDIO_HANDLE_INVALID;
+            mBuilder = nullptr;
             delete mOutputBuffer;
             mOutputBuffer = nullptr;
             delete mConversionBuffer;
@@ -160,7 +151,7 @@
     // Start a thread that will call the callback proc.
     aaudio_result_t start() {
         mEnabled = true;
-        aaudio_nanoseconds_t nanosPerBurst = mFramesPerBurst * AAUDIO_NANOS_PER_SECOND
+        int64_t nanosPerBurst = mFramesPerBurst * NANOS_PER_SECOND
                                            / mFramesPerSecond;
         return AAudioStream_createThread(mStream, nanosPerBurst,
                                        SimpleAAudioPlayerThreadProc,
@@ -170,7 +161,7 @@
     // Tell the thread to stop.
     aaudio_result_t stop() {
         mEnabled = false;
-        return AAudioStream_joinThread(mStream, nullptr, 2 * AAUDIO_NANOS_PER_SECOND);
+        return AAudioStream_joinThread(mStream, nullptr, 2 * NANOS_PER_SECOND);
     }
 
     aaudio_result_t callbackLoop() {
@@ -186,8 +177,8 @@
 
         // Give up after several burst periods have passed.
         const int burstsPerTimeout = 8;
-        aaudio_nanoseconds_t nanosPerTimeout =
-                        burstsPerTimeout * mFramesPerBurst * AAUDIO_NANOS_PER_SECOND
+        int64_t nanosPerTimeout =
+                        burstsPerTimeout * mFramesPerBurst * NANOS_PER_SECOND
                         / mFramesPerSecond;
 
         while (mEnabled && result >= 0) {
@@ -213,7 +204,7 @@
             }
         }
 
-        result = AAudioStream_getXRunCount(mStream, &xRunCount);
+        xRunCount = AAudioStream_getXRunCount(mStream);
         printf("AAudioStream_getXRunCount %d\n", xRunCount);
 
         result = AAudioStream_requestStop(mStream);
@@ -226,20 +217,20 @@
     }
 
 private:
-    AAudioStreamBuilder   mBuilder = AAUDIO_HANDLE_INVALID;
-    AAudioStream          mStream = AAUDIO_HANDLE_INVALID;
-    float            *  mOutputBuffer = nullptr;
-    int16_t          *  mConversionBuffer = nullptr;
+    AAudioStreamBuilder   mBuilder = nullptr;
+    AAudioStream          mStream = nullptr;
+    float                *mOutputBuffer = nullptr;
+    int16_t              *mConversionBuffer = nullptr;
 
-    audio_callback_proc_t * mCallbackProc = nullptr;
-    void             *  mUserContext = nullptr;
+    audio_callback_proc_t *mCallbackProc = nullptr;
+    void                 *mUserContext = nullptr;
     aaudio_sharing_mode_t mRequestedSharingMode = SHARING_MODE;
-    int32_t             mSamplesPerFrame = 0;
-    int32_t             mFramesPerSecond = 0;
-    aaudio_size_frames_t  mFramesPerBurst = 0;
-    aaudio_audio_format_t mDataFormat = AAUDIO_FORMAT_PCM16;
+    int32_t               mSamplesPerFrame = 0;
+    int32_t               mFramesPerSecond = 0;
+    int32_t               mFramesPerBurst = 0;
+    aaudio_audio_format_t mDataFormat = AAUDIO_FORMAT_PCM_I16;
 
-    volatile bool       mEnabled = false; // used to request that callback exit its loop
+    volatile bool         mEnabled = false; // used to request that callback exit its loop
 };
 
 static void *SimpleAAudioPlayerThreadProc(void *arg) {
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index dad5285..43b5205 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -20,21 +20,21 @@
 #ifndef AAUDIO_AAUDIO_H
 #define AAUDIO_AAUDIO_H
 
+#include <time.h>
 #include "AAudioDefinitions.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-typedef aaudio_handle_t AAudioStream;
-typedef aaudio_handle_t AAudioStreamBuilder;
+typedef struct AAudioStreamStruct * AAudioStream;
+typedef struct AAudioStreamBuilderStruct *  AAudioStreamBuilder;
 
-#define AAUDIO_STREAM_NONE         ((AAudioStream)AAUDIO_HANDLE_INVALID)
-#define AAUDIO_STREAM_BUILDER_NONE ((AAudioStreamBuilder)AAUDIO_HANDLE_INVALID)
+#define AAUDIO_STREAM_NONE         ((AAudioStream)nullptr)
+#define AAUDIO_STREAM_BUILDER_NONE ((AAudioStreamBuilder)nullptr)
 
-/* AAUDIO_API will probably get defined in a Makefile for a specific platform. */
 #ifndef AAUDIO_API
-#define AAUDIO_API /* for exporting symbols */
+#define AAUDIO_API /* export this symbol */
 #endif
 
 // ============================================================
@@ -42,11 +42,6 @@
 // ============================================================
 
 /**
- * @return time in the same clock domain as the timestamps
- */
-AAUDIO_API aaudio_nanoseconds_t AAudio_getNanoseconds(aaudio_clockid_t clockid);
-
-/**
  * The text is the ASCII symbol corresponding to the returnCode,
  * or an English message saying the returnCode is unrecognized.
  * This is intended for developers to use when debugging.
@@ -76,7 +71,7 @@
  * The deviceId is initially unspecified, meaning that the current default device will be used.
  *
  * The default direction is AAUDIO_DIRECTION_OUTPUT.
- * The default sharing mode is AAUDIO_SHARING_MODE_LEGACY.
+ * The default sharing mode is AAUDIO_SHARING_MODE_SHARED.
  * The data format, samplesPerFrames and sampleRate are unspecified and will be
  * chosen by the device when it is opened.
  *
@@ -86,23 +81,15 @@
 
 /**
  * Request an audio device identified device using an ID.
- * The ID is platform specific.
  * On Android, for example, the ID could be obtained from the Java AudioManager.
  *
  * By default, the primary device will be used.
  *
- * @param builder handle provided by AAudio_createStreamBuilder()
- * @param deviceId platform specific identifier or AAUDIO_DEVICE_UNSPECIFIED
- * @return AAUDIO_OK or a negative error.
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param deviceId device identifier or AAUDIO_DEVICE_UNSPECIFIED
  */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setDeviceId(AAudioStreamBuilder builder,
-                                                     aaudio_device_id_t deviceId);
-/**
- * Passes back requested device ID.
- * @return AAUDIO_OK or a negative error.
- */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getDeviceId(AAudioStreamBuilder builder,
-                                                     aaudio_device_id_t *deviceId);
+AAUDIO_API void AAudioStreamBuilder_setDeviceId(AAudioStreamBuilder builder,
+                                                     int32_t deviceId);
 
 /**
  * Request a sample rate in Hz.
@@ -114,19 +101,10 @@
 * But it is traditionally called "sample rate". So we use that term.
  *
  * Default is AAUDIO_UNSPECIFIED.
- *
- * @return AAUDIO_OK or a negative error.
- */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder builder,
-                                                       aaudio_sample_rate_t sampleRate);
 
-/**
- * Returns sample rate in Hertz (samples per second).
- * @return AAUDIO_OK or a negative error.
  */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSampleRate(AAudioStreamBuilder builder,
-                                                       aaudio_sample_rate_t *sampleRate);
-
+AAUDIO_API void AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder builder,
+                                                       int32_t sampleRate);
 
 /**
  * Request a number of samples per frame.
@@ -136,98 +114,48 @@
  * Default is AAUDIO_UNSPECIFIED.
  *
  * Note, this quantity is sometimes referred to as "channel count".
- *
- * @return AAUDIO_OK or a negative error.
  */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder builder,
+AAUDIO_API void AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder builder,
                                                    int32_t samplesPerFrame);
 
 /**
- * Note, this quantity is sometimes referred to as "channel count".
- *
- * @param builder handle provided by AAudio_createStreamBuilder()
- * @param samplesPerFrame pointer to a variable to be set to samplesPerFrame.
- * @return AAUDIO_OK or a negative error.
- */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSamplesPerFrame(AAudioStreamBuilder builder,
-                                                   int32_t *samplesPerFrame);
-
-
-/**
  * Request a sample data format, for example AAUDIO_FORMAT_PCM_I16.
  * The application should query for the actual format after the stream is opened.
- *
- * @return AAUDIO_OK or a negative error.
  */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setFormat(AAudioStreamBuilder builder,
+AAUDIO_API void AAudioStreamBuilder_setFormat(AAudioStreamBuilder builder,
                                                    aaudio_audio_format_t format);
 
 /**
- * @return AAUDIO_OK or a negative error.
- */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getFormat(AAudioStreamBuilder builder,
-                                                   aaudio_audio_format_t *format);
-
-/**
  * Request a mode for sharing the device.
  * The requested sharing mode may not be available.
  * So the application should query for the actual mode after the stream is opened.
  *
- * @param builder handle provided by AAudio_createStreamBuilder()
+ * @param builder reference provided by AAudio_createStreamBuilder()
  * @param sharingMode AAUDIO_SHARING_MODE_LEGACY or AAUDIO_SHARING_MODE_EXCLUSIVE
- * @return AAUDIO_OK or a negative error.
  */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder builder,
+AAUDIO_API void AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder builder,
                                                         aaudio_sharing_mode_t sharingMode);
 
 /**
- * Return requested sharing mode.
- * @return AAUDIO_OK or a negative error
- */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSharingMode(AAudioStreamBuilder builder,
-                                                        aaudio_sharing_mode_t *sharingMode);
-
-/**
  * Request the direction for a stream. The default is AAUDIO_DIRECTION_OUTPUT.
  *
- * @param builder handle provided by AAudio_createStreamBuilder()
+ * @param builder reference provided by AAudio_createStreamBuilder()
  * @param direction AAUDIO_DIRECTION_OUTPUT or AAUDIO_DIRECTION_INPUT
- * @return AAUDIO_OK or a negative error.
  */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setDirection(AAudioStreamBuilder builder,
+AAUDIO_API void AAudioStreamBuilder_setDirection(AAudioStreamBuilder builder,
                                                             aaudio_direction_t direction);
 
 /**
- * @param builder handle provided by AAudio_createStreamBuilder()
- * @param direction pointer to a variable to be set to the currently requested direction.
- * @return AAUDIO_OK or a negative error.
- */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getDirection(AAudioStreamBuilder builder,
-                                                            aaudio_direction_t *direction);
-
-/**
  * Set the requested maximum buffer capacity in frames.
  * The final AAudioStream capacity may differ, but will probably be at least this big.
  *
  * Default is AAUDIO_UNSPECIFIED.
  *
- * @param builder handle provided by AAudio_createStreamBuilder()
+ * @param builder reference provided by AAudio_createStreamBuilder()
  * @param frames the desired buffer capacity in frames or AAUDIO_UNSPECIFIED
- * @return AAUDIO_OK or a negative error.
  */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setBufferCapacity(AAudioStreamBuilder builder,
-                                                                 aaudio_size_frames_t frames);
-
-/**
- * Query the requested maximum buffer capacity in frames that was passed to
- * AAudioStreamBuilder_setBufferCapacity().
- *
- * @param builder handle provided by AAudio_createStreamBuilder()
- * @param frames pointer to variable to receive the requested buffer capacity
- * @return AAUDIO_OK or a negative error.
- */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getBufferCapacity(AAudioStreamBuilder builder,
-                                                                 aaudio_size_frames_t *frames);
+AAUDIO_API void AAudioStreamBuilder_setBufferCapacityInFrames(AAudioStreamBuilder builder,
+                                                                 int32_t frames);
 
 /**
  * Open a stream based on the options in the StreamBuilder.
@@ -235,8 +163,8 @@
  * AAudioStream_close must be called when finished with the stream to recover
  * the memory and to free the associated resources.
  *
- * @param builder handle provided by AAudio_createStreamBuilder()
- * @param stream pointer to a variable to receive the new stream handle
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param stream pointer to a variable to receive the new stream reference
  * @return AAUDIO_OK or a negative error.
  */
 AAUDIO_API aaudio_result_t  AAudioStreamBuilder_openStream(AAudioStreamBuilder builder,
@@ -245,7 +173,7 @@
 /**
  * Delete the resources associated with the StreamBuilder.
  *
- * @param builder handle provided by AAudio_createStreamBuilder()
+ * @param builder reference provided by AAudio_createStreamBuilder()
  * @return AAUDIO_OK or a negative error.
  */
 AAUDIO_API aaudio_result_t  AAudioStreamBuilder_delete(AAudioStreamBuilder builder);
@@ -257,7 +185,7 @@
 /**
  * Free the resources associated with a stream created by AAudioStreamBuilder_openStream()
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return AAUDIO_OK or a negative error.
  */
 AAUDIO_API aaudio_result_t  AAudioStream_close(AAudioStream stream);
@@ -268,7 +196,7 @@
  * Otherwise it will underflow.
  * After this call the state will be in AAUDIO_STREAM_STATE_STARTING or AAUDIO_STREAM_STATE_STARTED.
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return AAUDIO_OK or a negative error.
  */
 AAUDIO_API aaudio_result_t  AAudioStream_requestStart(AAudioStream stream);
@@ -279,7 +207,7 @@
  * Use AAudioStream_Start() to resume playback after a pause.
  * After this call the state will be in AAUDIO_STREAM_STATE_PAUSING or AAUDIO_STREAM_STATE_PAUSED.
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return AAUDIO_OK or a negative error.
  */
 AAUDIO_API aaudio_result_t  AAudioStream_requestPause(AAudioStream stream);
@@ -291,7 +219,7 @@
  * Frame counters are not reset by a flush. They may be advanced.
  * After this call the state will be in AAUDIO_STREAM_STATE_FLUSHING or AAUDIO_STREAM_STATE_FLUSHED.
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return AAUDIO_OK or a negative error.
  */
 AAUDIO_API aaudio_result_t  AAudioStream_requestFlush(AAudioStream stream);
@@ -301,23 +229,29 @@
  * The stream will stop after all of the data currently buffered has been played.
  * After this call the state will be in AAUDIO_STREAM_STATE_STOPPING or AAUDIO_STREAM_STATE_STOPPED.
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return AAUDIO_OK or a negative error.
  */
 AAUDIO_API aaudio_result_t  AAudioStream_requestStop(AAudioStream stream);
 
 /**
- * Query the current state, eg. AAUDIO_STREAM_STATE_PAUSING
+ * Query the current state of the client, e.g. AAUDIO_STREAM_STATE_PAUSING
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * This function will immediately return the state without updating the state.
+ * If you want to update the client state based on the server state then
+ * call AAudioStream_waitForStateChange() with currentState
+ * set to AAUDIO_STREAM_STATE_UNKNOWN and a zero timeout.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @param state pointer to a variable that will be set to the current state
- * @return AAUDIO_OK or a negative error.
  */
-AAUDIO_API aaudio_result_t AAudioStream_getState(AAudioStream stream, aaudio_stream_state_t *state);
+AAUDIO_API aaudio_stream_state_t AAudioStream_getState(AAudioStream stream);
 
 /**
  * Wait until the current state no longer matches the input state.
  *
+ * This will update the current client state.
+ *
  * <pre><code>
  * aaudio_stream_state_t currentState;
  * aaudio_result_t result = AAudioStream_getState(stream, &currentState);
@@ -327,7 +261,7 @@
  * }
  * </code></pre>
  *
- * @param stream A handle provided by AAudioStreamBuilder_openStream()
+ * @param stream A reference provided by AAudioStreamBuilder_openStream()
  * @param inputState The state we want to avoid.
  * @param nextState Pointer to a variable that will be set to the new state.
  * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
@@ -336,7 +270,7 @@
 AAUDIO_API aaudio_result_t AAudioStream_waitForStateChange(AAudioStream stream,
                                             aaudio_stream_state_t inputState,
                                             aaudio_stream_state_t *nextState,
-                                            aaudio_nanoseconds_t timeoutNanoseconds);
+                                            int64_t timeoutNanoseconds);
 
 // ============================================================
 // Stream I/O
@@ -358,12 +292,12 @@
  * @param buffer The address of the first sample.
  * @param numFrames Number of frames to read. Only complete frames will be written.
  * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
- * @return The number of frames actually written or a negative error.
+ * @return The number of frames actually read or a negative error.
  */
 AAUDIO_API aaudio_result_t AAudioStream_read(AAudioStream stream,
                                void *buffer,
-                               aaudio_size_frames_t numFrames,
-                               aaudio_nanoseconds_t timeoutNanoseconds);
+                               int32_t numFrames,
+                               int64_t timeoutNanoseconds);
 
 /**
  * Write data to the stream.
@@ -385,8 +319,8 @@
  */
 AAUDIO_API aaudio_result_t AAudioStream_write(AAudioStream stream,
                                const void *buffer,
-                               aaudio_size_frames_t numFrames,
-                               aaudio_nanoseconds_t timeoutNanoseconds);
+                               int32_t numFrames,
+                               int64_t timeoutNanoseconds);
 
 
 // ============================================================
@@ -401,6 +335,11 @@
  *
  * Only one thread may be associated with a stream.
  *
+ * If you are using multiple streams then we recommend that you only do
+ * blocking reads or writes on one stream. You can do non-blocking I/O on the
+ * other streams by setting the timeout to zero.
+ * This thread should be created for the stream that you will block on.
+ *
  * Note that this API is in flux.
  *
  * @param stream A stream created using AAudioStreamBuilder_openStream().
@@ -410,13 +349,12 @@
  * @return AAUDIO_OK or a negative error.
  */
 AAUDIO_API aaudio_result_t AAudioStream_createThread(AAudioStream stream,
-                                     aaudio_nanoseconds_t periodNanoseconds,
+                                     int64_t periodNanoseconds,
                                      aaudio_audio_thread_proc_t *threadProc,
                                      void *arg);
 
 /**
  * Wait until the thread exits or an error occurs.
- * The thread handle will be deleted.
  *
  * @param stream A stream created using AAudioStreamBuilder_openStream().
  * @param returnArg a pointer to a variable to receive the return value
@@ -425,7 +363,7 @@
  */
 AAUDIO_API aaudio_result_t AAudioStream_joinThread(AAudioStream stream,
                                    void **returnArg,
-                                   aaudio_nanoseconds_t timeoutNanoseconds);
+                                   int64_t timeoutNanoseconds);
 
 // ============================================================
 // Stream - queries
@@ -435,49 +373,51 @@
 /**
  * This can be used to adjust the latency of the buffer by changing
  * the threshold where blocking will occur.
- * By combining this with AAudioStream_getUnderrunCount(), the latency can be tuned
+ * By combining this with AAudioStream_getXRunCount(), the latency can be tuned
  * at run-time for each device.
  *
- * This cannot be set higher than AAudioStream_getBufferCapacity().
+ * This cannot be set higher than AAudioStream_getBufferCapacityInFrames().
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * Note that you will probably not get the exact size you request.
+ * Call AAudioStream_getBufferSizeInFrames() to see what the actual final size is.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @param requestedFrames requested number of frames that can be filled without blocking
- * @param actualFrames receives final number of frames
- * @return AAUDIO_OK or a negative error
+ * @return actual buffer size in frames or a negative error
  */
-AAUDIO_API aaudio_result_t AAudioStream_setBufferSize(AAudioStream stream,
-                                                      aaudio_size_frames_t requestedFrames,
-                                                      aaudio_size_frames_t *actualFrames);
+AAUDIO_API aaudio_result_t AAudioStream_setBufferSizeInFrames(AAudioStream stream,
+                                                      int32_t requestedFrames);
 
 /**
  * Query the maximum number of frames that can be filled without blocking.
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
- * @param frames pointer to variable to receive the buffer size
- * @return AAUDIO_OK or a negative error.
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return buffer size in frames.
  */
-AAUDIO_API aaudio_result_t AAudioStream_getBufferSize(AAudioStream stream,
-                                                      aaudio_size_frames_t *frames);
+AAUDIO_API int32_t AAudioStream_getBufferSizeInFrames(AAudioStream stream);
 
 /**
- * Query the number of frames that are read or written by the endpoint at one time.
+ * Query the number of frames that the application should read or write at
+ * one time for optimal performance. It is OK if an application writes
+ * a different number of frames. But the buffer size may need to be larger
+ * in order to avoid underruns or overruns.
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
- * @param frames pointer to variable to receive the burst size
- * @return AAUDIO_OK or a negative error.
+ * Note that this may or may not match the actual device burst size.
+ * For some endpoints, the burst size can vary dynamically.
+ * But these tend to be devices with high latency.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return burst size
  */
-AAUDIO_API aaudio_result_t AAudioStream_getFramesPerBurst(AAudioStream stream,
-                                                          aaudio_size_frames_t *frames);
+AAUDIO_API int32_t AAudioStream_getFramesPerBurst(AAudioStream stream);
 
 /**
  * Query maximum buffer capacity in frames.
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
- * @param frames pointer to variable to receive the buffer capacity
- * @return AAUDIO_OK or a negative error.
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return  the buffer capacity in frames
  */
-AAUDIO_API aaudio_result_t AAudioStream_getBufferCapacity(AAudioStream stream,
-                                                          aaudio_size_frames_t *frames);
+AAUDIO_API int32_t AAudioStream_getBufferCapacityInFrames(AAudioStream stream);
 
 /**
  * An XRun is an Underrun or an Overrun.
@@ -488,90 +428,75 @@
  *
  * An underrun or overrun can cause an audible "pop" or "glitch".
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
- * @param xRunCount pointer to variable to receive the underrun or overrun count
- * @return AAUDIO_OK or a negative error.
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return the underrun or overrun count
  */
-AAUDIO_API aaudio_result_t AAudioStream_getXRunCount(AAudioStream stream, int32_t *xRunCount);
+AAUDIO_API int32_t AAudioStream_getXRunCount(AAudioStream stream);
 
 /**
- * @param stream handle provided by AAudioStreamBuilder_openStream()
- * @param sampleRate pointer to variable to receive the actual sample rate
- * @return AAUDIO_OK or a negative error.
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return actual sample rate
  */
-AAUDIO_API aaudio_result_t AAudioStream_getSampleRate(AAudioStream stream,
-                                                      aaudio_sample_rate_t *sampleRate);
+AAUDIO_API int32_t AAudioStream_getSampleRate(AAudioStream stream);
 
 /**
  * The samplesPerFrame is also known as channelCount.
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
- * @param samplesPerFrame pointer to variable to receive the actual samples per frame
- * @return AAUDIO_OK or a negative error.
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return actual samples per frame
  */
-AAUDIO_API aaudio_result_t AAudioStream_getSamplesPerFrame(AAudioStream stream,
-                                                           int32_t *samplesPerFrame);
+AAUDIO_API int32_t AAudioStream_getSamplesPerFrame(AAudioStream stream);
 
 /**
- * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @param deviceId pointer to variable to receive the actual device ID
  * @return AAUDIO_OK or a negative error.
  */
-AAUDIO_API aaudio_result_t AAudioStream_getDeviceId(AAudioStream stream,
-                                                    aaudio_device_id_t *deviceId);
+AAUDIO_API int32_t AAudioStream_getDeviceId(AAudioStream stream,
+                                                    int32_t *deviceId);
 
 /**
- * @param stream handle provided by AAudioStreamBuilder_openStream()
- * @param format pointer to variable to receive the actual data format
- * @return AAUDIO_OK or a negative error.
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return actual data format
  */
-AAUDIO_API aaudio_result_t AAudioStream_getFormat(AAudioStream stream,
-                                                  aaudio_audio_format_t *format);
+AAUDIO_API aaudio_audio_format_t AAudioStream_getFormat(AAudioStream stream);
 
 /**
  * Provide actual sharing mode.
- * @param stream handle provided by AAudioStreamBuilder_openStream()
- * @param sharingMode pointer to variable to receive the actual sharing mode
- * @return AAUDIO_OK or a negative error.
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return actual sharing mode
  */
-AAUDIO_API aaudio_result_t AAudioStream_getSharingMode(AAudioStream stream,
-                                        aaudio_sharing_mode_t *sharingMode);
+AAUDIO_API aaudio_sharing_mode_t AAudioStream_getSharingMode(AAudioStream stream);
 
 /**
- * @param stream handle provided by AAudioStreamBuilder_openStream()
- * @param direction pointer to a variable to be set to the current direction.
- * @return AAUDIO_OK or a negative error.
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return direction
  */
-AAUDIO_API aaudio_result_t AAudioStream_getDirection(AAudioStream stream,
-                                                     aaudio_direction_t *direction);
+AAUDIO_API aaudio_direction_t AAudioStream_getDirection(AAudioStream stream);
 
 /**
  * Passes back the number of frames that have been written since the stream was created.
  * For an output stream, this will be advanced by the application calling write().
- * For an input stream, this will be advanced by the device or service.
+ * For an input stream, this will be advanced by the endpoint.
  *
  * The frame position is monotonically increasing.
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
- * @param frames pointer to variable to receive the frames written
- * @return AAUDIO_OK or a negative error.
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return frames written
  */
-AAUDIO_API aaudio_result_t AAudioStream_getFramesWritten(AAudioStream stream,
-                                                   aaudio_position_frames_t *frames);
+AAUDIO_API int64_t AAudioStream_getFramesWritten(AAudioStream stream);
 
 /**
  * Passes back the number of frames that have been read since the stream was created.
- * For an output stream, this will be advanced by the device or service.
+ * For an output stream, this will be advanced by the endpoint.
  * For an input stream, this will be advanced by the application calling read().
  *
  * The frame position is monotonically increasing.
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
- * @param frames pointer to variable to receive the frames written
- * @return AAUDIO_OK or a negative error.
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return frames read
  */
-AAUDIO_API aaudio_result_t AAudioStream_getFramesRead(AAudioStream stream,
-                                                      aaudio_position_frames_t *frames);
+AAUDIO_API int64_t AAudioStream_getFramesRead(AAudioStream stream);
 
 /**
  * Passes back the time at which a particular frame was presented.
@@ -589,16 +514,16 @@
  *
  * The position and time passed back are monotonically increasing.
  *
- * @param stream A handle provided by AAudioStreamBuilder_openStream()
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
- * @param clockid AAUDIO_CLOCK_MONOTONIC or AAUDIO_CLOCK_BOOTTIME
+ * @param clockid CLOCK_MONOTONIC or CLOCK_BOOTTIME
  * @param framePosition pointer to a variable to receive the position
  * @param timeNanoseconds pointer to a variable to receive the time
  * @return AAUDIO_OK or a negative error
  */
 AAUDIO_API aaudio_result_t AAudioStream_getTimestamp(AAudioStream stream,
-                                      aaudio_clockid_t clockid,
-                                      aaudio_position_frames_t *framePosition,
-                                      aaudio_nanoseconds_t *timeNanoseconds);
+                                      clockid_t clockid,
+                                      int64_t *framePosition,
+                                      int64_t *timeNanoseconds);
 
 #ifdef __cplusplus
 }
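
Taken together, the AAudio.h changes above move every simple query from the handle-plus-out-parameter style to plain value returns. A minimal usage sketch, assuming the public header is <aaudio/AAudio.h> and the open succeeds (error handling abbreviated):

    #include <stdio.h>
    #include <time.h>                 // CLOCK_MONOTONIC for the timestamp query
    #include <aaudio/AAudio.h>        // assumed header name for this API

    static aaudio_result_t printStreamInfo(void) {
        AAudioStreamBuilder builder;
        AAudioStream stream;
        aaudio_result_t result = AAudio_createStreamBuilder(&builder);
        if (result != AAUDIO_OK) return result;
        AAudioStreamBuilder_setSampleRate(builder, 48000);    // setters now return void
        result = AAudioStreamBuilder_openStream(builder, &stream);
        if (result != AAUDIO_OK) return result;

        // Queries now return the value directly instead of AAUDIO_OK plus an out-param.
        int32_t sampleRate     = AAudioStream_getSampleRate(stream);
        int32_t framesPerBurst = AAudioStream_getFramesPerBurst(stream);
        int64_t framesWritten  = AAudioStream_getFramesWritten(stream);
        printf("rate=%d burst=%d written=%lld\n",
               sampleRate, framesPerBurst, (long long) framesWritten);

        // getTimestamp() keeps the out-parameter form but now uses clockid_t and int64_t.
        int64_t framePosition = 0;
        int64_t timeNanos = 0;
        result = AAudioStream_getTimestamp(stream, CLOCK_MONOTONIC,
                                           &framePosition, &timeNanos);

        AAudioStream_close(stream);
        AAudioStreamBuilder_delete(builder);
        return result;
    }
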
diff --git a/media/libaaudio/include/aaudio/AAudioDefinitions.h b/media/libaaudio/include/aaudio/AAudioDefinitions.h
index 5b72b94..5b7b819 100644
--- a/media/libaaudio/include/aaudio/AAudioDefinitions.h
+++ b/media/libaaudio/include/aaudio/AAudioDefinitions.h
@@ -23,25 +23,7 @@
 extern "C" {
 #endif
 
-typedef int32_t  aaudio_handle_t; // negative handles are error codes
 typedef int32_t  aaudio_result_t;
-/**
- * A platform specific identifier for a device.
- */
-typedef int32_t  aaudio_device_id_t;
-typedef int32_t  aaudio_sample_rate_t;
-/** This is used for small quantities such as the number of frames in a buffer. */
-typedef int32_t  aaudio_size_frames_t;
-/** This is used for small quantities such as the number of bytes in a frame. */
-typedef int32_t  aaudio_size_bytes_t;
-/**
- * This is used for large quantities, such as the number of frames that have
- * been played since a stream was started.
- * At 48000 Hz, a 32-bit integer would wrap around in just over 12 hours.
- */
-typedef int64_t  aaudio_position_frames_t;
-
-typedef int64_t  aaudio_nanoseconds_t;
 
 /**
  * This is used to represent a value that has not been specified.
@@ -50,18 +32,11 @@
  * and would accept whatever it was given.
  */
 #define AAUDIO_UNSPECIFIED           0
-#define AAUDIO_DEVICE_UNSPECIFIED    ((aaudio_device_id_t) -1)
-#define AAUDIO_NANOS_PER_MICROSECOND ((int64_t)1000)
-#define AAUDIO_NANOS_PER_MILLISECOND (AAUDIO_NANOS_PER_MICROSECOND * 1000)
-#define AAUDIO_MILLIS_PER_SECOND     1000
-#define AAUDIO_NANOS_PER_SECOND      (AAUDIO_NANOS_PER_MILLISECOND * AAUDIO_MILLIS_PER_SECOND)
-
-#define AAUDIO_HANDLE_INVALID     ((aaudio_handle_t)-1)
+#define AAUDIO_DEVICE_UNSPECIFIED    ((int32_t) -1)
 
 enum aaudio_direction_t {
     AAUDIO_DIRECTION_OUTPUT,
-    AAUDIO_DIRECTION_INPUT,
-    AAUDIO_DIRECTION_COUNT // This should always be last.
+    AAUDIO_DIRECTION_INPUT
 };
 
 enum aaudio_audio_format_t {
@@ -73,11 +48,6 @@
     AAUDIO_FORMAT_PCM_I32
 };
 
-// TODO These are deprecated. Remove these aliases once all references are replaced.
-#define AAUDIO_FORMAT_PCM16    AAUDIO_FORMAT_PCM_I16
-#define AAUDIO_FORMAT_PCM824   AAUDIO_FORMAT_PCM_I8_24
-#define AAUDIO_FORMAT_PCM32    AAUDIO_FORMAT_PCM_I32
-
 enum {
     AAUDIO_OK,
     AAUDIO_ERROR_BASE = -900, // TODO review
@@ -102,15 +72,10 @@
     AAUDIO_ERROR_NO_SERVICE
 };
 
-typedef enum {
-    AAUDIO_CLOCK_MONOTONIC, // Clock since booted, pauses when CPU is sleeping.
-    AAUDIO_CLOCK_BOOTTIME,  // Clock since booted, runs all the time.
-    AAUDIO_CLOCK_COUNT // This should always be last.
-} aaudio_clockid_t;
-
 typedef enum
 {
     AAUDIO_STREAM_STATE_UNINITIALIZED = 0,
+    AAUDIO_STREAM_STATE_UNKNOWN,
     AAUDIO_STREAM_STATE_OPEN,
     AAUDIO_STREAM_STATE_STARTING,
     AAUDIO_STREAM_STATE_STARTED,
@@ -135,9 +100,7 @@
      * Multiple applications will be mixed by the AAudio Server.
      * This will have higher latency than the EXCLUSIVE mode.
      */
-    AAUDIO_SHARING_MODE_SHARED,
-
-    AAUDIO_SHARING_MODE_COUNT // This should always be last.
+    AAUDIO_SHARING_MODE_SHARED
 } aaudio_sharing_mode_t;
 
 #ifdef __cplusplus
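
The removed aaudio_position_frames_t comment carried the key sizing argument: a 32-bit cumulative frame counter overflows in roughly half a day at 48 kHz, which is why positions and timestamps are now plain int64_t. A quick arithmetic check (standalone, not part of the library):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        const int64_t sampleRate = 48000;
        const int64_t maxInt32   = INT32_MAX;         // 2147483647 frames
        int64_t seconds = maxInt32 / sampleRate;      // ~44739 s
        printf("32-bit frame counter wraps after %lld s (~%.1f h)\n",
               (long long) seconds, seconds / 3600.0);
        return 0;   // with int64_t the wrap point moves out to roughly 6 million years
    }
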
diff --git a/media/libaaudio/libaaudio.map.txt b/media/libaaudio/libaaudio.map.txt
index ecae991..a9e9109 100644
--- a/media/libaaudio/libaaudio.map.txt
+++ b/media/libaaudio/libaaudio.map.txt
@@ -1,20 +1,15 @@
 LIBAAUDIO {
   global:
-    AAudio_getNanoseconds;
     AAudio_convertResultToText;
     AAudio_convertStreamStateToText;
     AAudio_createStreamBuilder;
     AAudioStreamBuilder_setDeviceId;
     AAudioStreamBuilder_setSampleRate;
-    AAudioStreamBuilder_getSampleRate;
     AAudioStreamBuilder_setSamplesPerFrame;
-    AAudioStreamBuilder_getSamplesPerFrame;
     AAudioStreamBuilder_setFormat;
-    AAudioStreamBuilder_getFormat;
     AAudioStreamBuilder_setSharingMode;
-    AAudioStreamBuilder_getSharingMode;
     AAudioStreamBuilder_setDirection;
-    AAudioStreamBuilder_getDirection;
+    AAudioStreamBuilder_setBufferCapacityInFrames;
     AAudioStreamBuilder_openStream;
     AAudioStreamBuilder_delete;
     AAudioStream_close;
@@ -28,13 +23,14 @@
     AAudioStream_write;
     AAudioStream_createThread;
     AAudioStream_joinThread;
-    AAudioStream_setBufferSize;
-    AAudioStream_getBufferSize;
+    AAudioStream_setBufferSizeInFrames;
+    AAudioStream_getBufferSizeInFrames;
     AAudioStream_getFramesPerBurst;
-    AAudioStream_getBufferCapacity;
+    AAudioStream_getBufferCapacityInFrames;
     AAudioStream_getXRunCount;
     AAudioStream_getSampleRate;
     AAudioStream_getSamplesPerFrame;
+    AAudioStream_getDeviceId;
     AAudioStream_getFormat;
     AAudioStream_getSharingMode;
     AAudioStream_getDirection;
diff --git a/media/libaaudio/scripts/convert_typedefs_int32.sh b/media/libaaudio/scripts/convert_typedefs_int32.sh
new file mode 100755
index 0000000..7bdbe3a
--- /dev/null
+++ b/media/libaaudio/scripts/convert_typedefs_int32.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+echo "Use SED to convert typedefs in AAudio API"
+
+echo "Top is ${ANDROID_BUILD_TOP}"
+LIBAAUDIO_DIR=${ANDROID_BUILD_TOP}/frameworks/av/media/libaaudio
+echo "LIBAAUDIO_DIR is ${LIBAAUDIO_DIR}"
+OBOESERVICE_DIR=${ANDROID_BUILD_TOP}/frameworks/av/services/oboeservice
+echo "OBOESERVICE_DIR is ${OBOESERVICE_DIR}"
+OBOETEST_DIR=${ANDROID_BUILD_TOP}/cts/tests/tests/nativemedia/aaudio/src/
+echo "OBOETEST_DIR is ${OBOETEST_DIR}"
+
+function convertPathPattern {
+    path=$1
+    pattern=$2
+    find $path -type f  -name $pattern -exec sed -i -f ${LIBAAUDIO_DIR}/scripts/typedefs_to_int32.sed {} \;
+}
+
+function convertPath {
+    path=$1
+    convertPathPattern $1 '*.cpp'
+    convertPathPattern $1 '*.h'
+}
+
+convertPath ${LIBAAUDIO_DIR}
+convertPath ${OBOESERVICE_DIR}
+convertPathPattern ${OBOETEST_DIR} test_aaudio.cpp
+
diff --git a/media/libaaudio/scripts/revert_all_aaudio.sh b/media/libaaudio/scripts/revert_all_aaudio.sh
index de3fa7a..19c7f81 100755
--- a/media/libaaudio/scripts/revert_all_aaudio.sh
+++ b/media/libaaudio/scripts/revert_all_aaudio.sh
@@ -1,27 +1,18 @@
 #!/bin/bash
 
-echo "Revert Oboe names to AAudio names"
+echo "Revert typedefs"
 
 echo "Top is ${ANDROID_BUILD_TOP}"
-LIBOBOE_DIR=${ANDROID_BUILD_TOP}/frameworks/av/media/liboboe
-echo "LIBOBOE_DIR is ${LIBOBOE_DIR}"
+LIBAAUDIO_DIR=${ANDROID_BUILD_TOP}/frameworks/av/media/libaaudio
+echo "LIBAAUDIO_DIR is ${LIBAAUDIO_DIR}"
 OBOESERVICE_DIR=${ANDROID_BUILD_TOP}/frameworks/av/services/oboeservice
 echo "OBOESERVICE_DIR is ${OBOESERVICE_DIR}"
 OBOETEST_DIR=${ANDROID_BUILD_TOP}/cts/tests/tests/nativemedia/aaudio/src/
 echo "OBOETEST_DIR is ${OBOETEST_DIR}"
 
-git checkout -- ${LIBOBOE_DIR}/examples
-git checkout -- ${LIBOBOE_DIR}/include
-git checkout -- ${LIBOBOE_DIR}/src
-git checkout -- ${LIBOBOE_DIR}/tests
-git checkout -- ${LIBOBOE_DIR}/Android.bp
-git checkout -- ${LIBOBOE_DIR}/README.md
-git checkout -- ${LIBOBOE_DIR}/liboboe.map.txt
+git checkout -- ${LIBAAUDIO_DIR}/examples
+git checkout -- ${LIBAAUDIO_DIR}/include
+git checkout -- ${LIBAAUDIO_DIR}/src
+git checkout -- ${LIBAAUDIO_DIR}/tests
 git checkout -- ${OBOESERVICE_DIR}
-git checkout -- ${OBOETEST_DIR}
 
-rm -rf ${LIBOBOE_DIR}/include/aaudio
-
-find . -name "*aaudio*.cpp" -print -delete
-find . -name "*AAudio*.cpp" -print -delete
-find . -name "*AAudio*.h"   -print -delete
diff --git a/media/libaaudio/scripts/typedefs_to_int32.sed b/media/libaaudio/scripts/typedefs_to_int32.sed
new file mode 100644
index 0000000..392c9a0
--- /dev/null
+++ b/media/libaaudio/scripts/typedefs_to_int32.sed
@@ -0,0 +1,7 @@
+s/aaudio_device_id_t/int32_t/g
+s/aaudio_sample_rate_t/int32_t/g
+s/aaudio_size_frames_t/int32_t/g
+s/aaudio_size_bytes_t/int32_t/g
+
+s/aaudio_position_frames_t/int64_t/g
+s/aaudio_nanoseconds_t/int64_t/g
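
These rules are purely textual, so the effect on a declaration is easy to picture. The prototypes below are hypothetical, shown only to illustrate the before/after of running the sed script:

    // Before running typedefs_to_int32.sed (hypothetical prototype):
    aaudio_result_t getTimestamp(aaudio_position_frames_t *framePosition,
                                 aaudio_nanoseconds_t     *timeNanoseconds);

    // After the script has rewritten the typedefs:
    aaudio_result_t getTimestamp(int64_t *framePosition,
                                 int64_t *timeNanoseconds);
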
diff --git a/media/libaaudio/src/binding/AAudioServiceDefinitions.h b/media/libaaudio/src/binding/AAudioServiceDefinitions.h
index ca637ef..b58d170 100644
--- a/media/libaaudio/src/binding/AAudioServiceDefinitions.h
+++ b/media/libaaudio/src/binding/AAudioServiceDefinitions.h
@@ -44,6 +44,10 @@
 
 namespace aaudio {
 
+typedef int32_t  aaudio_handle_t;
+
+#define AAUDIO_HANDLE_INVALID  ((aaudio_handle_t) -1)
+
 enum aaudio_commands_t {
     OPEN_STREAM = IBinder::FIRST_CALL_TRANSACTION,
     CLOSE_STREAM,
@@ -57,9 +61,9 @@
 
 // TODO Expand this to include all the open parameters.
 typedef struct AAudioServiceStreamInfo_s {
-    int32_t             deviceId;
-    int32_t             samplesPerFrame;  // number of channels
-    aaudio_sample_rate_t  sampleRate;
+    int32_t               deviceId;
+    int32_t               samplesPerFrame;  // number of channels
+    int32_t               sampleRate;
     aaudio_audio_format_t audioFormat;
 } AAudioServiceStreamInfo;
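
With aaudio_handle_t gone from the public headers, it survives here as a service-internal type; negative values still double as error codes, with AAUDIO_HANDLE_INVALID as the sentinel. A small validity-check sketch (the helper itself is hypothetical, not part of the patch):

    // Service-side handles are plain int32_t values; anything negative is
    // either AAUDIO_HANDLE_INVALID (-1) or an error code.
    static bool isValidHandle(aaudio::aaudio_handle_t handle) {
        return handle >= 0;
    }
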
 
diff --git a/media/libaaudio/src/binding/AAudioServiceMessage.h b/media/libaaudio/src/binding/AAudioServiceMessage.h
index 16cb5eb..cc77d59 100644
--- a/media/libaaudio/src/binding/AAudioServiceMessage.h
+++ b/media/libaaudio/src/binding/AAudioServiceMessage.h
@@ -23,12 +23,12 @@
 
 namespace aaudio {
 
-// TODO move this an "include" folder for the service.
+// TODO move this to an "include" folder for the service.
 
 struct AAudioMessageTimestamp {
-    aaudio_position_frames_t position;
-    int64_t                deviceOffset; // add to client position to get device position
-    aaudio_nanoseconds_t     timestamp;
+    int64_t    position;
+    int64_t    deviceOffset; // add to client position to get device position
+    int64_t    timestamp;
 };
 
 typedef enum aaudio_service_event_e : uint32_t {
@@ -41,8 +41,8 @@
 
 struct AAudioMessageEvent {
     aaudio_service_event_t event;
-    int32_t data1;
-    int64_t data2;
+    int32_t                data1;
+    int64_t                data2;
 };
 
 typedef struct AAudioServiceMessage_s {
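
The retyped timestamp message keeps the deviceOffset convention from the comment above: the offset is added to a client-side frame position to obtain the device-side position. With made-up numbers:

    // Illustrative arithmetic only; the values are invented.
    int64_t clientPosition = 4800;                            // frames counted by the client
    int64_t deviceOffset   = 256;                             // from AAudioMessageTimestamp::deviceOffset
    int64_t devicePosition = clientPosition + deviceOffset;   // 5056 frames at the device
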
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.h b/media/libaaudio/src/binding/AAudioStreamConfiguration.h
index efcdae8..57b1c59 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.h
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.h
@@ -34,19 +34,19 @@
     AAudioStreamConfiguration();
     virtual ~AAudioStreamConfiguration();
 
-    aaudio_device_id_t getDeviceId() const {
+    int32_t getDeviceId() const {
         return mDeviceId;
     }
 
-    void setDeviceId(aaudio_device_id_t deviceId) {
+    void setDeviceId(int32_t deviceId) {
         mDeviceId = deviceId;
     }
 
-    aaudio_sample_rate_t getSampleRate() const {
+    int32_t getSampleRate() const {
         return mSampleRate;
     }
 
-    void setSampleRate(aaudio_sample_rate_t sampleRate) {
+    void setSampleRate(int32_t sampleRate) {
         mSampleRate = sampleRate;
     }
 
@@ -66,11 +66,11 @@
         mAudioFormat = audioFormat;
     }
 
-    aaudio_size_frames_t getBufferCapacity() const {
+    int32_t getBufferCapacity() const {
         return mBufferCapacity;
     }
 
-    void setBufferCapacity(aaudio_size_frames_t frames) {
+    void setBufferCapacity(int32_t frames) {
         mBufferCapacity = frames;
     }
 
@@ -83,11 +83,11 @@
     void dump();
 
 protected:
-    aaudio_device_id_t    mDeviceId        = AAUDIO_DEVICE_UNSPECIFIED;
-    aaudio_sample_rate_t  mSampleRate      = AAUDIO_UNSPECIFIED;
+    int32_t               mDeviceId        = AAUDIO_DEVICE_UNSPECIFIED;
+    int32_t               mSampleRate      = AAUDIO_UNSPECIFIED;
     int32_t               mSamplesPerFrame = AAUDIO_UNSPECIFIED;
     aaudio_audio_format_t mAudioFormat     = AAUDIO_FORMAT_UNSPECIFIED;
-    aaudio_size_frames_t  mBufferCapacity  = AAUDIO_UNSPECIFIED;
+    int32_t               mBufferCapacity  = AAUDIO_UNSPECIFIED;
 };
 
 } /* namespace aaudio */
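
A short sketch of how the retyped configuration object is populated and read back, based only on the accessors declared above (the values are arbitrary):

    aaudio::AAudioStreamConfiguration config;        // members default to the *_UNSPECIFIED values
    config.setDeviceId(AAUDIO_DEVICE_UNSPECIFIED);   // now a plain int32_t, still -1
    config.setSampleRate(48000);
    config.setBufferCapacity(4 * 64);                // frames, also int32_t now
    int32_t sampleRate = config.getSampleRate();     // 48000
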
diff --git a/media/libaaudio/src/binding/IAAudioService.cpp b/media/libaaudio/src/binding/IAAudioService.cpp
index 899ebc0..c21033e 100644
--- a/media/libaaudio/src/binding/IAAudioService.cpp
+++ b/media/libaaudio/src/binding/IAAudioService.cpp
@@ -18,12 +18,15 @@
 
 #include "binding/AudioEndpointParcelable.h"
 #include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioServiceDefinitions.h"
 #include "binding/AAudioStreamConfiguration.h"
 #include "binding/IAAudioService.h"
 #include "utility/AAudioUtilities.h"
 
 namespace android {
 
+using aaudio::aaudio_handle_t;
+
 /**
  * This is used by the AAudio Client to talk to the AAudio Service.
  *
@@ -137,7 +140,7 @@
     }
 
     virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle, pid_t clientThreadId,
-                                              aaudio_nanoseconds_t periodNanoseconds)
+                                              int64_t periodNanoseconds)
     override {
         Parcel data, reply;
         // send command
@@ -182,11 +185,11 @@
 
 status_t BnAAudioService::onTransact(uint32_t code, const Parcel& data,
                                         Parcel* reply, uint32_t flags) {
-    AAudioStream stream;
+    aaudio_handle_t stream;
     aaudio::AAudioStreamRequest request;
     aaudio::AAudioStreamConfiguration configuration;
     pid_t pid;
-    aaudio_nanoseconds_t nanoseconds;
+    int64_t nanoseconds;
     aaudio_result_t result;
     ALOGV("BnAAudioService::onTransact(%i) %i", code, flags);
     data.checkInterface(this);
diff --git a/media/libaaudio/src/binding/IAAudioService.h b/media/libaaudio/src/binding/IAAudioService.h
index f3b297e..53c3b45 100644
--- a/media/libaaudio/src/binding/IAAudioService.h
+++ b/media/libaaudio/src/binding/IAAudioService.h
@@ -29,7 +29,6 @@
 #include "binding/AAudioStreamRequest.h"
 #include "binding/AAudioStreamConfiguration.h"
 
-
 namespace android {
 
 // Interface (our AIDL) - Shared by server and client
@@ -43,39 +42,39 @@
      * @param configuration contains information about the created stream
      * @return handle to the stream or a negative error
      */
-    virtual aaudio_handle_t openStream(aaudio::AAudioStreamRequest &request,
+    virtual aaudio::aaudio_handle_t openStream(aaudio::AAudioStreamRequest &request,
                                      aaudio::AAudioStreamConfiguration &configuration) = 0;
 
-    virtual aaudio_result_t closeStream(aaudio_handle_t streamHandle) = 0;
+    virtual aaudio_result_t closeStream(aaudio::aaudio_handle_t streamHandle) = 0;
 
     /* Get an immutable description of the in-memory queues
     * used to communicate with the underlying HAL or Service.
     */
-    virtual aaudio_result_t getStreamDescription(aaudio_handle_t streamHandle,
+    virtual aaudio_result_t getStreamDescription(aaudio::aaudio_handle_t streamHandle,
                                                aaudio::AudioEndpointParcelable &parcelable) = 0;
 
     /**
      * Start the flow of data.
      */
-    virtual aaudio_result_t startStream(aaudio_handle_t streamHandle) = 0;
+    virtual aaudio_result_t startStream(aaudio::aaudio_handle_t streamHandle) = 0;
 
     /**
      * Stop the flow of data such that start() can resume without loss of data.
      */
-    virtual aaudio_result_t pauseStream(aaudio_handle_t streamHandle) = 0;
+    virtual aaudio_result_t pauseStream(aaudio::aaudio_handle_t streamHandle) = 0;
 
     /**
      *  Discard any data held by the underlying HAL or Service.
      */
-    virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle) = 0;
+    virtual aaudio_result_t flushStream(aaudio::aaudio_handle_t streamHandle) = 0;
 
     /**
      * Manage the specified thread as a low latency audio thread.
      */
-    virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle, pid_t clientThreadId,
-                                              aaudio_nanoseconds_t periodNanoseconds) = 0;
+    virtual aaudio_result_t registerAudioThread(aaudio::aaudio_handle_t streamHandle, pid_t clientThreadId,
+                                              int64_t periodNanoseconds) = 0;
 
-    virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
+    virtual aaudio_result_t unregisterAudioThread(aaudio::aaudio_handle_t streamHandle,
                                                 pid_t clientThreadId) = 0;
 };
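
For orientation, a hedged sketch of the client-side call sequence against this interface, modeled on AudioStreamInternal (includes, binder setup, and most error handling trimmed):

    static aaudio_result_t exerciseStream(const android::sp<android::IAAudioService>& service) {
        aaudio::AAudioStreamRequest request;
        aaudio::AAudioStreamConfiguration configuration;
        aaudio::aaudio_handle_t handle = service->openStream(request, configuration);
        if (handle < 0) {
            return (aaudio_result_t) handle;          // negative handles are error codes
        }
        aaudio_result_t result = service->startStream(handle);
        if (result == AAUDIO_OK) {
            result = service->pauseStream(handle);
        }
        service->closeStream(handle);
        return result;
    }
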
 
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index 5cd9782..47c4774 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -167,8 +167,8 @@
     return mDownDataQueue->getWriteCounter();
 }
 
-aaudio_size_frames_t AudioEndpoint::setBufferSizeInFrames(aaudio_size_frames_t requestedFrames,
-                                            aaudio_size_frames_t *actualFrames)
+int32_t AudioEndpoint::setBufferSizeInFrames(int32_t requestedFrames,
+                                            int32_t *actualFrames)
 {
     if (requestedFrames < ENDPOINT_DATA_QUEUE_SIZE_MIN) {
         requestedFrames = ENDPOINT_DATA_QUEUE_SIZE_MIN;
diff --git a/media/libaaudio/src/client/AudioEndpoint.h b/media/libaaudio/src/client/AudioEndpoint.h
index e786513..caee488 100644
--- a/media/libaaudio/src/client/AudioEndpoint.h
+++ b/media/libaaudio/src/client/AudioEndpoint.h
@@ -71,13 +71,13 @@
      */
     bool isOutputFreeRunning() const { return mOutputFreeRunning; }
 
-    int32_t setBufferSizeInFrames(aaudio_size_frames_t requestedFrames,
-                                  aaudio_size_frames_t *actualFrames);
-    aaudio_size_frames_t getBufferSizeInFrames() const;
+    int32_t setBufferSizeInFrames(int32_t requestedFrames,
+                                  int32_t *actualFrames);
+    int32_t getBufferSizeInFrames() const;
 
-    aaudio_size_frames_t getBufferCapacityInFrames() const;
+    int32_t getBufferCapacityInFrames() const;
 
-    aaudio_size_frames_t getFullFramesAvailable();
+    int32_t getFullFramesAvailable();
 
 private:
     FifoBuffer   * mUpCommandQueue;
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 19f2300..54f4870 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -172,14 +172,14 @@
 
 aaudio_result_t AudioStreamInternal::requestStart()
 {
-    aaudio_nanoseconds_t startTime;
+    int64_t startTime;
     ALOGD("AudioStreamInternal(): start()");
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
     const sp<IAAudioService>& aaudioService = getAAudioService();
     if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
-    startTime = AAudio_getNanoseconds(AAUDIO_CLOCK_MONOTONIC);
+    startTime = AudioClock::getNanoseconds();
     mClockModel.start(startTime);
     processTimestamp(0, startTime);
     setState(AAUDIO_STREAM_STATE_STARTING);
@@ -194,7 +194,7 @@
     }
     const sp<IAAudioService>& aaudioService = getAAudioService();
     if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
-    mClockModel.stop(AAudio_getNanoseconds(AAUDIO_CLOCK_MONOTONIC));
+    mClockModel.stop(AudioClock::getNanoseconds());
     setState(AAUDIO_STREAM_STATE_PAUSING);
     return aaudioService->pauseStream(mServiceStreamHandle);
 }
@@ -212,10 +212,10 @@
 
 void AudioStreamInternal::onFlushFromServer() {
     ALOGD("AudioStreamInternal(): onFlushFromServer()");
-    aaudio_position_frames_t readCounter = mAudioEndpoint.getDownDataReadCounter();
-    aaudio_position_frames_t writeCounter = mAudioEndpoint.getDownDataWriteCounter();
+    int64_t readCounter = mAudioEndpoint.getDownDataReadCounter();
+    int64_t writeCounter = mAudioEndpoint.getDownDataWriteCounter();
     // Bump offset so caller does not see the retrograde motion in getFramesRead().
-    aaudio_position_frames_t framesFlushed = writeCounter - readCounter;
+    int64_t framesFlushed = writeCounter - readCounter;
     mFramesOffsetFromService += framesFlushed;
     // Flush written frames by forcing writeCounter to readCounter.
     // This is because we cannot move the read counter in the hardware.
@@ -262,10 +262,10 @@
 
 // TODO use aaudio_clockid_t all the way down to AudioClock
 aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
-                           aaudio_position_frames_t *framePosition,
-                           aaudio_nanoseconds_t *timeNanoseconds) {
+                           int64_t *framePosition,
+                           int64_t *timeNanoseconds) {
 // TODO implement using real HAL
-    aaudio_nanoseconds_t time = AudioClock::getNanoseconds();
+    int64_t time = AudioClock::getNanoseconds();
     *framePosition = mClockModel.convertTimeToPosition(time);
     *timeNanoseconds = time + (10 * AAUDIO_NANOS_PER_MILLISECOND); // Fake hardware delay
     return AAUDIO_OK;
@@ -278,9 +278,9 @@
 #if LOG_TIMESTAMPS
 static void AudioStreamInternal_LogTimestamp(AAudioServiceMessage &command) {
     static int64_t oldPosition = 0;
-    static aaudio_nanoseconds_t oldTime = 0;
+    static int64_t oldTime = 0;
     int64_t framePosition = command.timestamp.position;
-    aaudio_nanoseconds_t nanoTime = command.timestamp.timestamp;
+    int64_t nanoTime = command.timestamp.timestamp;
     ALOGD("AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu",
          (long long) framePosition,
          (long long) nanoTime);
@@ -298,7 +298,7 @@
 #endif
 
 aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) {
-    aaudio_position_frames_t framePosition = 0;
+    int64_t framePosition = 0;
 #if LOG_TIMESTAMPS
     AudioStreamInternal_LogTimestamp(command);
 #endif
@@ -370,12 +370,12 @@
 
 // Write the data, block if needed and timeoutMillis > 0
 aaudio_result_t AudioStreamInternal::write(const void *buffer, int32_t numFrames,
-                                         aaudio_nanoseconds_t timeoutNanoseconds)
+                                         int64_t timeoutNanoseconds)
 {
     aaudio_result_t result = AAUDIO_OK;
     uint8_t* source = (uint8_t*)buffer;
-    aaudio_nanoseconds_t currentTimeNanos = AudioClock::getNanoseconds();
-    aaudio_nanoseconds_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
+    int64_t currentTimeNanos = AudioClock::getNanoseconds();
+    int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
     int32_t framesLeft = numFrames;
 //    ALOGD("AudioStreamInternal::write(%p, %d) at time %08llu , mState = %d ------------------",
 //         buffer, numFrames, (unsigned long long) currentTimeNanos, mState);
@@ -383,7 +383,7 @@
     // Write until all the data has been written or until a timeout occurs.
     while (framesLeft > 0) {
         // The call to writeNow() will not block. It will just write as much as it can.
-        aaudio_nanoseconds_t wakeTimeNanos = 0;
+        int64_t wakeTimeNanos = 0;
         aaudio_result_t framesWritten = writeNow(source, framesLeft,
                                                currentTimeNanos, &wakeTimeNanos);
 //        ALOGD("AudioStreamInternal::write() writeNow() framesLeft = %d --> framesWritten = %d", framesLeft, framesWritten);
@@ -422,7 +422,7 @@
 
 // Write as much data as we can without blocking.
 aaudio_result_t AudioStreamInternal::writeNow(const void *buffer, int32_t numFrames,
-                                         aaudio_nanoseconds_t currentNanoTime, aaudio_nanoseconds_t *wakeTimePtr) {
+                                         int64_t currentNanoTime, int64_t *wakeTimePtr) {
     {
         aaudio_result_t result = processCommands();
         if (result != AAUDIO_OK) {
@@ -452,7 +452,7 @@
     // Calculate an ideal time to wake up.
     if (wakeTimePtr != nullptr && framesWritten >= 0) {
         // By default wake up a few milliseconds from now.  // TODO review
-        aaudio_nanoseconds_t wakeTime = currentNanoTime + (2 * AAUDIO_NANOS_PER_MILLISECOND);
+        int64_t wakeTime = currentNanoTime + (2 * AAUDIO_NANOS_PER_MILLISECOND);
         switch (getState()) {
             case AAUDIO_STREAM_STATE_OPEN:
             case AAUDIO_STREAM_STATE_STARTING:
@@ -487,7 +487,7 @@
 
 aaudio_result_t AudioStreamInternal::waitForStateChange(aaudio_stream_state_t currentState,
                                                       aaudio_stream_state_t *nextState,
-                                                      aaudio_nanoseconds_t timeoutNanoseconds)
+                                                      int64_t timeoutNanoseconds)
 
 {
     aaudio_result_t result = processCommands();
@@ -522,33 +522,38 @@
 }
 
 
-void AudioStreamInternal::processTimestamp(uint64_t position, aaudio_nanoseconds_t time) {
+void AudioStreamInternal::processTimestamp(uint64_t position, int64_t time) {
     mClockModel.processTimestamp( position, time);
 }
 
-aaudio_result_t AudioStreamInternal::setBufferSize(aaudio_size_frames_t requestedFrames,
-                                        aaudio_size_frames_t *actualFrames) {
-    return mAudioEndpoint.setBufferSizeInFrames(requestedFrames, actualFrames);
+aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
+    int32_t actualFrames = 0;
+    aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(requestedFrames, &actualFrames);
+    if (result < 0) {
+        return result;
+    } else {
+        return (aaudio_result_t) actualFrames;
+    }
 }
 
-aaudio_size_frames_t AudioStreamInternal::getBufferSize() const
+int32_t AudioStreamInternal::getBufferSize() const
 {
     return mAudioEndpoint.getBufferSizeInFrames();
 }
 
-aaudio_size_frames_t AudioStreamInternal::getBufferCapacity() const
+int32_t AudioStreamInternal::getBufferCapacity() const
 {
     return mAudioEndpoint.getBufferCapacityInFrames();
 }
 
-aaudio_size_frames_t AudioStreamInternal::getFramesPerBurst() const
+int32_t AudioStreamInternal::getFramesPerBurst() const
 {
     return mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
 }
 
-aaudio_position_frames_t AudioStreamInternal::getFramesRead()
+int64_t AudioStreamInternal::getFramesRead()
 {
-    aaudio_position_frames_t framesRead =
+    int64_t framesRead =
             mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
             + mFramesOffsetFromService;
     // Prevent retrograde motion.
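
The reworked setBufferSize() above folds the old out-parameter into the return value: a non-negative result is the actual buffer size in frames, a negative result is an error code. A hypothetical caller would use it like this:

    // 'stream' and 'framesPerBurst' are assumed to exist in the caller.
    aaudio_result_t result = stream->setBufferSize(2 * framesPerBurst);
    if (result < 0) {
        ALOGE("setBufferSize failed: %s", AAudio_convertResultToText(result));
    } else {
        ALOGD("buffer size is now %d frames", (int32_t) result);  // may differ from the request
    }
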
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 666df3a..6f3a7ac 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -49,8 +49,8 @@
 
     // TODO use aaudio_clockid_t all the way down to AudioClock
     virtual aaudio_result_t getTimestamp(clockid_t clockId,
-                                       aaudio_position_frames_t *framePosition,
-                                       aaudio_nanoseconds_t *timeNanoseconds) override;
+                                       int64_t *framePosition,
+                                       int64_t *timeNanoseconds) override;
 
 
     virtual aaudio_result_t updateState() override;
@@ -62,22 +62,21 @@
 
     virtual aaudio_result_t write(const void *buffer,
                              int32_t numFrames,
-                             aaudio_nanoseconds_t timeoutNanoseconds) override;
+                             int64_t timeoutNanoseconds) override;
 
     virtual aaudio_result_t waitForStateChange(aaudio_stream_state_t currentState,
                                           aaudio_stream_state_t *nextState,
-                                          aaudio_nanoseconds_t timeoutNanoseconds) override;
+                                          int64_t timeoutNanoseconds) override;
 
-    virtual aaudio_result_t setBufferSize(aaudio_size_frames_t requestedFrames,
-                                        aaudio_size_frames_t *actualFrames) override;
+    virtual aaudio_result_t setBufferSize(int32_t requestedFrames) override;
 
-    virtual aaudio_size_frames_t getBufferSize() const override;
+    virtual int32_t getBufferSize() const override;
 
-    virtual aaudio_size_frames_t getBufferCapacity() const override;
+    virtual int32_t getBufferCapacity() const override;
 
-    virtual aaudio_size_frames_t getFramesPerBurst() const override;
+    virtual int32_t getFramesPerBurst() const override;
 
-    virtual aaudio_position_frames_t getFramesRead() override;
+    virtual int64_t getFramesRead() override;
 
     virtual int32_t getXRunCount() const override {
         return mXRunCount;
@@ -100,8 +99,8 @@
  */
     virtual aaudio_result_t writeNow(const void *buffer,
                                 int32_t numFrames,
-                                aaudio_nanoseconds_t currentTimeNanos,
-                                aaudio_nanoseconds_t *wakeTimePtr);
+                                int64_t currentTimeNanos,
+                                int64_t *wakeTimePtr);
 
     void onFlushFromServer();
 
@@ -112,15 +111,15 @@
 private:
     IsochronousClockModel    mClockModel;
     AudioEndpoint            mAudioEndpoint;
-    aaudio_handle_t            mServiceStreamHandle;
+    aaudio_handle_t          mServiceStreamHandle;
     EndpointDescriptor       mEndpointDescriptor;
     // Offset from underlying frame position.
-    aaudio_position_frames_t   mFramesOffsetFromService = 0;
-    aaudio_position_frames_t   mLastFramesRead = 0;
-    aaudio_size_frames_t       mFramesPerBurst;
+    int64_t                  mFramesOffsetFromService = 0;
+    int64_t                  mLastFramesRead = 0;
+    int32_t                  mFramesPerBurst;
     int32_t                  mXRunCount = 0;
 
-    void processTimestamp(uint64_t position, aaudio_nanoseconds_t time);
+    void processTimestamp(uint64_t position, int64_t time);
 };
 
 } /* namespace aaudio */
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index bdb491d..4c8aabc 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -21,6 +21,7 @@
 #include <stdint.h>
 #include <aaudio/AAudioDefinitions.h>
 
+#include "utility/AudioClock.h"
 #include "IsochronousClockModel.h"
 
 #define MIN_LATENESS_NANOS (10 * AAUDIO_NANOS_PER_MICROSECOND)
@@ -29,11 +30,11 @@
 using namespace aaudio;
 
 IsochronousClockModel::IsochronousClockModel()
-        : mSampleRate(48000)
+        : mMarkerFramePosition(0)
+        , mMarkerNanoTime(0)
+        , mSampleRate(48000)
         , mFramesPerBurst(64)
         , mMaxLatenessInNanos(0)
-        , mMarkerFramePosition(0)
-        , mMarkerNanoTime(0)
         , mState(STATE_STOPPED)
 {
 }
@@ -41,21 +42,21 @@
 IsochronousClockModel::~IsochronousClockModel() {
 }
 
-void IsochronousClockModel::start(aaudio_nanoseconds_t nanoTime)
+void IsochronousClockModel::start(int64_t nanoTime)
 {
     mMarkerNanoTime = nanoTime;
     mState = STATE_STARTING;
 }
 
-void IsochronousClockModel::stop(aaudio_nanoseconds_t nanoTime)
+void IsochronousClockModel::stop(int64_t nanoTime)
 {
     mMarkerNanoTime = nanoTime;
     mMarkerFramePosition = convertTimeToPosition(nanoTime); // TODO should we do this?
     mState = STATE_STOPPED;
 }
 
-void IsochronousClockModel::processTimestamp(aaudio_position_frames_t framePosition,
-                                             aaudio_nanoseconds_t nanoTime) {
+void IsochronousClockModel::processTimestamp(int64_t framePosition,
+                                             int64_t nanoTime) {
     int64_t framesDelta = framePosition - mMarkerFramePosition;
     int64_t nanosDelta = nanoTime - mMarkerNanoTime;
     if (nanosDelta < 1000) {
@@ -115,7 +116,6 @@
     default:
         break;
     }
-    ++mTimestampCount;
 }
 
 void IsochronousClockModel::setSampleRate(int32_t sampleRate) {
@@ -133,41 +133,41 @@
     mMaxLatenessInNanos = (nanosLate > MIN_LATENESS_NANOS) ? nanosLate : MIN_LATENESS_NANOS;
 }
 
-aaudio_nanoseconds_t IsochronousClockModel::convertDeltaPositionToTime(
-        aaudio_position_frames_t framesDelta) const {
+int64_t IsochronousClockModel::convertDeltaPositionToTime(
+        int64_t framesDelta) const {
     return (AAUDIO_NANOS_PER_SECOND * framesDelta) / mSampleRate;
 }
 
-int64_t IsochronousClockModel::convertDeltaTimeToPosition(aaudio_nanoseconds_t nanosDelta) const {
+int64_t IsochronousClockModel::convertDeltaTimeToPosition(int64_t nanosDelta) const {
     return (mSampleRate * nanosDelta) / AAUDIO_NANOS_PER_SECOND;
 }
 
-aaudio_nanoseconds_t IsochronousClockModel::convertPositionToTime(
-        aaudio_position_frames_t framePosition) const {
+int64_t IsochronousClockModel::convertPositionToTime(
+        int64_t framePosition) const {
     if (mState == STATE_STOPPED) {
         return mMarkerNanoTime;
     }
-    aaudio_position_frames_t nextBurstIndex = (framePosition + mFramesPerBurst - 1) / mFramesPerBurst;
-    aaudio_position_frames_t nextBurstPosition = mFramesPerBurst * nextBurstIndex;
-    aaudio_position_frames_t framesDelta = nextBurstPosition - mMarkerFramePosition;
-    aaudio_nanoseconds_t nanosDelta = convertDeltaPositionToTime(framesDelta);
-    aaudio_nanoseconds_t time = (aaudio_nanoseconds_t) (mMarkerNanoTime + nanosDelta);
+    int64_t nextBurstIndex = (framePosition + mFramesPerBurst - 1) / mFramesPerBurst;
+    int64_t nextBurstPosition = mFramesPerBurst * nextBurstIndex;
+    int64_t framesDelta = nextBurstPosition - mMarkerFramePosition;
+    int64_t nanosDelta = convertDeltaPositionToTime(framesDelta);
+    int64_t time = (int64_t) (mMarkerNanoTime + nanosDelta);
 //    ALOGI("IsochronousClockModel::convertPositionToTime: pos = %llu --> time = %llu",
 //         (unsigned long long)framePosition,
 //         (unsigned long long)time);
     return time;
 }
 
-aaudio_position_frames_t IsochronousClockModel::convertTimeToPosition(
-        aaudio_nanoseconds_t nanoTime) const {
+int64_t IsochronousClockModel::convertTimeToPosition(
+        int64_t nanoTime) const {
     if (mState == STATE_STOPPED) {
         return mMarkerFramePosition;
     }
-    aaudio_nanoseconds_t nanosDelta = nanoTime - mMarkerNanoTime;
-    aaudio_position_frames_t framesDelta = convertDeltaTimeToPosition(nanosDelta);
-    aaudio_position_frames_t nextBurstPosition = mMarkerFramePosition + framesDelta;
-    aaudio_position_frames_t nextBurstIndex = nextBurstPosition / mFramesPerBurst;
-    aaudio_position_frames_t position = nextBurstIndex * mFramesPerBurst;
+    int64_t nanosDelta = nanoTime - mMarkerNanoTime;
+    int64_t framesDelta = convertDeltaTimeToPosition(nanosDelta);
+    int64_t nextBurstPosition = mMarkerFramePosition + framesDelta;
+    int64_t nextBurstIndex = nextBurstPosition / mFramesPerBurst;
+    int64_t position = nextBurstIndex * mFramesPerBurst;
 //    ALOGI("IsochronousClockModel::convertTimeToPosition: time = %llu --> pos = %llu",
 //         (unsigned long long)nanoTime,
 //         (unsigned long long)position);
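
To make the conversion math concrete at the defaults used in the constructor (48000 Hz, bursts of 64 frames):

    // Worked numbers only; AAUDIO_NANOS_PER_SECOND is written out as 1e9.
    int64_t nanosPerBurst = (1000000000LL * 64) / 48000;         // convertDeltaPositionToTime(64)  = 1333333 ns (truncated)
    int64_t framesPerMs   = (48000LL * 1000000) / 1000000000LL;  // convertDeltaTimeToPosition(1 ms) = 48 frames
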
diff --git a/media/libaaudio/src/client/IsochronousClockModel.h b/media/libaaudio/src/client/IsochronousClockModel.h
index b188a3d..524c286 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.h
+++ b/media/libaaudio/src/client/IsochronousClockModel.h
@@ -34,17 +34,17 @@
     IsochronousClockModel();
     virtual ~IsochronousClockModel();
 
-    void start(aaudio_nanoseconds_t nanoTime);
-    void stop(aaudio_nanoseconds_t nanoTime);
+    void start(int64_t nanoTime);
+    void stop(int64_t nanoTime);
 
-    void processTimestamp(aaudio_position_frames_t framePosition, aaudio_nanoseconds_t nanoTime);
+    void processTimestamp(int64_t framePosition, int64_t nanoTime);
 
     /**
      * @param sampleRate rate of the stream in frames per second
      */
-    void setSampleRate(aaudio_sample_rate_t sampleRate);
+    void setSampleRate(int32_t sampleRate);
 
-    aaudio_sample_rate_t getSampleRate() const {
+    int32_t getSampleRate() const {
         return mSampleRate;
     }
 
@@ -53,9 +53,9 @@
      *
      * @param framesPerBurst number of frames that stream advance at one time.
      */
-    void setFramesPerBurst(aaudio_size_frames_t framesPerBurst);
+    void setFramesPerBurst(int32_t framesPerBurst);
 
-    aaudio_size_frames_t getFramesPerBurst() const {
+    int32_t getFramesPerBurst() const {
         return mFramesPerBurst;
     }
 
@@ -65,7 +65,7 @@
      * @param framePosition position of the stream in frames
      * @return time in nanoseconds
      */
-    aaudio_nanoseconds_t convertPositionToTime(aaudio_position_frames_t framePosition) const;
+    int64_t convertPositionToTime(int64_t framePosition) const;
 
     /**
      * Calculate an estimated position where the stream will be at the specified time.
@@ -73,19 +73,19 @@
      * @param nanoTime time of interest
      * @return position in frames
      */
-    aaudio_position_frames_t convertTimeToPosition(aaudio_nanoseconds_t nanoTime) const;
+    int64_t convertTimeToPosition(int64_t nanoTime) const;
 
     /**
      * @param framesDelta difference in frames
      * @return duration in nanoseconds
      */
-    aaudio_nanoseconds_t convertDeltaPositionToTime(aaudio_position_frames_t framesDelta) const;
+    int64_t convertDeltaPositionToTime(int64_t framesDelta) const;
 
     /**
      * @param nanosDelta duration in nanoseconds
      * @return frames that stream will advance in that time
      */
-    aaudio_position_frames_t convertDeltaTimeToPosition(aaudio_nanoseconds_t nanosDelta) const;
+    int64_t convertDeltaTimeToPosition(int64_t nanosDelta) const;
 
 private:
     enum clock_model_state_t {
@@ -95,13 +95,12 @@
         STATE_RUNNING
     };
 
-    aaudio_sample_rate_t     mSampleRate;
-    aaudio_size_frames_t     mFramesPerBurst;
-    int32_t                mMaxLatenessInNanos;
-    aaudio_position_frames_t mMarkerFramePosition;
-    aaudio_nanoseconds_t     mMarkerNanoTime;
-    int32_t                mTimestampCount;
-    clock_model_state_t     mState;
+    int64_t             mMarkerFramePosition;
+    int64_t             mMarkerNanoTime;
+    int32_t             mSampleRate;
+    int32_t             mFramesPerBurst;
+    int32_t             mMaxLatenessInNanos;
+    clock_model_state_t mState;
 
     void update();
 };
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 04dbda1..1208f66 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -32,18 +32,11 @@
 
 using namespace aaudio;
 
-// This is not the maximum theoretic possible number of handles that the HandlerTracker
-// class could support; instead it is the maximum number of handles that we are configuring
-// for our HandleTracker instance (sHandleTracker).
-#define AAUDIO_MAX_HANDLES  64
 
 // Macros for common code that includes a return.
 // TODO Consider using do{}while(0) construct. I tried but it hung AndroidStudio
 #define CONVERT_BUILDER_HANDLE_OR_RETURN() \
-    convertAAudioBuilderToStreamBuilder(builder); \
-    if (streamBuilder == nullptr) { \
-        return AAUDIO_ERROR_INVALID_HANDLE; \
-    }
+    convertAAudioBuilderToStreamBuilder(builder);
 
 #define COMMON_GET_FROM_BUILDER_OR_RETURN(resultPtr) \
     CONVERT_BUILDER_HANDLE_OR_RETURN() \
@@ -51,31 +44,6 @@
         return AAUDIO_ERROR_NULL; \
     }
 
-#define CONVERT_STREAM_HANDLE_OR_RETURN() \
-    convertAAudioStreamToAudioStream(stream); \
-    if (audioStream == nullptr) { \
-        return AAUDIO_ERROR_INVALID_HANDLE; \
-    }
-
-#define COMMON_GET_FROM_STREAM_OR_RETURN(resultPtr) \
-    CONVERT_STREAM_HANDLE_OR_RETURN(); \
-    if ((resultPtr) == nullptr) { \
-        return AAUDIO_ERROR_NULL; \
-    }
-
-// Static data.
-// TODO static constructors are discouraged, alternatives?
-static HandleTracker sHandleTracker(AAUDIO_MAX_HANDLES);
-
-typedef enum
-{
-    AAUDIO_HANDLE_TYPE_STREAM,
-    AAUDIO_HANDLE_TYPE_STREAM_BUILDER,
-    AAUDIO_HANDLE_TYPE_COUNT
-} aaudio_handle_type_t;
-static_assert(AAUDIO_HANDLE_TYPE_COUNT <= HANDLE_TRACKER_MAX_TYPES, "Too many handle types.");
-
-
 #define AAUDIO_CASE_ENUM(name) case name: return #name
 
 AAUDIO_API const char * AAudio_convertResultToText(aaudio_result_t returnCode) {
@@ -104,6 +72,7 @@
 AAUDIO_API const char * AAudio_convertStreamStateToText(aaudio_stream_state_t state) {
     switch (state) {
         AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_UNINITIALIZED);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_UNKNOWN);
         AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_OPEN);
         AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STARTING);
         AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STARTED);
@@ -123,157 +92,73 @@
 
 static AudioStream *convertAAudioStreamToAudioStream(AAudioStream stream)
 {
-    return (AudioStream *) sHandleTracker.get(AAUDIO_HANDLE_TYPE_STREAM,
-                                              (aaudio_handle_t) stream);
+    return (AudioStream *) stream;
 }
 
 static AudioStreamBuilder *convertAAudioBuilderToStreamBuilder(AAudioStreamBuilder builder)
 {
-    return (AudioStreamBuilder *) sHandleTracker.get(AAUDIO_HANDLE_TYPE_STREAM_BUILDER,
-                                                     (aaudio_handle_t) builder);
+    return (AudioStreamBuilder *) builder;
 }
 
 AAUDIO_API aaudio_result_t AAudio_createStreamBuilder(AAudioStreamBuilder *builder)
 {
     ALOGD("AAudio_createStreamBuilder(): check sHandleTracker.isInitialized ()");
-    if (!sHandleTracker.isInitialized()) {
-        return AAUDIO_ERROR_NO_MEMORY;
-    }
     AudioStreamBuilder *audioStreamBuilder =  new AudioStreamBuilder();
     if (audioStreamBuilder == nullptr) {
         return AAUDIO_ERROR_NO_MEMORY;
     }
-    ALOGD("AAudio_createStreamBuilder(): created AudioStreamBuilder = %p", audioStreamBuilder);
-    // TODO protect the put() with a Mutex
-    AAudioStreamBuilder handle = sHandleTracker.put(AAUDIO_HANDLE_TYPE_STREAM_BUILDER,
-            audioStreamBuilder);
-    if (handle < 0) {
-        delete audioStreamBuilder;
-        return static_cast<aaudio_result_t>(handle);
-    } else {
-        *builder = handle;
-    }
+    *builder = (AAudioStreamBuilder) audioStreamBuilder;
     return AAUDIO_OK;
 }
 
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setDeviceId(AAudioStreamBuilder builder,
-                                                     aaudio_device_id_t deviceId)
+AAUDIO_API void AAudioStreamBuilder_setDeviceId(AAudioStreamBuilder builder,
+                                                     int32_t deviceId)
 {
-    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setDeviceId(deviceId);
-    return AAUDIO_OK;
 }
 
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getDeviceId(AAudioStreamBuilder builder,
-                                              aaudio_device_id_t *deviceId)
+AAUDIO_API void AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder builder,
+                                              int32_t sampleRate)
 {
-    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(deviceId);
-    *deviceId = streamBuilder->getDeviceId();
-    return AAUDIO_OK;
-}
-
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder builder,
-                                              aaudio_sample_rate_t sampleRate)
-{
-    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setSampleRate(sampleRate);
-    return AAUDIO_OK;
 }
 
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSampleRate(AAudioStreamBuilder builder,
-                                              aaudio_sample_rate_t *sampleRate)
-{
-    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(sampleRate);
-    *sampleRate = streamBuilder->getSampleRate();
-    return AAUDIO_OK;
-}
-
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder builder,
+AAUDIO_API void AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder builder,
                                                    int32_t samplesPerFrame)
 {
-    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setSamplesPerFrame(samplesPerFrame);
-    return AAUDIO_OK;
 }
 
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSamplesPerFrame(AAudioStreamBuilder builder,
-                                                   int32_t *samplesPerFrame)
-{
-    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(samplesPerFrame);
-    *samplesPerFrame = streamBuilder->getSamplesPerFrame();
-    return AAUDIO_OK;
-}
-
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setDirection(AAudioStreamBuilder builder,
+AAUDIO_API void AAudioStreamBuilder_setDirection(AAudioStreamBuilder builder,
                                              aaudio_direction_t direction)
 {
-    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setDirection(direction);
-    return AAUDIO_OK;
 }
 
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getDirection(AAudioStreamBuilder builder,
-                                             aaudio_direction_t *direction)
-{
-    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(direction);
-    *direction = streamBuilder->getDirection();
-    return AAUDIO_OK;
-}
 
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setFormat(AAudioStreamBuilder builder,
+AAUDIO_API void AAudioStreamBuilder_setFormat(AAudioStreamBuilder builder,
                                                    aaudio_audio_format_t format)
 {
-    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setFormat(format);
-    return AAUDIO_OK;
 }
 
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getFormat(AAudioStreamBuilder builder,
-                                                   aaudio_audio_format_t *format)
-{
-    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(format);
-    *format = streamBuilder->getFormat();
-    return AAUDIO_OK;
-}
-
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder builder,
+AAUDIO_API void AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder builder,
                                                         aaudio_sharing_mode_t sharingMode)
 {
-    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
-    if ((sharingMode < 0) || (sharingMode >= AAUDIO_SHARING_MODE_COUNT)) {
-        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
-    } else {
-        streamBuilder->setSharingMode(sharingMode);
-        return AAUDIO_OK;
-    }
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+    streamBuilder->setSharingMode(sharingMode);
 }
 
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSharingMode(AAudioStreamBuilder builder,
-                                                        aaudio_sharing_mode_t *sharingMode)
+AAUDIO_API void AAudioStreamBuilder_setBufferCapacityInFrames(AAudioStreamBuilder builder,
+                                                        int32_t frames)
 {
-    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(sharingMode);
-    *sharingMode = streamBuilder->getSharingMode();
-    return AAUDIO_OK;
-}
-
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setBufferCapacity(AAudioStreamBuilder builder,
-                                                        aaudio_size_frames_t frames)
-{
-    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
-    if (frames < 0) {
-        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
-    } else {
-        streamBuilder->setBufferCapacity(frames);
-        return AAUDIO_OK;
-    }
-}
-
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getBufferCapacity(AAudioStreamBuilder builder,
-                                                        aaudio_size_frames_t *frames)
-{
-    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(frames);
-    *frames = streamBuilder->getBufferCapacity();
-    return AAUDIO_OK;
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+    streamBuilder->setBufferCapacity(frames);
 }
 
 static aaudio_result_t  AAudioInternal_openStream(AudioStreamBuilder *streamBuilder,
@@ -284,14 +169,7 @@
     if (result != AAUDIO_OK) {
         return result;
     } else {
-        // Create a handle for referencing the object.
-        // TODO protect the put() with a Mutex
-        AAudioStream handle = sHandleTracker.put(AAUDIO_HANDLE_TYPE_STREAM, audioStream);
-        if (handle < 0) {
-            delete audioStream;
-            return static_cast<aaudio_result_t>(handle);
-        }
-        *streamPtr = handle;
+        *streamPtr = (AAudioStream) audioStream;
         return AAUDIO_OK;
     }
 }
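
The pattern in this hunk is the heart of the AAudioAudio.cpp change: HandleTracker lookups are replaced by direct casts, so an AAudioStream or AAudioStreamBuilder value now simply carries the object pointer. A sketch of the round trip, using the helpers defined earlier in this file (error paths omitted):

    AudioStreamBuilder *impl = new AudioStreamBuilder();
    AAudioStreamBuilder handle = (AAudioStreamBuilder) impl;      // handed out to the caller
    AudioStreamBuilder *again = convertAAudioBuilderToStreamBuilder(handle);
    // 'again' == 'impl'. Note there is no validity check any more: passing a
    // stale or bogus value is undefined behavior instead of returning
    // AAUDIO_ERROR_INVALID_HANDLE as the tracked-handle version did.
    delete again;
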
@@ -299,15 +177,14 @@
 AAUDIO_API aaudio_result_t  AAudioStreamBuilder_openStream(AAudioStreamBuilder builder,
                                                      AAudioStream *streamPtr)
 {
-    ALOGD("AAudioStreamBuilder_openStream(): builder = 0x%08X", builder);
+    ALOGD("AAudioStreamBuilder_openStream(): builder = %p", builder);
     AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(streamPtr);
     return AAudioInternal_openStream(streamBuilder, streamPtr);
 }
 
 AAUDIO_API aaudio_result_t  AAudioStreamBuilder_delete(AAudioStreamBuilder builder)
 {
-    AudioStreamBuilder *streamBuilder = (AudioStreamBuilder *)
-            sHandleTracker.remove(AAUDIO_HANDLE_TYPE_STREAM_BUILDER, builder);
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     if (streamBuilder != nullptr) {
         delete streamBuilder;
         return AAUDIO_OK;
@@ -317,9 +194,8 @@
 
 AAUDIO_API aaudio_result_t  AAudioStream_close(AAudioStream stream)
 {
-    AudioStream *audioStream = (AudioStream *)
-            sHandleTracker.remove(AAUDIO_HANDLE_TYPE_STREAM, (aaudio_handle_t)stream);
-    ALOGD("AAudioStream_close(0x%08X), audioStream = %p", stream, audioStream);
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    ALOGD("AAudioStream_close(%p)", stream);
     if (audioStream != nullptr) {
         audioStream->close();
         delete audioStream;
@@ -330,39 +206,39 @@
 
 AAUDIO_API aaudio_result_t  AAudioStream_requestStart(AAudioStream stream)
 {
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
-    ALOGD("AAudioStream_requestStart(0x%08X), audioStream = %p", stream, audioStream);
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    ALOGD("AAudioStream_requestStart(%p)", stream);
     return audioStream->requestStart();
 }
 
 AAUDIO_API aaudio_result_t  AAudioStream_requestPause(AAudioStream stream)
 {
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
-    ALOGD("AAudioStream_requestPause(0x%08X), audioStream = %p", stream, audioStream);
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    ALOGD("AAudioStream_requestPause(%p)", stream);
     return audioStream->requestPause();
 }
 
 AAUDIO_API aaudio_result_t  AAudioStream_requestFlush(AAudioStream stream)
 {
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
-    ALOGD("AAudioStream_requestFlush(0x%08X), audioStream = %p", stream, audioStream);
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    ALOGD("AAudioStream_requestFlush(%p)", stream);
     return audioStream->requestFlush();
 }
 
 AAUDIO_API aaudio_result_t  AAudioStream_requestStop(AAudioStream stream)
 {
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
-    ALOGD("AAudioStream_requestStop(0x%08X), audioStream = %p", stream, audioStream);
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    ALOGD("AAudioStream_requestStop(%p)", stream);
     return audioStream->requestStop();
 }
 
 AAUDIO_API aaudio_result_t AAudioStream_waitForStateChange(AAudioStream stream,
                                             aaudio_stream_state_t inputState,
                                             aaudio_stream_state_t *nextState,
-                                            aaudio_nanoseconds_t timeoutNanoseconds)
+                                            int64_t timeoutNanoseconds)
 {
 
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
     return audioStream->waitForStateChange(inputState, nextState, timeoutNanoseconds);
 }
 
@@ -372,10 +248,10 @@
 
 AAUDIO_API aaudio_result_t AAudioStream_read(AAudioStream stream,
                                void *buffer,
-                               aaudio_size_frames_t numFrames,
-                               aaudio_nanoseconds_t timeoutNanoseconds)
+                               int32_t numFrames,
+                               int64_t timeoutNanoseconds)
 {
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
     if (buffer == nullptr) {
         return AAUDIO_ERROR_NULL;
     }
@@ -393,10 +269,10 @@
 
 AAUDIO_API aaudio_result_t AAudioStream_write(AAudioStream stream,
                                const void *buffer,
-                               aaudio_size_frames_t numFrames,
-                               aaudio_nanoseconds_t timeoutNanoseconds)
+                               int32_t numFrames,
+                               int64_t timeoutNanoseconds)
 {
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
     if (buffer == nullptr) {
         return AAUDIO_ERROR_NULL;
     }
@@ -417,18 +293,18 @@
 // ============================================================
 
 AAUDIO_API aaudio_result_t AAudioStream_createThread(AAudioStream stream,
-                                     aaudio_nanoseconds_t periodNanoseconds,
+                                     int64_t periodNanoseconds,
                                      aaudio_audio_thread_proc_t *threadProc, void *arg)
 {
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
     return audioStream->createThread(periodNanoseconds, threadProc, arg);
 }
 
 AAUDIO_API aaudio_result_t AAudioStream_joinThread(AAudioStream stream,
                                    void **returnArg,
-                                   aaudio_nanoseconds_t timeoutNanoseconds)
+                                   int64_t timeoutNanoseconds)
 {
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
     return audioStream->joinThread(returnArg, timeoutNanoseconds);
 }
 
@@ -436,147 +312,104 @@
 // Stream - queries
 // ============================================================
 
-// TODO Use aaudio_clockid_t all the way down through the C++ streams.
-static clockid_t AAudioConvert_fromAAudioClockId(aaudio_clockid_t clockid)
+AAUDIO_API int32_t AAudioStream_getSampleRate(AAudioStream stream)
 {
-    clockid_t hostClockId;
-    switch (clockid) {
-        case AAUDIO_CLOCK_MONOTONIC:
-            hostClockId = CLOCK_MONOTONIC;
-            break;
-        case AAUDIO_CLOCK_BOOTTIME:
-            hostClockId = CLOCK_BOOTTIME;
-            break;
-        default:
-            hostClockId = 0; // TODO review
-    }
-    return hostClockId;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getSampleRate();
 }
 
-aaudio_nanoseconds_t AAudio_getNanoseconds(aaudio_clockid_t clockid)
+AAUDIO_API int32_t AAudioStream_getSamplesPerFrame(AAudioStream stream)
 {
-    clockid_t hostClockId = AAudioConvert_fromAAudioClockId(clockid);
-   return AudioClock::getNanoseconds(hostClockId);
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getSamplesPerFrame();
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_getSampleRate(AAudioStream stream, aaudio_sample_rate_t *sampleRate)
+AAUDIO_API aaudio_stream_state_t AAudioStream_getState(AAudioStream stream)
 {
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(sampleRate);
-    *sampleRate = audioStream->getSampleRate();
-    return AAUDIO_OK;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getState();
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_getSamplesPerFrame(AAudioStream stream, int32_t *samplesPerFrame)
+AAUDIO_API aaudio_audio_format_t AAudioStream_getFormat(AAudioStream stream)
 {
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(samplesPerFrame);
-    *samplesPerFrame = audioStream->getSamplesPerFrame();
-    return AAUDIO_OK;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getFormat();
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_getState(AAudioStream stream, aaudio_stream_state_t *state)
+AAUDIO_API aaudio_result_t AAudioStream_setBufferSizeInFrames(AAudioStream stream,
+                                                int32_t requestedFrames)
 {
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(state);
-    *state = audioStream->getState();
-    return AAUDIO_OK;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->setBufferSize(requestedFrames);
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_getFormat(AAudioStream stream, aaudio_audio_format_t *format)
+AAUDIO_API int32_t AAudioStream_getBufferSizeInFrames(AAudioStream stream)
 {
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(format);
-    *format = audioStream->getFormat();
-    return AAUDIO_OK;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getBufferSize();
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_setBufferSize(AAudioStream stream,
-                                                aaudio_size_frames_t requestedFrames,
-                                                aaudio_size_frames_t *actualFrames)
+AAUDIO_API aaudio_direction_t AAudioStream_getDirection(AAudioStream stream)
 {
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
-    return audioStream->setBufferSize(requestedFrames, actualFrames);
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getDirection();
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_getBufferSize(AAudioStream stream, aaudio_size_frames_t *frames)
+AAUDIO_API int32_t AAudioStream_getFramesPerBurst(AAudioStream stream)
 {
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(frames);
-    *frames = audioStream->getBufferSize();
-    return AAUDIO_OK;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getFramesPerBurst();
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_getDirection(AAudioStream stream, int32_t *direction)
+AAUDIO_API int32_t AAudioStream_getBufferCapacityInFrames(AAudioStream stream)
 {
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(direction);
-    *direction = audioStream->getDirection();
-    return AAUDIO_OK;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getBufferCapacity();
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_getFramesPerBurst(AAudioStream stream,
-                                                    aaudio_size_frames_t *framesPerBurst)
+AAUDIO_API int32_t AAudioStream_getXRunCount(AAudioStream stream)
 {
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(framesPerBurst);
-    *framesPerBurst = audioStream->getFramesPerBurst();
-    return AAUDIO_OK;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getXRunCount();
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_getBufferCapacity(AAudioStream stream,
-                                           aaudio_size_frames_t *capacity)
+AAUDIO_API int32_t AAudioStream_getDeviceId(AAudioStream stream)
 {
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(capacity);
-    *capacity = audioStream->getBufferCapacity();
-    return AAUDIO_OK;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getDeviceId();
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_getXRunCount(AAudioStream stream, int32_t *xRunCount)
+AAUDIO_API aaudio_sharing_mode_t AAudioStream_getSharingMode(AAudioStream stream)
 {
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(xRunCount);
-    *xRunCount = audioStream->getXRunCount();
-    return AAUDIO_OK;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getSharingMode();
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_getDeviceId(AAudioStream stream,
-                                                 aaudio_device_id_t *deviceId)
+AAUDIO_API int64_t AAudioStream_getFramesWritten(AAudioStream stream)
 {
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(deviceId);
-    *deviceId = audioStream->getDeviceId();
-    return AAUDIO_OK;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getFramesWritten();
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_getSharingMode(AAudioStream stream,
-                                                 aaudio_sharing_mode_t *sharingMode)
+AAUDIO_API int64_t AAudioStream_getFramesRead(AAudioStream stream)
 {
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(sharingMode);
-    *sharingMode = audioStream->getSharingMode();
-    return AAUDIO_OK;
-}
-
-AAUDIO_API aaudio_result_t AAudioStream_getFramesWritten(AAudioStream stream,
-                                                   aaudio_position_frames_t *frames)
-{
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(frames);
-    *frames = audioStream->getFramesWritten();
-    return AAUDIO_OK;
-}
-
-AAUDIO_API aaudio_result_t AAudioStream_getFramesRead(AAudioStream stream, aaudio_position_frames_t *frames)
-{
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(frames);
-    *frames = audioStream->getFramesRead();
-    return AAUDIO_OK;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getFramesRead();
 }
 
 AAUDIO_API aaudio_result_t AAudioStream_getTimestamp(AAudioStream stream,
-                                      aaudio_clockid_t clockid,
-                                      aaudio_position_frames_t *framePosition,
-                                      aaudio_nanoseconds_t *timeNanoseconds)
+                                      clockid_t clockid,
+                                      int64_t *framePosition,
+                                      int64_t *timeNanoseconds)
 {
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
     if (framePosition == nullptr) {
         return AAUDIO_ERROR_NULL;
     } else if (timeNanoseconds == nullptr) {
         return AAUDIO_ERROR_NULL;
-    } else if (clockid != AAUDIO_CLOCK_MONOTONIC && clockid != AAUDIO_CLOCK_BOOTTIME) {
+    } else if (clockid != CLOCK_MONOTONIC && clockid != CLOCK_BOOTTIME) {
         return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
     }
 
-    clockid_t hostClockId = AAudioConvert_fromAAudioClockId(clockid);
-    return audioStream->getTimestamp(hostClockId, framePosition, timeNanoseconds);
+    return audioStream->getTimestamp(clockid, framePosition, timeNanoseconds);
 }
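
The hunk above replaces the handle-tracker lookups and out-parameter getters with direct-return accessors built on plain int32_t/int64_t. A minimal caller-side sketch of the resulting call shape follows; the include path and the assumption that a stream was already opened via AAudioStreamBuilder_openStream() are illustrative only, not part of this patch.

    // Sketch only: exercises the direct-return query style introduced above.
    #include <aaudio/AAudio.h>     // assumed public header for these entry points
    #include <stdio.h>
    #include <time.h>              // CLOCK_MONOTONIC

    static void dumpStreamInfo(AAudioStream stream) {   // opened elsewhere via the builder
        // Queries now return values directly instead of filling out-parameters.
        int32_t sampleRate     = AAudioStream_getSampleRate(stream);
        int32_t framesPerBurst = AAudioStream_getFramesPerBurst(stream);
        int32_t bufferFrames   = AAudioStream_getBufferSizeInFrames(stream);
        printf("rate=%d burst=%d buffer=%d\n", sampleRate, framesPerBurst, bufferFrames);

        // Timestamps now take a plain clockid_t and 64-bit out-parameters.
        int64_t framePosition = 0;
        int64_t timeNanos = 0;
        if (AAudioStream_getTimestamp(stream, CLOCK_MONOTONIC,
                                      &framePosition, &timeNanos) == AAUDIO_OK) {
            printf("frame %lld at %lld ns\n",
                   (long long) framePosition, (long long) timeNanos);
        }
    }
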
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 77d3cc0..c4962ee 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -52,7 +52,7 @@
 
 aaudio_result_t AudioStream::waitForStateTransition(aaudio_stream_state_t startingState,
                                                aaudio_stream_state_t endingState,
-                                               aaudio_nanoseconds_t timeoutNanoseconds)
+                                               int64_t timeoutNanoseconds)
 {
     aaudio_stream_state_t state = getState();
     aaudio_stream_state_t nextState = state;
@@ -73,10 +73,10 @@
 
 aaudio_result_t AudioStream::waitForStateChange(aaudio_stream_state_t currentState,
                                                 aaudio_stream_state_t *nextState,
-                                                aaudio_nanoseconds_t timeoutNanoseconds)
+                                                int64_t timeoutNanoseconds)
 {
     // TODO replace this when similar functionality added to AudioTrack.cpp
-    aaudio_nanoseconds_t durationNanos = 20 * AAUDIO_NANOS_PER_MILLISECOND;
+    int64_t durationNanos = 20 * AAUDIO_NANOS_PER_MILLISECOND;
     aaudio_stream_state_t state = getState();
     while (state == currentState && timeoutNanoseconds > 0) {
         if (durationNanos > timeoutNanoseconds) {
@@ -120,7 +120,7 @@
     return audioStream->wrapUserThread();
 }
 
-aaudio_result_t AudioStream::createThread(aaudio_nanoseconds_t periodNanoseconds,
+aaudio_result_t AudioStream::createThread(int64_t periodNanoseconds,
                                      aaudio_audio_thread_proc_t *threadProc,
                                      void* threadArg)
 {
@@ -144,7 +144,7 @@
     }
 }
 
-aaudio_result_t AudioStream::joinThread(void** returnArg, aaudio_nanoseconds_t timeoutNanoseconds)
+aaudio_result_t AudioStream::joinThread(void** returnArg, int64_t timeoutNanoseconds)
 {
     if (!mHasThread) {
         return AAUDIO_ERROR_INVALID_STATE;
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index acfed97..f5f9d28 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -52,8 +52,8 @@
 
     // TODO use aaudio_clockid_t all the way down to AudioClock
     virtual aaudio_result_t getTimestamp(clockid_t clockId,
-                                       aaudio_position_frames_t *framePosition,
-                                       aaudio_nanoseconds_t *timeNanoseconds) = 0;
+                                       int64_t *framePosition,
+                                       int64_t *timeNanoseconds) = 0;
 
 
     virtual aaudio_result_t updateState() = 0;
@@ -63,7 +63,7 @@
 
     virtual aaudio_result_t waitForStateChange(aaudio_stream_state_t currentState,
                                           aaudio_stream_state_t *nextState,
-                                          aaudio_nanoseconds_t timeoutNanoseconds);
+                                          int64_t timeoutNanoseconds);
 
     /**
      * Open the stream using the parameters in the builder.
@@ -79,16 +79,15 @@
         return AAUDIO_OK;
     }
 
-    virtual aaudio_result_t setBufferSize(aaudio_size_frames_t requestedFrames,
-                                        aaudio_size_frames_t *actualFrames) {
+    virtual aaudio_result_t setBufferSize(int32_t requestedFrames) {
         return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
-    virtual aaudio_result_t createThread(aaudio_nanoseconds_t periodNanoseconds,
+    virtual aaudio_result_t createThread(int64_t periodNanoseconds,
                                        aaudio_audio_thread_proc_t *threadProc,
                                        void *threadArg);
 
-    virtual aaudio_result_t joinThread(void **returnArg, aaudio_nanoseconds_t timeoutNanoseconds);
+    virtual aaudio_result_t joinThread(void **returnArg, int64_t timeoutNanoseconds);
 
     virtual aaudio_result_t registerThread() {
         return AAUDIO_OK;
@@ -106,19 +105,19 @@
 
     // ============== Queries ===========================
 
-    virtual aaudio_stream_state_t getState() const {
+    aaudio_stream_state_t getState() const {
         return mState;
     }
 
-    virtual aaudio_size_frames_t getBufferSize() const {
+    virtual int32_t getBufferSize() const {
         return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
-    virtual aaudio_size_frames_t getBufferCapacity() const {
+    virtual int32_t getBufferCapacity() const {
         return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
-    virtual aaudio_size_frames_t getFramesPerBurst() const {
+    virtual int32_t getFramesPerBurst() const {
         return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
@@ -142,7 +141,7 @@
         return mSamplesPerFrame;
     }
 
-    aaudio_device_id_t getDeviceId() const {
+    int32_t getDeviceId() const {
         return mDeviceId;
     }
 
@@ -154,19 +153,19 @@
         return mDirection;
     }
 
-    aaudio_size_bytes_t getBytesPerFrame() const {
+    int32_t getBytesPerFrame() const {
         return mSamplesPerFrame * getBytesPerSample();
     }
 
-    aaudio_size_bytes_t getBytesPerSample() const {
+    int32_t getBytesPerSample() const {
         return AAudioConvert_formatToSizeInBytes(mFormat);
     }
 
-    virtual aaudio_position_frames_t getFramesWritten() {
+    virtual int64_t getFramesWritten() {
         return mFramesWritten.get();
     }
 
-    virtual aaudio_position_frames_t getFramesRead() {
+    virtual int64_t getFramesRead() {
         return mFramesRead.get();
     }
 
@@ -174,25 +173,25 @@
     // ============== I/O ===========================
     // A Stream will only implement read() or write() depending on its direction.
     virtual aaudio_result_t write(const void *buffer,
-                             aaudio_size_frames_t numFrames,
-                             aaudio_nanoseconds_t timeoutNanoseconds) {
+                             int32_t numFrames,
+                             int64_t timeoutNanoseconds) {
         return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
     virtual aaudio_result_t read(void *buffer,
-                            aaudio_size_frames_t numFrames,
-                            aaudio_nanoseconds_t timeoutNanoseconds) {
+                            int32_t numFrames,
+                            int64_t timeoutNanoseconds) {
         return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
 protected:
 
-    virtual aaudio_position_frames_t incrementFramesWritten(aaudio_size_frames_t frames) {
-        return static_cast<aaudio_position_frames_t>(mFramesWritten.increment(frames));
+    virtual int64_t incrementFramesWritten(int32_t frames) {
+        return static_cast<int64_t>(mFramesWritten.increment(frames));
     }
 
-    virtual aaudio_position_frames_t incrementFramesRead(aaudio_size_frames_t frames) {
-        return static_cast<aaudio_position_frames_t>(mFramesRead.increment(frames));
+    virtual int64_t incrementFramesRead(int32_t frames) {
+        return static_cast<int64_t>(mFramesRead.increment(frames));
     }
 
     /**
@@ -202,13 +201,13 @@
      *   or AAUDIO_ERROR_TIMEOUT
      */
     virtual aaudio_result_t waitForStateTransition(aaudio_stream_state_t startingState,
-                                              aaudio_stream_state_t endingState,
-                                              aaudio_nanoseconds_t timeoutNanoseconds);
+                                                   aaudio_stream_state_t endingState,
+                                                   int64_t timeoutNanoseconds);
 
     /**
      * This should not be called after the open() call.
      */
-    void setSampleRate(aaudio_sample_rate_t sampleRate) {
+    void setSampleRate(int32_t sampleRate) {
         mSampleRate = sampleRate;
     }
 
@@ -243,33 +242,33 @@
     MonotonicCounter     mFramesWritten;
     MonotonicCounter     mFramesRead;
 
-    void setPeriodNanoseconds(aaudio_nanoseconds_t periodNanoseconds) {
+    void setPeriodNanoseconds(int64_t periodNanoseconds) {
         mPeriodNanoseconds.store(periodNanoseconds, std::memory_order_release);
     }
 
-    aaudio_nanoseconds_t getPeriodNanoseconds() {
+    int64_t getPeriodNanoseconds() {
         return mPeriodNanoseconds.load(std::memory_order_acquire);
     }
 
 private:
     // These do not change after open().
-    int32_t              mSamplesPerFrame = AAUDIO_UNSPECIFIED;
-    aaudio_sample_rate_t   mSampleRate = AAUDIO_UNSPECIFIED;
-    aaudio_stream_state_t  mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
-    aaudio_device_id_t     mDeviceId = AAUDIO_UNSPECIFIED;
+    int32_t                mSamplesPerFrame = AAUDIO_UNSPECIFIED;
+    int32_t                mSampleRate = AAUDIO_UNSPECIFIED;
+    int32_t                mDeviceId = AAUDIO_UNSPECIFIED;
     aaudio_sharing_mode_t  mSharingMode = AAUDIO_SHARING_MODE_SHARED;
     aaudio_audio_format_t  mFormat = AAUDIO_FORMAT_UNSPECIFIED;
     aaudio_direction_t     mDirection = AAUDIO_DIRECTION_OUTPUT;
+    aaudio_stream_state_t  mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
 
     // background thread ----------------------------------
-    bool                 mHasThread = false;
-    pthread_t            mThread; // initialized in constructor
+    bool                   mHasThread = false;
+    pthread_t              mThread; // initialized in constructor
 
     // These are set by the application thread and then read by the audio pthread.
-    std::atomic<aaudio_nanoseconds_t>  mPeriodNanoseconds; // for tuning SCHED_FIFO threads
+    std::atomic<int64_t>   mPeriodNanoseconds; // for tuning SCHED_FIFO threads
     // TODO make atomic?
     aaudio_audio_thread_proc_t* mThreadProc = nullptr;
-    void*                mThreadArg = nullptr;
+    void*                  mThreadArg = nullptr;
     aaudio_result_t        mThreadRegistrationResult = AAUDIO_OK;
 
 
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.h b/media/libaaudio/src/core/AudioStreamBuilder.h
index f366688..7b5f35c 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.h
+++ b/media/libaaudio/src/core/AudioStreamBuilder.h
@@ -56,11 +56,11 @@
         return this;
     }
 
-    aaudio_sample_rate_t getSampleRate() const {
+    int32_t getSampleRate() const {
         return mSampleRate;
     }
 
-    AudioStreamBuilder* setSampleRate(aaudio_sample_rate_t sampleRate) {
+    AudioStreamBuilder* setSampleRate(int32_t sampleRate) {
         mSampleRate = sampleRate;
         return this;
     }
@@ -83,20 +83,20 @@
         return this;
     }
 
-    aaudio_size_frames_t getBufferCapacity() const {
+    int32_t getBufferCapacity() const {
         return mBufferCapacity;
     }
 
-    AudioStreamBuilder* setBufferCapacity(aaudio_size_frames_t frames) {
+    AudioStreamBuilder* setBufferCapacity(int32_t frames) {
         mBufferCapacity = frames;
         return this;
     }
 
-    aaudio_device_id_t getDeviceId() const {
+    int32_t getDeviceId() const {
         return mDeviceId;
     }
 
-    AudioStreamBuilder* setDeviceId(aaudio_device_id_t deviceId) {
+    AudioStreamBuilder* setDeviceId(int32_t deviceId) {
         mDeviceId = deviceId;
         return this;
     }
@@ -105,12 +105,12 @@
 
 private:
     int32_t                mSamplesPerFrame = AAUDIO_UNSPECIFIED;
-    aaudio_sample_rate_t   mSampleRate = AAUDIO_UNSPECIFIED;
-    aaudio_device_id_t     mDeviceId = AAUDIO_DEVICE_UNSPECIFIED;
+    int32_t                mSampleRate = AAUDIO_UNSPECIFIED;
+    int32_t                mDeviceId = AAUDIO_DEVICE_UNSPECIFIED;
     aaudio_sharing_mode_t  mSharingMode = AAUDIO_SHARING_MODE_SHARED;
     aaudio_audio_format_t  mFormat = AAUDIO_FORMAT_UNSPECIFIED;
     aaudio_direction_t     mDirection = AAUDIO_DIRECTION_OUTPUT;
-    aaudio_size_frames_t   mBufferCapacity = AAUDIO_UNSPECIFIED;
+    int32_t                mBufferCapacity = AAUDIO_UNSPECIFIED;
 };
 
 } /* namespace aaudio */
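
Since every setter above now takes a plain integer and returns the builder itself, configuration can be chained. A short sketch, assuming it is compiled inside libaaudio where core/AudioStreamBuilder.h is on the include path:

    #include "core/AudioStreamBuilder.h"   // include path is an assumption for this sketch

    static void configureBuilder(aaudio::AudioStreamBuilder *builder) {
        builder->setSampleRate(48000)                    // int32_t, no aaudio_sample_rate_t
               ->setDeviceId(AAUDIO_DEVICE_UNSPECIFIED)  // device id is now int32_t
               ->setBufferCapacity(2 * 192);             // frames, also int32_t
    }
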
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index 17d0a54..dd040a0 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -177,11 +177,11 @@
 }
 
 aaudio_result_t AudioStreamRecord::read(void *buffer,
-                                      aaudio_size_frames_t numFrames,
-                                      aaudio_nanoseconds_t timeoutNanoseconds)
+                                      int32_t numFrames,
+                                      int64_t timeoutNanoseconds)
 {
-    aaudio_size_frames_t bytesPerFrame = getBytesPerFrame();
-    aaudio_size_bytes_t numBytes;
+    int32_t bytesPerFrame = getBytesPerFrame();
+    int32_t numBytes;
     aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
     if (result != AAUDIO_OK) {
         return result;
@@ -195,25 +195,23 @@
     } else if (bytesRead < 0) {
         return AAudioConvert_androidToAAudioResult(bytesRead);
     }
-    aaudio_size_frames_t framesRead = (aaudio_size_frames_t)(bytesRead / bytesPerFrame);
+    int32_t framesRead = (int32_t)(bytesRead / bytesPerFrame);
     return (aaudio_result_t) framesRead;
 }
 
-aaudio_result_t AudioStreamRecord::setBufferSize(aaudio_size_frames_t requestedFrames,
-                                             aaudio_size_frames_t *actualFrames)
+aaudio_result_t AudioStreamRecord::setBufferSize(int32_t requestedFrames)
 {
-    *actualFrames = getBufferCapacity();
-    return AAUDIO_OK;
+    return getBufferSize();
 }
 
-aaudio_size_frames_t AudioStreamRecord::getBufferSize() const
+int32_t AudioStreamRecord::getBufferSize() const
 {
     return getBufferCapacity(); // TODO implement in AudioRecord?
 }
 
-aaudio_size_frames_t AudioStreamRecord::getBufferCapacity() const
+int32_t AudioStreamRecord::getBufferCapacity() const
 {
-    return static_cast<aaudio_size_frames_t>(mAudioRecord->frameCount());
+    return static_cast<int32_t>(mAudioRecord->frameCount());
 }
 
 int32_t AudioStreamRecord::getXRunCount() const
@@ -221,7 +219,7 @@
     return AAUDIO_ERROR_UNIMPLEMENTED; // TODO implement when AudioRecord supports it
 }
 
-aaudio_size_frames_t AudioStreamRecord::getFramesPerBurst() const
+int32_t AudioStreamRecord::getFramesPerBurst() const
 {
     return 192; // TODO add query to AudioRecord.cpp
 }
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.h b/media/libaaudio/src/legacy/AudioStreamRecord.h
index a2ac9f3..c8d389b 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.h
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.h
@@ -44,25 +44,24 @@
     virtual aaudio_result_t requestStop() override;
 
     virtual aaudio_result_t getTimestamp(clockid_t clockId,
-                                       aaudio_position_frames_t *framePosition,
-                                       aaudio_nanoseconds_t *timeNanoseconds) override {
+                                       int64_t *framePosition,
+                                       int64_t *timeNanoseconds) override {
         return AAUDIO_ERROR_UNIMPLEMENTED; // TODO
     }
 
     virtual aaudio_result_t read(void *buffer,
-                             aaudio_size_frames_t numFrames,
-                             aaudio_nanoseconds_t timeoutNanoseconds) override;
+                             int32_t numFrames,
+                             int64_t timeoutNanoseconds) override;
 
-    virtual aaudio_result_t setBufferSize(aaudio_size_frames_t requestedFrames,
-                                             aaudio_size_frames_t *actualFrames) override;
+    virtual aaudio_result_t setBufferSize(int32_t requestedFrames) override;
 
-    virtual aaudio_size_frames_t getBufferSize() const override;
+    virtual int32_t getBufferSize() const override;
 
-    virtual aaudio_size_frames_t getBufferCapacity() const override;
+    virtual int32_t getBufferCapacity() const override;
 
     virtual int32_t getXRunCount() const override;
 
-    virtual aaudio_size_frames_t getFramesPerBurst() const override;
+    virtual int32_t getFramesPerBurst() const override;
 
     virtual aaudio_result_t updateState() override;
 
@@ -70,7 +69,7 @@
     android::sp<android::AudioRecord> mAudioRecord;
     // TODO add 64-bit position reporting to AudioRecord and use it.
     aaudio_wrapping_frames_t   mPositionWhenStarting = 0;
-    android::String16        mOpPackageName;
+    android::String16          mOpPackageName;
 };
 
 } /* namespace aaudio */
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index b7d8664..e0a04c3 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -221,11 +221,11 @@
 }
 
 aaudio_result_t AudioStreamTrack::write(const void *buffer,
-                                      aaudio_size_frames_t numFrames,
-                                      aaudio_nanoseconds_t timeoutNanoseconds)
+                                      int32_t numFrames,
+                                      int64_t timeoutNanoseconds)
 {
-    aaudio_size_frames_t bytesPerFrame = getBytesPerFrame();
-    aaudio_size_bytes_t numBytes;
+    int32_t bytesPerFrame = getBytesPerFrame();
+    int32_t numBytes;
     aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
     if (result != AAUDIO_OK) {
         return result;
@@ -240,31 +240,29 @@
         ALOGE("invalid write, returned %d", (int)bytesWritten);
         return AAudioConvert_androidToAAudioResult(bytesWritten);
     }
-    aaudio_size_frames_t framesWritten = (aaudio_size_frames_t)(bytesWritten / bytesPerFrame);
+    int32_t framesWritten = (int32_t)(bytesWritten / bytesPerFrame);
     incrementFramesWritten(framesWritten);
     return framesWritten;
 }
 
-aaudio_result_t AudioStreamTrack::setBufferSize(aaudio_size_frames_t requestedFrames,
-                                             aaudio_size_frames_t *actualFrames)
+aaudio_result_t AudioStreamTrack::setBufferSize(int32_t requestedFrames)
 {
     ssize_t result = mAudioTrack->setBufferSizeInFrames(requestedFrames);
-    if (result != OK) {
+    if (result < 0) {
         return AAudioConvert_androidToAAudioResult(result);
     } else {
-        *actualFrames = result;
-        return AAUDIO_OK;
+        return result;
     }
 }
 
-aaudio_size_frames_t AudioStreamTrack::getBufferSize() const
+int32_t AudioStreamTrack::getBufferSize() const
 {
-    return static_cast<aaudio_size_frames_t>(mAudioTrack->getBufferSizeInFrames());
+    return static_cast<int32_t>(mAudioTrack->getBufferSizeInFrames());
 }
 
-aaudio_size_frames_t AudioStreamTrack::getBufferCapacity() const
+int32_t AudioStreamTrack::getBufferCapacity() const
 {
-    return static_cast<aaudio_size_frames_t>(mAudioTrack->frameCount());
+    return static_cast<int32_t>(mAudioTrack->frameCount());
 }
 
 int32_t AudioStreamTrack::getXRunCount() const
@@ -277,7 +275,7 @@
     return 192; // TODO add query to AudioTrack.cpp
 }
 
-aaudio_position_frames_t AudioStreamTrack::getFramesRead() {
+int64_t AudioStreamTrack::getFramesRead() {
     aaudio_wrapping_frames_t position;
     status_t result;
     switch (getState()) {
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.h b/media/libaaudio/src/legacy/AudioStreamTrack.h
index 73d0cac..1de07ce 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.h
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.h
@@ -46,31 +46,30 @@
     virtual aaudio_result_t requestStop() override;
 
     virtual aaudio_result_t getTimestamp(clockid_t clockId,
-                                       aaudio_position_frames_t *framePosition,
-                                       aaudio_nanoseconds_t *timeNanoseconds) override {
+                                       int64_t *framePosition,
+                                       int64_t *timeNanoseconds) override {
         return AAUDIO_ERROR_UNIMPLEMENTED; // TODO call getTimestamp(ExtendedTimestamp *timestamp);
     }
 
     virtual aaudio_result_t write(const void *buffer,
-                             aaudio_size_frames_t numFrames,
-                             aaudio_nanoseconds_t timeoutNanoseconds) override;
+                             int32_t numFrames,
+                             int64_t timeoutNanoseconds) override;
 
-    virtual aaudio_result_t setBufferSize(aaudio_size_frames_t requestedFrames,
-                                             aaudio_size_frames_t *actualFrames) override;
-    virtual aaudio_size_frames_t getBufferSize() const override;
-    virtual aaudio_size_frames_t getBufferCapacity() const override;
-    virtual aaudio_size_frames_t getFramesPerBurst()const  override;
+    virtual aaudio_result_t setBufferSize(int32_t requestedFrames) override;
+    virtual int32_t getBufferSize() const override;
+    virtual int32_t getBufferCapacity() const override;
+    virtual int32_t getFramesPerBurst() const override;
     virtual int32_t getXRunCount() const override;
 
-    virtual aaudio_position_frames_t getFramesRead() override;
+    virtual int64_t getFramesRead() override;
 
     virtual aaudio_result_t updateState() override;
 
 private:
     android::sp<android::AudioTrack> mAudioTrack;
     // TODO add 64-bit position reporting to AudioRecord and use it.
-    aaudio_wrapping_frames_t           mPositionWhenStarting = 0;
-    aaudio_wrapping_frames_t           mPositionWhenPausing = 0;
+    aaudio_wrapping_frames_t         mPositionWhenStarting = 0;
+    aaudio_wrapping_frames_t         mPositionWhenPausing = 0;
 };
 
 } /* namespace aaudio */
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index 34c1ae4..26fa75d 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -27,8 +27,8 @@
 
 using namespace android;
 
-aaudio_size_bytes_t AAudioConvert_formatToSizeInBytes(aaudio_audio_format_t format) {
-    aaudio_size_bytes_t size = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+int32_t AAudioConvert_formatToSizeInBytes(aaudio_audio_format_t format) {
+    int32_t size = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
     switch (format) {
         case AAUDIO_FORMAT_PCM_I16:
             size = sizeof(int16_t);
@@ -172,12 +172,12 @@
     return aaudioFormat;
 }
 
-aaudio_size_bytes_t AAudioConvert_framesToBytes(aaudio_size_frames_t numFrames,
-                                            aaudio_size_bytes_t bytesPerFrame,
-                                            aaudio_size_bytes_t *sizeInBytes) {
+int32_t AAudioConvert_framesToBytes(int32_t numFrames,
+                                            int32_t bytesPerFrame,
+                                            int32_t *sizeInBytes) {
     // TODO implement more elegantly
     const int32_t maxChannels = 256; // ridiculously large
-    const aaudio_size_frames_t maxBytesPerFrame = maxChannels * sizeof(float);
+    const int32_t maxBytesPerFrame = maxChannels * sizeof(float);
     // Prevent overflow by limiting multiplicands.
     if (bytesPerFrame > maxBytesPerFrame || numFrames > (0x3FFFFFFF / maxBytesPerFrame)) {
         ALOGE("size overflow, numFrames = %d, frameSize = %zd", numFrames, bytesPerFrame);
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index 38696df..d3b5ffe 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -46,9 +46,9 @@
  * @param sizeInBytes total size in bytes
  * @return AAUDIO_OK or negative error, eg. AAUDIO_ERROR_OUT_OF_RANGE
  */
-aaudio_size_bytes_t AAudioConvert_framesToBytes(aaudio_size_frames_t numFrames,
-                                            aaudio_size_bytes_t bytesPerFrame,
-                                            aaudio_size_bytes_t *sizeInBytes);
+int32_t AAudioConvert_framesToBytes(int32_t numFrames,
+                                            int32_t bytesPerFrame,
+                                            int32_t *sizeInBytes);
 
 audio_format_t AAudioConvert_aaudioToAndroidDataFormat(aaudio_audio_format_t aaudio_format);
 
@@ -57,6 +57,6 @@
 /**
  * @return the size of a sample of the given format in bytes or AAUDIO_ERROR_ILLEGAL_ARGUMENT
  */
-aaudio_size_bytes_t AAudioConvert_formatToSizeInBytes(aaudio_audio_format_t format);
+int32_t AAudioConvert_formatToSizeInBytes(aaudio_audio_format_t format);
 
 #endif //UTILITY_AAUDIO_UTILITIES_H
diff --git a/media/libaaudio/src/utility/AudioClock.h b/media/libaaudio/src/utility/AudioClock.h
index 9ac21d3..952c7b8 100644
--- a/media/libaaudio/src/utility/AudioClock.h
+++ b/media/libaaudio/src/utility/AudioClock.h
@@ -22,9 +22,15 @@
 
 #include <aaudio/AAudioDefinitions.h>
 
+// Time conversion constants.
+#define AAUDIO_NANOS_PER_MICROSECOND ((int64_t)1000)
+#define AAUDIO_NANOS_PER_MILLISECOND (AAUDIO_NANOS_PER_MICROSECOND * 1000)
+#define AAUDIO_MILLIS_PER_SECOND     1000
+#define AAUDIO_NANOS_PER_SECOND      (AAUDIO_NANOS_PER_MILLISECOND * AAUDIO_MILLIS_PER_SECOND)
+
 class AudioClock {
 public:
-    static aaudio_nanoseconds_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
+    static int64_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
         struct timespec time;
         int result = clock_gettime(clockId, &time);
         if (result < 0) {
@@ -42,7 +48,7 @@
      * @param clockId CLOCK_MONOTONIC is default
      * @return 0, a negative error, or 1 if the call is interrupted by a signal handler (EINTR)
      */
-    static int sleepUntilNanoTime(aaudio_nanoseconds_t nanoTime,
+    static int sleepUntilNanoTime(int64_t nanoTime,
                                   clockid_t clockId = CLOCK_MONOTONIC) {
         if (nanoTime > 0) {
             struct timespec time;
@@ -72,7 +78,7 @@
      * @param clockId CLOCK_MONOTONIC is default
      * @return 0, a negative error, or 1 if the call is interrupted by a signal handler (EINTR)
      */
-    static int sleepForNanos(aaudio_nanoseconds_t nanoseconds, clockid_t clockId = CLOCK_MONOTONIC) {
+    static int sleepForNanos(int64_t nanoseconds, clockid_t clockId = CLOCK_MONOTONIC) {
         if (nanoseconds > 0) {
             struct timespec time;
             time.tv_sec = nanoseconds / AAUDIO_NANOS_PER_SECOND;
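
With the aaudio_nanoseconds_t typedef gone, the time helpers operate directly on int64_t and the unit constants now live in this header. A small sketch of their use, assuming it is built inside libaaudio where utility/AudioClock.h resolves:

    #include "utility/AudioClock.h"   // include path is an assumption for this sketch

    static void pacingExample() {
        int64_t now = AudioClock::getNanoseconds();           // CLOCK_MONOTONIC by default
        int64_t period = 10 * AAUDIO_NANOS_PER_MILLISECOND;   // constants defined above
        AudioClock::sleepUntilNanoTime(now + period);         // absolute deadline
        AudioClock::sleepForNanos(500 * AAUDIO_NANOS_PER_MICROSECOND);  // relative sleep
    }
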
diff --git a/media/libaaudio/src/utility/HandleTracker.h b/media/libaaudio/src/utility/HandleTracker.h
index c80860c..23a73ed 100644
--- a/media/libaaudio/src/utility/HandleTracker.h
+++ b/media/libaaudio/src/utility/HandleTracker.h
@@ -20,6 +20,7 @@
 #include <stdint.h>
 #include <utils/Mutex.h>
 
+typedef int32_t  aaudio_handle_t;
 typedef int32_t  handle_tracker_type_t;       // what kind of handle
 typedef int32_t  handle_tracker_slot_t;       // index in allocation table
 typedef int32_t  handle_tracker_generation_t; // incremented when slot used
diff --git a/media/libaudiohal/EffectHalHidl.cpp b/media/libaudiohal/EffectHalHidl.cpp
index a9d737a..d17f8bd 100644
--- a/media/libaudiohal/EffectHalHidl.cpp
+++ b/media/libaudiohal/EffectHalHidl.cpp
@@ -26,8 +26,12 @@
 #include "HidlUtils.h"
 
 using ::android::hardware::audio::effect::V2_0::AudioBuffer;
+using ::android::hardware::audio::effect::V2_0::EffectBufferAccess;
+using ::android::hardware::audio::effect::V2_0::EffectConfigParameters;
 using ::android::hardware::audio::effect::V2_0::MessageQueueFlagBits;
 using ::android::hardware::audio::effect::V2_0::Result;
+using ::android::hardware::audio::common::V2_0::AudioChannelMask;
+using ::android::hardware::audio::common::V2_0::AudioFormat;
 using ::android::hardware::hidl_vec;
 using ::android::hardware::MQDescriptorSync;
 using ::android::hardware::Return;
@@ -56,6 +60,46 @@
             descriptor.implementor.data(), descriptor.implementor.size());
 }
 
+// TODO(mnaganov): These buffer conversion functions should be shared with the Effect wrapper
+// via HidlUtils. Move them there when hardware/interfaces gets unfrozen again.
+
+// static
+void EffectHalHidl::effectBufferConfigFromHal(
+        const buffer_config_t& halConfig, EffectBufferConfig* config) {
+    config->samplingRateHz = halConfig.samplingRate;
+    config->channels = AudioChannelMask(halConfig.channels);
+    config->format = AudioFormat(halConfig.format);
+    config->accessMode = EffectBufferAccess(halConfig.accessMode);
+    config->mask = EffectConfigParameters(halConfig.mask);
+}
+
+// static
+void EffectHalHidl::effectBufferConfigToHal(
+        const EffectBufferConfig& config, buffer_config_t* halConfig) {
+    halConfig->buffer.frameCount = 0;
+    halConfig->buffer.raw = NULL;
+    halConfig->samplingRate = config.samplingRateHz;
+    halConfig->channels = static_cast<uint32_t>(config.channels);
+    halConfig->bufferProvider.cookie = NULL;
+    halConfig->bufferProvider.getBuffer = NULL;
+    halConfig->bufferProvider.releaseBuffer = NULL;
+    halConfig->format = static_cast<uint8_t>(config.format);
+    halConfig->accessMode = static_cast<uint8_t>(config.accessMode);
+    halConfig->mask = static_cast<uint8_t>(config.mask);
+}
+
+// static
+void EffectHalHidl::effectConfigFromHal(const effect_config_t& halConfig, EffectConfig* config) {
+    effectBufferConfigFromHal(halConfig.inputCfg, &config->inputCfg);
+    effectBufferConfigFromHal(halConfig.outputCfg, &config->outputCfg);
+}
+
+// static
+void EffectHalHidl::effectConfigToHal(const EffectConfig& config, effect_config_t* halConfig) {
+    effectBufferConfigToHal(config.inputCfg, &halConfig->inputCfg);
+    effectBufferConfigToHal(config.outputCfg, &halConfig->outputCfg);
+}
+
 // static
 status_t EffectHalHidl::analyzeResult(const Result& result) {
     switch (result) {
@@ -166,6 +210,15 @@
 status_t EffectHalHidl::command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
         uint32_t *replySize, void *pReplyData) {
     if (mEffect == 0) return NO_INIT;
+
+    // Special cases.
+    if (cmdCode == EFFECT_CMD_SET_CONFIG || cmdCode == EFFECT_CMD_SET_CONFIG_REVERSE) {
+        return setConfigImpl(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
+    } else if (cmdCode == EFFECT_CMD_GET_CONFIG || cmdCode == EFFECT_CMD_GET_CONFIG_REVERSE) {
+        return getConfigImpl(cmdCode, replySize, pReplyData);
+    }
+
+    // Common case.
     hidl_vec<uint8_t> hidlData;
     if (pCmdData != nullptr && cmdSize > 0) {
         hidlData.setToExternal(reinterpret_cast<uint8_t*>(pCmdData), cmdSize);
@@ -205,4 +258,58 @@
     return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
 }
 
+status_t EffectHalHidl::getConfigImpl(
+        uint32_t cmdCode, uint32_t *replySize, void *pReplyData) {
+    if (replySize == NULL || *replySize != sizeof(effect_config_t) || pReplyData == NULL) {
+        return BAD_VALUE;
+    }
+    status_t result = FAILED_TRANSACTION;
+    Return<void> ret;
+    if (cmdCode == EFFECT_CMD_GET_CONFIG) {
+        ret = mEffect->getConfig([&] (Result r, const EffectConfig &hidlConfig) {
+            result = analyzeResult(r);
+            if (r == Result::OK) {
+                effectConfigToHal(hidlConfig, static_cast<effect_config_t*>(pReplyData));
+            }
+        });
+    } else {
+        ret = mEffect->getConfigReverse([&] (Result r, const EffectConfig &hidlConfig) {
+            result = analyzeResult(r);
+            if (r == Result::OK) {
+                effectConfigToHal(hidlConfig, static_cast<effect_config_t*>(pReplyData));
+            }
+        });
+    }
+    if (!ret.isOk()) {
+        result = FAILED_TRANSACTION;
+    }
+    return result;
+}
+
+status_t EffectHalHidl::setConfigImpl(
+        uint32_t cmdCode, uint32_t cmdSize, void *pCmdData, uint32_t *replySize, void *pReplyData) {
+    if (pCmdData == NULL || cmdSize != sizeof(effect_config_t) ||
+            replySize == NULL || *replySize != sizeof(int32_t) || pReplyData == NULL) {
+        return BAD_VALUE;
+    }
+    const effect_config_t *halConfig = static_cast<effect_config_t*>(pCmdData);
+    if (halConfig->inputCfg.bufferProvider.getBuffer != NULL ||
+            halConfig->inputCfg.bufferProvider.releaseBuffer != NULL ||
+            halConfig->outputCfg.bufferProvider.getBuffer != NULL ||
+            halConfig->outputCfg.bufferProvider.releaseBuffer != NULL) {
+        ALOGE("Buffer provider callbacks are not supported");
+    }
+    EffectConfig hidlConfig;
+    effectConfigFromHal(*halConfig, &hidlConfig);
+    Return<Result> ret = cmdCode == EFFECT_CMD_SET_CONFIG ?
+            mEffect->setConfig(hidlConfig, nullptr, nullptr) :
+            mEffect->setConfigReverse(hidlConfig, nullptr, nullptr);
+    status_t result = FAILED_TRANSACTION;
+    if (ret.isOk()) {
+        result = analyzeResult(ret);
+        *static_cast<int32_t*>(pReplyData) = result;
+    }
+    return result;
+}
+
 } // namespace android
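
The command() entry point now intercepts EFFECT_CMD_SET_CONFIG/GET_CONFIG (and their reverse variants) and converts between the legacy effect_config_t and the HIDL EffectConfig instead of forwarding raw bytes. A hedged caller-side sketch follows; the EffectHalInterface pointer type and header locations are assumptions, only the command() signature and the EFFECT_CMD_* codes come from this patch and <system/audio_effect.h>:

    #include <system/audio_effect.h>
    #include <utils/Errors.h>
    #include <media/audiohal/EffectHalInterface.h>   // assumed location of the HAL interface

    android::status_t applyEffectConfig(android::EffectHalInterface *effect,
                                        const effect_config_t &config) {
        int32_t cmdStatus = 0;
        uint32_t replySize = sizeof(cmdStatus);
        // EFFECT_CMD_SET_CONFIG is now translated into a HIDL EffectConfig via
        // effectConfigFromHal() before reaching IEffect::setConfig().
        return effect->command(EFFECT_CMD_SET_CONFIG,
                               sizeof(effect_config_t),
                               const_cast<effect_config_t *>(&config),
                               &replySize, &cmdStatus);
    }
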
diff --git a/media/libaudiohal/EffectHalHidl.h b/media/libaudiohal/EffectHalHidl.h
index 1ed1153..c8db36f 100644
--- a/media/libaudiohal/EffectHalHidl.h
+++ b/media/libaudiohal/EffectHalHidl.h
@@ -23,6 +23,8 @@
 #include <fmq/MessageQueue.h>
 #include <system/audio_effect.h>
 
+using ::android::hardware::audio::effect::V2_0::EffectBufferConfig;
+using ::android::hardware::audio::effect::V2_0::EffectConfig;
 using ::android::hardware::audio::effect::V2_0::EffectDescriptor;
 using ::android::hardware::audio::effect::V2_0::IEffect;
 using ::android::hardware::EventFlag;
@@ -75,6 +77,12 @@
     EventFlag* mEfGroup;
 
     static status_t analyzeResult(const hardware::audio::effect::V2_0::Result& result);
+    static void effectBufferConfigFromHal(
+            const buffer_config_t& halConfig, EffectBufferConfig* config);
+    static void effectBufferConfigToHal(
+            const EffectBufferConfig& config, buffer_config_t* halConfig);
+    static void effectConfigFromHal(const effect_config_t& halConfig, EffectConfig* config);
+    static void effectConfigToHal(const EffectConfig& config, effect_config_t* halConfig);
 
     // Can not be constructed directly by clients.
     EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId);
@@ -82,8 +90,12 @@
     // The destructor automatically releases the effect.
     virtual ~EffectHalHidl();
 
+    status_t getConfigImpl(uint32_t cmdCode, uint32_t *replySize, void *pReplyData);
     status_t prepareForProcessing();
     status_t processImpl(uint32_t mqFlag);
+    status_t setConfigImpl(
+            uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
+            uint32_t *replySize, void *pReplyData);
     status_t setProcessBuffers();
 };
 
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index 5222a42..3996227 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -72,14 +72,9 @@
     SET_NEXT_PLAYER,
     APPLY_VOLUME_SHAPER,
     GET_VOLUME_SHAPER_STATE,
-    // ModDrm
+    // Modular DRM
     PREPARE_DRM,
     RELEASE_DRM,
-    GET_KEY_REQUEST,
-    PROVIDE_KEY_RESPONSE,
-    RESTORE_KEYS,
-    GET_DRM_PROPERTY_STRING,
-    SET_DRM_PROPERTY_STRING,
 };
 
 // ModDrm helpers
@@ -521,14 +516,14 @@
         return state;
     }
 
-    // ModDrm
-    status_t prepareDrm(const uint8_t uuid[16], const int mode)
+    // Modular DRM
+    status_t prepareDrm(const uint8_t uuid[16], const Vector<uint8_t>& drmSessionId)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
 
         data.write(uuid, 16);
-        data.writeInt32(mode);
+        writeVector(data, drmSessionId);
 
         status_t status = remote()->transact(PREPARE_DRM, data, &reply);
         if (status != OK) {
@@ -552,105 +547,6 @@
 
         return reply.readInt32();
     }
-
-    status_t getKeyRequest(Vector<uint8_t> const& scope, String8 const& mimeType,
-            DrmPlugin::KeyType keyType, KeyedVector<String8, String8>& optionalParameters,
-            Vector<uint8_t>& request, String8& defaultUrl,
-            DrmPlugin::KeyRequestType& keyRequestType)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
-
-        writeVector(data, scope);
-        data.writeString8(mimeType);
-        data.writeInt32((int32_t)keyType);
-
-        data.writeUint32(optionalParameters.size());
-        for (size_t i = 0; i < optionalParameters.size(); ++i) {
-            data.writeString8(optionalParameters.keyAt(i));
-            data.writeString8(optionalParameters.valueAt(i));
-        }
-
-        status_t status = remote()->transact(GET_KEY_REQUEST, data, &reply);
-        if (status != OK) {
-            ALOGE("getKeyRequest: binder call failed: %d", status);
-            return status;
-        }
-
-        readVector(reply, request);
-        defaultUrl = reply.readString8();
-        keyRequestType = (DrmPlugin::KeyRequestType)reply.readInt32();
-
-        return reply.readInt32();
-    }
-
-    status_t provideKeyResponse(Vector<uint8_t>& releaseKeySetId, Vector<uint8_t>& response,
-            Vector<uint8_t> &keySetId)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
-
-        writeVector(data, releaseKeySetId);
-        writeVector(data, response);
-
-        status_t status = remote()->transact(PROVIDE_KEY_RESPONSE, data, &reply);
-        if (status != OK) {
-            ALOGE("provideKeyResponse: binder call failed: %d", status);
-            return status;
-        }
-
-        readVector(reply, keySetId);
-
-        return reply.readInt32();
-    }
-
-    status_t restoreKeys(Vector<uint8_t> const& keySetId)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
-
-        writeVector(data, keySetId);
-
-        status_t status = remote()->transact(RESTORE_KEYS, data, &reply);
-        if (status != OK) {
-            ALOGE("restoreKeys: binder call failed: %d", status);
-            return status;
-        }
-
-        return reply.readInt32();
-    }
-
-    status_t getDrmPropertyString(String8 const& name, String8& value)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
-
-        data.writeString8(name);
-        status_t status = remote()->transact(GET_DRM_PROPERTY_STRING, data, &reply);
-        if (status != OK) {
-            ALOGE("getDrmPropertyString: binder call failed: %d", status);
-            return status;
-        }
-
-        value = reply.readString8();
-        return reply.readInt32();
-    }
-
-    status_t setDrmPropertyString(String8 const& name, String8 const& value)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
-
-        data.writeString8(name);
-        data.writeString8(value);
-        status_t status = remote()->transact(SET_DRM_PROPERTY_STRING, data, &reply);
-        if (status != OK) {
-            ALOGE("setDrmPropertyString: binder call failed: %d", status);
-            return status;
-        }
-
-        return reply.readInt32();
-    }
 };
 
 IMPLEMENT_META_INTERFACE(MediaPlayer, "android.media.IMediaPlayer");
@@ -983,15 +879,16 @@
             return NO_ERROR;
         } break;
 
-        // ModDrm
+        // Modular DRM
         case PREPARE_DRM: {
             CHECK_INTERFACE(IMediaPlayer, data, reply);
+
             uint8_t uuid[16];
             data.read(uuid, sizeof(uuid));
+            Vector<uint8_t> drmSessionId;
+            readVector(data, drmSessionId);
 
-            int mode = data.readInt32();
-
-            uint32_t result = prepareDrm(uuid, mode);
+            uint32_t result = prepareDrm(uuid, drmSessionId);
             reply->writeInt32(result);
             return OK;
         }
@@ -1002,73 +899,6 @@
             reply->writeInt32(result);
             return OK;
         }
-        case GET_KEY_REQUEST: {
-            CHECK_INTERFACE(IMediaPlayer, data, reply);
-
-            Vector<uint8_t> scope;
-            readVector(data, scope);
-            String8 mimeType = data.readString8();
-            DrmPlugin::KeyType keyType = (DrmPlugin::KeyType)data.readInt32();
-
-            KeyedVector<String8, String8> optionalParameters;
-            uint32_t count = data.readUint32();
-            for (size_t i = 0; i < count; ++i) {
-                String8 key, value;
-                key = data.readString8();
-                value = data.readString8();
-                optionalParameters.add(key, value);
-            }
-
-            Vector<uint8_t> request;
-            String8 defaultUrl;
-            DrmPlugin::KeyRequestType keyRequestType = DrmPlugin::kKeyRequestType_Unknown;
-
-            status_t result = getKeyRequest(scope, mimeType, keyType, optionalParameters,
-                                      request, defaultUrl, keyRequestType);
-
-            writeVector(*reply, request);
-            reply->writeString8(defaultUrl);
-            reply->writeInt32(keyRequestType);
-            reply->writeInt32(result);
-            return OK;
-        }
-        case PROVIDE_KEY_RESPONSE: {
-            CHECK_INTERFACE(IMediaPlayer, data, reply);
-            Vector<uint8_t> releaseKeySetId, response, keySetId;
-            readVector(data, releaseKeySetId);
-            readVector(data, response);
-            uint32_t result = provideKeyResponse(releaseKeySetId, response, keySetId);
-            writeVector(*reply, keySetId);
-            reply->writeInt32(result);
-            return OK;
-        }
-        case RESTORE_KEYS: {
-            CHECK_INTERFACE(IMediaPlayer, data, reply);
-
-            Vector<uint8_t> keySetId;
-            readVector(data, keySetId);
-            uint32_t result = restoreKeys(keySetId);
-            reply->writeInt32(result);
-            return OK;
-        }
-        case GET_DRM_PROPERTY_STRING: {
-            CHECK_INTERFACE(IMediaPlayer, data, reply);
-            String8 name, value;
-            name = data.readString8();
-            uint32_t result = getDrmPropertyString(name, value);
-            reply->writeString8(value);
-            reply->writeInt32(result);
-            return OK;
-        }
-        case SET_DRM_PROPERTY_STRING: {
-            CHECK_INTERFACE(IMediaPlayer, data, reply);
-            String8 name, value;
-            name = data.readString8();
-            value = data.readString8();
-            uint32_t result = setDrmPropertyString(name, value);
-            reply->writeInt32(result);
-            return OK;
-        }
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/aidl/android/IGraphicBufferSource.aidl b/media/libmedia/aidl/android/IGraphicBufferSource.aidl
index a8dd309..325c631 100644
--- a/media/libmedia/aidl/android/IGraphicBufferSource.aidl
+++ b/media/libmedia/aidl/android/IGraphicBufferSource.aidl
@@ -25,11 +25,12 @@
  */
 interface IGraphicBufferSource {
     void configure(IOMXNode omxNode, int dataSpace);
-    void setSuspend(boolean suspend);
+    void setSuspend(boolean suspend, long suspendTimeUs);
     void setRepeatPreviousFrameDelayUs(long repeatAfterUs);
     void setMaxFps(float maxFps);
     void setTimeLapseConfig(long timePerFrameUs, long timePerCaptureUs);
     void setStartTimeUs(long startTimeUs);
+    void setStopTimeUs(long stopTimeUs);
     void setColorAspects(int aspects);
     void setTimeOffsetUs(long timeOffsetsUs);
     void signalEndOfInputStream();
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index dfc2e1b..685065a 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -16,7 +16,7 @@
 */
 
 //#define LOG_NDEBUG 0
-#define LOG_TAG "MediaPlayer"
+#define LOG_TAG "MediaPlayerNative"
 
 #include <fcntl.h>
 #include <inttypes.h>
@@ -1012,9 +1012,12 @@
     return mPlayer->getVolumeShaperState(id);
 }
 
-// ModDrm
-status_t MediaPlayer::prepareDrm(const uint8_t uuid[16], const int mode)
+// Modular DRM
+status_t MediaPlayer::prepareDrm(const uint8_t uuid[16], const Vector<uint8_t>& drmSessionId)
 {
+    // TODO change to ALOGV
+    ALOGD("prepareDrm: uuid: %p  drmSessionId: %p(%zu)", uuid,
+            drmSessionId.array(), drmSessionId.size());
     Mutex::Autolock _l(mLock);
     if (mPlayer == NULL) {
         return NO_INIT;
@@ -1026,10 +1029,19 @@
         return INVALID_OPERATION;
     }
 
-    status_t ret = mPlayer->prepareDrm(uuid, mode);
-    ALOGV("prepareDrm: ret=%d", ret);
+    if (drmSessionId.isEmpty()) {
+        ALOGE("prepareDrm: Unexpected. Can't proceed with crypto. Empty drmSessionId.");
+        return INVALID_OPERATION;
+    }
 
-    return ret;
+    // Passing down to mediaserver mainly for creating the crypto
+    status_t status = mPlayer->prepareDrm(uuid, drmSessionId);
+    ALOGE_IF(status != OK, "prepareDrm: Failed at mediaserver with ret: %d", status);
+
+    // TODO change to ALOGV
+    ALOGD("prepareDrm: mediaserver::prepareDrm ret=%d", status);
+
+    return status;
 }
 
 status_t MediaPlayer::releaseDrm()
@@ -1039,96 +1051,26 @@
         return NO_INIT;
     }
 
-    // Not allowing releaseDrm in an active state
-    if (mCurrentState & (MEDIA_PLAYER_STARTED | MEDIA_PLAYER_PAUSED)) {
-        ALOGE("releaseDrm can not be called in the started/paused state.");
+    // Not allowing releaseDrm in an active/resumable state
+    if (mCurrentState & (MEDIA_PLAYER_STARTED |
+                         MEDIA_PLAYER_PAUSED |
+                         MEDIA_PLAYER_PLAYBACK_COMPLETE |
+                         MEDIA_PLAYER_STATE_ERROR)) {
+        ALOGE("releaseDrm Unexpected state %d. Can only be called in stopped/idle.", mCurrentState);
         return INVALID_OPERATION;
     }
 
-    status_t ret = mPlayer->releaseDrm();
-    ALOGV("releaseDrm: ret=%d", ret);
-
-    return ret;
-}
-
-status_t MediaPlayer::getKeyRequest(Vector<uint8_t> const& scope, String8 const& mimeType,
-                              DrmPlugin::KeyType keyType,
-                              KeyedVector<String8, String8>& optionalParameters,
-                              Vector<uint8_t>& request, String8& defaultUrl,
-                              DrmPlugin::KeyRequestType& keyRequestType)
-{
-    Mutex::Autolock _l(mLock);
-    if (mPlayer == NULL) {
-        return NO_INIT;
+    status_t status = mPlayer->releaseDrm();
+    // TODO change to ALOGV
+    ALOGD("releaseDrm: mediaserver::releaseDrm ret: %d", status);
+    if (status != OK) {
+        ALOGE("releaseDrm: Failed at mediaserver with ret: %d", status);
+        // Overriding to OK so the client can proceed with its own cleanup.
+        // The client can't do more cleanup; mediaserver releases its crypto at the end of the session anyway.
+        status = OK;
     }
 
-    // Not enforcing a particular state beyond the checks enforced by the Java layer
-    // Key exchange can happen after the start.
-    status_t ret = mPlayer->getKeyRequest(scope, mimeType, keyType, optionalParameters,
-                                     request, defaultUrl, keyRequestType);
-    ALOGV("getKeyRequest ret=%d  %d %s %d ", ret,
-          (int)request.size(), defaultUrl.string(), (int)keyRequestType);
-
-    return ret;
-}
-
-status_t MediaPlayer::provideKeyResponse(Vector<uint8_t>& releaseKeySetId,
-                              Vector<uint8_t>& response, Vector<uint8_t>& keySetId)
-{
-    Mutex::Autolock _l(mLock);
-    if (mPlayer == NULL) {
-        return NO_INIT;
-    }
-
-    // Not enforcing a particular state beyond the checks enforced by the Java layer
-    // Key exchange can happen after the start.
-    status_t ret = mPlayer->provideKeyResponse(releaseKeySetId, response, keySetId);
-    ALOGV("provideKeyResponse: ret=%d", ret);
-
-    return ret;
-}
-
-status_t MediaPlayer::restoreKeys(Vector<uint8_t> const& keySetId)
-{
-    Mutex::Autolock _l(mLock);
-    if (mPlayer == NULL) {
-        return NO_INIT;
-    }
-
-    // Not enforcing a particular state beyond the checks enforced by the Java layer
-    // Key exchange can happen after the start.
-    status_t ret = mPlayer->restoreKeys(keySetId);
-    ALOGV("restoreKeys: ret=%d", ret);
-
-    return ret;
-}
-
-status_t MediaPlayer::getDrmPropertyString(String8 const& name, String8& value)
-{
-    Mutex::Autolock _l(mLock);
-    if (mPlayer == NULL) {
-        return NO_INIT;
-    }
-
-    // Not enforcing a particular state beyond the checks enforced by the Java layer
-    status_t ret = mPlayer->getDrmPropertyString(name, value);
-    ALOGV("getDrmPropertyString: ret=%d", ret);
-
-    return ret;
-}
-
-status_t MediaPlayer::setDrmPropertyString(String8 const& name, String8 const& value)
-{
-    Mutex::Autolock _l(mLock);
-    if (mPlayer == NULL) {
-        return NO_INIT;
-    }
-
-    // Not enforcing a particular state beyond the checks enforced by the Java layer
-    status_t ret = mPlayer->setDrmPropertyString(name, value);
-    ALOGV("setDrmPropertyString: ret=%d", ret);
-
-    return ret;
+    return status;
 }
 
 } // namespace android
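
Taken together with the state checks above, the client-side calling order looks roughly as follows. This is an illustrative sketch, not part of the patch: the 16-byte scheme UUID and the DRM session id are placeholders the application is expected to obtain from its own DRM stack (typically after receiving MEDIA_DRM_INFO), and error handling is trimmed.

    sp<MediaPlayer> player = new MediaPlayer();
    // ... setDataSource()/prepareAsync(), wait for the prepared / MEDIA_DRM_INFO callbacks ...
    uint8_t schemeUuid[16] = {};   // placeholder: crypto scheme UUID
    Vector<uint8_t> drmSessionId;  // placeholder: must be a non-empty session id from the app's DRM stack
    if (player->prepareDrm(schemeUuid, drmSessionId) == OK) {
        player->start();
    }
    // releaseDrm() is rejected in the started/paused/playback-complete/error states,
    // so tear down DRM only once playback is stopped or reset:
    player->stop();
    player->releaseDrm();
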
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 1786e6b..e6d9b71 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -25,8 +25,9 @@
     liblog                      \
     libdl                       \
     libgui                      \
-    libmedia                    \
     libaudioclient              \
+    libmedia                    \
+    libmediadrm                 \
     libmediautils               \
     libmemunreachable           \
     libstagefright              \
@@ -35,6 +36,8 @@
     libstagefright_omx          \
     libstagefright_wfd          \
     libutils                    \
+    libhidlbase                 \
+    android.hardware.media.omx@1.0 \
 
 LOCAL_STATIC_LIBRARIES :=       \
     libstagefright_nuplayer     \
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index cdae456..a8b6c66 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -60,6 +60,7 @@
 #include <media/stagefright/Utils.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ALooperRoster.h>
+#include <media/stagefright/SurfaceUtils.h>
 #include <mediautils/BatteryNotifier.h>
 
 #include <memunreachable/memunreachable.h>
@@ -597,6 +598,7 @@
     if (mAudioAttributes != NULL) {
         free(mAudioAttributes);
     }
+    clearDeathNotifiers();
 }
 
 void MediaPlayerService::Client::disconnect()
@@ -654,12 +656,22 @@
         const sp<MediaPlayerBase>& listener,
         int which) {
     mService = service;
+    mOmx = nullptr;
+    mListener = listener;
+    mWhich = which;
+}
+
+MediaPlayerService::Client::ServiceDeathNotifier::ServiceDeathNotifier(
+        const sp<IOmx>& omx,
+        const sp<MediaPlayerBase>& listener,
+        int which) {
+    mService = nullptr;
+    mOmx = omx;
     mListener = listener;
     mWhich = which;
 }
 
 MediaPlayerService::Client::ServiceDeathNotifier::~ServiceDeathNotifier() {
-    mService->unlinkToDeath(this);
 }
 
 void MediaPlayerService::Client::ServiceDeathNotifier::binderDied(const wp<IBinder>& /*who*/) {
@@ -671,10 +683,43 @@
     }
 }
 
+void MediaPlayerService::Client::ServiceDeathNotifier::serviceDied(
+        uint64_t /* cookie */,
+        const wp<::android::hidl::base::V1_0::IBase>& /* who */) {
+    sp<MediaPlayerBase> listener = mListener.promote();
+    if (listener != NULL) {
+        listener->sendEvent(MEDIA_ERROR, MEDIA_ERROR_SERVER_DIED, mWhich);
+    } else {
+        ALOGW("listener for process %d death is gone", mWhich);
+    }
+}
+
+void MediaPlayerService::Client::ServiceDeathNotifier::unlinkToDeath() {
+    if (mService != nullptr) {
+        mService->unlinkToDeath(this);
+        mService = nullptr;
+    } else if (mOmx != nullptr) {
+        mOmx->unlinkToDeath(this);
+        mOmx = nullptr;
+    }
+}
+
+void MediaPlayerService::Client::clearDeathNotifiers() {
+    if (mExtractorDeathListener != nullptr) {
+        mExtractorDeathListener->unlinkToDeath();
+        mExtractorDeathListener = nullptr;
+    }
+    if (mCodecDeathListener != nullptr) {
+        mCodecDeathListener->unlinkToDeath();
+        mCodecDeathListener = nullptr;
+    }
+}
+
 sp<MediaPlayerBase> MediaPlayerService::Client::setDataSource_pre(
         player_type playerType)
 {
     ALOGV("player type = %d", playerType);
+    clearDeathNotifiers();
 
     // create the right type of player
     sp<MediaPlayerBase> p = createPlayer(playerType);
@@ -691,13 +736,27 @@
     mExtractorDeathListener = new ServiceDeathNotifier(binder, p, MEDIAEXTRACTOR_PROCESS_DEATH);
     binder->linkToDeath(mExtractorDeathListener);
 
-    binder = sm->getService(String16("media.codec"));
-    if (binder == NULL) {
-        ALOGE("codec service not available");
-        return NULL;
+    int32_t trebleOmx = property_get_int32("persist.media.treble_omx", -1);
+    if ((trebleOmx == 1) || ((trebleOmx == -1) &&
+            property_get_bool("persist.hal.binderization", 0))) {
+        // Treble IOmx
+        sp<IOmx> omx = IOmx::getService();
+        if (omx == nullptr) {
+            ALOGE("Treble IOmx not available");
+            return NULL;
+        }
+        mCodecDeathListener = new ServiceDeathNotifier(omx, p, MEDIACODEC_PROCESS_DEATH);
+        omx->linkToDeath(mCodecDeathListener, 0);
+    } else {
+        // Legacy IOMX
+        binder = sm->getService(String16("media.codec"));
+        if (binder == NULL) {
+            ALOGE("codec service not available");
+            return NULL;
+        }
+        mCodecDeathListener = new ServiceDeathNotifier(binder, p, MEDIACODEC_PROCESS_DEATH);
+        binder->linkToDeath(mCodecDeathListener);
     }
-    mCodecDeathListener = new ServiceDeathNotifier(binder, p, MEDIACODEC_PROCESS_DEATH);
-    binder->linkToDeath(mCodecDeathListener);
 
     if (!p->hardwareOutput()) {
         Mutex::Autolock l(mLock);
@@ -845,11 +904,11 @@
 
 void MediaPlayerService::Client::disconnectNativeWindow() {
     if (mConnectedWindow != NULL) {
-        status_t err = native_window_api_disconnect(mConnectedWindow.get(),
-                NATIVE_WINDOW_API_MEDIA);
+        status_t err = nativeWindowDisconnect(
+                mConnectedWindow.get(), "disconnectNativeWindow");
 
         if (err != OK) {
-            ALOGW("native_window_api_disconnect returned an error: %s (%d)",
+            ALOGW("nativeWindowDisconnect returned an error: %s (%d)",
                     strerror(-err), err);
         }
     }
@@ -871,8 +930,7 @@
     sp<ANativeWindow> anw;
     if (bufferProducer != NULL) {
         anw = new Surface(bufferProducer, true /* controlledByApp */);
-        status_t err = native_window_api_connect(anw.get(),
-                NATIVE_WINDOW_API_MEDIA);
+        status_t err = nativeWindowConnect(anw.get(), "setVideoSurfaceTexture");
 
         if (err != OK) {
             ALOGE("setVideoSurfaceTexture failed: %d", err);
@@ -1428,6 +1486,32 @@
     }
 }
 
+// Modular DRM
+status_t MediaPlayerService::Client::prepareDrm(const uint8_t uuid[16],
+        const Vector<uint8_t>& drmSessionId)
+{
+    ALOGV("[%d] prepareDrm", mConnId);
+    sp<MediaPlayerBase> p = getPlayer();
+    if (p == 0) return UNKNOWN_ERROR;
+
+    status_t ret = p->prepareDrm(uuid, drmSessionId);
+    ALOGV("prepareDrm ret: %d", ret);
+
+    return ret;
+}
+
+status_t MediaPlayerService::Client::releaseDrm()
+{
+    ALOGV("[%d] releaseDrm", mConnId);
+    sp<MediaPlayerBase> p = getPlayer();
+    if (p == 0) return UNKNOWN_ERROR;
+
+    status_t ret = p->releaseDrm();
+    ALOGV("releaseDrm ret: %d", ret);
+
+    return ret;
+}
+
 #if CALLBACK_ANTAGONIZER
 const int Antagonizer::interval = 10000; // 10 msecs
 
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index cbaf21c..dff7322 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -30,6 +30,8 @@
 #include <media/Metadata.h>
 #include <media/stagefright/foundation/ABase.h>
 
+#include <android/hardware/media/omx/1.0/IOmx.h>
+
 #include <system/audio.h>
 
 namespace android {
@@ -69,6 +71,7 @@
 class MediaPlayerService : public BnMediaPlayerService
 {
     class Client;
+    typedef ::android::hardware::media::omx::V1_0::IOmx IOmx;
 
     class AudioOutput : public MediaPlayerBase::AudioSink
     {
@@ -357,50 +360,40 @@
         virtual status_t        dump(int fd, const Vector<String16>& args);
 
                 audio_session_t getAudioSessionId() { return mAudioSessionId; }
-        // ModDrm
-        virtual status_t prepareDrm(const uint8_t /*uuid*/[16], const int /*mode*/)
-                            { return INVALID_OPERATION; }
-        virtual status_t releaseDrm()
-                            { return INVALID_OPERATION; }
-        virtual status_t getKeyRequest(Vector<uint8_t> const& /*scope*/,
-                                 String8 const& /*mimeType*/,
-                                 DrmPlugin::KeyType /*keyType*/,
-                                 KeyedVector<String8, String8>& /*optionalParameters*/,
-                                 Vector<uint8_t>& /*request*/,
-                                 String8& /*defaultUrl*/,
-                                 DrmPlugin::KeyRequestType& /*keyRequestType*/)
-                            { return INVALID_OPERATION; }
-        virtual status_t provideKeyResponse(Vector<uint8_t>& /*releaseKeySetId*/,
-                                 Vector<uint8_t>& /*response*/,
-                                 Vector<uint8_t>& /*keySetId*/)
-                            { return INVALID_OPERATION; }
-        virtual status_t restoreKeys(Vector<uint8_t> const& /*keySetId*/)
-                            { return INVALID_OPERATION; }
-        virtual status_t getDrmPropertyString(String8 const& /*name*/,
-                                              String8& /*value*/)
-                            { return INVALID_OPERATION; }
-        virtual status_t setDrmPropertyString(String8 const& /*name*/,
-                                              String8 const& /*value*/)
-                            { return INVALID_OPERATION; }
-
+        // Modular DRM
+        virtual status_t prepareDrm(const uint8_t uuid[16], const Vector<uint8_t>& drmSessionId);
+        virtual status_t releaseDrm();
 
     private:
-        class ServiceDeathNotifier: public IBinder::DeathRecipient
+        class ServiceDeathNotifier:
+                public IBinder::DeathRecipient,
+                public ::android::hardware::hidl_death_recipient
         {
         public:
             ServiceDeathNotifier(
                     const sp<IBinder>& service,
                     const sp<MediaPlayerBase>& listener,
                     int which);
+            ServiceDeathNotifier(
+                    const sp<IOmx>& omx,
+                    const sp<MediaPlayerBase>& listener,
+                    int which);
             virtual ~ServiceDeathNotifier();
             virtual void binderDied(const wp<IBinder>& who);
+            virtual void serviceDied(
+                    uint64_t cookie,
+                    const wp<::android::hidl::base::V1_0::IBase>& who);
+            void unlinkToDeath();
 
         private:
             int mWhich;
             sp<IBinder> mService;
+            sp<IOmx> mOmx;
             wp<MediaPlayerBase> mListener;
         };
 
+        void clearDeathNotifiers();
+
         friend class MediaPlayerService;
                                 Client( const sp<MediaPlayerService>& service,
                                         pid_t pid,
@@ -460,8 +453,8 @@
         // getMetadata clears this set.
         media::Metadata::Filter mMetadataUpdated;  // protected by mLock
 
-        sp<IBinder::DeathRecipient> mExtractorDeathListener;
-        sp<IBinder::DeathRecipient> mCodecDeathListener;
+        sp<ServiceDeathNotifier> mExtractorDeathListener;
+        sp<ServiceDeathNotifier> mCodecDeathListener;
 #if CALLBACK_ANTAGONIZER
                     Antagonizer*                mAntagonizer;
 #endif
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index bb2d28b..763f509 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -328,6 +328,7 @@
         wp<MediaRecorderClient> client(this);
         mMediaPlayerService->removeMediaRecorderClient(client);
     }
+    clearDeathNotifiers();
     return NO_ERROR;
 }
 
@@ -351,15 +352,25 @@
         const sp<IMediaRecorderClient>& listener,
         int which) {
     mService = service;
+    mOmx = nullptr;
+    mListener = listener;
+    mWhich = which;
+}
+
+MediaRecorderClient::ServiceDeathNotifier::ServiceDeathNotifier(
+        const sp<IOmx>& omx,
+        const sp<IMediaRecorderClient>& listener,
+        int which) {
+    mService = nullptr;
+    mOmx = omx;
     mListener = listener;
     mWhich = which;
 }
 
 MediaRecorderClient::ServiceDeathNotifier::~ServiceDeathNotifier() {
-    mService->unlinkToDeath(this);
 }
 
-void  MediaRecorderClient::ServiceDeathNotifier::binderDied(const wp<IBinder>& /*who*/) {
+void MediaRecorderClient::ServiceDeathNotifier::binderDied(const wp<IBinder>& /*who*/) {
     sp<IMediaRecorderClient> listener = mListener.promote();
     if (listener != NULL) {
         listener->notify(MEDIA_ERROR, MEDIA_ERROR_SERVER_DIED, mWhich);
@@ -368,9 +379,42 @@
     }
 }
 
+void MediaRecorderClient::ServiceDeathNotifier::serviceDied(
+        uint64_t /* cookie */,
+        const wp<::android::hidl::base::V1_0::IBase>& /* who */) {
+    sp<IMediaRecorderClient> listener = mListener.promote();
+    if (listener != NULL) {
+        listener->notify(MEDIA_ERROR, MEDIA_ERROR_SERVER_DIED, mWhich);
+    } else {
+        ALOGW("listener for process %d death is gone", mWhich);
+    }
+}
+
+void MediaRecorderClient::ServiceDeathNotifier::unlinkToDeath() {
+    if (mService != nullptr) {
+        mService->unlinkToDeath(this);
+        mService = nullptr;
+    } else if (mOmx != nullptr) {
+        mOmx->unlinkToDeath(this);
+        mOmx = nullptr;
+    }
+}
+
+void MediaRecorderClient::clearDeathNotifiers() {
+    if (mCameraDeathListener != nullptr) {
+        mCameraDeathListener->unlinkToDeath();
+        mCameraDeathListener = nullptr;
+    }
+    if (mCodecDeathListener != nullptr) {
+        mCodecDeathListener->unlinkToDeath();
+        mCodecDeathListener = nullptr;
+    }
+}
+
 status_t MediaRecorderClient::setListener(const sp<IMediaRecorderClient>& listener)
 {
     ALOGV("setListener");
+    clearDeathNotifiers();
     Mutex::Autolock lock(mLock);
     if (mRecorder == NULL) {
         ALOGE("recorder is not initialized");
@@ -395,10 +439,25 @@
     }
     sCameraChecked = true;
 
-    binder = sm->getService(String16("media.codec"));
-    mCodecDeathListener = new ServiceDeathNotifier(binder, listener,
-            MediaPlayerService::MEDIACODEC_PROCESS_DEATH);
-    binder->linkToDeath(mCodecDeathListener);
+    int32_t trebleOmx = property_get_int32("persist.media.treble_omx", -1);
+    if ((trebleOmx == 1) || ((trebleOmx == -1) &&
+            property_get_bool("persist.hal.binderization", 0))) {
+        // Treble IOmx
+        sp<IOmx> omx = IOmx::getService();
+        if (omx == nullptr) {
+            ALOGE("Treble IOmx not available");
+            return NO_INIT;
+        }
+        mCodecDeathListener = new ServiceDeathNotifier(omx, listener,
+                MediaPlayerService::MEDIACODEC_PROCESS_DEATH);
+        omx->linkToDeath(mCodecDeathListener, 0);
+    } else {
+        // Legacy IOMX
+        binder = sm->getService(String16("media.codec"));
+        mCodecDeathListener = new ServiceDeathNotifier(binder, listener,
+                MediaPlayerService::MEDIACODEC_PROCESS_DEATH);
+        binder->linkToDeath(mCodecDeathListener);
+    }
 
     return OK;
 }
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index 8ddd680..101b8f6 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -20,6 +20,8 @@
 
 #include <media/IMediaRecorder.h>
 
+#include <android/hardware/media/omx/1.0/IOmx.h>
+
 namespace android {
 
 struct MediaRecorderBase;
@@ -28,22 +30,36 @@
 
 class MediaRecorderClient : public BnMediaRecorder
 {
-    class ServiceDeathNotifier: public IBinder::DeathRecipient
+    typedef ::android::hardware::media::omx::V1_0::IOmx IOmx;
+
+    class ServiceDeathNotifier :
+            public IBinder::DeathRecipient,
+            public ::android::hardware::hidl_death_recipient
     {
     public:
         ServiceDeathNotifier(
                 const sp<IBinder>& service,
                 const sp<IMediaRecorderClient>& listener,
                 int which);
+        ServiceDeathNotifier(
+                const sp<IOmx>& omx,
+                const sp<IMediaRecorderClient>& listener,
+                int which);
         virtual ~ServiceDeathNotifier();
         virtual void binderDied(const wp<IBinder>& who);
-
+        virtual void serviceDied(
+                uint64_t cookie,
+                const wp<::android::hidl::base::V1_0::IBase>& who);
+        void unlinkToDeath();
     private:
         int mWhich;
         sp<IBinder> mService;
+        sp<IOmx> mOmx;
         wp<IMediaRecorderClient> mListener;
     };
 
+    void clearDeathNotifiers();
+
 public:
     virtual     status_t   setCamera(const sp<hardware::ICamera>& camera,
                                     const sp<ICameraRecordingProxy>& proxy);
@@ -84,8 +100,8 @@
                                                                const String16& opPackageName);
     virtual                ~MediaRecorderClient();
 
-    sp<IBinder::DeathRecipient> mCameraDeathListener;
-    sp<IBinder::DeathRecipient> mCodecDeathListener;
+    sp<ServiceDeathNotifier> mCameraDeathListener;
+    sp<ServiceDeathNotifier> mCodecDeathListener;
 
     pid_t                  mPid;
     Mutex                  mLock;
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index d00e377..170659a 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -1812,15 +1812,17 @@
         return OK;
     }
 
+    mPauseStartTimeUs = systemTime() / 1000;
+    sp<MetaData> meta = new MetaData;
+    meta->setInt64(kKeyTime, mPauseStartTimeUs);
+
     if (mAudioEncoderSource != NULL) {
         mAudioEncoderSource->pause();
     }
     if (mVideoEncoderSource != NULL) {
-        mVideoEncoderSource->pause();
+        mVideoEncoderSource->pause(meta.get());
     }
 
-    mPauseStartTimeUs = systemTime() / 1000;
-
     return OK;
 }
 
@@ -1835,6 +1837,8 @@
         return OK;
     }
 
+    int64_t resumeStartTimeUs = systemTime() / 1000;
+
     int64_t bufferStartTimeUs = 0;
     bool allSourcesStarted = true;
     for (const auto &source : { mAudioEncoderSource, mVideoEncoderSource }) {
@@ -1855,18 +1859,20 @@
             mPauseStartTimeUs = bufferStartTimeUs;
         }
         // 30 ms buffer to avoid timestamp overlap
-        mTotalPausedDurationUs += (systemTime() / 1000) - mPauseStartTimeUs - 30000;
+        mTotalPausedDurationUs += resumeStartTimeUs - mPauseStartTimeUs - 30000;
     }
     double timeOffset = -mTotalPausedDurationUs;
     if (mCaptureFpsEnable) {
         timeOffset *= mCaptureFps / mFrameRate;
     }
+    sp<MetaData> meta = new MetaData;
+    meta->setInt64(kKeyTime, resumeStartTimeUs);
     for (const auto &source : { mAudioEncoderSource, mVideoEncoderSource }) {
         if (source == nullptr) {
             continue;
         }
         source->setInputBufferTimeOffset((int64_t)timeOffset);
-        source->start();
+        source->start(meta.get());
     }
     mPauseStartTimeUs = 0;
 
@@ -1883,6 +1889,12 @@
         mCameraSourceTimeLapse = NULL;
     }
 
+    if (mVideoEncoderSource != NULL) {
+        int64_t stopTimeUs = systemTime() / 1000;
+        sp<MetaData> meta = new MetaData;
+        err = mVideoEncoderSource->setStopStimeUs(stopTimeUs);
+    }
+
     if (mWriter != NULL) {
         err = mWriter->stop();
         mWriter.clear();
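
A quick worked example of the pause/resume bookkeeping above: pausing at t = 10,000,000 us and resuming at t = 15,000,000 us adds 15,000,000 - 10,000,000 - 30,000 = 4,970,000 us to mTotalPausedDurationUs (the 30 ms is the buffer against timestamp overlap), so the encoder sources are restarted with an input-buffer time offset of -4,970,000 us, further scaled by mCaptureFps / mFrameRate when time-lapse capture is enabled. The kKeyTime metadata passed to pause() and start() carries the wall-clock pause and resume instants down to the sources.
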
diff --git a/media/libmediaplayerservice/nuplayer/Android.mk b/media/libmediaplayerservice/nuplayer/Android.mk
index a0e633c..8686560 100644
--- a/media/libmediaplayerservice/nuplayer/Android.mk
+++ b/media/libmediaplayerservice/nuplayer/Android.mk
@@ -10,6 +10,7 @@
         NuPlayerDecoderBase.cpp         \
         NuPlayerDecoderPassThrough.cpp  \
         NuPlayerDriver.cpp              \
+        NuPlayerDrm.cpp                 \
         NuPlayerRenderer.cpp            \
         NuPlayerStreamListener.cpp      \
         RTSPSource.cpp                  \
@@ -32,7 +33,10 @@
 LOCAL_CFLAGS += -DENABLE_STAGEFRIGHT_EXPERIMENTS
 endif
 
-LOCAL_SHARED_LIBRARIES := libmedia
+LOCAL_SHARED_LIBRARIES :=       \
+    libbinder                   \
+    libmedia                    \
+    libmediadrm                 \
 
 LOCAL_MODULE:= libstagefright_nuplayer
 
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 91a2b7b..c949080 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -18,6 +18,7 @@
 #define LOG_TAG "GenericSource"
 
 #include "GenericSource.h"
+#include "NuPlayerDrm.h"
 
 #include "AnotherPacketSource.h"
 
@@ -63,14 +64,17 @@
       mUIDValid(uidValid),
       mUID(uid),
       mFd(-1),
-      mDrmManagerClient(NULL),
       mBitrate(-1ll),
       mPendingReadBufferTypes(0) {
+    ALOGV("GenericSource");
+
     mBufferingMonitor = new BufferingMonitor(notify);
     resetDataSource();
 }
 
 void NuPlayer::GenericSource::resetDataSource() {
+    ALOGV("resetDataSource");
+
     mHTTPService.clear();
     mHttpSource.clear();
     mUri.clear();
@@ -81,9 +85,6 @@
     }
     mOffset = 0;
     mLength = 0;
-    setDrmPlaybackStatusIfNeeded(Playback::STOP, 0);
-    mDecryptHandle = NULL;
-    mDrmManagerClient = NULL;
     mStarted = false;
     mStopRead = true;
 
@@ -93,12 +94,18 @@
         mBufferingMonitorLooper = NULL;
     }
     mBufferingMonitor->stop();
+
+    mIsDrmProtected = false;
+    mIsSecure = false;
+    mMimes.clear();
 }
 
 status_t NuPlayer::GenericSource::setDataSource(
         const sp<IMediaHTTPService> &httpService,
         const char *url,
         const KeyedVector<String8, String8> *headers) {
+    ALOGV("setDataSource url: %s", url);
+
     resetDataSource();
 
     mHTTPService = httpService;
@@ -115,6 +122,8 @@
 
 status_t NuPlayer::GenericSource::setDataSource(
         int fd, int64_t offset, int64_t length) {
+    ALOGV("setDataSource %d/%lld/%lld", fd, (long long)offset, (long long)length);
+
     resetDataSource();
 
     mFd = dup(fd);
@@ -127,6 +136,8 @@
 }
 
 status_t NuPlayer::GenericSource::setDataSource(const sp<DataSource>& source) {
+    ALOGV("setDataSource (source: %p)", source.get());
+
     resetDataSource();
     mDataSource = source;
     return OK;
@@ -161,6 +172,8 @@
         return UNKNOWN_ERROR;
     }
 
+    mMimes.clear();
+
     for (size_t i = 0; i < numtracks; ++i) {
         sp<IMediaSource> track = extractor->getTrack(i);
         if (track == NULL) {
@@ -176,6 +189,8 @@
         const char *mime;
         CHECK(meta->findCString(kKeyMIMEType, &mime));
 
+        ALOGV("initFromDataSource track[%zu]: %s", i, mime);
+
         // Do the string compare immediately with "mime",
         // we can't assume "mime" would stay valid after another
         // extractor operation, some extractors might modify meta
@@ -192,6 +207,8 @@
                 } else {
                     mAudioIsVorbis = false;
                 }
+
+                mMimes.add(String8(mime));
             }
         } else if (!strncasecmp(mime, "video/", 6)) {
             if (mVideoTrack.mSource == NULL) {
@@ -200,15 +217,8 @@
                 mVideoTrack.mPackets =
                     new AnotherPacketSource(mVideoTrack.mSource->getFormat());
 
-                // check if the source requires secure buffers
-                int32_t secure;
-                if (meta->findInt32(kKeyRequiresSecureBuffers, &secure)
-                        && secure) {
-                    mIsSecure = true;
-                    if (mUIDValid) {
-                        extractor->setUID(mUID);
-                    }
-                }
+                // the video mime always goes at the beginning of mMimes
+                mMimes.insertAt(String8(mime), 0);
             }
         }
 
@@ -228,11 +238,17 @@
         }
     }
 
+    ALOGV("initFromDataSource mSources.size(): %zu  mIsSecure: %d  mime[0]: %s", mSources.size(),
+            mIsSecure, (mMimes.isEmpty() ? "NONE" : mMimes[0].string()));
+
     if (mSources.size() == 0) {
         ALOGE("b/23705695");
         return UNKNOWN_ERROR;
     }
 
+    // Modular DRM: The return value doesn't affect source initialization.
+    (void)checkDrmInfo();
+
     mBitrate = totalBitrate;
 
     return OK;
@@ -296,6 +312,7 @@
 }
 
 NuPlayer::GenericSource::~GenericSource() {
+    ALOGV("~GenericSource");
     if (mLooper != NULL) {
         mLooper->unregisterHandler(id());
         mLooper->stop();
@@ -304,6 +321,8 @@
 }
 
 void NuPlayer::GenericSource::prepareAsync() {
+    ALOGV("prepareAsync: (looper: %d)", (mLooper != NULL));
+
     if (mLooper == NULL) {
         mLooper = new ALooper;
         mLooper->setName("generic");
@@ -317,6 +336,8 @@
 }
 
 void NuPlayer::GenericSource::onPrepareAsync() {
+    ALOGV("onPrepareAsync: mDataSource: %d", (mDataSource != NULL));
+
     // delayed data source creation
     if (mDataSource == NULL) {
         // set to false first, if the extractor
@@ -380,35 +401,21 @@
     }
 
     notifyFlagsChanged(
-            (mIsSecure ? FLAG_SECURE : 0)
-            | (mDecryptHandle != NULL ? FLAG_PROTECTED : 0)
-            | FLAG_CAN_PAUSE
-            | FLAG_CAN_SEEK_BACKWARD
-            | FLAG_CAN_SEEK_FORWARD
-            | FLAG_CAN_SEEK);
+            // FLAG_SECURE will be known if/when prepareDrm is called by the app
+            // FLAG_PROTECTED will be known if/when prepareDrm is called by the app
+            FLAG_CAN_PAUSE |
+            FLAG_CAN_SEEK_BACKWARD |
+            FLAG_CAN_SEEK_FORWARD |
+            FLAG_CAN_SEEK);
 
-    if (mIsSecure) {
-        // secure decoders must be instantiated before starting widevine source
-        //
-        // TODO: mIsSecure and FLAG_SECURE may be obsolete, revisit after
-        // removing widevine
-        sp<AMessage> reply = new AMessage(kWhatSecureDecodersInstantiated, this);
-        notifyInstantiateSecureDecoders(reply);
-    } else {
-        finishPrepareAsync();
-    }
-}
-
-void NuPlayer::GenericSource::onSecureDecodersInstantiated(status_t err) {
-    if (err != OK) {
-        ALOGE("Failed to instantiate secure decoders!");
-        notifyPreparedAndCleanup(err);
-        return;
-    }
     finishPrepareAsync();
+
+    ALOGV("onPrepareAsync: Done");
 }
 
 void NuPlayer::GenericSource::finishPrepareAsync() {
+    ALOGV("finishPrepareAsync");
+
     status_t err = startSources();
     if (err != OK) {
         ALOGE("Failed to init start data source!");
@@ -443,8 +450,6 @@
             {
                 Mutex::Autolock _l(mDisconnectLock);
                 mDataSource.clear();
-                mDecryptHandle = NULL;
-                mDrmManagerClient = NULL;
                 mCachedSource.clear();
                 mHttpSource.clear();
             }
@@ -468,27 +473,20 @@
         postReadBuffer(MEDIA_TRACK_TYPE_VIDEO);
     }
 
-    setDrmPlaybackStatusIfNeeded(Playback::START, getLastReadPosition() / 1000);
     mStarted = true;
 
     (new AMessage(kWhatStart, this))->post();
 }
 
 void NuPlayer::GenericSource::stop() {
-    // nothing to do, just account for DRM playback status
-    setDrmPlaybackStatusIfNeeded(Playback::STOP, 0);
     mStarted = false;
 }
 
 void NuPlayer::GenericSource::pause() {
-    // nothing to do, just account for DRM playback status
-    setDrmPlaybackStatusIfNeeded(Playback::PAUSE, 0);
     mStarted = false;
 }
 
 void NuPlayer::GenericSource::resume() {
-    // nothing to do, just account for DRM playback status
-    setDrmPlaybackStatusIfNeeded(Playback::START, getLastReadPosition() / 1000);
     mStarted = true;
 
     (new AMessage(kWhatResume, this))->post();
@@ -512,14 +510,6 @@
     }
 }
 
-void NuPlayer::GenericSource::setDrmPlaybackStatusIfNeeded(int playbackStatus, int64_t position) {
-    if (mDecryptHandle != NULL) {
-        mDrmManagerClient->setPlaybackStatus(mDecryptHandle, playbackStatus, position);
-    }
-    mSubtitleTrack.mPackets = new AnotherPacketSource(NULL);
-    mTimedTextTrack.mPackets = new AnotherPacketSource(NULL);
-}
-
 status_t NuPlayer::GenericSource::feedMoreTSData() {
     return OK;
 }
@@ -653,11 +643,14 @@
           break;
       }
 
-      case kWhatSecureDecodersInstantiated:
+      case kWhatPrepareDrm:
       {
-          int32_t err;
-          CHECK(msg->findInt32("err", &err));
-          onSecureDecodersInstantiated(err);
+          status_t status = onPrepareDrm(msg);
+          sp<AMessage> response = new AMessage;
+          response->setInt32("status", status);
+          sp<AReplyToken> replyID;
+          CHECK(msg->senderAwaitsResponse(&replyID));
+          response->postReply(replyID);
           break;
       }
 
@@ -1194,11 +1187,6 @@
         mAudioLastDequeueTimeUs = seekTimeUs;
     }
 
-    setDrmPlaybackStatusIfNeeded(Playback::START, seekTimeUs / 1000);
-    if (!mStarted) {
-        setDrmPlaybackStatusIfNeeded(Playback::PAUSE, 0);
-    }
-
     // If currently buffering, post kWhatBufferingEnd first, so that
     // NuPlayer resumes. Otherwise, if cache hits high watermark
     // before new polling happens, no one will resume the playback.
@@ -1219,11 +1207,26 @@
     }
 
     sp<ABuffer> ab;
-    if (mIsSecure && !audio) {
+
+    if (mIsDrmProtected)   {
+        // Modular DRM
+        // Enabled for both video/audio so 1) media buffer is reused without extra copying
+        // 2) meta data can be retrieved in onInputBufferFetched for calling queueSecureInputBuffer.
+
         // data is already provided in the buffer
         ab = new ABuffer(NULL, mb->range_length());
         mb->add_ref();
         ab->setMediaBufferBase(mb);
+
+        // Modular DRM: Required b/c of the above add_ref.
+        // If ref>0, there must be an observer, or it'll crash at release().
+        // TODO: MediaBuffer might need to be revised to ease such need.
+        mb->setObserver(this);
+        // setMediaBufferBase() interestingly doesn't increment the ref count on its own.
+        // Extra increment, since we want to keep mb alive and attached to ab beyond this function
+        // call; this counters the effect of the mb->release() towards the end.
+        mb->add_ref();
+
     } else {
         ab = new ABuffer(outLength);
         memcpy(ab->data(),
@@ -1828,4 +1831,128 @@
     }
 }
 
+// Modular DRM
+status_t NuPlayer::GenericSource::prepareDrm(
+        const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId, sp<ICrypto> *crypto)
+{
+    ALOGV("prepareDrm");
+
+    sp<AMessage> msg = new AMessage(kWhatPrepareDrm, this);
+    // synchronous call so just passing the address but with local copies of "const" args
+    uint8_t UUID[16];
+    memcpy(UUID, uuid, sizeof(UUID));
+    Vector<uint8_t> sessionId = drmSessionId;
+    msg->setPointer("uuid", (void*)UUID);
+    msg->setPointer("drmSessionId", (void*)&sessionId);
+    msg->setPointer("crypto", (void*)crypto);
+
+    sp<AMessage> response;
+    status_t status = msg->postAndAwaitResponse(&response);
+
+    if (status == OK && response != NULL) {
+        CHECK(response->findInt32("status", &status));
+        ALOGV_IF(status == OK, "prepareDrm: mCrypto: %p (%d)", crypto->get(),
+                (*crypto != NULL ? (*crypto)->getStrongCount() : 0));
+        ALOGD("prepareDrm ret: %d ", status);
+    } else {
+        ALOGE("prepareDrm err: %d", status);
+    }
+
+    return status;
+}
+
+status_t NuPlayer::GenericSource::onPrepareDrm(const sp<AMessage> &msg)
+{
+    ALOGV("onPrepareDrm ");
+
+    mIsDrmProtected = false;
+    mIsSecure = false;
+
+    uint8_t *uuid;
+    Vector<uint8_t> *drmSessionId;
+    sp<ICrypto> *outCrypto;
+    CHECK(msg->findPointer("uuid", (void**)&uuid));
+    CHECK(msg->findPointer("drmSessionId", (void**)&drmSessionId));
+    CHECK(msg->findPointer("crypto", (void**)&outCrypto));
+
+    status_t status = OK;
+    sp<ICrypto> crypto = NuPlayerDrm::createCryptoAndPlugin(uuid, *drmSessionId, status);
+    if (crypto == NULL) {
+        ALOGE("onPrepareDrm: createCrypto failed. status: %d", status);
+        return status;
+    }
+    ALOGV("onPrepareDrm: createCryptoAndPlugin succeeded for uuid: %s",
+            DrmUUID::toHexString(uuid).string());
+
+    *outCrypto = crypto;
+    // as long as there is an active crypto
+    mIsDrmProtected = true;
+
+    if (mMimes.size() == 0) {
+        status = UNKNOWN_ERROR;
+        ALOGE("onPrepareDrm: Unexpected. Must have at least one track. status: %d", status);
+        return status;
+    }
+
+    // first mime in this list is either the video track, or the first audio track
+    const char *mime = mMimes[0].string();
+    mIsSecure = crypto->requiresSecureDecoderComponent(mime);
+    ALOGV("onPrepareDrm: requiresSecureDecoderComponent mime: %s  isSecure: %d",
+            mime, mIsSecure);
+
+    // Checking the member flags while in the looper to send out the notification.
+    // The legacy mDecryptHandle!=NULL check (for FLAG_PROTECTED) is equivalent to mIsDrmProtected.
+    notifyFlagsChanged(
+            (mIsSecure ? FLAG_SECURE : 0) |
+            (mIsDrmProtected ? FLAG_PROTECTED : 0) |
+            FLAG_CAN_PAUSE |
+            FLAG_CAN_SEEK_BACKWARD |
+            FLAG_CAN_SEEK_FORWARD |
+            FLAG_CAN_SEEK);
+
+    return status;
+}
+
+status_t NuPlayer::GenericSource::checkDrmInfo()
+{
+    if (mFileMeta == NULL) {
+        ALOGE("checkDrmInfo: No metadata");
+        return OK; // letting the caller respond accordingly
+    }
+
+    uint32_t type;
+    const void *pssh;
+    size_t psshsize;
+
+    if (!mFileMeta->findData(kKeyPssh, &type, &pssh, &psshsize)) {
+        ALOGE("checkDrmInfo: No PSSH");
+        return OK; // source without DRM info
+    }
+
+    Parcel parcel;
+    NuPlayerDrm::retrieveDrmInfo(pssh, psshsize, mMimes, &parcel);
+    ALOGV("checkDrmInfo: MEDIA_DRM_INFO PSSH size: %d  Parcel size: %d  objects#: %d",
+          (int)psshsize, (int)parcel.dataSize(), (int)parcel.objectsCount());
+
+    if (parcel.dataSize() == 0) {
+        ALOGE("checkDrmInfo: Unexpected parcel size: 0");
+        return UNKNOWN_ERROR;
+    }
+
+    // Can't pass parcel as a message to the player. Converting Parcel->ABuffer to pass it
+    // to the Player's onSourceNotify then back to Parcel for calling driver's notifyListener.
+    sp<ABuffer> drmInfoBuffer = ABuffer::CreateAsCopy(parcel.data(), parcel.dataSize());
+    notifyDrmInfo(drmInfoBuffer);
+
+    return OK;
+}
+
+void NuPlayer::GenericSource::signalBufferReturned(MediaBuffer *buffer)
+{
+    //ALOGV("signalBufferReturned %p  refCount: %d", buffer, buffer->localRefcount());
+
+    buffer->setObserver(NULL);
+    buffer->release(); // this leads to delete since there is no observer
+}
+
 }  // namespace android
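
The reference-count dance in the mIsDrmProtected branch above, reduced to its essentials (illustrative sketch; mb is the MediaBuffer being wrapped and this is the GenericSource acting as MediaBufferObserver):

    sp<ABuffer> ab = new ABuffer(NULL, mb->range_length()); // wraps mb's data, no copy
    mb->add_ref();               // ref on behalf of the ABuffer attachment
    ab->setMediaBufferBase(mb);  // does not take a ref of its own
    mb->setObserver(this);       // mandatory while refs > 0, otherwise release() asserts
    mb->add_ref();               // extra ref so mb survives the local mb->release() later in the function
    // ... once the decoder is done with ab and the last reference is released,
    // signalBufferReturned(mb) clears the observer and drops the final reference.
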
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index e1949f3..64f21a6 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -28,7 +28,6 @@
 namespace android {
 
 class DecryptHandle;
-class DrmManagerClient;
 struct AnotherPacketSource;
 struct ARTSPController;
 class DataSource;
@@ -38,7 +37,9 @@
 class MediaBuffer;
 struct NuCachedSource2;
 
-struct NuPlayer::GenericSource : public NuPlayer::Source {
+struct NuPlayer::GenericSource : public NuPlayer::Source,
+                                 public MediaBufferObserver // Modular DRM
+{
     GenericSource(const sp<AMessage> &notify, bool uidValid, uid_t uid);
 
     status_t setDataSource(
@@ -84,6 +85,13 @@
 
     virtual void setOffloadAudio(bool offload);
 
+    // Modular DRM
+    virtual void signalBufferReturned(MediaBuffer *buffer);
+
+    virtual status_t prepareDrm(
+            const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId, sp<ICrypto> *crypto);
+
+
 protected:
     virtual ~GenericSource();
 
@@ -109,6 +117,8 @@
         kWhatStart,
         kWhatResume,
         kWhatSecureDecodersInstantiated,
+        // Modular DRM
+        kWhatPrepareDrm,
     };
 
     struct Track {
@@ -224,8 +234,6 @@
     sp<NuCachedSource2> mCachedSource;
     sp<DataSource> mHttpSource;
     sp<MetaData> mFileMeta;
-    DrmManagerClient *mDrmManagerClient;
-    sp<DecryptHandle> mDecryptHandle;
     bool mStarted;
     bool mStopRead;
     int64_t mBitrate;
@@ -243,7 +251,6 @@
 
     status_t initFromDataSource();
     int64_t getLastReadPosition();
-    void setDrmPlaybackStatusIfNeeded(int playbackStatus, int64_t position);
 
     void notifyPreparedAndCleanup(status_t err);
     void onSecureDecodersInstantiated(status_t err);
@@ -299,6 +306,13 @@
     void queueDiscontinuityIfNeeded(
             bool seeking, bool formatChange, media_track_type trackType, Track *track);
 
+    // Modular DRM
+    bool mIsDrmProtected;
+    Vector<String8> mMimes;
+
+    status_t checkDrmInfo();
+    status_t onPrepareDrm(const sp<AMessage> &msg);
+
     DISALLOW_EVIL_CONSTRUCTORS(GenericSource);
 };
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 6593fcd..50d5343 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -199,7 +199,8 @@
       mSourceStarted(false),
       mPaused(false),
       mPausedByClient(true),
-      mPausedForBuffering(false) {
+      mPausedForBuffering(false),
+      mIsDrmProtected(false) {
     clearFlushComplete();
 }
 
@@ -254,16 +255,21 @@
     sp<Source> source;
     if (IsHTTPLiveURL(url)) {
         source = new HTTPLiveSource(notify, httpService, url, headers);
+        ALOGV("setDataSourceAsync HTTPLiveSource %s", url);
     } else if (!strncasecmp(url, "rtsp://", 7)) {
         source = new RTSPSource(
                 notify, httpService, url, headers, mUIDValid, mUID);
+        ALOGV("setDataSourceAsync RTSPSource %s", url);
     } else if ((!strncasecmp(url, "http://", 7)
                 || !strncasecmp(url, "https://", 8))
                     && ((len >= 4 && !strcasecmp(".sdp", &url[len - 4]))
                     || strstr(url, ".sdp?"))) {
         source = new RTSPSource(
                 notify, httpService, url, headers, mUIDValid, mUID, true);
+        ALOGV("setDataSourceAsync RTSPSource http/https/.sdp %s", url);
     } else {
+        ALOGV("setDataSourceAsync GenericSource %s", url);
+
         sp<GenericSource> genericSource =
                 new GenericSource(notify, mUIDValid, mUID);
 
@@ -287,6 +293,9 @@
     sp<GenericSource> source =
             new GenericSource(notify, mUIDValid, mUID);
 
+    ALOGV("setDataSourceAsync fd %d/%lld/%lld source: %p",
+            fd, (long long)offset, (long long)length, source.get());
+
     status_t err = source->setDataSource(fd, offset, length);
 
     if (err != OK) {
@@ -340,6 +349,8 @@
 }
 
 void NuPlayer::prepareAsync() {
+    ALOGV("prepareAsync");
+
     (new AMessage(kWhatPrepare, this))->post();
 }
 
@@ -577,6 +588,8 @@
 
         case kWhatPrepare:
         {
+            ALOGV("onMessageReceived kWhatPrepare");
+
             mSource->prepareAsync();
             break;
         }
@@ -1133,11 +1146,6 @@
                     case SHUTTING_DOWN_DECODER:
                         break; // Wait for shutdown to complete.
                     case FLUSHED:
-                        // Widevine source reads must stop before releasing the video decoder.
-                        if (!audio && mSource != NULL && mSourceFlags & Source::FLAG_SECURE) {
-                            mSource->stop();
-                            mSourceStarted = false;
-                        }
                         getDecoder(audio)->initiateShutdown(); // In the middle of a seek.
                         *flushing = SHUTTING_DOWN_DECODER;     // Shut down.
                         break;
@@ -1330,6 +1338,30 @@
             break;
         }
 
+        case kWhatPrepareDrm:
+        {
+            status_t status = onPrepareDrm(msg);
+
+            sp<AMessage> response = new AMessage;
+            response->setInt32("status", status);
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatReleaseDrm:
+        {
+            status_t status = onReleaseDrm();
+
+            sp<AMessage> response = new AMessage;
+            response->setInt32("status", status);
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+            response->postReply(replyID);
+            break;
+        }
+
         default:
             TRESPASS();
             break;
@@ -1391,6 +1423,9 @@
 }
 
 void NuPlayer::onStart(int64_t startPositionUs, MediaPlayerSeekMode mode) {
+    ALOGV("onStart: mCrypto: %p (%d)", mCrypto.get(),
+            (mCrypto != NULL ? mCrypto->getStrongCount() : 0));
+
     if (!mSourceStarted) {
         mSourceStarted = true;
         mSource->start();
@@ -1435,6 +1470,13 @@
     mOffloadAudio =
         canOffloadStream(audioMeta, hasVideo, mSource->isStreaming(), streamType)
                 && (mPlaybackSettings.mSpeed == 1.f && mPlaybackSettings.mPitch == 1.f);
+
+    // Modular DRM: Disabling audio offload if the source is protected
+    if (mOffloadAudio && mIsDrmProtected) {
+        mOffloadAudio = false;
+        ALOGV("onStart: Disabling mOffloadAudio now that the source is protected.");
+    }
+
     if (mOffloadAudio) {
         flags |= Renderer::FLAG_OFFLOAD_AUDIO;
     }
@@ -1527,13 +1569,6 @@
             *state = SHUTTING_DOWN_DECODER;
 
             ALOGV("initiating %s decoder shutdown", audio ? "audio" : "video");
-            if (!audio) {
-                // Widevine source reads must stop before releasing the video decoder.
-                if (mSource != NULL && mSourceFlags & Source::FLAG_SECURE) {
-                    mSource->stop();
-                    mSourceStarted = false;
-                }
-            }
             getDecoder(audio)->initiateShutdown();
             break;
         }
@@ -1650,9 +1685,16 @@
     sp<AMessage> videoFormat = mSource->getFormat(false /* audio */);
     audio_stream_type_t streamType = mAudioSink->getAudioStreamType();
     const bool hasVideo = (videoFormat != NULL);
-    const bool canOffload = canOffloadStream(
+    bool canOffload = canOffloadStream(
             audioMeta, hasVideo, mSource->isStreaming(), streamType)
                     && (mPlaybackSettings.mSpeed == 1.f && mPlaybackSettings.mPitch == 1.f);
+
+    // Modular DRM: Disabling audio offload if the source is protected
+    if (canOffload && mIsDrmProtected) {
+        canOffload = false;
+        ALOGV("determineAudioModeChange: Disabling mOffloadAudio b/c the source is protected.");
+    }
+
     if (canOffload) {
         if (!mOffloadAudio) {
             mRenderer->signalEnableOffloadAudio();
@@ -1725,10 +1767,12 @@
             const bool hasVideo = (mSource->getFormat(false /*audio */) != NULL);
             format->setInt32("has-video", hasVideo);
             *decoder = new DecoderPassThrough(notify, mSource, mRenderer);
+            ALOGV("instantiateDecoder audio DecoderPassThrough  hasVideo: %d", hasVideo);
         } else {
             mSource->setOffloadAudio(false /* offload */);
 
             *decoder = new Decoder(notify, mSource, mPID, mUID, mRenderer);
+            ALOGV("instantiateDecoder audio Decoder");
         }
     } else {
         sp<AMessage> notify = new AMessage(kWhatVideoNotify, this);
@@ -1748,6 +1792,15 @@
         }
     }
     (*decoder)->init();
+
+    // Modular DRM
+    if (mIsDrmProtected) {
+        format->setPointer("crypto", mCrypto.get());
+        ALOGV("instantiateDecoder: mCrypto: %p (%d) isSecure: %d", mCrypto.get(),
+                (mCrypto != NULL ? mCrypto->getStrongCount() : 0),
+                (mSourceFlags & Source::FLAG_SECURE) != 0);
+    }
+
     (*decoder)->configure(format);
 
     if (!audio) {
@@ -2142,6 +2195,16 @@
     mPrepared = false;
     mResetting = false;
     mSourceStarted = false;
+
+    // Modular DRM
+    if (mCrypto != NULL) {
+        // decoders will be flushed before this so their mCrypto would go away on their own
+        // TODO change to ALOGV
+        ALOGD("performReset mCrypto: %p (%d)", mCrypto.get(),
+                (mCrypto != NULL ? mCrypto->getStrongCount() : 0));
+        mCrypto.clear();
+    }
+    mIsDrmProtected = false;
 }
 
 void NuPlayer::performScanSources() {
@@ -2236,6 +2299,7 @@
 
         case Source::kWhatPrepared:
         {
+            ALOGV("NuPlayer::onSourceNotify Source::kWhatPrepared source: %p", mSource.get());
             if (mSource == NULL) {
                 // This is a stale notification from a source that was
                 // asynchronously preparing when the client called reset().
@@ -2270,6 +2334,22 @@
             break;
         }
 
+        // Modular DRM
+        case Source::kWhatDrmInfo:
+        {
+            Parcel parcel;
+            sp<ABuffer> drmInfo;
+            CHECK(msg->findBuffer("drmInfo", &drmInfo));
+            parcel.setData(drmInfo->data(), drmInfo->size());
+
+            ALOGV("onSourceNotify() kWhatDrmInfo MEDIA_DRM_INFO drmInfo: %p  parcel size: %zu",
+                    drmInfo.get(), parcel.dataSize());
+
+            notifyListener(MEDIA_DRM_INFO, 0 /* ext1 */, 0 /* ext2 */, &parcel);
+
+            break;
+        }
+
         case Source::kWhatFlagsChanged:
         {
             uint32_t flags;
@@ -2277,6 +2357,19 @@
 
             sp<NuPlayerDriver> driver = mDriver.promote();
             if (driver != NULL) {
+
+                ALOGV("onSourceNotify() kWhatFlagsChanged  FLAG_CAN_PAUSE: %d  "
+                        "FLAG_CAN_SEEK_BACKWARD: %d \n\t\t\t\t FLAG_CAN_SEEK_FORWARD: %d  "
+                        "FLAG_CAN_SEEK: %d  FLAG_DYNAMIC_DURATION: %d \n"
+                        "\t\t\t\t FLAG_SECURE: %d  FLAG_PROTECTED: %d",
+                        (flags & Source::FLAG_CAN_PAUSE) != 0,
+                        (flags & Source::FLAG_CAN_SEEK_BACKWARD) != 0,
+                        (flags & Source::FLAG_CAN_SEEK_FORWARD) != 0,
+                        (flags & Source::FLAG_CAN_SEEK) != 0,
+                        (flags & Source::FLAG_DYNAMIC_DURATION) != 0,
+                        (flags & Source::FLAG_SECURE) != 0,
+                        (flags & Source::FLAG_PROTECTED) != 0);
+
                 if ((flags & NuPlayer::Source::FLAG_CAN_SEEK) == 0) {
                     driver->notifyListener(
                             MEDIA_INFO, MEDIA_INFO_NOT_SEEKABLE, 0);
@@ -2527,6 +2620,136 @@
         notifyListener(MEDIA_TIMED_TEXT, 0, 0);
     }
 }
+
+// Modular DRM begin
+status_t NuPlayer::prepareDrm(const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId)
+{
+    ALOGV("prepareDrm ");
+
+    // Passing to the looper anyway; called in a pre-config prepared state so no race on mCrypto
+    sp<AMessage> msg = new AMessage(kWhatPrepareDrm, this);
+    // synchronous call so just passing the address but with local copies of "const" args
+    uint8_t UUID[16];
+    memcpy(UUID, uuid, sizeof(UUID));
+    Vector<uint8_t> sessionId = drmSessionId;
+    msg->setPointer("uuid", (void*)UUID);
+    msg->setPointer("drmSessionId", (void*)&sessionId);
+
+    sp<AMessage> response;
+    status_t status = msg->postAndAwaitResponse(&response);
+
+    if (status == OK && response != NULL) {
+        CHECK(response->findInt32("status", &status));
+        ALOGV("prepareDrm ret: %d ", status);
+    } else {
+        ALOGE("prepareDrm err: %d", status);
+    }
+
+    return status;
+}
+
+status_t NuPlayer::releaseDrm()
+{
+    ALOGV("releaseDrm ");
+
+    sp<AMessage> msg = new AMessage(kWhatReleaseDrm, this);
+
+    sp<AMessage> response;
+    status_t status = msg->postAndAwaitResponse(&response);
+
+    if (status == OK && response != NULL) {
+        CHECK(response->findInt32("status", &status));
+        ALOGV("releaseDrm ret: %d ", status);
+    } else {
+        ALOGE("releaseDrm err: %d", status);
+    }
+
+    return status;
+}
+
+status_t NuPlayer::onPrepareDrm(const sp<AMessage> &msg)
+{
+    // TODO change to ALOGV
+    ALOGD("onPrepareDrm ");
+
+    status_t status = INVALID_OPERATION;
+    if (mSource == NULL) {
+        ALOGE("onPrepareDrm: No source. onPrepareDrm failed with %d.", status);
+        return status;
+    }
+
+    uint8_t *uuid;
+    Vector<uint8_t> *drmSessionId;
+    CHECK(msg->findPointer("uuid", (void**)&uuid));
+    CHECK(msg->findPointer("drmSessionId", (void**)&drmSessionId));
+
+    status = OK;
+    sp<ICrypto> crypto = NULL;
+
+    status = mSource->prepareDrm(uuid, *drmSessionId, &crypto);
+    if (crypto == NULL) {
+        ALOGE("onPrepareDrm: mSource->prepareDrm failed. status: %d", status);
+        return status;
+    }
+    ALOGV("onPrepareDrm: mSource->prepareDrm succeeded");
+
+    if (mCrypto != NULL) {
+        ALOGE("onPrepareDrm: Unexpected. Already having mCrypto: %p (%d)",
+                mCrypto.get(), mCrypto->getStrongCount());
+        mCrypto.clear();
+    }
+
+    mCrypto = crypto;
+    mIsDrmProtected = true;
+    // TODO change to ALOGV
+    ALOGD("onPrepareDrm: mCrypto: %p (%d)", mCrypto.get(),
+            (mCrypto != NULL ? mCrypto->getStrongCount() : 0));
+
+    return status;
+}
+
+status_t NuPlayer::onReleaseDrm()
+{
+    // TODO change to ALOGV
+    ALOGD("onReleaseDrm ");
+
+    if (!mIsDrmProtected) {
+        ALOGW("onReleaseDrm: Unexpected. mIsDrmProtected is already false.");
+    }
+
+    mIsDrmProtected = false;
+
+    status_t status;
+    if (mCrypto != NULL) {
+        status = OK;
+        // first making sure the codecs have released their crypto reference
+        const sp<DecoderBase> &videoDecoder = getDecoder(false/*audio*/);
+        if (videoDecoder != NULL) {
+            status = videoDecoder->releaseCrypto();
+            ALOGV("onReleaseDrm: video decoder ret: %d", status);
+        }
+
+        const sp<DecoderBase> &audioDecoder = getDecoder(true/*audio*/);
+        if (audioDecoder != NULL) {
+            status_t status_audio = audioDecoder->releaseCrypto();
+            if (status == OK) {   // otherwise, returning the first error
+                status = status_audio;
+            }
+            ALOGV("onReleaseDrm: audio decoder ret: %d", status_audio);
+        }
+
+        // TODO change to ALOGV
+        ALOGD("onReleaseDrm: mCrypto: %p (%d)", mCrypto.get(),
+                (mCrypto != NULL ? mCrypto->getStrongCount() : 0));
+        mCrypto.clear();
+    } else {   // mCrypto == NULL
+        ALOGE("onReleaseDrm: Unexpected. There is no crypto.");
+        status = INVALID_OPERATION;
+    }
+
+    return status;
+}
+// Modular DRM end
 ////////////////////////////////////////////////////////////////////////////////
 
 sp<AMessage> NuPlayer::Source::getFormat(bool audio) {
@@ -2559,12 +2782,24 @@
 }
 
 void NuPlayer::Source::notifyPrepared(status_t err) {
+    ALOGV("Source::notifyPrepared %d", err);
     sp<AMessage> notify = dupNotify();
     notify->setInt32("what", kWhatPrepared);
     notify->setInt32("err", err);
     notify->post();
 }
 
+void NuPlayer::Source::notifyDrmInfo(const sp<ABuffer> &drmInfoBuffer)
+{
+    ALOGV("Source::notifyDrmInfo");
+
+    sp<AMessage> notify = dupNotify();
+    notify->setInt32("what", kWhatDrmInfo);
+    notify->setBuffer("drmInfo", drmInfoBuffer);
+
+    notify->post();
+}
+
 void NuPlayer::Source::notifyInstantiateSecureDecoders(const sp<AMessage> &reply) {
     sp<AMessage> notify = dupNotify();
     notify->setInt32("what", kWhatInstantiateSecureDecoders);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index cc8c97a..d3cb7c1 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -19,6 +19,7 @@
 #define NU_PLAYER_H_
 
 #include <media/AudioResamplerPublic.h>
+#include <media/ICrypto.h>
 #include <media/MediaPlayerInterface.h>
 #include <media/stagefright/foundation/AHandler.h>
 
@@ -88,6 +89,10 @@
     sp<MetaData> getFileMeta();
     float getFrameRate();
 
+    // Modular DRM
+    status_t prepareDrm(const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId);
+    status_t releaseDrm();
+
 protected:
     virtual ~NuPlayer();
 
@@ -142,6 +147,8 @@
         kWhatSelectTrack                = 'selT',
         kWhatGetDefaultBufferingSettings = 'gDBS',
         kWhatSetBufferingSettings       = 'sBuS',
+        kWhatPrepareDrm                 = 'pDrm',
+        kWhatReleaseDrm                 = 'rDrm',
     };
 
     wp<NuPlayerDriver> mDriver;
@@ -223,6 +230,10 @@
     // Pause state as requested by source (internally) due to buffering
     bool mPausedForBuffering;
 
+    // Modular DRM
+    sp<ICrypto> mCrypto;
+    bool mIsDrmProtected;
+
     inline const sp<DecoderBase> &getDecoder(bool audio) {
         return audio ? mAudioDecoder : mVideoDecoder;
     }
@@ -294,6 +305,9 @@
 
     void writeTrackInfo(Parcel* reply, const sp<AMessage>& format) const;
 
+    status_t onPrepareDrm(const sp<AMessage> &msg);
+    status_t onReleaseDrm();
+
     DISALLOW_EVIL_CONSTRUCTORS(NuPlayer);
 };
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 0a0a8aa..9a2224e 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -23,6 +23,7 @@
 
 #include "NuPlayerCCDecoder.h"
 #include "NuPlayerDecoder.h"
+#include "NuPlayerDrm.h"
 #include "NuPlayerRenderer.h"
 #include "NuPlayerSource.h"
 
@@ -36,7 +37,7 @@
 #include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
-
+#include <media/stagefright/SurfaceUtils.h>
 #include <gui/Surface.h>
 
 #include "avc_utils.h"
@@ -231,21 +232,21 @@
                 //
                 // at this point MediaPlayerService::client has already connected to the
                 // surface, which MediaCodec does not expect
-                err = native_window_api_disconnect(surface.get(), NATIVE_WINDOW_API_MEDIA);
+                err = nativeWindowDisconnect(surface.get(), "kWhatSetVideoSurface(surface)");
                 if (err == OK) {
                     err = mCodec->setSurface(surface);
                     ALOGI_IF(err, "codec setSurface returned: %d", err);
                     if (err == OK) {
                         // reconnect to the old surface as MPS::Client will expect to
                         // be able to disconnect from it.
-                        (void)native_window_api_connect(mSurface.get(), NATIVE_WINDOW_API_MEDIA);
+                        (void)nativeWindowConnect(mSurface.get(), "kWhatSetVideoSurface(mSurface)");
                         mSurface = surface;
                     }
                 }
                 if (err != OK) {
                     // reconnect to the new surface on error as MPS::Client will expect to
                     // be able to disconnect from it.
-                    (void)native_window_api_connect(surface.get(), NATIVE_WINDOW_API_MEDIA);
+                    (void)nativeWindowConnect(surface.get(), "kWhatSetVideoSurface(err)");
                 }
             }
 
@@ -255,6 +256,13 @@
             break;
         }
 
+        case kWhatDrmReleaseCrypto:
+        {
+            ALOGV("kWhatDrmReleaseCrypto");
+            onReleaseCrypto(msg);
+            break;
+        }
+
         default:
             DecoderBase::onMessageReceived(msg);
             break;
@@ -305,15 +313,25 @@
     status_t err;
     if (mSurface != NULL) {
         // disconnect from surface as MediaCodec will reconnect
-        err = native_window_api_disconnect(
-                mSurface.get(), NATIVE_WINDOW_API_MEDIA);
+        err = nativeWindowDisconnect(mSurface.get(), "onConfigure");
         // We treat this as a warning, as this is a preparatory step.
         // Codec will try to connect to the surface, which is where
         // any error signaling will occur.
         ALOGW_IF(err != OK, "failed to disconnect from surface: %d", err);
     }
+
+    // Modular DRM
+    void *pCrypto;
+    if (!format->findPointer("crypto", &pCrypto)) {
+        pCrypto = NULL;
+    }
+    sp<ICrypto> crypto = (ICrypto*)pCrypto;
+    ALOGV("onConfigure mCrypto: %p (%d)  mIsSecure: %d",
+            crypto.get(), (crypto != NULL ? crypto->getStrongCount() : 0), mIsSecure);
+
     err = mCodec->configure(
-            format, mSurface, NULL /* crypto */, 0 /* flags */);
+            format, mSurface, crypto, 0 /* flags */);
+
     if (err != OK) {
         ALOGE("Failed to configure %s decoder (err=%d)", mComponentName.c_str(), err);
         mCodec->release();
@@ -491,8 +509,7 @@
 
         if (mSurface != NULL) {
             // reconnect to surface as MediaCodec disconnected from it
-            status_t error =
-                    native_window_api_connect(mSurface.get(), NATIVE_WINDOW_API_MEDIA);
+            status_t error = nativeWindowConnect(mSurface.get(), "onShutdown");
             ALOGW_IF(error != NO_ERROR,
                     "[%s] failed to connect to native window, error=%d",
                     mComponentName.c_str(), error);
@@ -559,6 +576,43 @@
     notify->post();
 }
 
+status_t NuPlayer::Decoder::releaseCrypto()
+{
+    ALOGV("releaseCrypto");
+
+    sp<AMessage> msg = new AMessage(kWhatDrmReleaseCrypto, this);
+
+    sp<AMessage> response;
+    status_t status = msg->postAndAwaitResponse(&response);
+    if (status == OK && response != NULL) {
+        CHECK(response->findInt32("status", &status));
+        ALOGV("releaseCrypto ret: %d ", status);
+    } else {
+        ALOGE("releaseCrypto err: %d", status);
+    }
+
+    return status;
+}
+
+void NuPlayer::Decoder::onReleaseCrypto(const sp<AMessage>& msg)
+{
+    status_t status = INVALID_OPERATION;
+    if (mCodec != NULL) {
+        status = mCodec->releaseCrypto();
+    } else {
+        // return OK if the codec has already been released
+        status = OK;
+        ALOGE("onReleaseCrypto No mCodec. err: %d", status);
+    }
+
+    sp<AMessage> response = new AMessage;
+    response->setInt32("status", status);
+
+    sp<AReplyToken> replyID;
+    CHECK(msg->senderAwaitsResponse(&replyID));
+    response->postReply(replyID);
+}
+
 bool NuPlayer::Decoder::handleAnInputBuffer(size_t index) {
     if (isDiscontinuityPending()) {
         return false;
@@ -929,6 +983,10 @@
             flags |= MediaCodec::BUFFER_FLAG_CODECCONFIG;
         }
 
+        // Modular DRM
+        MediaBuffer *mediaBuf = NULL;
+        NuPlayerDrm::CryptoInfo *cryptInfo = NULL;
+
         // copy into codec buffer
         if (needsCopy) {
             if (buffer->size() > codecBuffer->capacity()) {
@@ -936,24 +994,68 @@
                 mDequeuedInputBuffers.push_back(bufferIx);
                 return false;
             }
-            codecBuffer->setRange(0, buffer->size());
-            memcpy(codecBuffer->data(), buffer->data(), buffer->size());
-        }
 
-        status_t err = mCodec->queueInputBuffer(
-                        bufferIx,
-                        codecBuffer->offset(),
-                        codecBuffer->size(),
-                        timeUs,
-                        flags);
+            if (buffer->data() != NULL) {
+                codecBuffer->setRange(0, buffer->size());
+                memcpy(codecBuffer->data(), buffer->data(), buffer->size());
+            } else { // No buffer->data()
+                // Modular DRM
+                mediaBuf = (MediaBuffer*)buffer->getMediaBufferBase();
+                if (mediaBuf != NULL) {
+                    codecBuffer->setRange(0, mediaBuf->size());
+                    memcpy(codecBuffer->data(), mediaBuf->data(), mediaBuf->size());
+
+                    sp<MetaData> meta_data = mediaBuf->meta_data();
+                    cryptInfo = NuPlayerDrm::getSampleCryptoInfo(meta_data);
+
+                    // since getMediaBufferBase() has incremented the refCount
+                    mediaBuf->release();
+                } else { // No mediaBuf
+                    ALOGE("onInputBufferFetched: buffer->data()/mediaBuf are NULL for %p",
+                            buffer.get());
+                    handleError(UNKNOWN_ERROR);
+                    return false;
+                }
+            } // buffer->data()
+        } // needsCopy
+
+        status_t err;
+        AString errorDetailMsg;
+        if (cryptInfo != NULL) {
+            err = mCodec->queueSecureInputBuffer(
+                    bufferIx,
+                    codecBuffer->offset(),
+                    cryptInfo->subSamples,
+                    cryptInfo->numSubSamples,
+                    cryptInfo->key,
+                    cryptInfo->iv,
+                    cryptInfo->mode,
+                    cryptInfo->pattern,
+                    timeUs,
+                    flags,
+                    &errorDetailMsg);
+            // synchronous call so done with cryptInfo here
+            free(cryptInfo);
+        } else {
+            err = mCodec->queueInputBuffer(
+                    bufferIx,
+                    codecBuffer->offset(),
+                    codecBuffer->size(),
+                    timeUs,
+                    flags,
+                    &errorDetailMsg);
+        } // no cryptInfo
+
         if (err != OK) {
-            ALOGE("Failed to queue input buffer for %s (err=%d)",
-                    mComponentName.c_str(), err);
+            ALOGE("onInputBufferFetched: queue%sInputBuffer failed for %s (err=%d, %s)",
+                    (cryptInfo != NULL ? "Secure" : ""),
+                    mComponentName.c_str(), err, errorDetailMsg.c_str());
             handleError(err);
         } else {
             mInputBufferIsDequeued.editItemAt(bufferIx) = false;
         }
-    }
+
+    }   // buffer != NULL
     return true;
 }
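
onConfigure() above expects the session's ICrypto as a raw pointer in the decoder format; a sketch of the assumed producer side (the NuPlayer code that instantiates decoders is not part of this hunk):

    // Assumed sketch: attach the crypto object before configuring the video decoder.
    sp<AMessage> format = mSource->getFormat(false /* audio */);
    if (mIsDrmProtected && mCrypto != NULL) {
        format->setPointer("crypto", mCrypto.get());
    }
    mVideoDecoder->init();
    mVideoDecoder->configure(format);
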
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
index 82db59c..de21379 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
@@ -39,6 +39,8 @@
     // sets the output surface of video decoders.
     virtual status_t setVideoSurface(const sp<Surface> &surface);
 
+    virtual status_t releaseCrypto();
+
 protected:
     virtual ~Decoder();
 
@@ -57,7 +59,8 @@
         kWhatCodecNotify         = 'cdcN',
         kWhatRenderBuffer        = 'rndr',
         kWhatSetVideoSurface     = 'sSur',
-        kWhatAudioOutputFormatChanged = 'aofc'
+        kWhatAudioOutputFormatChanged = 'aofc',
+        kWhatDrmReleaseCrypto    = 'rDrm',
     };
 
     enum {
@@ -135,6 +138,8 @@
 
     void notifyResumeCompleteIfNecessary();
 
+    void onReleaseCrypto(const sp<AMessage>& msg);
+
     DISALLOW_EVIL_CONSTRUCTORS(Decoder);
 };
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
index 6811903..dcdfcaf 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
@@ -51,6 +51,10 @@
         return mStats;
     }
 
+    virtual status_t releaseCrypto() {
+        return INVALID_OPERATION;
+    }
+
     enum {
         kWhatInputDiscontinuity  = 'inDi',
         kWhatVideoSizeChanged    = 'viSC',
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 0ddbd63..abea5bc 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -884,8 +884,8 @@
 
 void NuPlayerDriver::notifyListener_l(
         int msg, int ext1, int ext2, const Parcel *in) {
-    ALOGD("notifyListener_l(%p), (%d, %d, %d), loop setting(%d, %d)",
-            this, msg, ext1, ext2, mAutoLoop, mLooping);
+    ALOGD("notifyListener_l(%p), (%d, %d, %d, %d), loop setting(%d, %d)",
+            this, msg, ext1, ext2, (in == NULL ? -1 : (int)in->dataSize()), mAutoLoop, mLooping);
     switch (msg) {
         case MEDIA_PLAYBACK_COMPLETE:
         {
@@ -943,6 +943,8 @@
 }
 
 void NuPlayerDriver::notifyPrepareCompleted(status_t err) {
+    ALOGV("notifyPrepareCompleted %d", err);
+
     Mutex::Autolock autoLock(mLock);
 
     if (mState != STATE_PREPARING) {
@@ -987,4 +989,33 @@
     mPlayerFlags = flags;
 }
 
+// Modular DRM
+status_t NuPlayerDriver::prepareDrm(const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId)
+{
+    ALOGV("prepareDrm(%p) state: %d", this, mState);
+
+    Mutex::Autolock autoLock(mLock);
+
+    // leaving the state verification for mediaplayer.cpp
+    status_t ret = mPlayer->prepareDrm(uuid, drmSessionId);
+
+    ALOGV("prepareDrm ret: %d", ret);
+
+    return ret;
+}
+
+status_t NuPlayerDriver::releaseDrm()
+{
+    ALOGV("releaseDrm(%p) state: %d", this, mState);
+
+    Mutex::Autolock autoLock(mLock);
+
+    // leaving the state verification for mediaplayer.cpp
+    status_t ret = mPlayer->releaseDrm();
+
+    ALOGV("releaseDrm ret: %d", ret);
+
+    return ret;
+}
+
 }  // namespace android
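
The "leaving the state verification for mediaplayer.cpp" comments assume a particular call ordering at the client; an illustrative summary, not part of this change:

    // Assumed ordering: prepareDrm() runs after prepare completes (MEDIA_DRM_INFO /
    // MEDIA_PREPARED delivered) and before start(); releaseDrm() runs once playback
    // has been stopped or reset.
    //   setDataSource() -> prepareAsync() -> prepareDrm(uuid, drmSessionId)
    //   -> start() -> ... -> stop()/reset() -> releaseDrm()
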
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
index 5bfc539..972a348 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
@@ -87,6 +87,10 @@
     void notifyListener(int msg, int ext1 = 0, int ext2 = 0, const Parcel *in = NULL);
     void notifyFlagsChanged(uint32_t flags);
 
+    // Modular DRM
+    virtual status_t prepareDrm(const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId);
+    virtual status_t releaseDrm();
+
 protected:
     virtual ~NuPlayerDriver();
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
new file mode 100644
index 0000000..ce6cedc
--- /dev/null
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "NuPlayerDrm"
+
+#include "NuPlayerDrm.h"
+
+#include <binder/IServiceManager.h>
+#include <media/IMediaDrmService.h>
+#include <utils/Log.h>
+
+
+namespace android {
+
+// static helpers - internal
+
+sp<IDrm> NuPlayerDrm::CreateDrm(status_t *pstatus)
+{
+    status_t &status = *pstatus;
+    sp<IServiceManager> sm = defaultServiceManager();
+    sp<IBinder> binder = sm->getService(String16("media.drm"));
+    ALOGV("CreateDrm binder %p", (binder != NULL ? binder.get() : 0));
+
+    sp<IMediaDrmService> service = interface_cast<IMediaDrmService>(binder);
+    if (service == NULL) {
+        ALOGE("CreateDrm failed at IMediaDrmService");
+        return NULL;
+    }
+
+    sp<IDrm> drm = service->makeDrm();
+    if (drm == NULL) {
+        ALOGE("CreateDrm failed at makeDrm");
+        return NULL;
+    }
+
+    // this is before plugin creation so NO_INIT is fine
+    status = drm->initCheck();
+    if (status != OK && status != NO_INIT) {
+        ALOGE("CreateDrm failed drm->initCheck(): %d", status);
+        return NULL;
+    }
+    return drm;
+}
+
+sp<ICrypto> NuPlayerDrm::createCrypto(status_t *pstatus)
+{
+    status_t &status = *pstatus;
+    sp<IServiceManager> sm = defaultServiceManager();
+    sp<IBinder> binder = sm->getService(String16("media.drm"));
+
+    sp<IMediaDrmService> service = interface_cast<IMediaDrmService>(binder);
+    if (service == NULL) {
+        status = UNKNOWN_ERROR;
+        ALOGE("CreateCrypto failed at IMediaDrmService");
+        return NULL;
+    }
+
+    sp<ICrypto> crypto = service->makeCrypto();
+    if (crypto == NULL) {
+        status = UNKNOWN_ERROR;
+        ALOGE("createCrypto failed");
+        return NULL;
+    }
+
+    // this is before plugin creation so NO_INIT is fine
+    status = crypto->initCheck();
+    if (status != OK && status != NO_INIT) {
+        ALOGE("createCrypto failed crypto->initCheck(): %d", status);
+        return NULL;
+    }
+
+    return crypto;
+}
+
+Vector<DrmUUID> NuPlayerDrm::parsePSSH(const void *pssh, size_t psshsize)
+{
+    Vector<DrmUUID> drmSchemes, empty;
+    const int DATALEN_SIZE = 4;
+
+    // the format of the buffer is 1 or more of:
+    //    {
+    //        16 byte uuid
+    //        4 byte data length N
+    //        N bytes of data
+    //    }
+    // Determine the number of entries in the source data.
+    // Since we got the data from stagefright, we trust it is valid and properly formatted.
+
+    const uint8_t *data = (const uint8_t*)pssh;
+    size_t len = psshsize;
+    size_t numentries = 0;
+    while (len > 0) {
+        if (len < DrmUUID::UUID_SIZE) {
+            ALOGE("ParsePSSH: invalid PSSH data");
+            return empty;
+        }
+
+        const uint8_t *uuidPtr = data;
+
+        // skip uuid
+        data += DrmUUID::UUID_SIZE;
+        len -= DrmUUID::UUID_SIZE;
+
+        // get data length
+        if (len < DATALEN_SIZE) {
+            ALOGE("ParsePSSH: invalid PSSH data");
+            return empty;
+        }
+
+        uint32_t datalen = *((uint32_t*)data);
+        data += DATALEN_SIZE;
+        len -= DATALEN_SIZE;
+
+        if (len < datalen) {
+            ALOGE("ParsePSSH: invalid PSSH data");
+            return empty;
+        }
+
+        // skip the data
+        data += datalen;
+        len -= datalen;
+
+        DrmUUID _uuid(uuidPtr);
+        drmSchemes.add(_uuid);
+
+        ALOGV("ParsePSSH[%zu]: %s: %s", numentries,
+                _uuid.toHexString().string(),
+                DrmUUID::arrayToHex(data, datalen).string()
+             );
+
+        numentries++;
+    }
+
+    return drmSchemes;
+}
+
+Vector<DrmUUID> NuPlayerDrm::getSupportedDrmSchemes(const void *pssh, size_t psshsize)
+{
+    Vector<DrmUUID> psshDRMs = parsePSSH(pssh, psshsize);
+
+    Vector<DrmUUID> supportedDRMs;
+    // temporary DRM object for crypto scheme enquiry (without creating a plugin)
+    status_t status = OK;
+    sp<IDrm> drm = CreateDrm(&status);
+    if (drm != NULL) {
+        for (size_t i = 0; i < psshDRMs.size(); i++) {
+            DrmUUID uuid = psshDRMs[i];
+            if (drm->isCryptoSchemeSupported(uuid.ptr(), String8()))
+                supportedDRMs.add(uuid);
+        }
+
+        drm.clear();
+    } else {
+        ALOGE("getSupportedDrmSchemes: Can't create Drm obj: %d", status);
+    }
+
+    ALOGV("getSupportedDrmSchemes: psshDRMs: %zu supportedDRMs: %zu",
+            psshDRMs.size(), supportedDRMs.size());
+
+    return supportedDRMs;
+}
+
+// static helpers - public
+
+sp<ICrypto> NuPlayerDrm::createCryptoAndPlugin(const uint8_t uuid[16],
+        const Vector<uint8_t> &drmSessionId, status_t &status)
+{
+    // Extra check
+    if (drmSessionId.isEmpty()) {
+        status = INVALID_OPERATION;
+        ALOGE("createCryptoAndPlugin: Failed. Empty drmSessionId. status: %d", status);
+        return NULL;
+    }
+
+    status = OK;
+    sp<ICrypto> crypto = createCrypto(&status);
+    if (crypto == NULL) {
+        ALOGE("createCryptoAndPlugin: createCrypto failed. status: %d", status);
+        return NULL;
+    }
+    ALOGV("createCryptoAndPlugin: createCrypto succeeded");
+
+    status = crypto->createPlugin(uuid, drmSessionId.array(), drmSessionId.size());
+    if (status != OK) {
+        ALOGE("createCryptoAndPlugin: createCryptoPlugin failed. status: %d", status);
+        // crypto will clean itself when leaving the current scope
+        return NULL;
+    }
+
+    return crypto;
+}
+
+// Parcel has only private copy constructor so passing it in rather than returning
+void NuPlayerDrm::retrieveDrmInfo(const void *pssh, size_t psshsize,
+        const Vector<String8> &mimes_in, Parcel *parcel)
+{
+    // 0) Make mimes a vector of unique items while keeping the original order; video first
+    Vector<String8> mimes;
+    for (size_t j = 0; j < mimes_in.size(); j++) {
+        String8 mime = mimes_in[j];
+        bool exists = false;
+        for (size_t i = 0; i < mimes.size() && !exists; i++) {
+            if (mimes[i] == mime) {
+                exists = true;
+            }
+        } // for i
+
+        if (!exists) {
+            mimes.add(mime);
+        }
+    } // for j
+
+
+    // 1) PSSH bytes
+    parcel->writeUint32(psshsize);
+    parcel->writeByteArray(psshsize, (const uint8_t*)pssh);
+
+    ALOGV("retrieveDrmInfo: MEDIA_DRM_INFO  PSSH: size: %zu %s", psshsize,
+            DrmUUID::arrayToHex((uint8_t*)pssh, psshsize).string());
+
+    // 2) supportedDRMs
+    Vector<DrmUUID> supportedDRMs = getSupportedDrmSchemes(pssh, psshsize);
+    parcel->writeUint32(supportedDRMs.size());
+    for (size_t i = 0; i < supportedDRMs.size(); i++) {
+        DrmUUID uuid = supportedDRMs[i];
+        parcel->writeByteArray(DrmUUID::UUID_SIZE, uuid.ptr());
+
+        ALOGV("retrieveDrmInfo: MEDIA_DRM_INFO  supportedScheme[%zu] %s", i,
+                uuid.toHexString().string());
+    }
+
+    // TODO: remove mimes after it's removed from Java DrmInfo
+    // 3) mimes
+    parcel->writeUint32(mimes.size());
+    for (size_t i = 0; i < mimes.size(); i++) {
+        // writing as String16 so the Java framework side can unpack it to Java String
+        String16 mime(mimes[i]);
+        parcel->writeString16(mime);
+
+        ALOGV("retrieveDrmInfo: MEDIA_DRM_INFO  MIME[%zu] %s",
+                i, mimes[i].string());
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////
+/// Helpers for NuPlayerDecoder
+////////////////////////////////////////////////////////////////////////////////////////////
+
+NuPlayerDrm::CryptoInfo *NuPlayerDrm::makeCryptoInfo(
+        int numSubSamples,
+        uint8_t key[kBlockSize],
+        uint8_t iv[kBlockSize],
+        CryptoPlugin::Mode mode,
+        size_t *clearbytes,
+        size_t *encryptedbytes)
+{
+    // size needed to store all the crypto data
+    size_t cryptosize = sizeof(CryptoInfo) +
+                        sizeof(CryptoPlugin::SubSample) * numSubSamples;
+    CryptoInfo *ret = (CryptoInfo*) malloc(cryptosize);
+    if (ret == NULL) {
+        ALOGE("couldn't allocate %zu bytes", cryptosize);
+        return NULL;
+    }
+    ret->numSubSamples = numSubSamples;
+    memcpy(ret->key, key, kBlockSize);
+    memcpy(ret->iv, iv, kBlockSize);
+    ret->mode = mode;
+    ret->pattern.mEncryptBlocks = 0;
+    ret->pattern.mSkipBlocks = 0;
+    ret->subSamples = (CryptoPlugin::SubSample*)(ret + 1);
+    CryptoPlugin::SubSample *subSamples = ret->subSamples;
+
+    for (int i = 0; i < numSubSamples; i++) {
+        subSamples[i].mNumBytesOfClearData = (clearbytes == NULL) ? 0 : clearbytes[i];
+        subSamples[i].mNumBytesOfEncryptedData = (encryptedbytes == NULL) ?
+                                                  0 :
+                                                  encryptedbytes[i];
+    }
+
+    return ret;
+}
+
+NuPlayerDrm::CryptoInfo *NuPlayerDrm::getSampleCryptoInfo(sp<MetaData> meta)
+{
+    uint32_t type;
+    const void *crypteddata;
+    size_t cryptedsize;
+
+    if (meta == NULL) {
+        ALOGE("getSampleCryptoInfo: Unexpected. No meta data for sample.");
+        return NULL;
+    }
+
+    if (!meta->findData(kKeyEncryptedSizes, &type, &crypteddata, &cryptedsize)) {
+        return NULL;
+    }
+    size_t numSubSamples = cryptedsize / sizeof(size_t);
+
+    if (numSubSamples <= 0) {
+        ALOGE("getSampleCryptoInfo INVALID numSubSamples: %zu", numSubSamples);
+        return NULL;
+    }
+
+    const void *cleardata;
+    size_t clearsize;
+    if (meta->findData(kKeyPlainSizes, &type, &cleardata, &clearsize)) {
+        if (clearsize != cryptedsize) {
+            // The two must be of the same length.
+            ALOGE("getSampleCryptoInfo mismatch cryptedsize: %zu != clearsize: %zu",
+                    cryptedsize, clearsize);
+            return NULL;
+        }
+    }
+
+    const void *key;
+    size_t keysize;
+    if (meta->findData(kKeyCryptoKey, &type, &key, &keysize)) {
+        if (keysize != kBlockSize) {
+            ALOGE("getSampleCryptoInfo Keys must be %d bytes in length: %zu",
+                    kBlockSize, keysize);
+            // Keys must be 16 bytes in length.
+            return NULL;
+        }
+    }
+
+    const void *iv;
+    size_t ivsize;
+    if (meta->findData(kKeyCryptoIV, &type, &iv, &ivsize)) {
+        if (ivsize != kBlockSize) {
+            ALOGE("getSampleCryptoInfo IV must be %d bytes in length: %zu",
+                    kBlockSize, ivsize);
+            // IVs must be 16 bytes in length.
+            return NULL;
+        }
+    }
+
+    int32_t mode;
+    if (!meta->findInt32(kKeyCryptoMode, &mode)) {
+        mode = CryptoPlugin::kMode_AES_CTR;
+    }
+
+    return makeCryptoInfo(numSubSamples,
+            (uint8_t*) key,
+            (uint8_t*) iv,
+            (CryptoPlugin::Mode)mode,
+            (size_t*) cleardata,
+            (size_t*) crypteddata);
+}
+
+}   // namespace android
+
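
The {uuid, length, data} layout that parsePSSH() documents can be illustrated with a hand-built blob; note the 4-byte length is read through a plain uint32_t cast, so native byte order is assumed here as well (illustrative only, not code from this change):

    // One PSSH entry with 2 bytes of scheme-specific data.
    uint8_t blob[16 + 4 + 2] = {0};                 // bytes 0..15: scheme UUID (zeroed here)
    uint32_t datalen = 2;
    memcpy(blob + 16, &datalen, sizeof(datalen));   // bytes 16..19: data length
    blob[20] = 0xde;                                // bytes 20..21: the data itself
    blob[21] = 0xad;
    // NuPlayerDrm::parsePSSH(blob, sizeof(blob)) yields a single DrmUUID entry.
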
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.h b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.h
new file mode 100644
index 0000000..6704bd1
--- /dev/null
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NUPLAYER_DRM_H_
+#define NUPLAYER_DRM_H_
+
+#include <binder/Parcel.h>
+#include <media/ICrypto.h>
+#include <media/IDrm.h>
+#include <media/stagefright/MetaData.h> // for CryptoInfo
+
+
+namespace android {
+
+    struct DrmUUID {
+        static const int UUID_SIZE = 16;
+
+        DrmUUID() {
+            memset(this->uuid, 0, sizeof(uuid));
+        }
+
+        // to allow defining Vector/KeyedVector of UUID type
+        DrmUUID(const DrmUUID &a) {
+            memcpy(this->uuid, a.uuid, sizeof(uuid));
+        }
+
+        // to allow defining Vector/KeyedVector of UUID type
+        DrmUUID(const uint8_t uuid_in[UUID_SIZE]) {
+            memcpy(this->uuid, uuid_in, sizeof(uuid));
+        }
+
+        const uint8_t *ptr() const {
+            return uuid;
+        }
+
+        String8 toHexString() const {
+            return arrayToHex(uuid, UUID_SIZE);
+        }
+
+        static String8 toHexString(const uint8_t uuid_in[UUID_SIZE]) {
+            return arrayToHex(uuid_in, UUID_SIZE);
+        }
+
+        static String8 arrayToHex(const uint8_t *array, int bytes) {
+            String8 result;
+            for (int i = 0; i < bytes; i++) {
+                result.appendFormat("%02x", array[i]);
+            }
+
+            return result;
+        }
+
+    protected:
+        uint8_t uuid[UUID_SIZE];
+    };
+
+
+    struct NuPlayerDrm {
+
+        // static helpers - internal
+
+    protected:
+        static sp<IDrm> CreateDrm(status_t *pstatus);
+        static sp<ICrypto> createCrypto(status_t *pstatus);
+        static Vector<DrmUUID> parsePSSH(const void *pssh, size_t psshsize);
+        static Vector<DrmUUID> getSupportedDrmSchemes(const void *pssh, size_t psshsize);
+
+        // static helpers - public
+
+    public:
+        static sp<ICrypto> createCryptoAndPlugin(const uint8_t uuid[16],
+                const Vector<uint8_t> &drmSessionId, status_t &status);
+        // Parcel has only private copy constructor so passing it in rather than returning
+        static void retrieveDrmInfo(const void *pssh, size_t psshsize,
+                const Vector<String8> &mimes_in, Parcel *parcel);
+
+        ////////////////////////////////////////////////////////////////////////////////////////////
+        /// Helpers for NuPlayerDecoder
+        ////////////////////////////////////////////////////////////////////////////////////////////
+
+        static const uint8_t kBlockSize = 16; // AES_BLOCK_SIZE
+
+        struct CryptoInfo {
+            int numSubSamples;
+            uint8_t key[kBlockSize];
+            uint8_t iv[kBlockSize];
+            CryptoPlugin::Mode mode;
+            CryptoPlugin::Pattern pattern;
+            CryptoPlugin::SubSample *subSamples;
+        };
+
+        static CryptoInfo *makeCryptoInfo(
+                int numSubSamples,
+                uint8_t key[kBlockSize],
+                uint8_t iv[kBlockSize],
+                CryptoPlugin::Mode mode,
+                size_t *clearbytes,
+                size_t *encryptedbytes);
+
+        static CryptoInfo *getSampleCryptoInfo(sp<MetaData> meta);
+
+    };  // NuPlayerDrm
+
+}   // android
+
+#endif     //NUPLAYER_DRM_H_
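
A usage sketch for the CryptoInfo helpers above (illustrative, not code from this change): a single fully-clear subsample; the result is one malloc'ed block, so it is released with free(), matching the decoder path in NuPlayerDecoder.cpp.

    size_t clearBytes[1]     = { 1024 };
    size_t encryptedBytes[1] = { 0 };
    uint8_t key[NuPlayerDrm::kBlockSize] = {0};
    uint8_t iv[NuPlayerDrm::kBlockSize]  = {0};

    NuPlayerDrm::CryptoInfo *info = NuPlayerDrm::makeCryptoInfo(
            1 /* numSubSamples */, key, iv,
            CryptoPlugin::kMode_Unencrypted, clearBytes, encryptedBytes);
    if (info != NULL) {
        // hand info->subSamples / key / iv / mode / pattern to queueSecureInputBuffer()
        free(info);
    }
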
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index 0429ef1..e7cca27 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -20,9 +20,10 @@
 
 #include "NuPlayer.h"
 
+#include <media/ICrypto.h>
+#include <media/mediaplayer.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MetaData.h>
-#include <media/mediaplayer.h>
 #include <utils/Vector.h>
 
 namespace android {
@@ -55,6 +56,8 @@
         kWhatQueueDecoderShutdown,
         kWhatDrmNoLicense,
         kWhatInstantiateSecureDecoders,
+        // Modular DRM
+        kWhatDrmInfo,
     };
 
     // The provides message is used to notify the player about various
@@ -132,6 +135,17 @@
 
     virtual void setOffloadAudio(bool /* offload */) {}
 
+    // Modular DRM
+    virtual status_t prepareDrm(
+            const uint8_t /*uuid*/[16], const Vector<uint8_t> &/*drmSessionId*/,
+            sp<ICrypto> */*crypto*/) {
+        return INVALID_OPERATION;
+    }
+
+    virtual status_t releaseDrm() {
+        return INVALID_OPERATION;
+    }
+
 protected:
     virtual ~Source() {}
 
@@ -143,6 +157,8 @@
     void notifyVideoSizeChanged(const sp<AMessage> &format = NULL);
     void notifyInstantiateSecureDecoders(const sp<AMessage> &reply);
     void notifyPrepared(status_t err = OK);
+    // Modular DRM
+    void notifyDrmInfo(const sp<ABuffer> &buffer);
 
 private:
     sp<AMessage> mNotify;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 3235e81..3f56725 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -575,7 +575,7 @@
 
     changeState(mUninitializedState);
 
-    updateTrebleFlag();
+    mTrebleFlag = false;
 }
 
 ACodec::~ACodec() {
@@ -6229,11 +6229,12 @@
     CHECK(mCodec->mOMXNode == NULL);
 
     OMXClient client;
-    if ((mCodec->updateTrebleFlag() ?
-            client.connectTreble() : client.connect()) != OK) {
+    bool trebleFlag;
+    if (client.connect(&trebleFlag) != OK) {
         mCodec->signalError(OMX_ErrorUndefined, NO_INIT);
         return false;
     }
+    mCodec->setTrebleFlag(trebleFlag);
 
     sp<IOMX> omx = client.interface();
 
@@ -6553,7 +6554,7 @@
 
     if (mCodec->mCreateInputBuffersSuspended) {
         err = statusFromBinderStatus(
-                mCodec->mGraphicBufferSource->setSuspend(true));
+                mCodec->mGraphicBufferSource->setSuspend(true, -1));
 
         if (err != OK) {
             ALOGE("[%s] Unable to configure option to suspend (err %d)",
@@ -7117,8 +7118,10 @@
             return INVALID_OPERATION;
         }
 
+        int64_t suspendStartTimeUs = -1;
+        (void) params->findInt64("drop-start-time-us", &suspendStartTimeUs);
         status_t err = statusFromBinderStatus(
-                mGraphicBufferSource->setSuspend(dropInputFrames != 0));
+                mGraphicBufferSource->setSuspend(dropInputFrames != 0, suspendStartTimeUs));
 
         if (err != OK) {
             ALOGE("Failed to set parameter 'drop-input-frames' (err %d)", err);
@@ -7126,6 +7129,22 @@
         }
     }
 
+    int64_t stopTimeUs;
+    if (params->findInt64("stop-time-us", &stopTimeUs)) {
+        if (mGraphicBufferSource == NULL) {
+            ALOGE("[%s] Invalid to set stop time without surface",
+                    mComponentName.c_str());
+            return INVALID_OPERATION;
+        }
+        status_t err = statusFromBinderStatus(
+                mGraphicBufferSource->setStopTimeUs(stopTimeUs));
+
+        if (err != OK) {
+            ALOGE("Failed to set parameter 'stop-time-us' (err %d)", err);
+            return err;
+        }
+    }
+
     int32_t dummy;
     if (params->findInt32("request-sync", &dummy)) {
         status_t err = requestIDRFrame();
@@ -7676,8 +7695,7 @@
     }
 
     OMXClient client;
-    status_t err = getTrebleFlag() ?
-            client.connectTreble() : client.connect();
+    status_t err = client.connect();
     if (err != OK) {
         return err;
     }
@@ -7893,11 +7911,8 @@
     return OK;
 }
 
-bool ACodec::updateTrebleFlag() {
-    mTrebleFlag = bool(property_get_bool("debug.treble_omx", 0));
-    ALOGV("updateTrebleFlag() returns %s",
-            mTrebleFlag ? "true" : "false");
-    return mTrebleFlag;
+void ACodec::setTrebleFlag(bool trebleFlag) {
+    mTrebleFlag = trebleFlag;
 }
 
 bool ACodec::getTrebleFlag() const {
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index a332cce..03010ab 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -758,6 +758,51 @@
     return err;
 }
 
+status_t MediaCodec::releaseCrypto()
+{
+    ALOGV("releaseCrypto");
+
+    sp<AMessage> msg = new AMessage(kWhatDrmReleaseCrypto, this);
+
+    sp<AMessage> response;
+    status_t status = msg->postAndAwaitResponse(&response);
+
+    if (status == OK && response != NULL) {
+        CHECK(response->findInt32("status", &status));
+        ALOGV("releaseCrypto ret: %d ", status);
+    } else {
+        ALOGE("releaseCrypto err: %d", status);
+    }
+
+    return status;
+}
+
+void MediaCodec::onReleaseCrypto(const sp<AMessage>& msg)
+{
+    status_t status = INVALID_OPERATION;
+    if (mCrypto != NULL) {
+        ALOGV("onReleaseCrypto: mCrypto: %p (%d)", mCrypto.get(), mCrypto->getStrongCount());
+        mBufferChannel->setCrypto(NULL);
+        // TODO change to ALOGV
+        ALOGD("onReleaseCrypto: [before clear]  mCrypto: %p (%d)",
+                mCrypto.get(), mCrypto->getStrongCount());
+        mCrypto.clear();
+
+        status = OK;
+    } else {
+        ALOGW("onReleaseCrypto: No mCrypto. err: %d", status);
+    }
+
+    sp<AMessage> response = new AMessage;
+    response->setInt32("status", status);
+
+    sp<AReplyToken> replyID;
+    CHECK(msg->senderAwaitsResponse(&replyID));
+    response->postReply(replyID);
+}
+
 status_t MediaCodec::setInputSurface(
         const sp<PersistentSurface> &surface) {
     sp<AMessage> msg = new AMessage(kWhatSetInputSurface, this);
@@ -1938,9 +1983,15 @@
                 crypto = NULL;
             }
 
+            ALOGV("kWhatConfigure: Old mCrypto: %p (%d)",
+                    mCrypto.get(), (mCrypto != NULL ? mCrypto->getStrongCount() : 0));
+
             mCrypto = static_cast<ICrypto *>(crypto);
             mBufferChannel->setCrypto(mCrypto);
 
+            ALOGV("kWhatConfigure: New mCrypto: %p (%d)",
+                    mCrypto.get(), (mCrypto != NULL ? mCrypto->getStrongCount() : 0));
+
             uint32_t flags;
             CHECK(msg->findInt32("flags", (int32_t *)&flags));
 
@@ -2471,6 +2522,12 @@
             break;
         }
 
+        case kWhatDrmReleaseCrypto:
+        {
+            onReleaseCrypto(msg);
+            break;
+        }
+
         default:
             TRESPASS();
     }
@@ -2530,6 +2587,10 @@
         delete mSoftRenderer;
         mSoftRenderer = NULL;
 
+        if (mCrypto != NULL) {
+            ALOGV("setState: ~mCrypto: %p (%d)",
+                    mCrypto.get(), (mCrypto != NULL ? mCrypto->getStrongCount() : 0));
+        }
         mCrypto.clear();
         handleSetSurface(NULL);
 
@@ -2853,7 +2914,7 @@
             return ALREADY_EXISTS;
         }
 
-        err = native_window_api_connect(surface.get(), NATIVE_WINDOW_API_MEDIA);
+        err = nativeWindowConnect(surface.get(), "connectToSurface");
         if (err == OK) {
             // Require a fresh set of buffers after each connect by using a unique generation
             // number. Rely on the fact that max supported process id by Linux is 2^22.
@@ -2868,12 +2929,12 @@
             // This is needed as the consumer may be holding onto stale frames that it can reattach
             // to this surface after disconnect/connect, and those free frames would inherit the new
             // generation number. Disconnecting after setting a unique generation prevents this.
-            native_window_api_disconnect(surface.get(), NATIVE_WINDOW_API_MEDIA);
-            err = native_window_api_connect(surface.get(), NATIVE_WINDOW_API_MEDIA);
+            nativeWindowDisconnect(surface.get(), "connectToSurface(reconnect)");
+            err = nativeWindowConnect(surface.get(), "connectToSurface(reconnect)");
         }
 
         if (err != OK) {
-            ALOGE("native_window_api_connect returned an error: %s (%d)", strerror(-err), err);
+            ALOGE("nativeWindowConnect returned an error: %s (%d)", strerror(-err), err);
         }
     }
     // do not return ALREADY_EXISTS unless surfaces are the same
@@ -2885,9 +2946,9 @@
     if (mSurface != NULL) {
         // Resetting generation is not technically needed, but there is no need to keep it either
         mSurface->setGenerationNumber(0);
-        err = native_window_api_disconnect(mSurface.get(), NATIVE_WINDOW_API_MEDIA);
+        err = nativeWindowDisconnect(mSurface.get(), "disconnectFromSurface");
         if (err != OK) {
-            ALOGW("native_window_api_disconnect returned an error: %s (%d)", strerror(-err), err);
+            ALOGW("nativeWindowDisconnect returned an error: %s (%d)", strerror(-err), err);
         }
         // assume disconnected even on error
         mSurface.clear();
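
The connectToSurface() comment above relies on composing a generation number from the pid and a connect counter; an illustrative sketch of that arithmetic (the exact bit split used by MediaCodec is outside this hunk, so treat it as an assumption):

    // Assumed sketch: low 10 bits count connects in this process, the pid sits above
    // them, so the value changes on every connect and cannot collide across processes
    // (Linux pids fit in 22 bits).
    static uint32_t sConnectCounter = 0;
    uint32_t generation = (getpid() << 10) | (++sConnectCounter & ((1 << 10) - 1));
    surface->setGenerationNumber(generation);
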
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 5981b35..059a730 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -363,8 +363,20 @@
     return postSynchronouslyAndReturnError(msg);
 }
 
-status_t MediaCodecSource::pause() {
-    (new AMessage(kWhatPause, mReflector))->post();
+
+status_t MediaCodecSource::setStopStimeUs(int64_t stopTimeUs) {
+    if (!(mFlags & FLAG_USE_SURFACE_INPUT)) {
+        return OK;
+    }
+    sp<AMessage> msg = new AMessage(kWhatSetStopTimeOffset, mReflector);
+    msg->setInt64("stop-time-us", stopTimeUs);
+    return postSynchronouslyAndReturnError(msg);
+}
+
+status_t MediaCodecSource::pause(MetaData* params) {
+    sp<AMessage> msg = new AMessage(kWhatPause, mReflector);
+    msg->setObject("meta", params);
+    msg->post();
     return OK;
 }
 
@@ -624,22 +636,13 @@
     }
 }
 
-void MediaCodecSource::suspend() {
-    CHECK(mFlags & FLAG_USE_SURFACE_INPUT);
-    if (mEncoder != NULL) {
-        sp<AMessage> params = new AMessage;
-        params->setInt32("drop-input-frames", true);
-        mEncoder->setParameters(params);
-    }
-}
-
-void MediaCodecSource::resume(int64_t skipFramesBeforeUs) {
+void MediaCodecSource::resume(int64_t resumeStartTimeUs) {
     CHECK(mFlags & FLAG_USE_SURFACE_INPUT);
     if (mEncoder != NULL) {
         sp<AMessage> params = new AMessage;
         params->setInt32("drop-input-frames", false);
-        if (skipFramesBeforeUs > 0) {
-            params->setInt64("skip-frames-before", skipFramesBeforeUs);
+        if (resumeStartTimeUs > 0) {
+            params->setInt64("drop-start-time-us", resumeStartTimeUs);
         }
         mEncoder->setParameters(params);
     }
@@ -661,7 +664,7 @@
                 mFirstSampleSystemTimeUs = systemTime() / 1000;
                 if (mPausePending) {
                     mPausePending = false;
-                    onPause();
+                    onPause(mFirstSampleSystemTimeUs);
                     mbuf->release();
                     mAvailEncoderInputIndices.push_back(bufferIndex);
                     return OK;
@@ -728,6 +731,10 @@
         ALOGE("Failed to start while we're stopping");
         return INVALID_OPERATION;
     }
+    int64_t startTimeUs;
+    if (params == NULL || !params->findInt64(kKeyTime, &startTimeUs)) {
+        startTimeUs = -1ll;
+    }
 
     if (mStarted) {
         ALOGI("MediaCodecSource (%s) resuming", mIsVideo ? "video" : "audio");
@@ -739,7 +746,7 @@
             mEncoder->requestIDRFrame();
         }
         if (mFlags & FLAG_USE_SURFACE_INPUT) {
-            resume();
+            resume(startTimeUs);
         } else {
             CHECK(mPuller != NULL);
             mPuller->resume();
@@ -752,11 +759,14 @@
     status_t err = OK;
 
     if (mFlags & FLAG_USE_SURFACE_INPUT) {
-        int64_t startTimeUs;
-        if (!params || !params->findInt64(kKeyTime, &startTimeUs)) {
-            startTimeUs = -1ll;
+        if (mEncoder != NULL) {
+            sp<AMessage> params = new AMessage;
+            params->setInt32("drop-input-frames", false);
+            if (startTimeUs >= 0) {
+                params->setInt64("skip-frames-before", startTimeUs);
+            }
+            mEncoder->setParameters(params);
         }
-        resume(startTimeUs);
     } else {
         CHECK(mPuller != NULL);
         sp<MetaData> meta = params;
@@ -781,9 +791,12 @@
     return OK;
 }
 
-void MediaCodecSource::onPause() {
-    if (mFlags & FLAG_USE_SURFACE_INPUT) {
-        suspend();
+void MediaCodecSource::onPause(int64_t pauseStartTimeUs) {
+    if ((mFlags & FLAG_USE_SURFACE_INPUT) && (mEncoder != NULL)) {
+        sp<AMessage> params = new AMessage;
+        params->setInt32("drop-input-frames", true);
+        params->setInt64("drop-start-time-us", pauseStartTimeUs);
+        mEncoder->setParameters(params);
     } else {
         CHECK(mPuller != NULL);
         mPuller->pause();
@@ -871,7 +884,7 @@
                             mFirstSampleSystemTimeUs = systemTime() / 1000;
                             if (mPausePending) {
                                 mPausePending = false;
-                                onPause();
+                                onPause(mFirstSampleSystemTimeUs);
                                 mbuf->release();
                                 break;
                             }
@@ -1000,6 +1013,7 @@
             ALOGV("source (%s) stopped", mIsVideo ? "video" : "audio");
         }
         signalEOS();
+        break;
     }
 
     case kWhatPause:
@@ -1007,7 +1021,14 @@
         if (mFirstSampleSystemTimeUs < 0) {
             mPausePending = true;
         } else {
-            onPause();
+            sp<RefBase> obj;
+            CHECK(msg->findObject("meta", &obj));
+            MetaData *params = static_cast<MetaData *>(obj.get());
+            int64_t pauseStartTimeUs = -1;
+            if (params == NULL || !params->findInt64(kKeyTime, &pauseStartTimeUs)) {
+                pauseStartTimeUs = -1ll;
+            }
+            onPause(pauseStartTimeUs);
         }
         break;
     }
@@ -1030,6 +1051,26 @@
         response->postReply(replyID);
         break;
     }
+    case kWhatSetStopTimeOffset:
+    {
+        sp<AReplyToken> replyID;
+        CHECK(msg->senderAwaitsResponse(&replyID));
+        status_t err = OK;
+        int64_t stopTimeUs;
+        CHECK(msg->findInt64("stop-time-us", &stopTimeUs));
+
+        // Propagate the timestamp offset to GraphicBufferSource.
+        if (mFlags & FLAG_USE_SURFACE_INPUT) {
+            sp<AMessage> params = new AMessage;
+            params->setInt64("stop-time-us", stopTimeUs);
+            err = mEncoder->setParameters(params);
+        }
+
+        sp<AMessage> response = new AMessage;
+        response->setInt32("err", err);
+        response->postReply(replyID);
+        break;
+    }
     case kWhatGetFirstSampleSystemTimeUs:
     {
         sp<AReplyToken> replyID;
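
pause() now accepts optional MetaData so surface-input recording can pause and stop at exact timestamps; a hedged client-side sketch (the MediaRecorder wiring is not part of this hunk, and the systemTime()-based timestamps are assumptions):

    sp<MetaData> pauseMeta = new MetaData;
    int64_t nowUs = systemTime() / 1000;      // microseconds, as elsewhere in this file
    pauseMeta->setInt64(kKeyTime, nowUs);
    source->pause(pauseMeta.get());
    // ...
    source->setStopStimeUs(nowUs + 500000);   // stop 500 ms later (name as declared above)
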
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index b4e694c..b77ee1d 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -22,6 +22,7 @@
 #endif
 
 #include <utils/Log.h>
+#include <cutils/properties.h>
 
 #include <binder/IServiceManager.h>
 #include <media/IMediaCodecService.h>
@@ -36,7 +37,22 @@
 OMXClient::OMXClient() {
 }
 
-status_t OMXClient::connect() {
+status_t OMXClient::connect(bool* trebleFlag) {
+    int32_t trebleOmx = property_get_int32("persist.media.treble_omx", -1);
+    if ((trebleOmx == 1) || ((trebleOmx == -1) &&
+            property_get_bool("persist.hal.binderization", 0))) {
+        if (trebleFlag != nullptr) {
+            *trebleFlag = true;
+        }
+        return connectTreble();
+    }
+    if (trebleFlag != nullptr) {
+        *trebleFlag = false;
+    }
+    return connectLegacy();
+}
+
+status_t OMXClient::connectLegacy() {
     sp<IServiceManager> sm = defaultServiceManager();
     sp<IBinder> codecbinder = sm->getService(String16("media.codec"));
     sp<IMediaCodecService> codecservice = interface_cast<IMediaCodecService>(codecbinder);
@@ -67,6 +83,7 @@
         return NO_INIT;
     }
     mOMX = new utils::LWOmx(tOmx);
+    ALOGI("Treble IOmx obtained");
     return OK;
 }
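
connect() now selects the Treble or legacy OMX path from system properties and reports the choice to the caller; a minimal consumer sketch mirroring the ACodec usage earlier in this change:

    OMXClient client;
    bool trebleFlag = false;
    if (client.connect(&trebleFlag) != OK) {
        return NO_INIT;
    }
    sp<IOMX> omx = client.interface();
    ALOGV("using %s IOmx", trebleFlag ? "Treble" : "legacy");
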
 
diff --git a/media/libstagefright/SurfaceUtils.cpp b/media/libstagefright/SurfaceUtils.cpp
index 568837a..82e959e 100644
--- a/media/libstagefright/SurfaceUtils.cpp
+++ b/media/libstagefright/SurfaceUtils.cpp
@@ -31,15 +31,15 @@
 
     // In some cases we need to reconnect so that we can dequeue all buffers
     if (reconnect) {
-        err = native_window_api_disconnect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
+        err = nativeWindowDisconnect(nativeWindow, "setNativeWindowSizeFormatAndUsage");
         if (err != NO_ERROR) {
-            ALOGE("native_window_api_disconnect failed: %s (%d)", strerror(-err), -err);
+            ALOGE("nativeWindowDisconnect failed: %s (%d)", strerror(-err), -err);
             return err;
         }
 
-        err = native_window_api_connect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
+        err = nativeWindowConnect(nativeWindow, "setNativeWindowSizeFormatAndUsage");
         if (err != NO_ERROR) {
-            ALOGE("native_window_api_connect failed: %s (%d)", strerror(-err), -err);
+            ALOGE("nativeWindowConnect failed: %s (%d)", strerror(-err), -err);
             return err;
         }
     }
@@ -127,7 +127,7 @@
     // We need to reconnect to the ANativeWindow as a CPU client to ensure that
     // no frames get dropped by SurfaceFlinger assuming that these are video
     // frames.
-    err = native_window_api_disconnect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
+    err = nativeWindowDisconnect(nativeWindow, "pushBlankBuffersToNativeWindow");
     if (err != NO_ERROR) {
         ALOGE("error pushing blank frames: api_disconnect failed: %s (%d)", strerror(-err), -err);
         return err;
@@ -136,7 +136,7 @@
     err = native_window_api_connect(nativeWindow, NATIVE_WINDOW_API_CPU);
     if (err != NO_ERROR) {
         ALOGE("error pushing blank frames: api_connect failed: %s (%d)", strerror(-err), -err);
-        (void)native_window_api_connect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
+        (void)nativeWindowConnect(nativeWindow, "pushBlankBuffersToNativeWindow(err)");
         return err;
     }
 
@@ -219,7 +219,7 @@
         }
     }
 
-    err2 = native_window_api_connect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
+    err2 = nativeWindowConnect(nativeWindow, "pushBlankBuffersToNativeWindow(err2)");
     if (err2 != NO_ERROR) {
         ALOGE("error pushing blank frames: api_connect failed: %s (%d)", strerror(-err), -err);
         if (err == NO_ERROR) {
@@ -230,5 +230,22 @@
     return err;
 }
 
+status_t nativeWindowConnect(ANativeWindow *surface, const char *reason) {
+    ALOGD("connecting to surface %p, reason %s", surface, reason);
+
+    status_t err = native_window_api_connect(surface, NATIVE_WINDOW_API_MEDIA);
+    ALOGE_IF(err != OK, "Failed to connect to surface %p, err %d", surface, err);
+
+    return err;
+}
+
+status_t nativeWindowDisconnect(ANativeWindow *surface, const char *reason) {
+    ALOGD("disconnecting from surface %p, reason %s", surface, reason);
+
+    status_t err = native_window_api_disconnect(surface, NATIVE_WINDOW_API_MEDIA);
+    ALOGE_IF(err != OK, "Failed to disconnect from surface %p, err %d", surface, err);
+
+    return err;
+}
 }  // namespace android
 
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder.cpp b/media/libstagefright/codecs/aacenc/SoftAACEncoder.cpp
index ab0a228..96bbb85 100644
--- a/media/libstagefright/codecs/aacenc/SoftAACEncoder.cpp
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder.cpp
@@ -62,8 +62,7 @@
 }
 
 SoftAACEncoder::~SoftAACEncoder() {
-    delete[] mInputFrame;
-    mInputFrame = NULL;
+    onReset();
 
     if (mEncoderHandle) {
         CHECK_EQ(VO_ERR_NONE, mApiHandle->Uninit(mEncoderHandle));
@@ -579,6 +578,17 @@
     }
 }
 
+void SoftAACEncoder::onReset() {
+    delete[] mInputFrame;
+    mInputFrame = NULL;
+    mInputSize = 0;
+
+    mSentCodecSpecificData = false;
+    mInputTimeUs = -1ll;
+    mSawInputEOS = false;
+    mSignalledError = false;
+}
+
 }  // namespace android
 
 android::SoftOMXComponent *createSoftOMXComponent(
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder.h b/media/libstagefright/codecs/aacenc/SoftAACEncoder.h
index d148eb7..981cbbb 100644
--- a/media/libstagefright/codecs/aacenc/SoftAACEncoder.h
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder.h
@@ -43,6 +43,8 @@
 
     virtual void onQueueFilled(OMX_U32 portIndex);
 
+    virtual void onReset();
+
 private:
     enum {
         kNumBuffers             = 4,
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
index 63215ec..5f516cb 100644
--- a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
@@ -72,8 +72,7 @@
 SoftAACEncoder2::~SoftAACEncoder2() {
     aacEncClose(&mAACEncoder);
 
-    delete[] mInputFrame;
-    mInputFrame = NULL;
+    onReset();
 }
 
 void SoftAACEncoder2::initPorts() {
@@ -703,6 +702,17 @@
     }
 }
 
+void SoftAACEncoder2::onReset() {
+    delete[] mInputFrame;
+    mInputFrame = NULL;
+    mInputSize = 0;
+
+    mSentCodecSpecificData = false;
+    mInputTimeUs = -1ll;
+    mSawInputEOS = false;
+    mSignalledError = false;
+}
+
 }  // namespace android
 
 android::SoftOMXComponent *createSoftOMXComponent(
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h
index bce9c24..f1b81e1 100644
--- a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h
@@ -42,6 +42,8 @@
 
     virtual void onQueueFilled(OMX_U32 portIndex);
 
+    virtual void onReset();
+
 private:
     enum {
         kNumBuffers             = 4,
diff --git a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
index f496b0c..d5a26d3 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
@@ -99,6 +99,7 @@
 
 SoftMPEG4Encoder::~SoftMPEG4Encoder() {
     ALOGV("Destruct SoftMPEG4Encoder");
+    onReset();
     releaseEncoder();
     List<BufferInfo *> &outQueue = getPortQueue(1);
     List<BufferInfo *> &inQueue = getPortQueue(0);
@@ -201,22 +202,15 @@
 }
 
 OMX_ERRORTYPE SoftMPEG4Encoder::releaseEncoder() {
-    if (!mStarted) {
-        return OMX_ErrorNone;
+    if (mEncParams) {
+        delete mEncParams;
+        mEncParams = NULL;
     }
 
-    PVCleanUpVideoEncoder(mHandle);
-
-    free(mInputFrameData);
-    mInputFrameData = NULL;
-
-    delete mEncParams;
-    mEncParams = NULL;
-
-    delete mHandle;
-    mHandle = NULL;
-
-    mStarted = false;
+    if (mHandle) {
+        delete mHandle;
+        mHandle = NULL;
+    }
 
     return OMX_ErrorNone;
 }
@@ -514,6 +508,19 @@
     }
 }
 
+void SoftMPEG4Encoder::onReset() {
+    if (!mStarted) {
+        return;
+    }
+
+    PVCleanUpVideoEncoder(mHandle);
+
+    free(mInputFrameData);
+    mInputFrameData = NULL;
+
+    mStarted = false;
+}
+
 }  // namespace android
 
 android::SoftOMXComponent *createSoftOMXComponent(
diff --git a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h
index bb6ea92..ae8cb6f 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h
+++ b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h
@@ -48,6 +48,8 @@
 
     virtual void onQueueFilled(OMX_U32 portIndex);
 
+    virtual void onReset();
+
 protected:
     virtual ~SoftMPEG4Encoder();
 
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index 5609032..8d69bd5 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -712,7 +712,9 @@
         if (inputBufferHeader->nTimeStamp > mLastTimestamp) {
             frameDuration = (uint32_t)(inputBufferHeader->nTimeStamp - mLastTimestamp);
         } else {
-            frameDuration = (uint32_t)(((uint64_t)1000000 << 16) / mFramerate);
+            // Use default of 30 fps in case of 0 frame rate.
+            uint32_t framerate = mFramerate ?: (30 << 16);
+            frameDuration = (uint32_t)(((uint64_t)1000000 << 16) / framerate);
         }
         mLastTimestamp = inputBufferHeader->nTimeStamp;
         codec_return = vpx_codec_encode(
@@ -766,6 +768,11 @@
     }
 }
 
+void SoftVPXEncoder::onReset() {
+    releaseEncoder();
+    mLastTimestamp = 0x7FFFFFFFFFFFFFFFLL;
+}
+
 }  // namespace android
 
 android::SoftOMXComponent *createSoftOMXComponent(
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
index 86e71da..86dfad7 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
@@ -93,6 +93,8 @@
     // encoding of the frame
     virtual void onQueueFilled(OMX_U32 portIndex);
 
+    virtual void onReset();
+
     // Initializes vpx encoder with available settings.
     status_t initEncoder();
 
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index a0ddc28..2f457ac 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -76,11 +76,13 @@
     mInitCheck(UNKNOWN_ERROR),
     mExecuting(false),
     mSuspended(false),
+    mStopTimeUs(-1),
     mLastDataSpace(HAL_DATASPACE_UNKNOWN),
     mNumFramesAvailable(0),
     mNumBufferAcquired(0),
     mEndOfStream(false),
     mEndOfStreamSent(false),
+    mLastActionTimeUs(-1ll),
     mPrevOriginalTimeUs(-1ll),
     mSkipFramesBeforeNs(-1ll),
     mRepeatAfterUs(-1ll),
@@ -171,7 +173,7 @@
 
     // If EOS has already been signaled, and there are no more frames to
     // submit, try to send EOS now as well.
-    if (mEndOfStream && mNumFramesAvailable == 0) {
+    if (mStopTimeUs == -1 && mEndOfStream && mNumFramesAvailable == 0) {
         submitEndOfInputStream_l();
     }
 
@@ -348,8 +350,8 @@
         ALOGV("buffer freed, %zu frames avail (eos=%d)",
                 mNumFramesAvailable, mEndOfStream);
         fillCodecBuffer_l();
-    } else if (mEndOfStream) {
-        // No frames available, but EOS is pending, so use this buffer to
+    } else if (mEndOfStream && mStopTimeUs == -1) {
+        // No frames available, but EOS is pending and no stop time, so use this buffer to
         // send that.
         ALOGV("buffer freed, EOS pending");
         submitEndOfInputStream_l();
@@ -387,7 +389,7 @@
 bool GraphicBufferSource::fillCodecBuffer_l() {
     CHECK(mExecuting && mNumFramesAvailable > 0);
 
-    if (mSuspended) {
+    if (mSuspended && mActionQueue.empty()) {
         return false;
     }
 
@@ -408,8 +410,85 @@
         return false;
     }
 
+    int64_t itemTimeUs = item.mTimestamp / 1000;
+
     mNumFramesAvailable--;
 
+    // Process any ActionItems in the queue. If the buffer's timestamp is smaller
+    // than the first action's timestamp, no action needs to be performed yet.
+    // If the buffer's timestamp is larger than or equal to the last action's
+    // timestamp, only the last action needs to be performed, since it overrides
+    // all of the actions before it. Otherwise, traverse the queue to find the
+    // newest action whose timestamp is smaller than or equal to the buffer's
+    // timestamp. For example, given an action queue of
+    // [pause, 1us], [resume, 2us], [pause, 3us], [resume, 4us], [pause, 5us], a
+    // buffer with timestamp 3.5us only needs [pause, 3us] to be handled;
+    // [pause, 1us] and [resume, 2us] are discarded.
+    bool dropped = false;
+    bool done = false;
+    if (!mActionQueue.empty()) {
+        // First scan to check if bufferTimestamp is smaller than first action's timestamp.
+        ActionItem nextAction = *(mActionQueue.begin());
+        if (itemTimeUs < nextAction.mActionTimeUs) {
+            ALOGV("No action. buffer timestamp %lld us < action timestamp: %lld us",
+                (long long)itemTimeUs, (long long)nextAction.mActionTimeUs);
+            // All the actions are in the future, so no action needs to be
+            // performed now. Release the buffer if we are in the suspended
+            // state, or process the buffer if we are not.
+            dropped = mSuspended;
+            done = true;
+        }
+
+        if (!done) {
+            List<ActionItem>::iterator it = mActionQueue.begin();
+            while (it != mActionQueue.end()
+                    && it->mActionTimeUs <= itemTimeUs) {
+                // This action applies to the buffer: remember it and remove it
+                // (and any earlier, superseded action) from the queue. Actions
+                // with timestamps in the future stay queued.
+                nextAction = *it;
+                it = mActionQueue.erase(it);
+            }
+
+            CHECK(itemTimeUs >= nextAction.mActionTimeUs);
+            switch (nextAction.mAction) {
+                case ActionItem::PAUSE:
+                {
+                    mSuspended = true;
+                    dropped = true;
+                    ALOGV("RUNNING/PAUSE -> PAUSE at buffer %lld us  PAUSE Time: %lld us",
+                            (long long)itemTimeUs, (long long)nextAction.mActionTimeUs);
+                    break;
+                }
+                case ActionItem::RESUME:
+                {
+                    mSuspended = false;
+                    ALOGV("PAUSE/RUNNING -> RUNNING at buffer %lld us  RESUME Time: %lld us",
+                            (long long)itemTimeUs, (long long)nextAction.mActionTimeUs);
+                    break;
+                }
+                case ActionItem::STOP:
+                {
+                    ALOGV("RUNNING/PAUSE -> STOP at buffer %lld us  STOP Time: %lld us",
+                            (long long)itemTimeUs, (long long)nextAction.mActionTimeUs);
+                    dropped = true;
+                    // Clear the whole ActionQueue as recording is done
+                    mActionQueue.clear();
+                    submitEndOfInputStream_l();
+                    break;
+                }
+                default:
+                    ALOGE("Unknown action type");
+                    return false;
+            }
+        }
+    }
+
+    if (dropped) {
+        releaseBuffer(item.mSlot, item.mFrameNumber, item.mFence);
+        return true;
+    }
+
     if (item.mDataSpace != mLastDataSpace) {
         onDataSpaceChanged_l(
                 item.mDataSpace, (android_pixel_format)mBufferSlot[item.mSlot]->getPixelFormat());
@@ -419,7 +498,6 @@
 
     // only submit sample if start time is unspecified, or sample
     // is queued after the specified start time
-    bool dropped = false;
     if (mSkipFramesBeforeNs < 0ll || item.mTimestamp >= mSkipFramesBeforeNs) {
         // if start time is set, offset time stamp by start time
         if (mSkipFramesBeforeNs > 0) {
@@ -719,12 +797,12 @@
     ALOGV("onFrameAvailable exec=%d avail=%zu",
             mExecuting, mNumFramesAvailable);
 
-    if (mOMXNode == NULL || mEndOfStream || mSuspended) {
-        if (mEndOfStream) {
+    if (mOMXNode == NULL || mEndOfStreamSent || (mSuspended && mActionQueue.empty())) {
+        if (mEndOfStreamSent) {
             // This should only be possible if a new buffer was queued after
             // EOS was signaled, i.e. the app is misbehaving.
 
-            ALOGW("onFrameAvailable: EOS is set, ignoring frame");
+            ALOGW("onFrameAvailable: EOS is sent, ignoring frame");
         } else {
             ALOGV("onFrameAvailable: suspended, ignoring frame");
         }
@@ -875,44 +953,74 @@
         mPrevCaptureUs = -1ll;
         mPrevFrameUs = -1ll;
         mInputBufferTimeOffsetUs = 0;
+        mStopTimeUs = -1;
+        mActionQueue.clear();
     }
 
     return Status::ok();
 }
 
-Status GraphicBufferSource::setSuspend(bool suspend) {
-    ALOGV("setSuspend=%d", suspend);
+Status GraphicBufferSource::setSuspend(bool suspend, int64_t suspendStartTimeUs) {
+    ALOGV("setSuspend=%d at time %lld us", suspend, (long long)suspendStartTimeUs);
 
     Mutex::Autolock autoLock(mMutex);
 
-    if (suspend) {
-        mSuspended = true;
-
-        while (mNumFramesAvailable > 0) {
-            BufferItem item;
-            status_t err = acquireBuffer(&item);
-
-            if (err != OK) {
-                ALOGE("setSuspend: acquireBuffer returned err=%d", err);
-                break;
-            }
-
-            --mNumFramesAvailable;
-
-            releaseBuffer(item.mSlot, item.mFrameNumber, item.mFence);
-        }
-        return Status::ok();
+    if (mStopTimeUs != -1) {
+        ALOGE("setSuspend failed as STOP action is pending");
+        return Status::fromServiceSpecificError(INVALID_OPERATION);
     }
 
-    mSuspended = false;
+    // Push the action to the queue.
+    if (suspendStartTimeUs != -1) {
+        // suspendStartTimeUs must be less than or equal to the current system time.
+        int64_t currentSystemTimeUs = systemTime() / 1000;
+        if (suspendStartTimeUs > currentSystemTimeUs) {
+            ALOGE("setSuspend failed. %lld is larger than current system time %lld us",
+                    (long long)suspendStartTimeUs, (long long)currentSystemTimeUs);
+            return Status::fromServiceSpecificError(INVALID_OPERATION);
+        }
+        if (mLastActionTimeUs != -1 && suspendStartTimeUs < mLastActionTimeUs) {
+            ALOGE("setSuspend failed. %lld is smaller than last action time %lld us",
+                    (long long)suspendStartTimeUs, (long long)mLastActionTimeUs);
+            return Status::fromServiceSpecificError(INVALID_OPERATION);
+        }
+        mLastActionTimeUs = suspendStartTimeUs;
+        ActionItem action;
+        action.mAction = suspend ? ActionItem::PAUSE : ActionItem::RESUME;
+        action.mActionTimeUs = suspendStartTimeUs;
+        ALOGV("Push %s action into actionQueue", suspend ? "PAUSE" : "RESUME");
+        mActionQueue.push_back(action);
+    } else {
+        if (suspend) {
+            mSuspended = true;
 
-    if (mExecuting && mNumFramesAvailable == 0 && mRepeatBufferDeferred) {
-        if (repeatLatestBuffer_l()) {
-            ALOGV("suspend/deferred repeatLatestBuffer_l SUCCESS");
+            while (mNumFramesAvailable > 0) {
+                BufferItem item;
+                status_t err = acquireBuffer(&item);
 
-            mRepeatBufferDeferred = false;
+                if (err != OK) {
+                    ALOGE("setSuspend: acquireBuffer returned err=%d", err);
+                    break;
+                }
+
+                --mNumFramesAvailable;
+
+                releaseBuffer(item.mSlot, item.mFrameNumber, item.mFence);
+            }
+            return Status::ok();
         } else {
-            ALOGV("suspend/deferred repeatLatestBuffer_l FAILURE");
+
+            mSuspended = false;
+
+            if (mExecuting && mNumFramesAvailable == 0 && mRepeatBufferDeferred) {
+                if (repeatLatestBuffer_l()) {
+                    ALOGV("suspend/deferred repeatLatestBuffer_l SUCCESS");
+
+                    mRepeatBufferDeferred = false;
+                } else {
+                    ALOGV("suspend/deferred repeatLatestBuffer_l FAILURE");
+                }
+            }
         }
     }
     return Status::ok();
@@ -973,6 +1081,36 @@
     return Status::ok();
 }
 
+Status GraphicBufferSource::setStopTimeUs(int64_t stopTimeUs) {
+    ALOGV("setStopTimeUs: %lld us", (long long)stopTimeUs);
+    Mutex::Autolock autoLock(mMutex);
+
+    if (mStopTimeUs != -1) {
+        // Ignore if stop time has already been set
+        return Status::ok();
+    }
+
+    // stopTimeUs must be less than or equal to the current system time.
+    int64_t currentSystemTimeUs = systemTime() / 1000;
+    if (stopTimeUs > currentSystemTimeUs) {
+        ALOGE("setStopTimeUs failed. %lld is larger than current system time %lld us",
+            (long long)stopTimeUs, (long long)currentSystemTimeUs);
+        return Status::fromServiceSpecificError(INVALID_OPERATION);
+    }
+    if (mLastActionTimeUs != -1 && stopTimeUs < mLastActionTimeUs) {
+        ALOGE("setStopTimeUs failed. %lld is smaller than last action time %lld us",
+            (long long)stopTimeUs, (long long)mLastActionTimeUs);
+        return Status::fromServiceSpecificError(INVALID_OPERATION);
+    }
+    mLastActionTimeUs = stopTimeUs;
+    ActionItem action;
+    action.mAction = ActionItem::STOP;
+    action.mActionTimeUs = stopTimeUs;
+    mActionQueue.push_back(action);
+    mStopTimeUs = stopTimeUs;
+    return Status::ok();
+}
+
 Status GraphicBufferSource::setTimeLapseConfig(int64_t timePerFrameUs, int64_t timePerCaptureUs) {
     ALOGV("setTimeLapseConfig: timePerFrameUs=%lld, timePerCaptureUs=%lld",
             (long long)timePerFrameUs, (long long)timePerCaptureUs);
@@ -1013,15 +1151,15 @@
 
     // Set the end-of-stream flag.  If no frames are pending from the
     // BufferQueue, and a codec buffer is available, and we're executing,
-    // we initiate the EOS from here.  Otherwise, we'll let
-    // codecBufferEmptied() (or omxExecuting) do it.
+    // and there is no stop timestamp, we initiate the EOS from here.
+    // Otherwise, we'll let codecBufferEmptied() (or omxExecuting) do it.
     //
     // Note: if there are no pending frames and all codec buffers are
     // available, we *must* submit the EOS from here or we'll just
     // stall since no future events are expected.
     mEndOfStream = true;
 
-    if (mExecuting && mNumFramesAvailable == 0) {
+    if (mStopTimeUs == -1 && mExecuting && mNumFramesAvailable == 0) {
         submitEndOfInputStream_l();
     }
 
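
For context, a minimal client-side sketch of how these timestamp-based controls might be driven, assuming a proxy object exposing the setSuspend()/setStopTimeUs() calls shown above; the proxy type and the helper name are illustrative, not part of this change. Both calls reject timestamps later than the current SYSTEM_TIME_MONOTONIC reading, so the sketch always passes "now":

    #include <utils/StrongPointer.h>
    #include <utils/Timers.h>  // systemTime(), SYSTEM_TIME_MONOTONIC

    // Templated so the sketch does not depend on the exact proxy class; any
    // object with the setSuspend()/setStopTimeUs() signatures above would do.
    template <typename GraphicBufferSourceProxy>
    void pauseThenStop(const android::sp<GraphicBufferSourceProxy> &source) {
        int64_t nowUs = systemTime(SYSTEM_TIME_MONOTONIC) / 1000;

        // Queue a PAUSE action effective now; buffers with larger timestamps
        // are dropped until a later RESUME action takes effect.
        source->setSuspend(true /* suspend */, nowUs);

        // ... later, stop at the current time; buffers at or after stopUs are
        // dropped and end-of-stream is submitted to the codec.
        int64_t stopUs = systemTime(SYSTEM_TIME_MONOTONIC) / 1000;
        source->setStopTimeUs(stopUs);
    }
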
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index 153a035..475548e 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -101,14 +101,18 @@
     // data space.
     Status configure(const sp<IOMXNode>& omxNode, int32_t dataSpace) override;
 
-    // This is called after the last input frame has been submitted.  We
-    // need to submit an empty buffer with the EOS flag set.  If we don't
-    // have a codec buffer ready, we just set the mEndOfStream flag.
+    // This is called after the last input frame has been submitted or a buffer's
+    // timestamp is greater than or equal to stopTimeUs. We need to submit an empty
+    // buffer with the EOS flag set.  If we don't have a codec buffer ready,
+    // we just set the mEndOfStream flag.
     Status signalEndOfInputStream() override;
 
     // If suspend is true, all incoming buffers (including those currently
-    // in the BufferQueue) will be discarded until the suspension is lifted.
-    Status setSuspend(bool suspend) override;
+    // in the BufferQueue) with timestamp larger than timeUs will be discarded
+    // until the suspension is lifted. If suspend is false, all incoming buffers
+    // (including those currently in the BufferQueue) with timestamps larger than
+    // timeUs will be processed. timeUs uses the SYSTEM_TIME_MONOTONIC time base.
+    Status setSuspend(bool suspend, int64_t timeUs) override;
 
     // Specifies the interval after which we requeue the buffer previously
     // queued to the encoder. This is useful in the case of surface flinger
@@ -135,6 +139,10 @@
     // be dropped and not submitted to encoder
     Status setStartTimeUs(int64_t startTimeUs) override;
 
+    // Sets the stop time (in us), after which samples should be dropped and not
+    // submitted to the encoder. stopTimeUs uses the SYSTEM_TIME_MONOTONIC time base.
+    Status setStopTimeUs(int64_t stopTimeUs) override;
+
     // Sets the desired color aspects, e.g. to be used when producer does not specify a dataspace.
     Status setColorAspects(int32_t aspectsPacked) override;
 
@@ -229,6 +237,9 @@
 
     bool mSuspended;
 
+    // The time to stop sending buffers.
+    int64_t mStopTimeUs;
+
     // Last dataspace seen
     android_dataspace mLastDataSpace;
 
@@ -259,6 +270,25 @@
     // Tracks codec buffers.
     Vector<CodecBuffer> mCodecBuffers;
 
+    struct ActionItem {
+        typedef enum {
+            PAUSE,
+            RESUME,
+            STOP
+        } ActionType;
+        ActionType mAction;
+        int64_t mActionTimeUs;
+    };
+
+    // Maintains the last action timestamp to ensure that all action timestamps
+    // are monotonically increasing.
+    int64_t mLastActionTimeUs;
+
+    // An action queue that queues up all the actions sent to GraphicBufferSource.
+    // A STOP action should only appear at the end of the list, as all the actions
+    // after a STOP action will be discarded. mActionQueue is protected by mMutex.
+    List<ActionItem> mActionQueue;
+
     ////
     friend struct AHandlerReflector<GraphicBufferSource>;
 
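
To make the action-selection rule documented above concrete, here is a standalone sketch of picking the newest applicable action for a buffer and discarding the ones it supersedes. It uses std::list and std::optional purely for illustration rather than android::List:

    #include <cstdint>
    #include <list>
    #include <optional>

    struct ActionItem {
        enum ActionType { PAUSE, RESUME, STOP } mAction;
        int64_t mActionTimeUs;
    };

    std::optional<ActionItem> takeApplicableAction(std::list<ActionItem> &queue,
                                                   int64_t bufferTimeUs) {
        std::optional<ActionItem> applicable;
        while (!queue.empty() && queue.front().mActionTimeUs <= bufferTimeUs) {
            applicable = queue.front();  // newest action not after the buffer so far
            queue.pop_front();           // earlier actions are superseded
        }
        return applicable;               // empty if every queued action is in the future
    }

With the queue [pause, 1us], [resume, 2us], [pause, 3us], [resume, 4us] and bufferTimeUs = 3, this returns the pause at 3us and leaves the resume at 4us queued.
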
diff --git a/media/libstagefright/omx/hal/1.0/impl/Conversion.h b/media/libstagefright/omx/hal/1.0/impl/Conversion.h
index 3433bb4..d164faa 100644
--- a/media/libstagefright/omx/hal/1.0/impl/Conversion.h
+++ b/media/libstagefright/omx/hal/1.0/impl/Conversion.h
@@ -560,6 +560,76 @@
 }
 
 /**
+ * \brief Wrap `GraphicBuffer` in `AnwBuffer`.
+ *
+ * \param[out] t The wrapper of type `AnwBuffer`.
+ * \param[in] l The source `GraphicBuffer`.
+ */
+// wrap: GraphicBuffer -> AnwBuffer
+inline void wrapAs(AnwBuffer* t, GraphicBuffer const& l) {
+    t->attr.width = l.getWidth();
+    t->attr.height = l.getHeight();
+    t->attr.stride = l.getStride();
+    t->attr.format = static_cast<PixelFormat>(l.getPixelFormat());
+    t->attr.layerCount = l.getLayerCount();
+    t->attr.usage = l.getUsage();
+    t->attr.id = l.getId();
+    t->attr.generationNumber = l.getGenerationNumber();
+    t->nativeHandle = hidl_handle(l.handle);
+}
+
+/**
+ * \brief Convert `AnwBuffer` to `GraphicBuffer`.
+ *
+ * \param[out] l The destination `GraphicBuffer`.
+ * \param[in] t The source `AnwBuffer`.
+ *
+ * This function will duplicate all file descriptors in \p t.
+ */
+// convert: AnwBuffer -> GraphicBuffer
+// Ref: frameworks/native/libs/ui/GraphicBuffer.cpp: GraphicBuffer::flatten
+inline bool convertTo(GraphicBuffer* l, AnwBuffer const& t) {
+    native_handle_t* handle = t.nativeHandle == nullptr ?
+            nullptr : native_handle_clone(t.nativeHandle);
+
+    size_t const numInts = 12 + (handle ? handle->numInts : 0);
+    int32_t* ints = new int32_t[numInts];
+
+    size_t numFds = static_cast<size_t>(handle ? handle->numFds : 0);
+    int* fds = new int[numFds];
+
+    ints[0] = 'GBFR';
+    ints[1] = static_cast<int32_t>(t.attr.width);
+    ints[2] = static_cast<int32_t>(t.attr.height);
+    ints[3] = static_cast<int32_t>(t.attr.stride);
+    ints[4] = static_cast<int32_t>(t.attr.format);
+    ints[5] = static_cast<int32_t>(t.attr.layerCount);
+    ints[6] = static_cast<int32_t>(t.attr.usage);
+    ints[7] = static_cast<int32_t>(t.attr.id >> 32);
+    ints[8] = static_cast<int32_t>(t.attr.id & 0xFFFFFFFF);
+    ints[9] = static_cast<int32_t>(t.attr.generationNumber);
+    ints[10] = 0;
+    ints[11] = 0;
+    if (handle) {
+        ints[10] = static_cast<int32_t>(handle->numFds);
+        ints[11] = static_cast<int32_t>(handle->numInts);
+        int* intsStart = handle->data + handle->numFds;
+        std::copy(handle->data, intsStart, fds);
+        std::copy(intsStart, intsStart + handle->numInts, &ints[12]);
+    }
+
+    void const* constBuffer = static_cast<void const*>(ints);
+    size_t size = numInts * sizeof(int32_t);
+    int const* constFds = static_cast<int const*>(fds);
+    status_t status = l->unflatten(constBuffer, size, constFds, numFds);
+
+    delete [] fds;
+    delete [] ints;
+    native_handle_delete(handle);
+    return status == NO_ERROR;
+}
+
+/**
  * \brief Wrap `OMXBuffer` in `CodecBuffer`.
  *
  * \param[out] t The wrapper of type `CodecBuffer`.
@@ -568,8 +638,8 @@
  */
 // wrap: OMXBuffer -> CodecBuffer
 inline bool wrapAs(CodecBuffer* t, OMXBuffer const& l) {
-    t->nativeHandle = hidl_handle();
     t->sharedMemory = hidl_memory();
+    t->nativeHandle = hidl_handle();
     switch (l.mBufferType) {
         case OMXBuffer::kBufferTypeInvalid: {
             t->type = CodecBuffer::Type::INVALID;
@@ -599,7 +669,6 @@
                 t->attr.anwBuffer.format = static_cast<PixelFormat>(1);
                 t->attr.anwBuffer.layerCount = 0;
                 t->attr.anwBuffer.usage = 0;
-                t->nativeHandle = hidl_handle();
                 return true;
             }
             t->attr.anwBuffer.width = l.mGraphicBuffer->getWidth();
@@ -609,12 +678,12 @@
                     l.mGraphicBuffer->getPixelFormat());
             t->attr.anwBuffer.layerCount = l.mGraphicBuffer->getLayerCount();
             t->attr.anwBuffer.usage = l.mGraphicBuffer->getUsage();
-            t->nativeHandle = hidl_handle(l.mGraphicBuffer->handle);
+            t->nativeHandle = l.mGraphicBuffer->handle;
             return true;
         }
         case OMXBuffer::kBufferTypeNativeHandle: {
             t->type = CodecBuffer::Type::NATIVE_HANDLE;
-            t->nativeHandle = hidl_handle(l.mNativeHandle->handle());
+            t->nativeHandle = l.mNativeHandle->handle();
             return true;
         }
     }
@@ -650,16 +719,14 @@
                 *l = OMXBuffer(sp<GraphicBuffer>(nullptr));
                 return true;
             }
-            *l = OMXBuffer(sp<GraphicBuffer>(new GraphicBuffer(
-                    t.attr.anwBuffer.width,
-                    t.attr.anwBuffer.height,
-                    static_cast<::android::PixelFormat>(
-                            t.attr.anwBuffer.format),
-                    t.attr.anwBuffer.layerCount,
-                    t.attr.anwBuffer.usage,
-                    t.attr.anwBuffer.stride,
-                    native_handle_clone(t.nativeHandle),
-                    true)));
+            AnwBuffer anwBuffer;
+            anwBuffer.nativeHandle = t.nativeHandle;
+            anwBuffer.attr = t.attr.anwBuffer;
+            sp<GraphicBuffer> graphicBuffer = new GraphicBuffer();
+            if (!convertTo(graphicBuffer.get(), anwBuffer)) {
+                return false;
+            }
+            *l = OMXBuffer(graphicBuffer);
             return true;
         }
         case CodecBuffer::Type::NATIVE_HANDLE: {
@@ -834,76 +901,6 @@
 }
 
 /**
- * \brief Wrap `GraphicBuffer` in `AnwBuffer`.
- *
- * \param[out] t The wrapper of type `AnwBuffer`.
- * \param[in] l The source `GraphicBuffer`.
- */
-// wrap: GraphicBuffer -> AnwBuffer
-inline void wrapAs(AnwBuffer* t, GraphicBuffer const& l) {
-    t->attr.width = l.getWidth();
-    t->attr.height = l.getHeight();
-    t->attr.stride = l.getStride();
-    t->attr.format = static_cast<PixelFormat>(l.getPixelFormat());
-    t->attr.layerCount = l.getLayerCount();
-    t->attr.usage = l.getUsage();
-    t->attr.id = l.getId();
-    t->attr.generationNumber = l.getGenerationNumber();
-    t->nativeHandle = hidl_handle(l.handle);
-}
-
-/**
- * \brief Convert `AnwBuffer` to `GraphicBuffer`.
- *
- * \param[out] l The destination `GraphicBuffer`.
- * \param[in] t The source `AnwBuffer`.
- *
- * This function will duplicate all file descriptors in \p t.
- */
-// convert: AnwBuffer -> GraphicBuffer
-// Ref: frameworks/native/libs/ui/GraphicBuffer.cpp: GraphicBuffer::flatten
-inline bool convertTo(GraphicBuffer* l, AnwBuffer const& t) {
-    native_handle_t* handle = t.nativeHandle == nullptr ?
-            nullptr : native_handle_clone(t.nativeHandle);
-
-    size_t const numInts = 12 + (handle ? handle->numInts : 0);
-    int32_t* ints = new int32_t[numInts];
-
-    size_t numFds = static_cast<size_t>(handle ? handle->numFds : 0);
-    int* fds = new int[numFds];
-
-    ints[0] = 'GBFR';
-    ints[1] = static_cast<int32_t>(t.attr.width);
-    ints[2] = static_cast<int32_t>(t.attr.height);
-    ints[3] = static_cast<int32_t>(t.attr.stride);
-    ints[4] = static_cast<int32_t>(t.attr.format);
-    ints[5] = static_cast<int32_t>(t.attr.layerCount);
-    ints[6] = static_cast<int32_t>(t.attr.usage);
-    ints[7] = static_cast<int32_t>(t.attr.id >> 32);
-    ints[8] = static_cast<int32_t>(t.attr.id & 0xFFFFFFFF);
-    ints[9] = static_cast<int32_t>(t.attr.generationNumber);
-    ints[10] = 0;
-    ints[11] = 0;
-    if (handle) {
-        ints[10] = static_cast<int32_t>(handle->numFds);
-        ints[11] = static_cast<int32_t>(handle->numInts);
-        int* intsStart = handle->data + handle->numFds;
-        std::copy(handle->data, intsStart, fds);
-        std::copy(intsStart, intsStart + handle->numInts, &ints[12]);
-    }
-
-    void const* constBuffer = static_cast<void const*>(ints);
-    size_t size = numInts * sizeof(int32_t);
-    int const* constFds = static_cast<int const*>(fds);
-    status_t status = l->unflatten(constBuffer, size, constFds, numFds);
-
-    delete [] fds;
-    delete [] ints;
-    native_handle_delete(handle);
-    return status == NO_ERROR;
-}
-
-/**
  * Conversion functions for types outside media
  * ============================================
  *
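
As an aside on the convertTo() helper introduced above: the ints[] array it builds mirrors the flattened form consumed by GraphicBuffer::unflatten(). A purely illustrative struct view of that 12-word header (not a real AOSP type; field order mirrors the ints[] assignments in convertTo()):

    #include <cstdint>

    struct FlattenedGraphicBufferHeader {
        uint32_t magic;            // 'GBFR'
        int32_t  width;
        int32_t  height;
        int32_t  stride;
        int32_t  format;
        int32_t  layerCount;
        int32_t  usage;
        int32_t  idHi;             // upper 32 bits of the 64-bit buffer id
        int32_t  idLo;             // lower 32 bits of the 64-bit buffer id
        int32_t  generationNumber;
        int32_t  numFds;           // from the native handle, 0 if none
        int32_t  numInts;          // from the native handle, 0 if none
        // followed by numInts int32_t values copied from the native handle
    };
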
diff --git a/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.cpp b/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.cpp
index 8ba2924..af9cf03 100644
--- a/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.cpp
+++ b/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.cpp
@@ -39,8 +39,9 @@
             new TWOmxNode(omxNode), toHardwareDataspace(dataSpace)));
 }
 
-::android::binder::Status LWGraphicBufferSource::setSuspend(bool suspend) {
-    return toBinderStatus(mBase->setSuspend(suspend));
+::android::binder::Status LWGraphicBufferSource::setSuspend(
+        bool suspend, int64_t timeUs) {
+    return toBinderStatus(mBase->setSuspend(suspend, timeUs));
 }
 
 ::android::binder::Status LWGraphicBufferSource::setRepeatPreviousFrameDelayUs(
@@ -63,6 +64,11 @@
     return toBinderStatus(mBase->setStartTimeUs(startTimeUs));
 }
 
+::android::binder::Status LWGraphicBufferSource::setStopTimeUs(
+        int64_t stopTimeUs) {
+    return toBinderStatus(mBase->setStopTimeUs(stopTimeUs));
+}
+
 ::android::binder::Status LWGraphicBufferSource::setColorAspects(
         int32_t aspects) {
     return toBinderStatus(mBase->setColorAspects(
@@ -89,8 +95,9 @@
     return Void();
 }
 
-Return<void> TWGraphicBufferSource::setSuspend(bool suspend) {
-    mBase->setSuspend(suspend);
+Return<void> TWGraphicBufferSource::setSuspend(
+        bool suspend, int64_t timeUs) {
+    mBase->setSuspend(suspend, timeUs);
     return Void();
 }
 
@@ -116,6 +123,10 @@
     return Void();
 }
 
+Return<void> TWGraphicBufferSource::setStopTimeUs(int64_t stopTimeUs) {
+    return toHardwareStatus(mBase->setStopTimeUs(stopTimeUs));
+}
+
 Return<void> TWGraphicBufferSource::setColorAspects(
         const ColorAspects& aspects) {
     mBase->setColorAspects(toCompactColorAspects(aspects));
diff --git a/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.h b/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.h
index 69efdde..dd6168e 100644
--- a/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.h
+++ b/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.h
@@ -69,13 +69,14 @@
     LWGraphicBufferSource(sp<TGraphicBufferSource> const& base);
     ::android::binder::Status configure(
             const sp<IOMXNode>& omxNode, int32_t dataSpace) override;
-    ::android::binder::Status setSuspend(bool suspend) override;
+    ::android::binder::Status setSuspend(bool suspend, int64_t timeUs) override;
     ::android::binder::Status setRepeatPreviousFrameDelayUs(
             int64_t repeatAfterUs) override;
     ::android::binder::Status setMaxFps(float maxFps) override;
     ::android::binder::Status setTimeLapseConfig(
             int64_t timePerFrameUs, int64_t timePerCaptureUs) override;
     ::android::binder::Status setStartTimeUs(int64_t startTimeUs) override;
+    ::android::binder::Status setStopTimeUs(int64_t stopTimeUs) override;
     ::android::binder::Status setColorAspects(int32_t aspects) override;
     ::android::binder::Status setTimeOffsetUs(int64_t timeOffsetsUs) override;
     ::android::binder::Status signalEndOfInputStream() override;
@@ -86,12 +87,13 @@
     TWGraphicBufferSource(sp<LGraphicBufferSource> const& base);
     Return<void> configure(
             const sp<IOmxNode>& omxNode, Dataspace dataspace) override;
-    Return<void> setSuspend(bool suspend) override;
+    Return<void> setSuspend(bool suspend, int64_t timeUs) override;
     Return<void> setRepeatPreviousFrameDelayUs(int64_t repeatAfterUs) override;
     Return<void> setMaxFps(float maxFps) override;
     Return<void> setTimeLapseConfig(
             int64_t timePerFrameUs, int64_t timePerCaptureUs) override;
     Return<void> setStartTimeUs(int64_t startTimeUs) override;
+    Return<void> setStopTimeUs(int64_t stopTimeUs) override;
     Return<void> setColorAspects(const ColorAspects& aspects) override;
     Return<void> setTimeOffsetUs(int64_t timeOffsetUs) override;
     Return<void> signalEndOfInputStream() override;
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxObserver.cpp b/media/libstagefright/omx/hal/1.0/impl/WOmxObserver.cpp
index ecd1db5..433a8b8 100644
--- a/media/libstagefright/omx/hal/1.0/impl/WOmxObserver.cpp
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmxObserver.cpp
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#define LOG_TAG "WOmxObserver-impl"
 #include "WOmxObserver.h"
 
 #include <vector>
@@ -43,7 +44,10 @@
         wrapAs(&tMessages[i], &handles[i], message);
         ++i;
     }
-    mBase->onMessages(tMessages);
+    auto transResult = mBase->onMessages(tMessages);
+    if (!transResult.isOk()) {
+        ALOGE("LWOmxObserver::onMessages transaction failed");
+    }
     for (auto& handle : handles) {
         native_handle_close(handle);
         native_handle_delete(handle);
diff --git a/media/libstagefright/omx/hal/1.0/utils/Conversion.h b/media/libstagefright/omx/hal/1.0/utils/Conversion.h
index 2319cd9..5050687 100644
--- a/media/libstagefright/omx/hal/1.0/utils/Conversion.h
+++ b/media/libstagefright/omx/hal/1.0/utils/Conversion.h
@@ -560,6 +560,76 @@
 }
 
 /**
+ * \brief Wrap `GraphicBuffer` in `AnwBuffer`.
+ *
+ * \param[out] t The wrapper of type `AnwBuffer`.
+ * \param[in] l The source `GraphicBuffer`.
+ */
+// wrap: GraphicBuffer -> AnwBuffer
+inline void wrapAs(AnwBuffer* t, GraphicBuffer const& l) {
+    t->attr.width = l.getWidth();
+    t->attr.height = l.getHeight();
+    t->attr.stride = l.getStride();
+    t->attr.format = static_cast<PixelFormat>(l.getPixelFormat());
+    t->attr.layerCount = l.getLayerCount();
+    t->attr.usage = l.getUsage();
+    t->attr.id = l.getId();
+    t->attr.generationNumber = l.getGenerationNumber();
+    t->nativeHandle = hidl_handle(l.handle);
+}
+
+/**
+ * \brief Convert `AnwBuffer` to `GraphicBuffer`.
+ *
+ * \param[out] l The destination `GraphicBuffer`.
+ * \param[in] t The source `AnwBuffer`.
+ *
+ * This function will duplicate all file descriptors in \p t.
+ */
+// convert: AnwBuffer -> GraphicBuffer
+// Ref: frameworks/native/libs/ui/GraphicBuffer.cpp: GraphicBuffer::flatten
+inline bool convertTo(GraphicBuffer* l, AnwBuffer const& t) {
+    native_handle_t* handle = t.nativeHandle == nullptr ?
+            nullptr : native_handle_clone(t.nativeHandle);
+
+    size_t const numInts = 12 + (handle ? handle->numInts : 0);
+    int32_t* ints = new int32_t[numInts];
+
+    size_t numFds = static_cast<size_t>(handle ? handle->numFds : 0);
+    int* fds = new int[numFds];
+
+    ints[0] = 'GBFR';
+    ints[1] = static_cast<int32_t>(t.attr.width);
+    ints[2] = static_cast<int32_t>(t.attr.height);
+    ints[3] = static_cast<int32_t>(t.attr.stride);
+    ints[4] = static_cast<int32_t>(t.attr.format);
+    ints[5] = static_cast<int32_t>(t.attr.layerCount);
+    ints[6] = static_cast<int32_t>(t.attr.usage);
+    ints[7] = static_cast<int32_t>(t.attr.id >> 32);
+    ints[8] = static_cast<int32_t>(t.attr.id & 0xFFFFFFFF);
+    ints[9] = static_cast<int32_t>(t.attr.generationNumber);
+    ints[10] = 0;
+    ints[11] = 0;
+    if (handle) {
+        ints[10] = static_cast<int32_t>(handle->numFds);
+        ints[11] = static_cast<int32_t>(handle->numInts);
+        int* intsStart = handle->data + handle->numFds;
+        std::copy(handle->data, intsStart, fds);
+        std::copy(intsStart, intsStart + handle->numInts, &ints[12]);
+    }
+
+    void const* constBuffer = static_cast<void const*>(ints);
+    size_t size = numInts * sizeof(int32_t);
+    int const* constFds = static_cast<int const*>(fds);
+    status_t status = l->unflatten(constBuffer, size, constFds, numFds);
+
+    delete [] fds;
+    delete [] ints;
+    native_handle_delete(handle);
+    return status == NO_ERROR;
+}
+
+/**
  * \brief Wrap `OMXBuffer` in `CodecBuffer`.
  *
  * \param[out] t The wrapper of type `CodecBuffer`.
@@ -568,8 +638,8 @@
  */
 // wrap: OMXBuffer -> CodecBuffer
 inline bool wrapAs(CodecBuffer* t, OMXBuffer const& l) {
-    t->nativeHandle = hidl_handle();
     t->sharedMemory = hidl_memory();
+    t->nativeHandle = hidl_handle();
     switch (l.mBufferType) {
         case OMXBuffer::kBufferTypeInvalid: {
             t->type = CodecBuffer::Type::INVALID;
@@ -599,7 +669,6 @@
                 t->attr.anwBuffer.format = static_cast<PixelFormat>(1);
                 t->attr.anwBuffer.layerCount = 0;
                 t->attr.anwBuffer.usage = 0;
-                t->nativeHandle = hidl_handle();
                 return true;
             }
             t->attr.anwBuffer.width = l.mGraphicBuffer->getWidth();
@@ -609,12 +678,12 @@
                     l.mGraphicBuffer->getPixelFormat());
             t->attr.anwBuffer.layerCount = l.mGraphicBuffer->getLayerCount();
             t->attr.anwBuffer.usage = l.mGraphicBuffer->getUsage();
-            t->nativeHandle = hidl_handle(l.mGraphicBuffer->handle);
+            t->nativeHandle = l.mGraphicBuffer->handle;
             return true;
         }
         case OMXBuffer::kBufferTypeNativeHandle: {
             t->type = CodecBuffer::Type::NATIVE_HANDLE;
-            t->nativeHandle = hidl_handle(l.mNativeHandle->handle());
+            t->nativeHandle = l.mNativeHandle->handle();
             return true;
         }
     }
@@ -650,16 +719,14 @@
                 *l = OMXBuffer(sp<GraphicBuffer>(nullptr));
                 return true;
             }
-            *l = OMXBuffer(sp<GraphicBuffer>(new GraphicBuffer(
-                    t.attr.anwBuffer.width,
-                    t.attr.anwBuffer.height,
-                    static_cast<::android::PixelFormat>(
-                            t.attr.anwBuffer.format),
-                    t.attr.anwBuffer.layerCount,
-                    t.attr.anwBuffer.usage,
-                    t.attr.anwBuffer.stride,
-                    native_handle_clone(t.nativeHandle),
-                    true)));
+            AnwBuffer anwBuffer;
+            anwBuffer.nativeHandle = t.nativeHandle;
+            anwBuffer.attr = t.attr.anwBuffer;
+            sp<GraphicBuffer> graphicBuffer = new GraphicBuffer();
+            if (!convertTo(graphicBuffer.get(), anwBuffer)) {
+                return false;
+            }
+            *l = OMXBuffer(graphicBuffer);
             return true;
         }
         case CodecBuffer::Type::NATIVE_HANDLE: {
@@ -834,76 +901,6 @@
 }
 
 /**
- * \brief Wrap `GraphicBuffer` in `AnwBuffer`.
- *
- * \param[out] t The wrapper of type `AnwBuffer`.
- * \param[in] l The source `GraphicBuffer`.
- */
-// wrap: GraphicBuffer -> AnwBuffer
-inline void wrapAs(AnwBuffer* t, GraphicBuffer const& l) {
-    t->attr.width = l.getWidth();
-    t->attr.height = l.getHeight();
-    t->attr.stride = l.getStride();
-    t->attr.format = static_cast<PixelFormat>(l.getPixelFormat());
-    t->attr.layerCount = l.getLayerCount();
-    t->attr.usage = l.getUsage();
-    t->attr.id = l.getId();
-    t->attr.generationNumber = l.getGenerationNumber();
-    t->nativeHandle = hidl_handle(l.handle);
-}
-
-/**
- * \brief Convert `AnwBuffer` to `GraphicBuffer`.
- *
- * \param[out] l The destination `GraphicBuffer`.
- * \param[in] t The source `AnwBuffer`.
- *
- * This function will duplicate all file descriptors in \p t.
- */
-// convert: AnwBuffer -> GraphicBuffer
-// Ref: frameworks/native/libs/ui/GraphicBuffer.cpp: GraphicBuffer::flatten
-inline bool convertTo(GraphicBuffer* l, AnwBuffer const& t) {
-    native_handle_t* handle = t.nativeHandle == nullptr ?
-            nullptr : native_handle_clone(t.nativeHandle);
-
-    size_t const numInts = 12 + (handle ? handle->numInts : 0);
-    int32_t* ints = new int32_t[numInts];
-
-    size_t numFds = static_cast<size_t>(handle ? handle->numFds : 0);
-    int* fds = new int[numFds];
-
-    ints[0] = 'GBFR';
-    ints[1] = static_cast<int32_t>(t.attr.width);
-    ints[2] = static_cast<int32_t>(t.attr.height);
-    ints[3] = static_cast<int32_t>(t.attr.stride);
-    ints[4] = static_cast<int32_t>(t.attr.format);
-    ints[5] = static_cast<int32_t>(t.attr.layerCount);
-    ints[6] = static_cast<int32_t>(t.attr.usage);
-    ints[7] = static_cast<int32_t>(t.attr.id >> 32);
-    ints[8] = static_cast<int32_t>(t.attr.id & 0xFFFFFFFF);
-    ints[9] = static_cast<int32_t>(t.attr.generationNumber);
-    ints[10] = 0;
-    ints[11] = 0;
-    if (handle) {
-        ints[10] = static_cast<int32_t>(handle->numFds);
-        ints[11] = static_cast<int32_t>(handle->numInts);
-        int* intsStart = handle->data + handle->numFds;
-        std::copy(handle->data, intsStart, fds);
-        std::copy(intsStart, intsStart + handle->numInts, &ints[12]);
-    }
-
-    void const* constBuffer = static_cast<void const*>(ints);
-    size_t size = numInts * sizeof(int32_t);
-    int const* constFds = static_cast<int const*>(fds);
-    status_t status = l->unflatten(constBuffer, size, constFds, numFds);
-
-    delete [] fds;
-    delete [] ints;
-    native_handle_delete(handle);
-    return status == NO_ERROR;
-}
-
-/**
  * Conversion functions for types outside media
  * ============================================
  *
diff --git a/media/libstagefright/omx/hal/1.0/utils/WGraphicBufferSource.cpp b/media/libstagefright/omx/hal/1.0/utils/WGraphicBufferSource.cpp
index a23b48a..afe8bc5 100644
--- a/media/libstagefright/omx/hal/1.0/utils/WGraphicBufferSource.cpp
+++ b/media/libstagefright/omx/hal/1.0/utils/WGraphicBufferSource.cpp
@@ -39,8 +39,9 @@
             new TWOmxNode(omxNode), toHardwareDataspace(dataSpace)));
 }
 
-::android::binder::Status LWGraphicBufferSource::setSuspend(bool suspend) {
-    return toBinderStatus(mBase->setSuspend(suspend));
+::android::binder::Status LWGraphicBufferSource::setSuspend(
+        bool suspend, int64_t timeUs) {
+    return toBinderStatus(mBase->setSuspend(suspend, timeUs));
 }
 
 ::android::binder::Status LWGraphicBufferSource::setRepeatPreviousFrameDelayUs(
@@ -63,6 +64,11 @@
     return toBinderStatus(mBase->setStartTimeUs(startTimeUs));
 }
 
+::android::binder::Status LWGraphicBufferSource::setStopTimeUs(
+        int64_t stopTimeUs) {
+    return toBinderStatus(mBase->setStopTimeUs(stopTimeUs));
+}
+
 ::android::binder::Status LWGraphicBufferSource::setColorAspects(
         int32_t aspects) {
     return toBinderStatus(mBase->setColorAspects(
@@ -89,8 +95,8 @@
     return Void();
 }
 
-Return<void> TWGraphicBufferSource::setSuspend(bool suspend) {
-    mBase->setSuspend(suspend);
+Return<void> TWGraphicBufferSource::setSuspend(bool suspend, int64_t timeUs) {
+    mBase->setSuspend(suspend, timeUs);
     return Void();
 }
 
@@ -116,6 +122,10 @@
     return Void();
 }
 
+Return<void> TWGraphicBufferSource::setStopTimeUs(int64_t stopTimeUs) {
+    return toHardwareStatus(mBase->setStopTimeUs(stopTimeUs));
+}
+
 Return<void> TWGraphicBufferSource::setColorAspects(
         const ColorAspects& aspects) {
     mBase->setColorAspects(toCompactColorAspects(aspects));
diff --git a/media/libstagefright/omx/hal/1.0/utils/WGraphicBufferSource.h b/media/libstagefright/omx/hal/1.0/utils/WGraphicBufferSource.h
index d21de42..1b09cbd 100644
--- a/media/libstagefright/omx/hal/1.0/utils/WGraphicBufferSource.h
+++ b/media/libstagefright/omx/hal/1.0/utils/WGraphicBufferSource.h
@@ -69,13 +69,14 @@
     LWGraphicBufferSource(sp<TGraphicBufferSource> const& base);
     ::android::binder::Status configure(
             const sp<IOMXNode>& omxNode, int32_t dataSpace) override;
-    ::android::binder::Status setSuspend(bool suspend) override;
+    ::android::binder::Status setSuspend(bool suspend, int64_t timeUs) override;
     ::android::binder::Status setRepeatPreviousFrameDelayUs(
             int64_t repeatAfterUs) override;
     ::android::binder::Status setMaxFps(float maxFps) override;
     ::android::binder::Status setTimeLapseConfig(
             int64_t timePerFrameUs, int64_t timePerCaptureUs) override;
     ::android::binder::Status setStartTimeUs(int64_t startTimeUs) override;
+    ::android::binder::Status setStopTimeUs(int64_t stopTimeUs) override;
     ::android::binder::Status setColorAspects(int32_t aspects) override;
     ::android::binder::Status setTimeOffsetUs(int64_t timeOffsetsUs) override;
     ::android::binder::Status signalEndOfInputStream() override;
@@ -86,12 +87,13 @@
     TWGraphicBufferSource(sp<LGraphicBufferSource> const& base);
     Return<void> configure(
             const sp<IOmxNode>& omxNode, Dataspace dataspace) override;
-    Return<void> setSuspend(bool suspend) override;
+    Return<void> setSuspend(bool suspend, int64_t timeUs) override;
     Return<void> setRepeatPreviousFrameDelayUs(int64_t repeatAfterUs) override;
     Return<void> setMaxFps(float maxFps) override;
     Return<void> setTimeLapseConfig(
             int64_t timePerFrameUs, int64_t timePerCaptureUs) override;
     Return<void> setStartTimeUs(int64_t startTimeUs) override;
+    Return<void> setStopTimeUs(int64_t stopTimeUs) override;
     Return<void> setColorAspects(const ColorAspects& aspects) override;
     Return<void> setTimeOffsetUs(int64_t timeOffsetUs) override;
     Return<void> signalEndOfInputStream() override;
diff --git a/media/libstagefright/omx/hal/1.0/utils/WOmxObserver.cpp b/media/libstagefright/omx/hal/1.0/utils/WOmxObserver.cpp
index db971f8..bab6a09 100644
--- a/media/libstagefright/omx/hal/1.0/utils/WOmxObserver.cpp
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmxObserver.cpp
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#define LOG_TAG "WOmxObserver-utils"
 #include "WOmxObserver.h"
 
 #include <vector>
@@ -43,7 +44,10 @@
         wrapAs(&tMessages[i], &handles[i], message);
         ++i;
     }
-    mBase->onMessages(tMessages);
+    auto transResult = mBase->onMessages(tMessages);
+    if (!transResult.isOk()) {
+        ALOGE("LWOmxObserver::onMessages transaction failed");
+    }
     for (auto& handle : handles) {
         native_handle_close(handle);
         native_handle_delete(handle);
diff --git a/media/libstagefright/omx/tests/Android.mk b/media/libstagefright/omx/tests/Android.mk
index 5e4ba10..08deaab 100644
--- a/media/libstagefright/omx/tests/Android.mk
+++ b/media/libstagefright/omx/tests/Android.mk
@@ -2,15 +2,26 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES =       \
-	OMXHarness.cpp  \
+        OMXHarness.cpp  \
 
 LOCAL_SHARED_LIBRARIES := \
-	libstagefright libbinder libmedia libutils liblog libstagefright_foundation
+        libstagefright \
+        libbinder \
+        libmedia \
+        libutils \
+        liblog \
+        libstagefright_foundation \
+        libcutils \
+        libhidlbase \
+        libhidlmemory \
+        android.hidl.memory@1.0 \
+        android.hardware.media.omx@1.0 \
+        android.hardware.media.omx@1.0-utils
 
 LOCAL_C_INCLUDES := \
-	$(TOP)/frameworks/av/media/libstagefright \
-	$(TOP)/frameworks/native/include/media/openmax \
-	$(TOP)/system/libhidl/base/include \
+        $(TOP)/frameworks/av/media/libstagefright \
+        $(TOP)/frameworks/native/include/media/openmax \
+        $(TOP)/system/libhidl/base/include \
 
 LOCAL_CFLAGS += -Werror -Wall
 
@@ -29,14 +40,14 @@
 LOCAL_MODULE_TAGS := tests
 
 LOCAL_SRC_FILES := \
-	FrameDropper_test.cpp \
+        FrameDropper_test.cpp \
 
 LOCAL_SHARED_LIBRARIES := \
-	libstagefright_omx \
-	libutils \
+        libstagefright_omx \
+        libutils \
 
 LOCAL_C_INCLUDES := \
-	frameworks/av/media/libstagefright/omx \
+        frameworks/av/media/libstagefright/omx \
 
 LOCAL_CFLAGS += -Werror -Wall
 
diff --git a/media/libstagefright/omx/tests/OMXHarness.cpp b/media/libstagefright/omx/tests/OMXHarness.cpp
index 1ce5d1a..8817cf9 100644
--- a/media/libstagefright/omx/tests/OMXHarness.cpp
+++ b/media/libstagefright/omx/tests/OMXHarness.cpp
@@ -39,6 +39,8 @@
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/SimpleDecodingSource.h>
 #include <media/OMXBuffer.h>
+#include <android/hardware/media/omx/1.0/IOmx.h>
+#include <omx/hal/1.0/utils/WOmx.h>
 
 #define DEFAULT_TIMEOUT         500000
 
@@ -64,7 +66,7 @@
 /////////////////////////////////////////////////////////////////////
 
 Harness::Harness()
-    : mInitCheck(NO_INIT) {
+    : mInitCheck(NO_INIT), mUseTreble(false) {
     mInitCheck = initOMX();
 }
 
@@ -76,10 +78,23 @@
 }
 
 status_t Harness::initOMX() {
-    sp<IServiceManager> sm = defaultServiceManager();
-    sp<IBinder> binder = sm->getService(String16("media.codec"));
-    sp<IMediaCodecService> service = interface_cast<IMediaCodecService>(binder);
-    mOMX = service->getOMX();
+    int32_t trebleOmx = property_get_int32("persist.media.treble_omx", -1);
+    if ((trebleOmx == 1) || ((trebleOmx == -1) &&
+            property_get_bool("persist.hal.binderization", 0))) {
+        using namespace ::android::hardware::media::omx::V1_0;
+        sp<IOmx> tOmx = IOmx::getService();
+        if (tOmx == nullptr) {
+            return NO_INIT;
+        }
+        mOMX = new utils::LWOmx(tOmx);
+        mUseTreble = true;
+    } else {
+        sp<IServiceManager> sm = defaultServiceManager();
+        sp<IBinder> binder = sm->getService(String16("media.codec"));
+        sp<IMediaCodecService> service = interface_cast<IMediaCodecService>(binder);
+        mOMX = service->getOMX();
+        mUseTreble = false;
+    }
 
     return mOMX != 0 ? OK : NO_INIT;
 }
@@ -197,7 +212,6 @@
     EXPECT((err) == OK, info " failed")
 
 status_t Harness::allocatePortBuffers(
-        const sp<MemoryDealer> &dealer,
         OMX_U32 portIndex, Vector<Buffer> *buffers) {
     buffers->clear();
 
@@ -207,11 +221,27 @@
 
     for (OMX_U32 i = 0; i < def.nBufferCountActual; ++i) {
         Buffer buffer;
-        buffer.mMemory = dealer->allocate(def.nBufferSize);
         buffer.mFlags = 0;
-        CHECK(buffer.mMemory != NULL);
+        if (mUseTreble) {
+            bool success;
+            auto transStatus = mAllocator->allocate(def.nBufferSize,
+                    [&success, &buffer](
+                            bool s,
+                            hidl_memory const& m) {
+                        success = s;
+                        buffer.mHidlMemory = m;
+                    });
+            EXPECT(transStatus.isOk(),
+                    "Cannot call allocator");
+            EXPECT(success,
+                    "Cannot allocate memory");
+            err = mOMXNode->useBuffer(portIndex, buffer.mHidlMemory, &buffer.mID);
+        } else {
+            buffer.mMemory = mDealer->allocate(def.nBufferSize);
+            CHECK(buffer.mMemory != NULL);
+            err = mOMXNode->useBuffer(portIndex, buffer.mMemory, &buffer.mID);
+        }
 
-        err = mOMXNode->useBuffer(portIndex, buffer.mMemory, &buffer.mID);
         EXPECT_SUCCESS(err, "useBuffer");
 
         buffers->push(buffer);
@@ -279,7 +309,13 @@
         return OK;
     }
 
-    sp<MemoryDealer> dealer = new MemoryDealer(16 * 1024 * 1024, "OMXHarness");
+    if (mUseTreble) {
+        mAllocator = IAllocator::getService("ashmem");
+        EXPECT(mAllocator != nullptr,
+                "Cannot obtain hidl AshmemAllocator");
+    } else {
+        mDealer = new MemoryDealer(16 * 1024 * 1024, "OMXHarness");
+    }
 
     sp<CodecObserver> observer = new CodecObserver(this, ++mCurGeneration);
 
@@ -305,14 +341,14 @@
 
     // Now allocate buffers.
     Vector<Buffer> inputBuffers;
-    err = allocatePortBuffers(dealer, 0, &inputBuffers);
+    err = allocatePortBuffers(0, &inputBuffers);
     EXPECT_SUCCESS(err, "allocatePortBuffers(input)");
 
     err = dequeueMessageForNode(&msg, DEFAULT_TIMEOUT);
     CHECK_EQ(err, (status_t)TIMED_OUT);
 
     Vector<Buffer> outputBuffers;
-    err = allocatePortBuffers(dealer, 1, &outputBuffers);
+    err = allocatePortBuffers(1, &outputBuffers);
     EXPECT_SUCCESS(err, "allocatePortBuffers(output)");
 
     err = dequeueMessageForNode(&msg, DEFAULT_TIMEOUT);
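
The hidl ashmem allocation used in allocatePortBuffers() above follows the usual synchronous-callback pattern of android.hidl.memory@1.0. A minimal sketch of that pattern in isolation, with an illustrative helper name and error handling trimmed:

    #include <android/hidl/memory/1.0/IAllocator.h>
    #include <hidl/HidlSupport.h>
    #include <utils/StrongPointer.h>

    using ::android::sp;
    using ::android::hardware::hidl_memory;
    using ::android::hidl::memory::V1_0::IAllocator;

    bool allocateSharedMemory(size_t size, hidl_memory *outMem) {
        sp<IAllocator> allocator = IAllocator::getService("ashmem");
        if (allocator == nullptr) {
            return false;
        }
        bool success = false;
        auto transStatus = allocator->allocate(size,
                [&](bool s, hidl_memory const &m) {
                    success = s;
                    if (s) {
                        *outMem = m;  // can then be handed to IOMXNode::useBuffer()
                    }
                });
        return transStatus.isOk() && success;
    }
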
diff --git a/media/libstagefright/omx/tests/OMXHarness.h b/media/libstagefright/omx/tests/OMXHarness.h
index 0fe00a6..022707f 100644
--- a/media/libstagefright/omx/tests/OMXHarness.h
+++ b/media/libstagefright/omx/tests/OMXHarness.h
@@ -23,6 +23,9 @@
 #include <utils/Vector.h>
 #include <utils/threads.h>
 
+#include <binder/MemoryDealer.h>
+#include <android/hidl/memory/1.0/IAllocator.h>
+#include <android/hidl/memory/1.0/IMemory.h>
 #include <OMX_Component.h>
 
 namespace android {
@@ -30,12 +33,15 @@
 class MemoryDealer;
 
 struct Harness : public RefBase {
+    typedef hidl::memory::V1_0::IMemory TMemory;
+    typedef hardware::hidl_memory hidl_memory;
     enum BufferFlags {
         kBufferBusy = 1
     };
     struct Buffer {
         IOMX::buffer_id mID;
         sp<IMemory> mMemory;
+        hidl_memory mHidlMemory;
         uint32_t mFlags;
     };
 
@@ -54,7 +60,6 @@
             OMX_U32 portIndex, OMX_PARAM_PORTDEFINITIONTYPE *def);
 
     status_t allocatePortBuffers(
-            const sp<MemoryDealer> &dealer,
             OMX_U32 portIndex, Vector<Buffer> *buffers);
 
     status_t setRole(const char *role);
@@ -74,6 +79,8 @@
     virtual ~Harness();
 
 private:
+    typedef hidl::memory::V1_0::IAllocator IAllocator;
+
     friend struct NodeReaper;
     struct CodecObserver;
 
@@ -86,6 +93,9 @@
     Condition mMessageAddedCondition;
     int32_t mLastMsgGeneration;
     int32_t mCurGeneration;
+    bool mUseTreble;
+    sp<MemoryDealer> mDealer;
+    sp<IAllocator> mAllocator;
 
     status_t initOMX();
 
diff --git a/media/mediaserver/Android.mk b/media/mediaserver/Android.mk
index a4cb66d..f7597db 100644
--- a/media/mediaserver/Android.mk
+++ b/media/mediaserver/Android.mk
@@ -12,23 +12,24 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:= \
-	main_mediaserver.cpp
+        main_mediaserver.cpp
 
 LOCAL_SHARED_LIBRARIES := \
-	libresourcemanagerservice \
-	liblog \
-	libmediaplayerservice \
-	libutils \
-	libbinder \
-	libicuuc \
+        libresourcemanagerservice \
+        liblog \
+        libmediaplayerservice \
+        libutils \
+        libbinder \
+        libicuuc \
+        android.hardware.media.omx@1.0 \
 
 LOCAL_STATIC_LIBRARIES := \
         libicuandroid_utils \
         libregistermsext
 
 LOCAL_C_INCLUDES := \
-    frameworks/av/media/libmediaplayerservice \
-    frameworks/av/services/mediaresourcemanager \
+        frameworks/av/media/libmediaplayerservice \
+        frameworks/av/services/mediaresourcemanager \
 
 LOCAL_MODULE:= mediaserver
 LOCAL_32_BIT_ONLY := true
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index ec1ebf5..3c32c16 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -3578,9 +3578,17 @@
         break;
     case FastMixer_Static:
     case FastMixer_Dynamic:
-        initFastMixer = mFrameCount < mNormalFrameCount;
+        // FastMixer was designed to operate with a HAL that pulls at a regular rate,
+        // where the period is less than an experimentally determined threshold that can be
+        // scheduled reliably with CFS. However, the BT A2DP HAL is
+        // bursty (does not pull at a regular rate) and so cannot operate with FastMixer.
+        initFastMixer = mFrameCount < mNormalFrameCount
+                && (mOutDevice & AUDIO_DEVICE_OUT_ALL_A2DP) == 0;
         break;
     }
+    ALOGW_IF(initFastMixer == false && mFrameCount < mNormalFrameCount,
+            "FastMixer is preferred for this sink as frameCount %zu is less than threshold %zu",
+            mFrameCount, mNormalFrameCount);
     if (initFastMixer) {
         audio_format_t fastMixerFormat;
         if (mMixerBufferEnabled && mEffectBufferEnabled) {
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index e8a8264..cb14bb5 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -3,7 +3,12 @@
 # service library
 include $(CLEAR_VARS)
 LOCAL_SRC_FILES := MediaCodecService.cpp
-LOCAL_SHARED_LIBRARIES := libmedia libbinder libutils liblog libstagefright_omx
+LOCAL_SHARED_LIBRARIES := \
+    libmedia \
+    libbinder \
+    libutils \
+    liblog \
+    libstagefright_omx
 LOCAL_C_INCLUDES := \
     $(TOP)/frameworks/av/media/libstagefright \
     $(TOP)/frameworks/native/include/media/openmax
@@ -16,8 +21,16 @@
 include $(CLEAR_VARS)
 LOCAL_REQUIRED_MODULES_arm := mediacodec-seccomp.policy
 LOCAL_SRC_FILES := main_codecservice.cpp
-LOCAL_SHARED_LIBRARIES := libmedia libmediacodecservice libbinder libutils \
-    libbase libavservices_minijail libcutils \
+LOCAL_SHARED_LIBRARIES := \
+    libmedia \
+    libmediacodecservice \
+    libbinder \
+    libutils \
+    liblog \
+    libbase \
+    libavservices_minijail \
+    libcutils \
+    libhwbinder \
     android.hardware.media.omx@1.0
 LOCAL_C_INCLUDES := \
     $(TOP)/frameworks/av/media/libstagefright \
diff --git a/services/mediacodec/main_codecservice.cpp b/services/mediacodec/main_codecservice.cpp
index 6cbb368..983bbba 100644
--- a/services/mediacodec/main_codecservice.cpp
+++ b/services/mediacodec/main_codecservice.cpp
@@ -32,6 +32,7 @@
 #include "minijail.h"
 
 #include <android/hardware/media/omx/1.0/IOmx.h>
+#include <hidl/HidlTransportSupport.h>
 
 using namespace android;
 
@@ -45,13 +46,13 @@
     SetUpMinijail(kSeccompPolicyPath, std::string());
 
     strcpy(argv[0], "media.codec");
-    sp<ProcessState> proc(ProcessState::self());
-    sp<IServiceManager> sm = defaultServiceManager();
-    MediaCodecService::instantiate();
 
-    // Treble
-    bool useTrebleOmx = bool(property_get_bool("debug.treble_omx", 0));
-    if (useTrebleOmx) {
+    ::android::hardware::configureRpcThreadpool(64, false);
+    sp<ProcessState> proc(ProcessState::self());
+
+    int32_t trebleOmx = property_get_int32("persist.media.treble_omx", -1);
+    if ((trebleOmx == 1) || ((trebleOmx == -1) &&
+            property_get_bool("persist.hal.binderization", 0))) {
         using namespace ::android::hardware::media::omx::V1_0;
         sp<IOmx> omx = IOmx::getService(true);
         if (omx == nullptr) {
@@ -59,8 +60,11 @@
         } else if (omx->registerAsService("default") != OK) {
             LOG(ERROR) << "Cannot register a Treble IOmx service.";
         } else {
-            LOG(VERBOSE) << "Treble IOmx service created.";
+            LOG(INFO) << "Treble IOmx service created.";
         }
+    } else {
+        MediaCodecService::instantiate();
+        LOG(INFO) << "Non-Treble IOMX service created.";
     }
 
     ProcessState::self()->startThreadPool();
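
Both OMXHarness::initOMX() and main() above now gate Treble IOmx on the same pair of properties. The selection rule, isolated as a sketch (the helper name is illustrative):

    #include <cutils/properties.h>

    // Treble IOmx is used when persist.media.treble_omx is explicitly 1, or when
    // it is unset (-1) and persist.hal.binderization is enabled.
    static bool shouldUseTrebleOmx() {
        int32_t trebleOmx = property_get_int32("persist.media.treble_omx", -1);
        if (trebleOmx != -1) {
            return trebleOmx == 1;
        }
        return property_get_bool("persist.hal.binderization", 0);
    }
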
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index dfa9753..99b0b4d 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -57,7 +57,7 @@
         ALOGE("AAudioService::openStream(): open returned %d", result);
         return result;
     } else {
-        AAudioStream handle = mHandleTracker.put(AAUDIO_HANDLE_TYPE_STREAM, serviceStream);
+        aaudio_handle_t handle = mHandleTracker.put(AAUDIO_HANDLE_TYPE_STREAM, serviceStream);
         ALOGD("AAudioService::openStream(): handle = 0x%08X", handle);
         if (handle < 0) {
             delete serviceStream;
@@ -127,7 +127,7 @@
 
 aaudio_result_t AAudioService::registerAudioThread(aaudio_handle_t streamHandle,
                                                          pid_t clientThreadId,
-                                                         aaudio_nanoseconds_t periodNanoseconds) {
+                                                         int64_t periodNanoseconds) {
     AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
     ALOGD("AAudioService::registerAudioThread(), serviceStream = %p", serviceStream);
     if (serviceStream == nullptr) {
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
index e9625b2..a520d7a 100644
--- a/services/oboeservice/AAudioService.h
+++ b/services/oboeservice/AAudioService.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef AAUDIO_AAUDIO_AUDIO_SERVICE_H
-#define AAUDIO_AAUDIO_AUDIO_SERVICE_H
+#ifndef AAUDIO_AAUDIO_SERVICE_H
+#define AAUDIO_AAUDIO_SERVICE_H
 
 #include <time.h>
 #include <pthread.h>
@@ -58,7 +58,7 @@
     virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle);
 
     virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
-                                              pid_t pid, aaudio_nanoseconds_t periodNanoseconds) ;
+                                              pid_t pid, int64_t periodNanoseconds) ;
 
     virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle, pid_t pid);
 
@@ -72,4 +72,4 @@
 
 } /* namespace android */
 
-#endif //AAUDIO_AAUDIO_AUDIO_SERVICE_H
+#endif //AAUDIO_AAUDIO_SERVICE_H
diff --git a/services/oboeservice/AAudioServiceDefinitions.h b/services/oboeservice/AAudioServiceDefinitions.h
index ee9aaa7..f98acbf 100644
--- a/services/oboeservice/AAudioServiceDefinitions.h
+++ b/services/oboeservice/AAudioServiceDefinitions.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef AAUDIO_AAUDIO_SERVICE_H
-#define AAUDIO_AAUDIO_SERVICE_H
+#ifndef AAUDIO_AAUDIO_SERVICE_DEFINITIONS_H
+#define AAUDIO_AAUDIO_SERVICE_DEFINITIONS_H
 
 #include <stdint.h>
 
@@ -28,9 +28,9 @@
 // TODO move this an "include" folder for the service.
 
 struct AAudioMessageTimestamp {
-    aaudio_position_frames_t position;
+    int64_t position;
     int64_t                deviceOffset; // add to client position to get device position
-    aaudio_nanoseconds_t     timestamp;
+    int64_t     timestamp;
 };
 
 typedef enum aaudio_service_event_e : uint32_t {
@@ -61,7 +61,6 @@
     };
 } AAudioServiceMessage;
 
-
 } /* namespace aaudio */
 
-#endif //AAUDIO_AAUDIO_SERVICE_H
+#endif //AAUDIO_AAUDIO_SERVICE_DEFINITIONS_H
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 4a59253..7a812f9 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -72,7 +72,7 @@
 
     virtual void sendCurrentTimestamp() = 0;
 
-    aaudio_size_frames_t getFramesPerBurst() {
+    int32_t getFramesPerBurst() {
         return mFramesPerBurst;
     }
 
@@ -90,17 +90,17 @@
 
 protected:
 
-    pid_t                    mRegisteredClientThread = ILLEGAL_THREAD_ID;
+    pid_t              mRegisteredClientThread = ILLEGAL_THREAD_ID;
 
-    SharedRingBuffer *       mUpMessageQueue;
+    SharedRingBuffer*  mUpMessageQueue;
 
-    aaudio_sample_rate_t       mSampleRate = 0;
-    aaudio_size_bytes_t        mBytesPerFrame = 0;
-    aaudio_size_frames_t       mFramesPerBurst = 0;
-    aaudio_size_frames_t       mCapacityInFrames = 0;
-    aaudio_size_bytes_t        mCapacityInBytes = 0;
+    int32_t            mSampleRate = 0;
+    int32_t            mBytesPerFrame = 0;
+    int32_t            mFramesPerBurst = 0;
+    int32_t            mCapacityInFrames = 0;
+    int32_t            mCapacityInBytes = 0;
 
-    android::Mutex           mLockUpMessageQueue;
+    android::Mutex     mLockUpMessageQueue;
 };
 
 } /* namespace aaudio */
diff --git a/services/oboeservice/AAudioServiceStreamFakeHal.cpp b/services/oboeservice/AAudioServiceStreamFakeHal.cpp
index 1caeb3f..71d3542 100644
--- a/services/oboeservice/AAudioServiceStreamFakeHal.cpp
+++ b/services/oboeservice/AAudioServiceStreamFakeHal.cpp
@@ -191,7 +191,7 @@
     timestampScheduler.setBurstPeriod(mFramesPerBurst, mSampleRate);
     timestampScheduler.start(AudioClock::getNanoseconds());
     while(mThreadEnabled.load()) {
-        aaudio_nanoseconds_t nextTime = timestampScheduler.nextAbsoluteTime();
+        int64_t nextTime = timestampScheduler.nextAbsoluteTime();
         if (AudioClock::getNanoseconds() >= nextTime) {
             sendCurrentTimestamp();
         } else  {
diff --git a/services/oboeservice/AAudioThread.h b/services/oboeservice/AAudioThread.h
index 1f676dc..a5d43a4 100644
--- a/services/oboeservice/AAudioThread.h
+++ b/services/oboeservice/AAudioThread.h
@@ -62,9 +62,9 @@
     void dispatch(); // called internally from 'C' thread wrapper
 
 private:
-    Runnable*                mRunnable = nullptr; // TODO make atomic with memory barrier?
-    bool                     mHasThread = false;
-    pthread_t                mThread; // initialized in constructor
+    Runnable*          mRunnable = nullptr; // TODO make atomic with memory barrier?
+    bool               mHasThread = false;
+    pthread_t          mThread; // initialized in constructor
 
 };
 
diff --git a/services/oboeservice/TimestampScheduler.cpp b/services/oboeservice/TimestampScheduler.cpp
index 5875909..d54996f 100644
--- a/services/oboeservice/TimestampScheduler.cpp
+++ b/services/oboeservice/TimestampScheduler.cpp
@@ -21,12 +21,12 @@
 
 using namespace aaudio;
 
-void TimestampScheduler::start(aaudio_nanoseconds_t startTime) {
+void TimestampScheduler::start(int64_t startTime) {
     mStartTime = startTime;
     mLastTime = startTime;
 }
 
-aaudio_nanoseconds_t TimestampScheduler::nextAbsoluteTime() {
+int64_t TimestampScheduler::nextAbsoluteTime() {
     int64_t periodsElapsed = (mLastTime - mStartTime) / mBurstPeriod;
     // This is an arbitrary schedule that could probably be improved.
     // It starts out sending a timestamp on every period because we want to
@@ -35,10 +35,10 @@
     int64_t minPeriodsToDelay = (periodsElapsed < 10) ? 1 :
         (periodsElapsed < 100) ? 3 :
         (periodsElapsed < 1000) ? 10 : 50;
-    aaudio_nanoseconds_t sleepTime = minPeriodsToDelay * mBurstPeriod;
+    int64_t sleepTime = minPeriodsToDelay * mBurstPeriod;
     // Generate a random rectangular distribution one burst wide so that we get
     // an uncorrelated sampling of the MMAP pointer.
-    sleepTime += (aaudio_nanoseconds_t)(random() * mBurstPeriod / RAND_MAX);
+    sleepTime += (int64_t)(random() * mBurstPeriod / RAND_MAX);
     mLastTime += sleepTime;
     return mLastTime;
 }
diff --git a/services/oboeservice/TimestampScheduler.h b/services/oboeservice/TimestampScheduler.h
index efc9c5f..91a2477 100644
--- a/services/oboeservice/TimestampScheduler.h
+++ b/services/oboeservice/TimestampScheduler.h
@@ -17,7 +17,7 @@
 #ifndef AAUDIO_TIMESTAMP_SCHEDULER_H
 #define AAUDIO_TIMESTAMP_SCHEDULER_H
 
-//#include <stdlib.h> // random()
+
 
 #include "IAAudioService.h"
 #include "AAudioServiceDefinitions.h"
@@ -25,6 +25,7 @@
 #include "fifo/FifoBuffer.h"
 #include "SharedRingBuffer.h"
 #include "AudioEndpointParcelable.h"
+#include "utility/AudioClock.h"
 
 namespace aaudio {
 
@@ -43,32 +44,32 @@
     /**
      * Start the schedule at the given time.
      */
-    void start(aaudio_nanoseconds_t startTime);
+    void start(int64_t startTime);
 
     /**
      * Calculate the next time that the read position should be
      * measured.
      */
-    aaudio_nanoseconds_t nextAbsoluteTime();
+    int64_t nextAbsoluteTime();
 
-    void setBurstPeriod(aaudio_nanoseconds_t burstPeriod) {
+    void setBurstPeriod(int64_t burstPeriod) {
         mBurstPeriod = burstPeriod;
     }
 
-    void setBurstPeriod(aaudio_size_frames_t framesPerBurst,
-                        aaudio_sample_rate_t sampleRate) {
+    void setBurstPeriod(int32_t framesPerBurst,
+                        int32_t sampleRate) {
         mBurstPeriod = AAUDIO_NANOS_PER_SECOND * framesPerBurst / sampleRate;
     }
 
-    aaudio_nanoseconds_t getBurstPeriod() {
+    int64_t getBurstPeriod() {
         return mBurstPeriod;
     }
 
 private:
     // Start with an arbitrary default so we do not divide by zero.
-    aaudio_nanoseconds_t mBurstPeriod = AAUDIO_NANOS_PER_MILLISECOND;
-    aaudio_nanoseconds_t mStartTime;
-    aaudio_nanoseconds_t mLastTime;
+    int64_t mBurstPeriod = AAUDIO_NANOS_PER_MILLISECOND;
+    int64_t mStartTime;
+    int64_t mLastTime;
 };
 
 } /* namespace aaudio */
diff --git a/services/radio/HidlUtils.cpp b/services/radio/HidlUtils.cpp
index 3b33386..6895377 100644
--- a/services/radio/HidlUtils.cpp
+++ b/services/radio/HidlUtils.cpp
@@ -134,14 +134,18 @@
                            halInfo->channel, halInfo->subChannel);
 }
 
+// TODO(twasilczyk): drop unnecessary channel info
 //static
 void HidlUtils::convertMetaDataFromHal(radio_metadata_t **metadata,
                                        const hidl_vec<MetaData>& halMetadata,
-                                       uint32_t channel,
-                                       uint32_t subChannel)
+                                       uint32_t channel __unused,
+                                       uint32_t subChannel __unused)
 {
 
-    radio_metadata_allocate(metadata, channel, subChannel);
+    if (metadata == nullptr || *metadata == nullptr) {
+        ALOGE("destination metadata buffer is a nullptr");
+        return;
+    }
     for (size_t i = 0; i < halMetadata.size(); i++) {
         radio_metadata_key_t key = static_cast<radio_metadata_key_t>(halMetadata[i].key);
         radio_metadata_type_t type = static_cast<radio_metadata_type_t>(halMetadata[i].type);
diff --git a/services/radio/RadioHalHidl.cpp b/services/radio/RadioHalHidl.cpp
index 032d3fd..3dcf2f3 100644
--- a/services/radio/RadioHalHidl.cpp
+++ b/services/radio/RadioHalHidl.cpp
@@ -20,7 +20,7 @@
 #include <media/audiohal/hidl/HalDeathHandler.h>
 #include <utils/Log.h>
 #include <utils/misc.h>
-#include <system/radio_metadata.h>
+#include <system/RadioMetadataWrapper.h>
 #include <android/hardware/broadcastradio/1.0/IBroadcastRadioFactory.h>
 
 #include "RadioHalHidl.h"
@@ -261,27 +261,25 @@
 Return<void> RadioHalHidl::Tuner::tuneComplete(Result result, const ProgramInfo& info)
 {
     ALOGV("%s IN", __FUNCTION__);
-    radio_hal_event_t event;
-    memset(&event, 0, sizeof(radio_hal_event_t));
+    radio_hal_event_t event = {};
+    RadioMetadataWrapper metadataWrapper(&event.info.metadata);
+
     event.type = RADIO_EVENT_TUNED;
     event.status = HidlUtils::convertHalResult(result);
     HidlUtils::convertProgramInfoFromHal(&event.info, &info);
     onCallback(&event);
-    radio_metadata_deallocate(event.info.metadata);
     return Return<void>();
 }
 
 Return<void> RadioHalHidl::Tuner::afSwitch(const ProgramInfo& info)
 {
     ALOGV("%s IN", __FUNCTION__);
-    radio_hal_event_t event;
-    memset(&event, 0, sizeof(radio_hal_event_t));
+    radio_hal_event_t event = {};
+    RadioMetadataWrapper metadataWrapper(&event.info.metadata);
+
     event.type = RADIO_EVENT_AF_SWITCH;
     HidlUtils::convertProgramInfoFromHal(&event.info, &info);
     onCallback(&event);
-    if (event.info.metadata != NULL) {
-        radio_metadata_deallocate(event.info.metadata);
-    }
     return Return<void>();
 }
 
@@ -319,14 +317,12 @@
                                           const ::android::hardware::hidl_vec<MetaData>& metadata)
 {
     ALOGV("%s IN", __FUNCTION__);
-    radio_hal_event_t event;
-    memset(&event, 0, sizeof(radio_hal_event_t));
+    radio_hal_event_t event = {};
+    RadioMetadataWrapper metadataWrapper(&event.metadata);
+
     event.type = RADIO_EVENT_METADATA;
     HidlUtils::convertMetaDataFromHal(&event.metadata, metadata, channel, subChannel);
     onCallback(&event);
-    if (event.metadata != NULL) {
-        radio_metadata_deallocate(event.info.metadata);
-    }
     return Return<void>();
 }