libaaudio: changes for API council

Removed typedefs like aaudio_sample_rate_t.
Removed use of handles. Just pass back opaque pointers.
Simplified getters in Stream.
Removed getters from Builder.
Updated libaaudio.map.txt.
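
Callers now use the opaque pointer types and value-returning getters
directly. A minimal sketch of the new calling pattern, based on the
updated write_sine example (error checks mostly omitted):

    AAudioStreamBuilder builder = nullptr;
    AAudioStream stream = nullptr;
    aaudio_result_t result = AAudio_createStreamBuilder(&builder);
    if (result != AAUDIO_OK) return result;

    // Builder setters no longer return a result code.
    AAudioStreamBuilder_setSampleRate(builder, 48000);
    AAudioStreamBuilder_setFormat(builder, AAUDIO_FORMAT_PCM_I16);

    result = AAudioStreamBuilder_openStream(builder, &stream);
    if (result == AAUDIO_OK) {
        // Stream getters pass values back directly, not via pointers.
        int32_t actualSampleRate = AAudioStream_getSampleRate(stream);
        aaudio_stream_state_t state = AAudioStream_getState(stream);
        AAudioStream_close(stream);
    }
    AAudioStreamBuilder_delete(builder);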

Test: CTS test_aaudio.cpp
Change-Id: I63eaec3e5a8ecc516cfc1f950f4b4f54df1bd518
Signed-off-by: Phil Burk <philburk@google.com>
diff --git a/media/libaaudio/examples/write_sine/src/write_sine.cpp b/media/libaaudio/examples/write_sine/src/write_sine.cpp
index a16dfdc..9e6268a 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine.cpp
@@ -25,6 +25,8 @@
 
 #define SAMPLE_RATE   48000
 #define NUM_SECONDS   10
+#define NANOS_PER_MICROSECOND ((int64_t)1000)
+#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
 
 static const char *getSharingModeText(aaudio_sharing_mode_t mode) {
     const char *modeText = "unknown";
@@ -51,18 +53,18 @@
     int actualSamplesPerFrame = 0;
     const int requestedSampleRate = SAMPLE_RATE;
     int actualSampleRate = 0;
-    const aaudio_audio_format_t requestedDataFormat = AAUDIO_FORMAT_PCM16;
-    aaudio_audio_format_t actualDataFormat = AAUDIO_FORMAT_PCM16;
+    const aaudio_audio_format_t requestedDataFormat = AAUDIO_FORMAT_PCM_I16;
+    aaudio_audio_format_t actualDataFormat = AAUDIO_FORMAT_PCM_I16;
 
     const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
     aaudio_sharing_mode_t actualSharingMode = AAUDIO_SHARING_MODE_SHARED;
 
-    AAudioStreamBuilder aaudioBuilder = AAUDIO_STREAM_BUILDER_NONE;
-    AAudioStream aaudioStream = AAUDIO_STREAM_NONE;
+    AAudioStreamBuilder aaudioBuilder = nullptr;
+    AAudioStream aaudioStream = nullptr;
     aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNINITIALIZED;
-    aaudio_size_frames_t framesPerBurst = 0;
-    aaudio_size_frames_t framesToPlay = 0;
-    aaudio_size_frames_t framesLeft = 0;
+    int32_t framesPerBurst = 0;
+    int32_t framesToPlay = 0;
+    int32_t framesLeft = 0;
     int32_t xRunCount = 0;
     int16_t *data = nullptr;
 
@@ -82,57 +84,42 @@
     }
 
     // Request stream properties.
-    result = AAudioStreamBuilder_setSampleRate(aaudioBuilder, requestedSampleRate);
-    if (result != AAUDIO_OK) {
-        goto finish;
-    }
-    result = AAudioStreamBuilder_setSamplesPerFrame(aaudioBuilder, requestedSamplesPerFrame);
-    if (result != AAUDIO_OK) {
-        goto finish;
-    }
-    result = AAudioStreamBuilder_setFormat(aaudioBuilder, requestedDataFormat);
-    if (result != AAUDIO_OK) {
-        goto finish;
-    }
-    result = AAudioStreamBuilder_setSharingMode(aaudioBuilder, requestedSharingMode);
-    if (result != AAUDIO_OK) {
-        goto finish;
-    }
+    AAudioStreamBuilder_setSampleRate(aaudioBuilder, requestedSampleRate);
+    AAudioStreamBuilder_setSamplesPerFrame(aaudioBuilder, requestedSamplesPerFrame);
+    AAudioStreamBuilder_setFormat(aaudioBuilder, requestedDataFormat);
+    AAudioStreamBuilder_setSharingMode(aaudioBuilder, requestedSharingMode);
+
 
     // Create an AAudioStream using the Builder.
     result = AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream);
-    printf("aaudioStream 0x%08x\n", aaudioStream);
     if (result != AAUDIO_OK) {
         goto finish;
     }
 
-    result = AAudioStream_getState(aaudioStream, &state);
+    state = AAudioStream_getState(aaudioStream);
     printf("after open, state = %s\n", AAudio_convertStreamStateToText(state));
 
     // Check to see what kind of stream we actually got.
-    result = AAudioStream_getSampleRate(aaudioStream, &actualSampleRate);
+    actualSampleRate = AAudioStream_getSampleRate(aaudioStream);
     printf("SampleRate: requested = %d, actual = %d\n", requestedSampleRate, actualSampleRate);
 
     sineOsc1.setup(440.0, actualSampleRate);
     sineOsc2.setup(660.0, actualSampleRate);
 
-    result = AAudioStream_getSamplesPerFrame(aaudioStream, &actualSamplesPerFrame);
+    actualSamplesPerFrame = AAudioStream_getSamplesPerFrame(aaudioStream);
     printf("SamplesPerFrame: requested = %d, actual = %d\n",
             requestedSamplesPerFrame, actualSamplesPerFrame);
 
-    result = AAudioStream_getSharingMode(aaudioStream, &actualSharingMode);
+    actualSharingMode = AAudioStream_getSharingMode(aaudioStream);
     printf("SharingMode: requested = %s, actual = %s\n",
             getSharingModeText(requestedSharingMode),
             getSharingModeText(actualSharingMode));
 
     // This is the number of frames that are read in one chunk by a DMA controller
     // or a DSP or a mixer.
-    result = AAudioStream_getFramesPerBurst(aaudioStream, &framesPerBurst);
+    framesPerBurst = AAudioStream_getFramesPerBurst(aaudioStream);
     printf("DataFormat: original framesPerBurst = %d\n",framesPerBurst);
-    if (result != AAUDIO_OK) {
-        fprintf(stderr, "ERROR - AAudioStream_getFramesPerBurst() returned %d\n", result);
-        goto finish;
-    }
+
     // Some DMA might use very short bursts of 16 frames. We don't need to write such small
     // buffers. But it helps to use a multiple of the burst size for predictable scheduling.
     while (framesPerBurst < 48) {
@@ -140,7 +127,7 @@
     }
     printf("DataFormat: final framesPerBurst = %d\n",framesPerBurst);
 
-    AAudioStream_getFormat(aaudioStream, &actualDataFormat);
+    actualDataFormat = AAudioStream_getFormat(aaudioStream);
     printf("DataFormat: requested = %d, actual = %d\n", requestedDataFormat, actualDataFormat);
     // TODO handle other data formats
 
@@ -160,7 +147,7 @@
         goto finish;
     }
 
-    result = AAudioStream_getState(aaudioStream, &state);
+    state = AAudioStream_getState(aaudioStream);
     printf("after start, state = %s\n", AAudio_convertStreamStateToText(state));
 
     // Play for a while.
@@ -174,7 +161,7 @@
         }
 
         // Write audio data to the stream.
-        aaudio_nanoseconds_t timeoutNanos = 100 * AAUDIO_NANOS_PER_MILLISECOND;
+        int64_t timeoutNanos = 100 * NANOS_PER_MILLISECOND;
         int minFrames = (framesToPlay < framesPerBurst) ? framesToPlay : framesPerBurst;
         int actual = AAudioStream_write(aaudioStream, data, minFrames, timeoutNanos);
         if (actual < 0) {
@@ -187,7 +174,7 @@
         framesLeft -= actual;
     }
 
-    result = AAudioStream_getXRunCount(aaudioStream, &xRunCount);
+    xRunCount = AAudioStream_getXRunCount(aaudioStream);
     printf("AAudioStream_getXRunCount %d\n", xRunCount);
 
 finish:
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp b/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp
index 7cb14f9..cc7ba5a 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp
@@ -26,14 +26,18 @@
 #include <aaudio/AAudio.h>
 #include "SineGenerator.h"
 
-#define NUM_SECONDS   10
+#define NUM_SECONDS           10
+#define NANOS_PER_MICROSECOND ((int64_t)1000)
+#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
+#define MILLIS_PER_SECOND     1000
+#define NANOS_PER_SECOND      (NANOS_PER_MILLISECOND * MILLIS_PER_SECOND)
 
-#define SHARING_MODE  AAUDIO_SHARING_MODE_EXCLUSIVE
-//#define SHARING_MODE  AAUDIO_SHARING_MODE_SHARED
+//#define SHARING_MODE  AAUDIO_SHARING_MODE_EXCLUSIVE
+#define SHARING_MODE  AAUDIO_SHARING_MODE_SHARED
 
 // Prototype for a callback.
 typedef int audio_callback_proc_t(float *outputBuffer,
-                                     aaudio_size_frames_t numFrames,
+                                     int32_t numFrames,
                                      void *userContext);
 
 static void *SimpleAAudioPlayerThreadProc(void *arg);
@@ -75,33 +79,27 @@
         result = AAudio_createStreamBuilder(&mBuilder);
         if (result != AAUDIO_OK) return result;
 
-        result = AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
-        if (result != AAUDIO_OK) goto finish1;
+        AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
 
         // Open an AAudioStream using the Builder.
         result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
         if (result != AAUDIO_OK) goto finish1;
 
         // Check to see what kind of stream we actually got.
-        result = AAudioStream_getSampleRate(mStream, &mFramesPerSecond);
+        mFramesPerSecond = AAudioStream_getSampleRate(mStream);
         printf("open() mFramesPerSecond = %d\n", mFramesPerSecond);
-        if (result != AAUDIO_OK) goto finish2;
 
-        result = AAudioStream_getSamplesPerFrame(mStream, &mSamplesPerFrame);
+        mSamplesPerFrame = AAudioStream_getSamplesPerFrame(mStream);
         printf("open() mSamplesPerFrame = %d\n", mSamplesPerFrame);
-        if (result != AAUDIO_OK) goto finish2;
 
         {
-            aaudio_size_frames_t bufferCapacity;
-            result = AAudioStream_getBufferCapacity(mStream, &bufferCapacity);
-            if (result != AAUDIO_OK) goto finish2;
+            int32_t bufferCapacity = AAudioStream_getBufferCapacityInFrames(mStream);
             printf("open() got bufferCapacity = %d\n", bufferCapacity);
         }
 
         // This is the number of frames that are read in one chunk by a DMA controller
         // or a DSP or a mixer.
-        result = AAudioStream_getFramesPerBurst(mStream, &mFramesPerBurst);
-        if (result != AAUDIO_OK) goto finish2;
+        mFramesPerBurst = AAudioStream_getFramesPerBurst(mStream);
         // Some DMA might use very short bursts. We don't need to write such small
         // buffers. But it helps to use a multiple of the burst size for predictable scheduling.
         while (mFramesPerBurst < 48) {
@@ -109,11 +107,7 @@
         }
         printf("DataFormat: final framesPerBurst = %d\n",mFramesPerBurst);
 
-        result = AAudioStream_getFormat(mStream, &mDataFormat);
-        if (result != AAUDIO_OK) {
-            fprintf(stderr, "ERROR - AAudioStream_getFormat() returned %d\n", result);
-            goto finish2;
-        }
+        mDataFormat = AAudioStream_getFormat(mStream);
 
         // Allocate a buffer for the audio data.
         mOutputBuffer = new float[mFramesPerBurst * mSamplesPerFrame];
@@ -123,7 +117,7 @@
         }
 
         // If needed allocate a buffer for converting float to int16_t.
-        if (mDataFormat == AAUDIO_FORMAT_PCM16) {
+        if (mDataFormat == AAUDIO_FORMAT_PCM_I16) {
             mConversionBuffer = new int16_t[mFramesPerBurst * mSamplesPerFrame];
             if (mConversionBuffer == nullptr) {
                 fprintf(stderr, "ERROR - could not allocate conversion buffer\n");
@@ -132,23 +126,20 @@
         }
         return result;
 
-     finish2:
-        AAudioStream_close(mStream);
-        mStream = AAUDIO_HANDLE_INVALID;
      finish1:
         AAudioStreamBuilder_delete(mBuilder);
-        mBuilder = AAUDIO_HANDLE_INVALID;
+        mBuilder = nullptr;
         return result;
     }
 
     aaudio_result_t close() {
-        if (mStream != AAUDIO_HANDLE_INVALID) {
+        if (mStream != nullptr) {
             stop();
-            printf("call AAudioStream_close(0x%08x)\n", mStream);  fflush(stdout);
+            printf("call AAudioStream_close(%p)\n", mStream);  fflush(stdout);
             AAudioStream_close(mStream);
-            mStream = AAUDIO_HANDLE_INVALID;
+            mStream = nullptr;
             AAudioStreamBuilder_delete(mBuilder);
-            mBuilder = AAUDIO_HANDLE_INVALID;
+            mBuilder = nullptr;
             delete mOutputBuffer;
             mOutputBuffer = nullptr;
             delete mConversionBuffer;
@@ -160,7 +151,7 @@
     // Start a thread that will call the callback proc.
     aaudio_result_t start() {
         mEnabled = true;
-        aaudio_nanoseconds_t nanosPerBurst = mFramesPerBurst * AAUDIO_NANOS_PER_SECOND
+        int64_t nanosPerBurst = mFramesPerBurst * NANOS_PER_SECOND
                                            / mFramesPerSecond;
         return AAudioStream_createThread(mStream, nanosPerBurst,
                                        SimpleAAudioPlayerThreadProc,
@@ -170,7 +161,7 @@
     // Tell the thread to stop.
     aaudio_result_t stop() {
         mEnabled = false;
-        return AAudioStream_joinThread(mStream, nullptr, 2 * AAUDIO_NANOS_PER_SECOND);
+        return AAudioStream_joinThread(mStream, nullptr, 2 * NANOS_PER_SECOND);
     }
 
     aaudio_result_t callbackLoop() {
@@ -186,8 +177,8 @@
 
         // Give up after several burst periods have passed.
         const int burstsPerTimeout = 8;
-        aaudio_nanoseconds_t nanosPerTimeout =
-                        burstsPerTimeout * mFramesPerBurst * AAUDIO_NANOS_PER_SECOND
+        int64_t nanosPerTimeout =
+                        burstsPerTimeout * mFramesPerBurst * NANOS_PER_SECOND
                         / mFramesPerSecond;
 
         while (mEnabled && result >= 0) {
@@ -213,7 +204,7 @@
             }
         }
 
-        result = AAudioStream_getXRunCount(mStream, &xRunCount);
+        xRunCount = AAudioStream_getXRunCount(mStream);
         printf("AAudioStream_getXRunCount %d\n", xRunCount);
 
         result = AAudioStream_requestStop(mStream);
@@ -226,20 +217,20 @@
     }
 
 private:
-    AAudioStreamBuilder   mBuilder = AAUDIO_HANDLE_INVALID;
-    AAudioStream          mStream = AAUDIO_HANDLE_INVALID;
-    float            *  mOutputBuffer = nullptr;
-    int16_t          *  mConversionBuffer = nullptr;
+    AAudioStreamBuilder   mBuilder = nullptr;
+    AAudioStream          mStream = nullptr;
+    float                *mOutputBuffer = nullptr;
+    int16_t              *mConversionBuffer = nullptr;
 
-    audio_callback_proc_t * mCallbackProc = nullptr;
-    void             *  mUserContext = nullptr;
+    audio_callback_proc_t *mCallbackProc = nullptr;
+    void                 *mUserContext = nullptr;
     aaudio_sharing_mode_t mRequestedSharingMode = SHARING_MODE;
-    int32_t             mSamplesPerFrame = 0;
-    int32_t             mFramesPerSecond = 0;
-    aaudio_size_frames_t  mFramesPerBurst = 0;
-    aaudio_audio_format_t mDataFormat = AAUDIO_FORMAT_PCM16;
+    int32_t               mSamplesPerFrame = 0;
+    int32_t               mFramesPerSecond = 0;
+    int32_t               mFramesPerBurst = 0;
+    aaudio_audio_format_t mDataFormat = AAUDIO_FORMAT_PCM_I16;
 
-    volatile bool       mEnabled = false; // used to request that callback exit its loop
+    volatile bool         mEnabled = false; // used to request that callback exit its loop
 };
 
 static void *SimpleAAudioPlayerThreadProc(void *arg) {
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index dad5285..43b5205 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -20,21 +20,21 @@
 #ifndef AAUDIO_AAUDIO_H
 #define AAUDIO_AAUDIO_H
 
+#include <time.h>
 #include "AAudioDefinitions.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-typedef aaudio_handle_t AAudioStream;
-typedef aaudio_handle_t AAudioStreamBuilder;
+typedef struct AAudioStreamStruct * AAudioStream;
+typedef struct AAudioStreamBuilderStruct *  AAudioStreamBuilder;
 
-#define AAUDIO_STREAM_NONE         ((AAudioStream)AAUDIO_HANDLE_INVALID)
-#define AAUDIO_STREAM_BUILDER_NONE ((AAudioStreamBuilder)AAUDIO_HANDLE_INVALID)
+#define AAUDIO_STREAM_NONE         ((AAudioStream)nullptr)
+#define AAUDIO_STREAM_BUILDER_NONE ((AAudioStreamBuilder)nullptr)
 
-/* AAUDIO_API will probably get defined in a Makefile for a specific platform. */
 #ifndef AAUDIO_API
-#define AAUDIO_API /* for exporting symbols */
+#define AAUDIO_API /* export this symbol */
 #endif
 
 // ============================================================
@@ -42,11 +42,6 @@
 // ============================================================
 
 /**
- * @return time in the same clock domain as the timestamps
- */
-AAUDIO_API aaudio_nanoseconds_t AAudio_getNanoseconds(aaudio_clockid_t clockid);
-
-/**
  * The text is the ASCII symbol corresponding to the returnCode,
  * or an English message saying the returnCode is unrecognized.
  * This is intended for developers to use when debugging.
@@ -76,7 +71,7 @@
  * The deviceId is initially unspecified, meaning that the current default device will be used.
  *
  * The default direction is AAUDIO_DIRECTION_OUTPUT.
- * The default sharing mode is AAUDIO_SHARING_MODE_LEGACY.
+ * The default sharing mode is AAUDIO_SHARING_MODE_SHARED.
  * The data format, samplesPerFrames and sampleRate are unspecified and will be
  * chosen by the device when it is opened.
  *
@@ -86,23 +81,15 @@
 
 /**
  * Request an audio device identified device using an ID.
- * The ID is platform specific.
  * On Android, for example, the ID could be obtained from the Java AudioManager.
  *
  * By default, the primary device will be used.
  *
- * @param builder handle provided by AAudio_createStreamBuilder()
- * @param deviceId platform specific identifier or AAUDIO_DEVICE_UNSPECIFIED
- * @return AAUDIO_OK or a negative error.
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param deviceId device identifier or AAUDIO_DEVICE_UNSPECIFIED
  */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setDeviceId(AAudioStreamBuilder builder,
-                                                     aaudio_device_id_t deviceId);
-/**
- * Passes back requested device ID.
- * @return AAUDIO_OK or a negative error.
- */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getDeviceId(AAudioStreamBuilder builder,
-                                                     aaudio_device_id_t *deviceId);
+AAUDIO_API void AAudioStreamBuilder_setDeviceId(AAudioStreamBuilder builder,
+                                                     int32_t deviceId);
 
 /**
  * Request a sample rate in Hz.
@@ -114,19 +101,10 @@
  * But it is traditionally called "sample rate". Se we use that term.
  *
  * Default is AAUDIO_UNSPECIFIED.
- *
- * @return AAUDIO_OK or a negative error.
- */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder builder,
-                                                       aaudio_sample_rate_t sampleRate);
 
-/**
- * Returns sample rate in Hertz (samples per second).
- * @return AAUDIO_OK or a negative error.
  */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSampleRate(AAudioStreamBuilder builder,
-                                                       aaudio_sample_rate_t *sampleRate);
-
+AAUDIO_API void AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder builder,
+                                                       int32_t sampleRate);
 
 /**
  * Request a number of samples per frame.
@@ -136,98 +114,48 @@
  * Default is AAUDIO_UNSPECIFIED.
  *
  * Note, this quantity is sometimes referred to as "channel count".
- *
- * @return AAUDIO_OK or a negative error.
  */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder builder,
+AAUDIO_API void AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder builder,
                                                    int32_t samplesPerFrame);
 
 /**
- * Note, this quantity is sometimes referred to as "channel count".
- *
- * @param builder handle provided by AAudio_createStreamBuilder()
- * @param samplesPerFrame pointer to a variable to be set to samplesPerFrame.
- * @return AAUDIO_OK or a negative error.
- */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSamplesPerFrame(AAudioStreamBuilder builder,
-                                                   int32_t *samplesPerFrame);
-
-
-/**
  * Request a sample data format, for example AAUDIO_FORMAT_PCM_I16.
  * The application should query for the actual format after the stream is opened.
- *
- * @return AAUDIO_OK or a negative error.
  */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setFormat(AAudioStreamBuilder builder,
+AAUDIO_API void AAudioStreamBuilder_setFormat(AAudioStreamBuilder builder,
                                                    aaudio_audio_format_t format);
 
 /**
- * @return AAUDIO_OK or a negative error.
- */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getFormat(AAudioStreamBuilder builder,
-                                                   aaudio_audio_format_t *format);
-
-/**
  * Request a mode for sharing the device.
  * The requested sharing mode may not be available.
  * So the application should query for the actual mode after the stream is opened.
  *
- * @param builder handle provided by AAudio_createStreamBuilder()
+ * @param builder reference provided by AAudio_createStreamBuilder()
  * @param sharingMode AAUDIO_SHARING_MODE_LEGACY or AAUDIO_SHARING_MODE_EXCLUSIVE
- * @return AAUDIO_OK or a negative error.
  */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder builder,
+AAUDIO_API void AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder builder,
                                                         aaudio_sharing_mode_t sharingMode);
 
 /**
- * Return requested sharing mode.
- * @return AAUDIO_OK or a negative error
- */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSharingMode(AAudioStreamBuilder builder,
-                                                        aaudio_sharing_mode_t *sharingMode);
-
-/**
  * Request the direction for a stream. The default is AAUDIO_DIRECTION_OUTPUT.
  *
- * @param builder handle provided by AAudio_createStreamBuilder()
+ * @param builder reference provided by AAudio_createStreamBuilder()
  * @param direction AAUDIO_DIRECTION_OUTPUT or AAUDIO_DIRECTION_INPUT
- * @return AAUDIO_OK or a negative error.
  */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setDirection(AAudioStreamBuilder builder,
+AAUDIO_API void AAudioStreamBuilder_setDirection(AAudioStreamBuilder builder,
                                                             aaudio_direction_t direction);
 
 /**
- * @param builder handle provided by AAudio_createStreamBuilder()
- * @param direction pointer to a variable to be set to the currently requested direction.
- * @return AAUDIO_OK or a negative error.
- */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getDirection(AAudioStreamBuilder builder,
-                                                            aaudio_direction_t *direction);
-
-/**
  * Set the requested maximum buffer capacity in frames.
  * The final AAudioStream capacity may differ, but will probably be at least this big.
  *
  * Default is AAUDIO_UNSPECIFIED.
  *
- * @param builder handle provided by AAudio_createStreamBuilder()
+ * @param builder reference provided by AAudio_createStreamBuilder()
  * @param frames the desired buffer capacity in frames or AAUDIO_UNSPECIFIED
- * @return AAUDIO_OK or a negative error.
  */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setBufferCapacity(AAudioStreamBuilder builder,
-                                                                 aaudio_size_frames_t frames);
-
-/**
- * Query the requested maximum buffer capacity in frames that was passed to
- * AAudioStreamBuilder_setBufferCapacity().
- *
- * @param builder handle provided by AAudio_createStreamBuilder()
- * @param frames pointer to variable to receive the requested buffer capacity
- * @return AAUDIO_OK or a negative error.
- */
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getBufferCapacity(AAudioStreamBuilder builder,
-                                                                 aaudio_size_frames_t *frames);
+AAUDIO_API void AAudioStreamBuilder_setBufferCapacityInFrames(AAudioStreamBuilder builder,
+                                                                 int32_t frames);
 
 /**
  * Open a stream based on the options in the StreamBuilder.
@@ -235,8 +163,8 @@
  * AAudioStream_close must be called when finished with the stream to recover
  * the memory and to free the associated resources.
  *
- * @param builder handle provided by AAudio_createStreamBuilder()
- * @param stream pointer to a variable to receive the new stream handle
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param stream pointer to a variable to receive the new stream reference
  * @return AAUDIO_OK or a negative error.
  */
 AAUDIO_API aaudio_result_t  AAudioStreamBuilder_openStream(AAudioStreamBuilder builder,
@@ -245,7 +173,7 @@
 /**
  * Delete the resources associated with the StreamBuilder.
  *
- * @param builder handle provided by AAudio_createStreamBuilder()
+ * @param builder reference provided by AAudio_createStreamBuilder()
  * @return AAUDIO_OK or a negative error.
  */
 AAUDIO_API aaudio_result_t  AAudioStreamBuilder_delete(AAudioStreamBuilder builder);
@@ -257,7 +185,7 @@
 /**
  * Free the resources associated with a stream created by AAudioStreamBuilder_openStream()
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return AAUDIO_OK or a negative error.
  */
 AAUDIO_API aaudio_result_t  AAudioStream_close(AAudioStream stream);
@@ -268,7 +196,7 @@
  * Otherwise it will underflow.
  * After this call the state will be in AAUDIO_STREAM_STATE_STARTING or AAUDIO_STREAM_STATE_STARTED.
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return AAUDIO_OK or a negative error.
  */
 AAUDIO_API aaudio_result_t  AAudioStream_requestStart(AAudioStream stream);
@@ -279,7 +207,7 @@
  * Use AAudioStream_Start() to resume playback after a pause.
  * After this call the state will be in AAUDIO_STREAM_STATE_PAUSING or AAUDIO_STREAM_STATE_PAUSED.
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return AAUDIO_OK or a negative error.
  */
 AAUDIO_API aaudio_result_t  AAudioStream_requestPause(AAudioStream stream);
@@ -291,7 +219,7 @@
  * Frame counters are not reset by a flush. They may be advanced.
  * After this call the state will be in AAUDIO_STREAM_STATE_FLUSHING or AAUDIO_STREAM_STATE_FLUSHED.
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return AAUDIO_OK or a negative error.
  */
 AAUDIO_API aaudio_result_t  AAudioStream_requestFlush(AAudioStream stream);
@@ -301,23 +229,29 @@
  * The stream will stop after all of the data currently buffered has been played.
  * After this call the state will be in AAUDIO_STREAM_STATE_STOPPING or AAUDIO_STREAM_STATE_STOPPED.
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return AAUDIO_OK or a negative error.
  */
 AAUDIO_API aaudio_result_t  AAudioStream_requestStop(AAudioStream stream);
 
 /**
- * Query the current state, eg. AAUDIO_STREAM_STATE_PAUSING
+ * Query the current state of the client, eg. AAUDIO_STREAM_STATE_PAUSING
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * This function will immediately return the state without updating the state.
+ * If you want to update the client state based on the server state then
+ * call AAudioStream_waitForStateChange() with currentState
+ * set to AAUDIO_STREAM_STATE_UNKNOWN and a zero timeout.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @param state pointer to a variable that will be set to the current state
- * @return AAUDIO_OK or a negative error.
  */
-AAUDIO_API aaudio_result_t AAudioStream_getState(AAudioStream stream, aaudio_stream_state_t *state);
+AAUDIO_API aaudio_stream_state_t AAudioStream_getState(AAudioStream stream);
 
 /**
  * Wait until the current state no longer matches the input state.
  *
+ * This will update the current client state.
+ *
  * <pre><code>
  * aaudio_stream_state_t currentState;
  * aaudio_result_t result = AAudioStream_getState(stream, &currentState);
@@ -327,7 +261,7 @@
  * }
  * </code></pre>
  *
- * @param stream A handle provided by AAudioStreamBuilder_openStream()
+ * @param stream A reference provided by AAudioStreamBuilder_openStream()
  * @param inputState The state we want to avoid.
  * @param nextState Pointer to a variable that will be set to the new state.
  * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
@@ -336,7 +270,7 @@
 AAUDIO_API aaudio_result_t AAudioStream_waitForStateChange(AAudioStream stream,
                                             aaudio_stream_state_t inputState,
                                             aaudio_stream_state_t *nextState,
-                                            aaudio_nanoseconds_t timeoutNanoseconds);
+                                            int64_t timeoutNanoseconds);
 
 // ============================================================
 // Stream I/O
@@ -358,12 +292,12 @@
  * @param buffer The address of the first sample.
  * @param numFrames Number of frames to read. Only complete frames will be written.
  * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
- * @return The number of frames actually written or a negative error.
+ * @return The number of frames actually read or a negative error.
  */
 AAUDIO_API aaudio_result_t AAudioStream_read(AAudioStream stream,
                                void *buffer,
-                               aaudio_size_frames_t numFrames,
-                               aaudio_nanoseconds_t timeoutNanoseconds);
+                               int32_t numFrames,
+                               int64_t timeoutNanoseconds);
 
 /**
  * Write data to the stream.
@@ -385,8 +319,8 @@
  */
 AAUDIO_API aaudio_result_t AAudioStream_write(AAudioStream stream,
                                const void *buffer,
-                               aaudio_size_frames_t numFrames,
-                               aaudio_nanoseconds_t timeoutNanoseconds);
+                               int32_t numFrames,
+                               int64_t timeoutNanoseconds);
 
 
 // ============================================================
@@ -401,6 +335,11 @@
  *
  * Only one thread may be associated with a stream.
  *
+ * If you are using multiple streams then we recommend that you only do
+ * blocking reads or writes on one stream. You can do non-blocking I/O on the
+ * other streams by setting the timeout to zero.
+ * This thread should be created for the stream that you will block on.
+ *
  * Note that this API is in flux.
  *
  * @param stream A stream created using AAudioStreamBuilder_openStream().
@@ -410,13 +349,12 @@
  * @return AAUDIO_OK or a negative error.
  */
 AAUDIO_API aaudio_result_t AAudioStream_createThread(AAudioStream stream,
-                                     aaudio_nanoseconds_t periodNanoseconds,
+                                     int64_t periodNanoseconds,
                                      aaudio_audio_thread_proc_t *threadProc,
                                      void *arg);
 
 /**
  * Wait until the thread exits or an error occurs.
- * The thread handle will be deleted.
  *
  * @param stream A stream created using AAudioStreamBuilder_openStream().
  * @param returnArg a pointer to a variable to receive the return value
@@ -425,7 +363,7 @@
  */
 AAUDIO_API aaudio_result_t AAudioStream_joinThread(AAudioStream stream,
                                    void **returnArg,
-                                   aaudio_nanoseconds_t timeoutNanoseconds);
+                                   int64_t timeoutNanoseconds);
 
 // ============================================================
 // Stream - queries
@@ -435,49 +373,51 @@
 /**
  * This can be used to adjust the latency of the buffer by changing
  * the threshold where blocking will occur.
- * By combining this with AAudioStream_getUnderrunCount(), the latency can be tuned
+ * By combining this with AAudioStream_getXRunCount(), the latency can be tuned
  * at run-time for each device.
  *
- * This cannot be set higher than AAudioStream_getBufferCapacity().
+ * This cannot be set higher than AAudioStream_getBufferCapacityInFrames().
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * Note that you will probably not get the exact size you request.
+ * Call AAudioStream_getBufferSizeInFrames() to see what the actual final size is.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @param requestedFrames requested number of frames that can be filled without blocking
- * @param actualFrames receives final number of frames
- * @return AAUDIO_OK or a negative error
+ * @return actual buffer size in frames or a negative error
  */
-AAUDIO_API aaudio_result_t AAudioStream_setBufferSize(AAudioStream stream,
-                                                      aaudio_size_frames_t requestedFrames,
-                                                      aaudio_size_frames_t *actualFrames);
+AAUDIO_API aaudio_result_t AAudioStream_setBufferSizeInFrames(AAudioStream stream,
+                                                      int32_t requestedFrames);
 
 /**
  * Query the maximum number of frames that can be filled without blocking.
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
- * @param frames pointer to variable to receive the buffer size
- * @return AAUDIO_OK or a negative error.
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return buffer size in frames.
  */
-AAUDIO_API aaudio_result_t AAudioStream_getBufferSize(AAudioStream stream,
-                                                      aaudio_size_frames_t *frames);
+AAUDIO_API int32_t AAudioStream_getBufferSizeInFrames(AAudioStream stream);
 
 /**
- * Query the number of frames that are read or written by the endpoint at one time.
+ * Query the number of frames that the application should read or write at
+ * one time for optimal performance. It is OK if an application writes
+ * a different number of frames. But the buffer size may need to be larger
+ * in order to avoid underruns or overruns.
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
- * @param frames pointer to variable to receive the burst size
- * @return AAUDIO_OK or a negative error.
+ * Note that this may or may not match the actual device burst size.
+ * For some endpoints, the burst size can vary dynamically.
+ * But these tend to be devices with high latency.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return burst size
  */
-AAUDIO_API aaudio_result_t AAudioStream_getFramesPerBurst(AAudioStream stream,
-                                                          aaudio_size_frames_t *frames);
+AAUDIO_API int32_t AAudioStream_getFramesPerBurst(AAudioStream stream);
 
 /**
  * Query maximum buffer capacity in frames.
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
- * @param frames pointer to variable to receive the buffer capacity
- * @return AAUDIO_OK or a negative error.
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return  the buffer capacity in frames
  */
-AAUDIO_API aaudio_result_t AAudioStream_getBufferCapacity(AAudioStream stream,
-                                                          aaudio_size_frames_t *frames);
+AAUDIO_API int32_t AAudioStream_getBufferCapacityInFrames(AAudioStream stream);
 
 /**
  * An XRun is an Underrun or an Overrun.
@@ -488,90 +428,75 @@
  *
  * An underrun or overrun can cause an audible "pop" or "glitch".
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
- * @param xRunCount pointer to variable to receive the underrun or overrun count
- * @return AAUDIO_OK or a negative error.
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return the underrun or overrun count
  */
-AAUDIO_API aaudio_result_t AAudioStream_getXRunCount(AAudioStream stream, int32_t *xRunCount);
+AAUDIO_API int32_t AAudioStream_getXRunCount(AAudioStream stream);
 
 /**
- * @param stream handle provided by AAudioStreamBuilder_openStream()
- * @param sampleRate pointer to variable to receive the actual sample rate
- * @return AAUDIO_OK or a negative error.
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return actual sample rate
  */
-AAUDIO_API aaudio_result_t AAudioStream_getSampleRate(AAudioStream stream,
-                                                      aaudio_sample_rate_t *sampleRate);
+AAUDIO_API int32_t AAudioStream_getSampleRate(AAudioStream stream);
 
 /**
  * The samplesPerFrame is also known as channelCount.
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
- * @param samplesPerFrame pointer to variable to receive the actual samples per frame
- * @return AAUDIO_OK or a negative error.
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return actual samples per frame
  */
-AAUDIO_API aaudio_result_t AAudioStream_getSamplesPerFrame(AAudioStream stream,
-                                                           int32_t *samplesPerFrame);
+AAUDIO_API int32_t AAudioStream_getSamplesPerFrame(AAudioStream stream);
 
 /**
- * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @param deviceId pointer to variable to receive the actual device ID
  * @return AAUDIO_OK or a negative error.
  */
-AAUDIO_API aaudio_result_t AAudioStream_getDeviceId(AAudioStream stream,
-                                                    aaudio_device_id_t *deviceId);
+AAUDIO_API int32_t AAudioStream_getDeviceId(AAudioStream stream,
+                                                    int32_t *deviceId);
 
 /**
- * @param stream handle provided by AAudioStreamBuilder_openStream()
- * @param format pointer to variable to receive the actual data format
- * @return AAUDIO_OK or a negative error.
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return actual data format
  */
-AAUDIO_API aaudio_result_t AAudioStream_getFormat(AAudioStream stream,
-                                                  aaudio_audio_format_t *format);
+AAUDIO_API aaudio_audio_format_t AAudioStream_getFormat(AAudioStream stream);
 
 /**
  * Provide actual sharing mode.
- * @param stream handle provided by AAudioStreamBuilder_openStream()
- * @param sharingMode pointer to variable to receive the actual sharing mode
- * @return AAUDIO_OK or a negative error.
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return  actual sharing mode
  */
-AAUDIO_API aaudio_result_t AAudioStream_getSharingMode(AAudioStream stream,
-                                        aaudio_sharing_mode_t *sharingMode);
+AAUDIO_API aaudio_sharing_mode_t AAudioStream_getSharingMode(AAudioStream stream);
 
 /**
- * @param stream handle provided by AAudioStreamBuilder_openStream()
- * @param direction pointer to a variable to be set to the current direction.
- * @return AAUDIO_OK or a negative error.
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return direction
  */
-AAUDIO_API aaudio_result_t AAudioStream_getDirection(AAudioStream stream,
-                                                     aaudio_direction_t *direction);
+AAUDIO_API aaudio_direction_t AAudioStream_getDirection(AAudioStream stream);
 
 /**
  * Passes back the number of frames that have been written since the stream was created.
  * For an output stream, this will be advanced by the application calling write().
- * For an input stream, this will be advanced by the device or service.
+ * For an input stream, this will be advanced by the endpoint.
  *
  * The frame position is monotonically increasing.
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
- * @param frames pointer to variable to receive the frames written
- * @return AAUDIO_OK or a negative error.
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return frames written
  */
-AAUDIO_API aaudio_result_t AAudioStream_getFramesWritten(AAudioStream stream,
-                                                   aaudio_position_frames_t *frames);
+AAUDIO_API int64_t AAudioStream_getFramesWritten(AAudioStream stream);
 
 /**
  * Passes back the number of frames that have been read since the stream was created.
- * For an output stream, this will be advanced by the device or service.
+ * For an output stream, this will be advanced by the endpoint.
  * For an input stream, this will be advanced by the application calling read().
  *
  * The frame position is monotonically increasing.
  *
- * @param stream handle provided by AAudioStreamBuilder_openStream()
- * @param frames pointer to variable to receive the frames written
- * @return AAUDIO_OK or a negative error.
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return frames read
  */
-AAUDIO_API aaudio_result_t AAudioStream_getFramesRead(AAudioStream stream,
-                                                      aaudio_position_frames_t *frames);
+AAUDIO_API int64_t AAudioStream_getFramesRead(AAudioStream stream);
 
 /**
  * Passes back the time at which a particular frame was presented.
@@ -589,16 +514,16 @@
  *
  * The position and time passed back are monotonically increasing.
  *
- * @param stream A handle provided by AAudioStreamBuilder_openStream()
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @param clockid AAUDIO_CLOCK_MONOTONIC or AAUDIO_CLOCK_BOOTTIME
  * @param framePosition pointer to a variable to receive the position
  * @param timeNanoseconds pointer to a variable to receive the time
  * @return AAUDIO_OK or a negative error
  */
 AAUDIO_API aaudio_result_t AAudioStream_getTimestamp(AAudioStream stream,
-                                      aaudio_clockid_t clockid,
-                                      aaudio_position_frames_t *framePosition,
-                                      aaudio_nanoseconds_t *timeNanoseconds);
+                                      clockid_t clockid,
+                                      int64_t *framePosition,
+                                      int64_t *timeNanoseconds);
 
 #ifdef __cplusplus
 }
diff --git a/media/libaaudio/include/aaudio/AAudioDefinitions.h b/media/libaaudio/include/aaudio/AAudioDefinitions.h
index 5b72b94..5b7b819 100644
--- a/media/libaaudio/include/aaudio/AAudioDefinitions.h
+++ b/media/libaaudio/include/aaudio/AAudioDefinitions.h
@@ -23,25 +23,7 @@
 extern "C" {
 #endif
 
-typedef int32_t  aaudio_handle_t; // negative handles are error codes
 typedef int32_t  aaudio_result_t;
-/**
- * A platform specific identifier for a device.
- */
-typedef int32_t  aaudio_device_id_t;
-typedef int32_t  aaudio_sample_rate_t;
-/** This is used for small quantities such as the number of frames in a buffer. */
-typedef int32_t  aaudio_size_frames_t;
-/** This is used for small quantities such as the number of bytes in a frame. */
-typedef int32_t  aaudio_size_bytes_t;
-/**
- * This is used for large quantities, such as the number of frames that have
- * been played since a stream was started.
- * At 48000 Hz, a 32-bit integer would wrap around in just over 12 hours.
- */
-typedef int64_t  aaudio_position_frames_t;
-
-typedef int64_t  aaudio_nanoseconds_t;
 
 /**
  * This is used to represent a value that has not been specified.
@@ -50,18 +32,11 @@
  * and would accept whatever it was given.
  */
 #define AAUDIO_UNSPECIFIED           0
-#define AAUDIO_DEVICE_UNSPECIFIED    ((aaudio_device_id_t) -1)
-#define AAUDIO_NANOS_PER_MICROSECOND ((int64_t)1000)
-#define AAUDIO_NANOS_PER_MILLISECOND (AAUDIO_NANOS_PER_MICROSECOND * 1000)
-#define AAUDIO_MILLIS_PER_SECOND     1000
-#define AAUDIO_NANOS_PER_SECOND      (AAUDIO_NANOS_PER_MILLISECOND * AAUDIO_MILLIS_PER_SECOND)
-
-#define AAUDIO_HANDLE_INVALID     ((aaudio_handle_t)-1)
+#define AAUDIO_DEVICE_UNSPECIFIED    ((int32_t) -1)
 
 enum aaudio_direction_t {
     AAUDIO_DIRECTION_OUTPUT,
-    AAUDIO_DIRECTION_INPUT,
-    AAUDIO_DIRECTION_COUNT // This should always be last.
+    AAUDIO_DIRECTION_INPUT
 };
 
 enum aaudio_audio_format_t {
@@ -73,11 +48,6 @@
     AAUDIO_FORMAT_PCM_I32
 };
 
-// TODO These are deprecated. Remove these aliases once all references are replaced.
-#define AAUDIO_FORMAT_PCM16    AAUDIO_FORMAT_PCM_I16
-#define AAUDIO_FORMAT_PCM824   AAUDIO_FORMAT_PCM_I8_24
-#define AAUDIO_FORMAT_PCM32    AAUDIO_FORMAT_PCM_I32
-
 enum {
     AAUDIO_OK,
     AAUDIO_ERROR_BASE = -900, // TODO review
@@ -102,15 +72,10 @@
     AAUDIO_ERROR_NO_SERVICE
 };
 
-typedef enum {
-    AAUDIO_CLOCK_MONOTONIC, // Clock since booted, pauses when CPU is sleeping.
-    AAUDIO_CLOCK_BOOTTIME,  // Clock since booted, runs all the time.
-    AAUDIO_CLOCK_COUNT // This should always be last.
-} aaudio_clockid_t;
-
 typedef enum
 {
     AAUDIO_STREAM_STATE_UNINITIALIZED = 0,
+    AAUDIO_STREAM_STATE_UNKNOWN,
     AAUDIO_STREAM_STATE_OPEN,
     AAUDIO_STREAM_STATE_STARTING,
     AAUDIO_STREAM_STATE_STARTED,
@@ -135,9 +100,7 @@
      * Multiple applications will be mixed by the AAudio Server.
      * This will have higher latency than the EXCLUSIVE mode.
      */
-    AAUDIO_SHARING_MODE_SHARED,
-
-    AAUDIO_SHARING_MODE_COUNT // This should always be last.
+    AAUDIO_SHARING_MODE_SHARED
 } aaudio_sharing_mode_t;
 
 #ifdef __cplusplus
diff --git a/media/libaaudio/libaaudio.map.txt b/media/libaaudio/libaaudio.map.txt
index ecae991..a9e9109 100644
--- a/media/libaaudio/libaaudio.map.txt
+++ b/media/libaaudio/libaaudio.map.txt
@@ -1,20 +1,15 @@
 LIBAAUDIO {
   global:
-    AAudio_getNanoseconds;
     AAudio_convertResultToText;
     AAudio_convertStreamStateToText;
     AAudio_createStreamBuilder;
     AAudioStreamBuilder_setDeviceId;
     AAudioStreamBuilder_setSampleRate;
-    AAudioStreamBuilder_getSampleRate;
     AAudioStreamBuilder_setSamplesPerFrame;
-    AAudioStreamBuilder_getSamplesPerFrame;
     AAudioStreamBuilder_setFormat;
-    AAudioStreamBuilder_getFormat;
     AAudioStreamBuilder_setSharingMode;
-    AAudioStreamBuilder_getSharingMode;
     AAudioStreamBuilder_setDirection;
-    AAudioStreamBuilder_getDirection;
+    AAudioStreamBuilder_setBufferCapacityInFrames;
     AAudioStreamBuilder_openStream;
     AAudioStreamBuilder_delete;
     AAudioStream_close;
@@ -28,13 +23,14 @@
     AAudioStream_write;
     AAudioStream_createThread;
     AAudioStream_joinThread;
-    AAudioStream_setBufferSize;
-    AAudioStream_getBufferSize;
+    AAudioStream_setBufferSizeInFrames;
+    AAudioStream_getBufferSizeInFrames;
     AAudioStream_getFramesPerBurst;
-    AAudioStream_getBufferCapacity;
+    AAudioStream_getBufferCapacityInFrames;
     AAudioStream_getXRunCount;
     AAudioStream_getSampleRate;
     AAudioStream_getSamplesPerFrame;
+    AAudioStream_getDeviceId;
     AAudioStream_getFormat;
     AAudioStream_getSharingMode;
     AAudioStream_getDirection;
diff --git a/media/libaaudio/scripts/convert_typedefs_int32.sh b/media/libaaudio/scripts/convert_typedefs_int32.sh
new file mode 100755
index 0000000..7bdbe3a
--- /dev/null
+++ b/media/libaaudio/scripts/convert_typedefs_int32.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+echo "Use SED to convert typedefs in AAudio API"
+
+echo "Top is ${ANDROID_BUILD_TOP}"
+LIBAAUDIO_DIR=${ANDROID_BUILD_TOP}/frameworks/av/media/libaaudio
+echo "LIBAAUDIO_DIR is ${LIBAAUDIO_DIR}"
+OBOESERVICE_DIR=${ANDROID_BUILD_TOP}/frameworks/av/services/oboeservice
+echo "OBOESERVICE_DIR is ${OBOESERVICE_DIR}"
+OBOETEST_DIR=${ANDROID_BUILD_TOP}/cts/tests/tests/nativemedia/aaudio/src/
+echo "OBOETEST_DIR is ${OBOETEST_DIR}"
+
+function convertPathPattern {
+    path=$1
+    pattern=$2
+    find $path -type f  -name $pattern -exec sed -i -f ${LIBAAUDIO_DIR}/scripts/typedefs_to_int32.sed {} \;
+}
+
+function convertPath {
+    path=$1
+    convertPathPattern $1 '*.cpp'
+    convertPathPattern $1 '*.h'
+}
+
+convertPath ${LIBAAUDIO_DIR}
+convertPath ${OBOESERVICE_DIR}
+convertPathPattern ${OBOETEST_DIR} test_aaudio.cpp
+
diff --git a/media/libaaudio/scripts/revert_all_aaudio.sh b/media/libaaudio/scripts/revert_all_aaudio.sh
index de3fa7a..19c7f81 100755
--- a/media/libaaudio/scripts/revert_all_aaudio.sh
+++ b/media/libaaudio/scripts/revert_all_aaudio.sh
@@ -1,27 +1,18 @@
 #!/bin/bash
 
-echo "Revert Oboe names to AAudio names"
+echo "Revert typedefs"
 
 echo "Top is ${ANDROID_BUILD_TOP}"
-LIBOBOE_DIR=${ANDROID_BUILD_TOP}/frameworks/av/media/liboboe
-echo "LIBOBOE_DIR is ${LIBOBOE_DIR}"
+LIBAAUDIO_DIR=${ANDROID_BUILD_TOP}/frameworks/av/media/libaaudio
+echo "LIBAAUDIO_DIR is ${LIBAAUDIO_DIR}"
 OBOESERVICE_DIR=${ANDROID_BUILD_TOP}/frameworks/av/services/oboeservice
 echo "OBOESERVICE_DIR is ${OBOESERVICE_DIR}"
 OBOETEST_DIR=${ANDROID_BUILD_TOP}/cts/tests/tests/nativemedia/aaudio/src/
 echo "OBOETEST_DIR is ${OBOETEST_DIR}"
 
-git checkout -- ${LIBOBOE_DIR}/examples
-git checkout -- ${LIBOBOE_DIR}/include
-git checkout -- ${LIBOBOE_DIR}/src
-git checkout -- ${LIBOBOE_DIR}/tests
-git checkout -- ${LIBOBOE_DIR}/Android.bp
-git checkout -- ${LIBOBOE_DIR}/README.md
-git checkout -- ${LIBOBOE_DIR}/liboboe.map.txt
+git checkout -- ${LIBAAUDIO_DIR}/examples
+git checkout -- ${LIBAAUDIO_DIR}/include
+git checkout -- ${LIBAAUDIO_DIR}/src
+git checkout -- ${LIBAAUDIO_DIR}/tests
 git checkout -- ${OBOESERVICE_DIR}
-git checkout -- ${OBOETEST_DIR}
 
-rm -rf ${LIBOBOE_DIR}/include/aaudio
-
-find . -name "*aaudio*.cpp" -print -delete
-find . -name "*AAudio*.cpp" -print -delete
-find . -name "*AAudio*.h"   -print -delete
diff --git a/media/libaaudio/scripts/typedefs_to_int32.sed b/media/libaaudio/scripts/typedefs_to_int32.sed
new file mode 100644
index 0000000..392c9a0
--- /dev/null
+++ b/media/libaaudio/scripts/typedefs_to_int32.sed
@@ -0,0 +1,8 @@
+s/aaudio_device_id_t/int32_t/g
+s/aaudio_sample_rate_t/int32_t/g
+s/aaudio_size_frames_t/int32_t/g
+s/aaudio_size_bytes_t/int32_t/g
+s/aaudio_sample_rate_t/int32_t/g
+
+s/aaudio_position_frames_t/int64_t/g
+s/aaudio_nanoseconds_t/int64_t/g
diff --git a/media/libaaudio/src/binding/AAudioServiceDefinitions.h b/media/libaaudio/src/binding/AAudioServiceDefinitions.h
index ca637ef..b58d170 100644
--- a/media/libaaudio/src/binding/AAudioServiceDefinitions.h
+++ b/media/libaaudio/src/binding/AAudioServiceDefinitions.h
@@ -44,6 +44,10 @@
 
 namespace aaudio {
 
+typedef int32_t  aaudio_handle_t;
+
+#define AAUDIO_HANDLE_INVALID  ((aaudio_handle_t) -1)
+
 enum aaudio_commands_t {
     OPEN_STREAM = IBinder::FIRST_CALL_TRANSACTION,
     CLOSE_STREAM,
@@ -57,9 +61,9 @@
 
 // TODO Expand this to include all the open parameters.
 typedef struct AAudioServiceStreamInfo_s {
-    int32_t             deviceId;
-    int32_t             samplesPerFrame;  // number of channels
-    aaudio_sample_rate_t  sampleRate;
+    int32_t               deviceId;
+    int32_t               samplesPerFrame;  // number of channels
+    int32_t               sampleRate;
     aaudio_audio_format_t audioFormat;
 } AAudioServiceStreamInfo;
 
diff --git a/media/libaaudio/src/binding/AAudioServiceMessage.h b/media/libaaudio/src/binding/AAudioServiceMessage.h
index 16cb5eb..cc77d59 100644
--- a/media/libaaudio/src/binding/AAudioServiceMessage.h
+++ b/media/libaaudio/src/binding/AAudioServiceMessage.h
@@ -23,12 +23,12 @@
 
 namespace aaudio {
 
-// TODO move this an "include" folder for the service.
+// TODO move this to an "include" folder for the service.
 
 struct AAudioMessageTimestamp {
-    aaudio_position_frames_t position;
-    int64_t                deviceOffset; // add to client position to get device position
-    aaudio_nanoseconds_t     timestamp;
+    int64_t    position;
+    int64_t    deviceOffset; // add to client position to get device position
+    int64_t    timestamp;
 };
 
 typedef enum aaudio_service_event_e : uint32_t {
@@ -41,8 +41,8 @@
 
 struct AAudioMessageEvent {
     aaudio_service_event_t event;
-    int32_t data1;
-    int64_t data2;
+    int32_t                data1;
+    int64_t                data2;
 };
 
 typedef struct AAudioServiceMessage_s {
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.h b/media/libaaudio/src/binding/AAudioStreamConfiguration.h
index efcdae8..57b1c59 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.h
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.h
@@ -34,19 +34,19 @@
     AAudioStreamConfiguration();
     virtual ~AAudioStreamConfiguration();
 
-    aaudio_device_id_t getDeviceId() const {
+    int32_t getDeviceId() const {
         return mDeviceId;
     }
 
-    void setDeviceId(aaudio_device_id_t deviceId) {
+    void setDeviceId(int32_t deviceId) {
         mDeviceId = deviceId;
     }
 
-    aaudio_sample_rate_t getSampleRate() const {
+    int32_t getSampleRate() const {
         return mSampleRate;
     }
 
-    void setSampleRate(aaudio_sample_rate_t sampleRate) {
+    void setSampleRate(int32_t sampleRate) {
         mSampleRate = sampleRate;
     }
 
@@ -66,11 +66,11 @@
         mAudioFormat = audioFormat;
     }
 
-    aaudio_size_frames_t getBufferCapacity() const {
+    int32_t getBufferCapacity() const {
         return mBufferCapacity;
     }
 
-    void setBufferCapacity(aaudio_size_frames_t frames) {
+    void setBufferCapacity(int32_t frames) {
         mBufferCapacity = frames;
     }
 
@@ -83,11 +83,11 @@
     void dump();
 
 protected:
-    aaudio_device_id_t    mDeviceId        = AAUDIO_DEVICE_UNSPECIFIED;
-    aaudio_sample_rate_t  mSampleRate      = AAUDIO_UNSPECIFIED;
+    int32_t               mDeviceId        = AAUDIO_DEVICE_UNSPECIFIED;
+    int32_t               mSampleRate      = AAUDIO_UNSPECIFIED;
     int32_t               mSamplesPerFrame = AAUDIO_UNSPECIFIED;
     aaudio_audio_format_t mAudioFormat     = AAUDIO_FORMAT_UNSPECIFIED;
-    aaudio_size_frames_t  mBufferCapacity  = AAUDIO_UNSPECIFIED;
+    int32_t               mBufferCapacity  = AAUDIO_UNSPECIFIED;
 };
 
 } /* namespace aaudio */
diff --git a/media/libaaudio/src/binding/IAAudioService.cpp b/media/libaaudio/src/binding/IAAudioService.cpp
index 899ebc0..c21033e 100644
--- a/media/libaaudio/src/binding/IAAudioService.cpp
+++ b/media/libaaudio/src/binding/IAAudioService.cpp
@@ -18,12 +18,15 @@
 
 #include "binding/AudioEndpointParcelable.h"
 #include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioServiceDefinitions.h"
 #include "binding/AAudioStreamConfiguration.h"
 #include "binding/IAAudioService.h"
 #include "utility/AAudioUtilities.h"
 
 namespace android {
 
+using aaudio::aaudio_handle_t;
+
 /**
  * This is used by the AAudio Client to talk to the AAudio Service.
  *
@@ -137,7 +140,7 @@
     }
 
     virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle, pid_t clientThreadId,
-                                              aaudio_nanoseconds_t periodNanoseconds)
+                                              int64_t periodNanoseconds)
     override {
         Parcel data, reply;
         // send command
@@ -182,11 +185,11 @@
 
 status_t BnAAudioService::onTransact(uint32_t code, const Parcel& data,
                                         Parcel* reply, uint32_t flags) {
-    AAudioStream stream;
+    aaudio_handle_t stream;
     aaudio::AAudioStreamRequest request;
     aaudio::AAudioStreamConfiguration configuration;
     pid_t pid;
-    aaudio_nanoseconds_t nanoseconds;
+    int64_t nanoseconds;
     aaudio_result_t result;
     ALOGV("BnAAudioService::onTransact(%i) %i", code, flags);
     data.checkInterface(this);
diff --git a/media/libaaudio/src/binding/IAAudioService.h b/media/libaaudio/src/binding/IAAudioService.h
index f3b297e..53c3b45 100644
--- a/media/libaaudio/src/binding/IAAudioService.h
+++ b/media/libaaudio/src/binding/IAAudioService.h
@@ -29,7 +29,6 @@
 #include "binding/AAudioStreamRequest.h"
 #include "binding/AAudioStreamConfiguration.h"
 
-
 namespace android {
 
 // Interface (our AIDL) - Shared by server and client
@@ -43,39 +42,39 @@
      * @param configuration contains information about the created stream
      * @return handle to the stream or a negative error
      */
-    virtual aaudio_handle_t openStream(aaudio::AAudioStreamRequest &request,
+    virtual aaudio::aaudio_handle_t openStream(aaudio::AAudioStreamRequest &request,
                                      aaudio::AAudioStreamConfiguration &configuration) = 0;
 
-    virtual aaudio_result_t closeStream(aaudio_handle_t streamHandle) = 0;
+    virtual aaudio_result_t closeStream(aaudio::aaudio_handle_t streamHandle) = 0;
 
     /* Get an immutable description of the in-memory queues
     * used to communicate with the underlying HAL or Service.
     */
-    virtual aaudio_result_t getStreamDescription(aaudio_handle_t streamHandle,
+    virtual aaudio_result_t getStreamDescription(aaudio::aaudio_handle_t streamHandle,
                                                aaudio::AudioEndpointParcelable &parcelable) = 0;
 
     /**
      * Start the flow of data.
      */
-    virtual aaudio_result_t startStream(aaudio_handle_t streamHandle) = 0;
+    virtual aaudio_result_t startStream(aaudio::aaudio_handle_t streamHandle) = 0;
 
     /**
      * Stop the flow of data such that start() can resume without loss of data.
      */
-    virtual aaudio_result_t pauseStream(aaudio_handle_t streamHandle) = 0;
+    virtual aaudio_result_t pauseStream(aaudio::aaudio_handle_t streamHandle) = 0;
 
     /**
      *  Discard any data held by the underlying HAL or Service.
      */
-    virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle) = 0;
+    virtual aaudio_result_t flushStream(aaudio::aaudio_handle_t streamHandle) = 0;
 
     /**
      * Manage the specified thread as a low latency audio thread.
      */
-    virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle, pid_t clientThreadId,
-                                              aaudio_nanoseconds_t periodNanoseconds) = 0;
+    virtual aaudio_result_t registerAudioThread(aaudio::aaudio_handle_t streamHandle, pid_t clientThreadId,
+                                              int64_t periodNanoseconds) = 0;
 
-    virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
+    virtual aaudio_result_t unregisterAudioThread(aaudio::aaudio_handle_t streamHandle,
                                                 pid_t clientThreadId) = 0;
 };
 
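For reference, a minimal sketch of how a client drives this interface after the change; it assumes a valid sp<IAAudioService> (as obtained via getAAudioService() in AudioStreamInternal.cpp below), and the request, configuration, and period variables are illustrative only:

    // Sketch, not part of the patch: handles are aaudio::aaudio_handle_t, periods are plain int64_t.
    aaudio::AAudioStreamRequest request;
    aaudio::AAudioStreamConfiguration configuration;
    aaudio::aaudio_handle_t handle = service->openStream(request, configuration);
    if (handle < 0) {
        return static_cast<aaudio_result_t>(handle);  // a negative handle doubles as an error code
    }
    service->startStream(handle);
    service->registerAudioThread(handle, gettid(), periodNanoseconds);
    // ... stream runs ...
    service->unregisterAudioThread(handle, gettid());
    service->pauseStream(handle);
    service->closeStream(handle);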
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index 5cd9782..47c4774 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -167,8 +167,8 @@
     return mDownDataQueue->getWriteCounter();
 }
 
-aaudio_size_frames_t AudioEndpoint::setBufferSizeInFrames(aaudio_size_frames_t requestedFrames,
-                                            aaudio_size_frames_t *actualFrames)
+int32_t AudioEndpoint::setBufferSizeInFrames(int32_t requestedFrames,
+                                            int32_t *actualFrames)
 {
     if (requestedFrames < ENDPOINT_DATA_QUEUE_SIZE_MIN) {
         requestedFrames = ENDPOINT_DATA_QUEUE_SIZE_MIN;
diff --git a/media/libaaudio/src/client/AudioEndpoint.h b/media/libaaudio/src/client/AudioEndpoint.h
index e786513..caee488 100644
--- a/media/libaaudio/src/client/AudioEndpoint.h
+++ b/media/libaaudio/src/client/AudioEndpoint.h
@@ -71,13 +71,13 @@
      */
     bool isOutputFreeRunning() const { return mOutputFreeRunning; }
 
-    int32_t setBufferSizeInFrames(aaudio_size_frames_t requestedFrames,
-                                  aaudio_size_frames_t *actualFrames);
-    aaudio_size_frames_t getBufferSizeInFrames() const;
+    int32_t setBufferSizeInFrames(int32_t requestedFrames,
+                                  int32_t *actualFrames);
+    int32_t getBufferSizeInFrames() const;
 
-    aaudio_size_frames_t getBufferCapacityInFrames() const;
+    int32_t getBufferCapacityInFrames() const;
 
-    aaudio_size_frames_t getFullFramesAvailable();
+    int32_t getFullFramesAvailable();
 
 private:
     FifoBuffer   * mUpCommandQueue;
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 19f2300..54f4870 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -172,14 +172,14 @@
 
 aaudio_result_t AudioStreamInternal::requestStart()
 {
-    aaudio_nanoseconds_t startTime;
+    int64_t startTime;
     ALOGD("AudioStreamInternal(): start()");
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
     const sp<IAAudioService>& aaudioService = getAAudioService();
     if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
-    startTime = AAudio_getNanoseconds(AAUDIO_CLOCK_MONOTONIC);
+    startTime = AudioClock::getNanoseconds();
     mClockModel.start(startTime);
     processTimestamp(0, startTime);
     setState(AAUDIO_STREAM_STATE_STARTING);
@@ -194,7 +194,7 @@
     }
     const sp<IAAudioService>& aaudioService = getAAudioService();
     if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
-    mClockModel.stop(AAudio_getNanoseconds(AAUDIO_CLOCK_MONOTONIC));
+    mClockModel.stop(AudioClock::getNanoseconds());
     setState(AAUDIO_STREAM_STATE_PAUSING);
     return aaudioService->pauseStream(mServiceStreamHandle);
 }
@@ -212,10 +212,10 @@
 
 void AudioStreamInternal::onFlushFromServer() {
     ALOGD("AudioStreamInternal(): onFlushFromServer()");
-    aaudio_position_frames_t readCounter = mAudioEndpoint.getDownDataReadCounter();
-    aaudio_position_frames_t writeCounter = mAudioEndpoint.getDownDataWriteCounter();
+    int64_t readCounter = mAudioEndpoint.getDownDataReadCounter();
+    int64_t writeCounter = mAudioEndpoint.getDownDataWriteCounter();
     // Bump offset so caller does not see the retrograde motion in getFramesRead().
-    aaudio_position_frames_t framesFlushed = writeCounter - readCounter;
+    int64_t framesFlushed = writeCounter - readCounter;
     mFramesOffsetFromService += framesFlushed;
     // Flush written frames by forcing writeCounter to readCounter.
     // This is because we cannot move the read counter in the hardware.
@@ -262,10 +262,10 @@
 
 // TODO use aaudio_clockid_t all the way down to AudioClock
 aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
-                           aaudio_position_frames_t *framePosition,
-                           aaudio_nanoseconds_t *timeNanoseconds) {
+                           int64_t *framePosition,
+                           int64_t *timeNanoseconds) {
 // TODO implement using real HAL
-    aaudio_nanoseconds_t time = AudioClock::getNanoseconds();
+    int64_t time = AudioClock::getNanoseconds();
     *framePosition = mClockModel.convertTimeToPosition(time);
     *timeNanoseconds = time + (10 * AAUDIO_NANOS_PER_MILLISECOND); // Fake hardware delay
     return AAUDIO_OK;
@@ -278,9 +278,9 @@
 #if LOG_TIMESTAMPS
 static void AudioStreamInternal_LogTimestamp(AAudioServiceMessage &command) {
     static int64_t oldPosition = 0;
-    static aaudio_nanoseconds_t oldTime = 0;
+    static int64_t oldTime = 0;
     int64_t framePosition = command.timestamp.position;
-    aaudio_nanoseconds_t nanoTime = command.timestamp.timestamp;
+    int64_t nanoTime = command.timestamp.timestamp;
     ALOGD("AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu",
          (long long) framePosition,
          (long long) nanoTime);
@@ -298,7 +298,7 @@
 #endif
 
 aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) {
-    aaudio_position_frames_t framePosition = 0;
+    int64_t framePosition = 0;
 #if LOG_TIMESTAMPS
-    AudioStreamInternal_LogTimestamp(command);
+    AudioStreamInternal_LogTimestamp(*message);
 #endif
@@ -370,12 +370,12 @@
 
 // Write the data, block if needed and timeoutMillis > 0
 aaudio_result_t AudioStreamInternal::write(const void *buffer, int32_t numFrames,
-                                         aaudio_nanoseconds_t timeoutNanoseconds)
+                                         int64_t timeoutNanoseconds)
 {
     aaudio_result_t result = AAUDIO_OK;
     uint8_t* source = (uint8_t*)buffer;
-    aaudio_nanoseconds_t currentTimeNanos = AudioClock::getNanoseconds();
-    aaudio_nanoseconds_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
+    int64_t currentTimeNanos = AudioClock::getNanoseconds();
+    int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
     int32_t framesLeft = numFrames;
 //    ALOGD("AudioStreamInternal::write(%p, %d) at time %08llu , mState = %d ------------------",
 //         buffer, numFrames, (unsigned long long) currentTimeNanos, mState);
@@ -383,7 +383,7 @@
     // Write until all the data has been written or until a timeout occurs.
     while (framesLeft > 0) {
         // The call to writeNow() will not block. It will just write as much as it can.
-        aaudio_nanoseconds_t wakeTimeNanos = 0;
+        int64_t wakeTimeNanos = 0;
         aaudio_result_t framesWritten = writeNow(source, framesLeft,
                                                currentTimeNanos, &wakeTimeNanos);
 //        ALOGD("AudioStreamInternal::write() writeNow() framesLeft = %d --> framesWritten = %d", framesLeft, framesWritten);
@@ -422,7 +422,7 @@
 
 // Write as much data as we can without blocking.
 aaudio_result_t AudioStreamInternal::writeNow(const void *buffer, int32_t numFrames,
-                                         aaudio_nanoseconds_t currentNanoTime, aaudio_nanoseconds_t *wakeTimePtr) {
+                                         int64_t currentNanoTime, int64_t *wakeTimePtr) {
     {
         aaudio_result_t result = processCommands();
         if (result != AAUDIO_OK) {
@@ -452,7 +452,7 @@
     // Calculate an ideal time to wake up.
     if (wakeTimePtr != nullptr && framesWritten >= 0) {
         // By default wake up a few milliseconds from now.  // TODO review
-        aaudio_nanoseconds_t wakeTime = currentNanoTime + (2 * AAUDIO_NANOS_PER_MILLISECOND);
+        int64_t wakeTime = currentNanoTime + (2 * AAUDIO_NANOS_PER_MILLISECOND);
         switch (getState()) {
             case AAUDIO_STREAM_STATE_OPEN:
             case AAUDIO_STREAM_STATE_STARTING:
@@ -487,7 +487,7 @@
 
 aaudio_result_t AudioStreamInternal::waitForStateChange(aaudio_stream_state_t currentState,
                                                       aaudio_stream_state_t *nextState,
-                                                      aaudio_nanoseconds_t timeoutNanoseconds)
+                                                      int64_t timeoutNanoseconds)
 
 {
     aaudio_result_t result = processCommands();
@@ -522,33 +522,38 @@
 }
 
 
-void AudioStreamInternal::processTimestamp(uint64_t position, aaudio_nanoseconds_t time) {
+void AudioStreamInternal::processTimestamp(uint64_t position, int64_t time) {
     mClockModel.processTimestamp( position, time);
 }
 
-aaudio_result_t AudioStreamInternal::setBufferSize(aaudio_size_frames_t requestedFrames,
-                                        aaudio_size_frames_t *actualFrames) {
-    return mAudioEndpoint.setBufferSizeInFrames(requestedFrames, actualFrames);
+aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
+    int32_t actualFrames = 0;
+    aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(requestedFrames, &actualFrames);
+    if (result < 0) {
+        return result;
+    } else {
+        return (aaudio_result_t) actualFrames;
+    }
 }
 
-aaudio_size_frames_t AudioStreamInternal::getBufferSize() const
+int32_t AudioStreamInternal::getBufferSize() const
 {
     return mAudioEndpoint.getBufferSizeInFrames();
 }
 
-aaudio_size_frames_t AudioStreamInternal::getBufferCapacity() const
+int32_t AudioStreamInternal::getBufferCapacity() const
 {
     return mAudioEndpoint.getBufferCapacityInFrames();
 }
 
-aaudio_size_frames_t AudioStreamInternal::getFramesPerBurst() const
+int32_t AudioStreamInternal::getFramesPerBurst() const
 {
     return mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
 }
 
-aaudio_position_frames_t AudioStreamInternal::getFramesRead()
+int64_t AudioStreamInternal::getFramesRead()
 {
-    aaudio_position_frames_t framesRead =
+    int64_t framesRead =
             mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
             + mFramesOffsetFromService;
     // Prevent retrograde motion.
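The setBufferSize() change above folds the old actualFrames out-parameter into the return value. A minimal sketch of the new calling convention (the caller code is illustrative, not part of this patch):

    int32_t granted = stream->setBufferSize(requestedFrames);
    if (granted < 0) {
        return granted;  // an aaudio_result_t error code
    }
    // Otherwise 'granted' is the buffer size actually in effect, in frames;
    // the endpoint may clamp the request (e.g. up to ENDPOINT_DATA_QUEUE_SIZE_MIN).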
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 666df3a..6f3a7ac 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -49,8 +49,8 @@
 
     // TODO use aaudio_clockid_t all the way down to AudioClock
     virtual aaudio_result_t getTimestamp(clockid_t clockId,
-                                       aaudio_position_frames_t *framePosition,
-                                       aaudio_nanoseconds_t *timeNanoseconds) override;
+                                       int64_t *framePosition,
+                                       int64_t *timeNanoseconds) override;
 
 
     virtual aaudio_result_t updateState() override;
@@ -62,22 +62,21 @@
 
     virtual aaudio_result_t write(const void *buffer,
                              int32_t numFrames,
-                             aaudio_nanoseconds_t timeoutNanoseconds) override;
+                             int64_t timeoutNanoseconds) override;
 
     virtual aaudio_result_t waitForStateChange(aaudio_stream_state_t currentState,
                                           aaudio_stream_state_t *nextState,
-                                          aaudio_nanoseconds_t timeoutNanoseconds) override;
+                                          int64_t timeoutNanoseconds) override;
 
-    virtual aaudio_result_t setBufferSize(aaudio_size_frames_t requestedFrames,
-                                        aaudio_size_frames_t *actualFrames) override;
+    virtual aaudio_result_t setBufferSize(int32_t requestedFrames) override;
 
-    virtual aaudio_size_frames_t getBufferSize() const override;
+    virtual int32_t getBufferSize() const override;
 
-    virtual aaudio_size_frames_t getBufferCapacity() const override;
+    virtual int32_t getBufferCapacity() const override;
 
-    virtual aaudio_size_frames_t getFramesPerBurst() const override;
+    virtual int32_t getFramesPerBurst() const override;
 
-    virtual aaudio_position_frames_t getFramesRead() override;
+    virtual int64_t getFramesRead() override;
 
     virtual int32_t getXRunCount() const override {
         return mXRunCount;
@@ -100,8 +99,8 @@
  */
     virtual aaudio_result_t writeNow(const void *buffer,
                                 int32_t numFrames,
-                                aaudio_nanoseconds_t currentTimeNanos,
-                                aaudio_nanoseconds_t *wakeTimePtr);
+                                int64_t currentTimeNanos,
+                                int64_t *wakeTimePtr);
 
     void onFlushFromServer();
 
@@ -112,15 +111,15 @@
 private:
     IsochronousClockModel    mClockModel;
     AudioEndpoint            mAudioEndpoint;
-    aaudio_handle_t            mServiceStreamHandle;
+    aaudio_handle_t          mServiceStreamHandle;
     EndpointDescriptor       mEndpointDescriptor;
     // Offset from underlying frame position.
-    aaudio_position_frames_t   mFramesOffsetFromService = 0;
-    aaudio_position_frames_t   mLastFramesRead = 0;
-    aaudio_size_frames_t       mFramesPerBurst;
+    int64_t                  mFramesOffsetFromService = 0;
+    int64_t                  mLastFramesRead = 0;
+    int32_t                  mFramesPerBurst;
     int32_t                  mXRunCount = 0;
 
-    void processTimestamp(uint64_t position, aaudio_nanoseconds_t time);
+    void processTimestamp(uint64_t position, int64_t time);
 };
 
 } /* namespace aaudio */
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index bdb491d..4c8aabc 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -21,6 +21,7 @@
 #include <stdint.h>
 #include <aaudio/AAudioDefinitions.h>
 
+#include "utility/AudioClock.h"
 #include "IsochronousClockModel.h"
 
 #define MIN_LATENESS_NANOS (10 * AAUDIO_NANOS_PER_MICROSECOND)
@@ -29,11 +30,11 @@
 using namespace aaudio;
 
 IsochronousClockModel::IsochronousClockModel()
-        : mSampleRate(48000)
+        : mMarkerFramePosition(0)
+        , mMarkerNanoTime(0)
+        , mSampleRate(48000)
         , mFramesPerBurst(64)
         , mMaxLatenessInNanos(0)
-        , mMarkerFramePosition(0)
-        , mMarkerNanoTime(0)
         , mState(STATE_STOPPED)
 {
 }
@@ -41,21 +42,21 @@
 IsochronousClockModel::~IsochronousClockModel() {
 }
 
-void IsochronousClockModel::start(aaudio_nanoseconds_t nanoTime)
+void IsochronousClockModel::start(int64_t nanoTime)
 {
     mMarkerNanoTime = nanoTime;
     mState = STATE_STARTING;
 }
 
-void IsochronousClockModel::stop(aaudio_nanoseconds_t nanoTime)
+void IsochronousClockModel::stop(int64_t nanoTime)
 {
     mMarkerNanoTime = nanoTime;
     mMarkerFramePosition = convertTimeToPosition(nanoTime); // TODO should we do this?
     mState = STATE_STOPPED;
 }
 
-void IsochronousClockModel::processTimestamp(aaudio_position_frames_t framePosition,
-                                             aaudio_nanoseconds_t nanoTime) {
+void IsochronousClockModel::processTimestamp(int64_t framePosition,
+                                             int64_t nanoTime) {
     int64_t framesDelta = framePosition - mMarkerFramePosition;
     int64_t nanosDelta = nanoTime - mMarkerNanoTime;
     if (nanosDelta < 1000) {
@@ -115,7 +116,6 @@
     default:
         break;
     }
-    ++mTimestampCount;
 }
 
 void IsochronousClockModel::setSampleRate(int32_t sampleRate) {
@@ -133,41 +133,41 @@
     mMaxLatenessInNanos = (nanosLate > MIN_LATENESS_NANOS) ? nanosLate : MIN_LATENESS_NANOS;
 }
 
-aaudio_nanoseconds_t IsochronousClockModel::convertDeltaPositionToTime(
-        aaudio_position_frames_t framesDelta) const {
+int64_t IsochronousClockModel::convertDeltaPositionToTime(
+        int64_t framesDelta) const {
     return (AAUDIO_NANOS_PER_SECOND * framesDelta) / mSampleRate;
 }
 
-int64_t IsochronousClockModel::convertDeltaTimeToPosition(aaudio_nanoseconds_t nanosDelta) const {
+int64_t IsochronousClockModel::convertDeltaTimeToPosition(int64_t nanosDelta) const {
     return (mSampleRate * nanosDelta) / AAUDIO_NANOS_PER_SECOND;
 }
 
-aaudio_nanoseconds_t IsochronousClockModel::convertPositionToTime(
-        aaudio_position_frames_t framePosition) const {
+int64_t IsochronousClockModel::convertPositionToTime(
+        int64_t framePosition) const {
     if (mState == STATE_STOPPED) {
         return mMarkerNanoTime;
     }
-    aaudio_position_frames_t nextBurstIndex = (framePosition + mFramesPerBurst - 1) / mFramesPerBurst;
-    aaudio_position_frames_t nextBurstPosition = mFramesPerBurst * nextBurstIndex;
-    aaudio_position_frames_t framesDelta = nextBurstPosition - mMarkerFramePosition;
-    aaudio_nanoseconds_t nanosDelta = convertDeltaPositionToTime(framesDelta);
-    aaudio_nanoseconds_t time = (aaudio_nanoseconds_t) (mMarkerNanoTime + nanosDelta);
+    int64_t nextBurstIndex = (framePosition + mFramesPerBurst - 1) / mFramesPerBurst;
+    int64_t nextBurstPosition = mFramesPerBurst * nextBurstIndex;
+    int64_t framesDelta = nextBurstPosition - mMarkerFramePosition;
+    int64_t nanosDelta = convertDeltaPositionToTime(framesDelta);
+    int64_t time = (int64_t) (mMarkerNanoTime + nanosDelta);
 //    ALOGI("IsochronousClockModel::convertPositionToTime: pos = %llu --> time = %llu",
 //         (unsigned long long)framePosition,
 //         (unsigned long long)time);
     return time;
 }
 
-aaudio_position_frames_t IsochronousClockModel::convertTimeToPosition(
-        aaudio_nanoseconds_t nanoTime) const {
+int64_t IsochronousClockModel::convertTimeToPosition(
+        int64_t nanoTime) const {
     if (mState == STATE_STOPPED) {
         return mMarkerFramePosition;
     }
-    aaudio_nanoseconds_t nanosDelta = nanoTime - mMarkerNanoTime;
-    aaudio_position_frames_t framesDelta = convertDeltaTimeToPosition(nanosDelta);
-    aaudio_position_frames_t nextBurstPosition = mMarkerFramePosition + framesDelta;
-    aaudio_position_frames_t nextBurstIndex = nextBurstPosition / mFramesPerBurst;
-    aaudio_position_frames_t position = nextBurstIndex * mFramesPerBurst;
+    int64_t nanosDelta = nanoTime - mMarkerNanoTime;
+    int64_t framesDelta = convertDeltaTimeToPosition(nanosDelta);
+    int64_t nextBurstPosition = mMarkerFramePosition + framesDelta;
+    int64_t nextBurstIndex = nextBurstPosition / mFramesPerBurst;
+    int64_t position = nextBurstIndex * mFramesPerBurst;
 //    ALOGI("IsochronousClockModel::convertTimeToPosition: time = %llu --> pos = %llu",
 //         (unsigned long long)nanoTime,
 //         (unsigned long long)position);
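A worked example of the conversions above, using the defaults set in the constructor (48000 frames per second, 64 frames per burst):

    // convertDeltaPositionToTime(480)        = (1e9 * 480) / 48000        = 10,000,000 ns (10 ms)
    // convertDeltaTimeToPosition(10,000,000) = (48000 * 10,000,000) / 1e9 = 480 frames
    // convertTimeToPosition() then rounds the result down to a burst boundary:
    //     480 / 64 = 7 bursts  ->  7 * 64 = 448 frames
    // while convertPositionToTime() rounds the position up to the next burst before converting.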
diff --git a/media/libaaudio/src/client/IsochronousClockModel.h b/media/libaaudio/src/client/IsochronousClockModel.h
index b188a3d..524c286 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.h
+++ b/media/libaaudio/src/client/IsochronousClockModel.h
@@ -34,17 +34,17 @@
     IsochronousClockModel();
     virtual ~IsochronousClockModel();
 
-    void start(aaudio_nanoseconds_t nanoTime);
-    void stop(aaudio_nanoseconds_t nanoTime);
+    void start(int64_t nanoTime);
+    void stop(int64_t nanoTime);
 
-    void processTimestamp(aaudio_position_frames_t framePosition, aaudio_nanoseconds_t nanoTime);
+    void processTimestamp(int64_t framePosition, int64_t nanoTime);
 
     /**
      * @param sampleRate rate of the stream in frames per second
      */
-    void setSampleRate(aaudio_sample_rate_t sampleRate);
+    void setSampleRate(int32_t sampleRate);
 
-    aaudio_sample_rate_t getSampleRate() const {
+    int32_t getSampleRate() const {
         return mSampleRate;
     }
 
@@ -53,9 +53,9 @@
      *
      * @param framesPerBurst number of frames that stream advance at one time.
      */
-    void setFramesPerBurst(aaudio_size_frames_t framesPerBurst);
+    void setFramesPerBurst(int32_t framesPerBurst);
 
-    aaudio_size_frames_t getFramesPerBurst() const {
+    int32_t getFramesPerBurst() const {
         return mFramesPerBurst;
     }
 
@@ -65,7 +65,7 @@
      * @param framePosition position of the stream in frames
      * @return time in nanoseconds
      */
-    aaudio_nanoseconds_t convertPositionToTime(aaudio_position_frames_t framePosition) const;
+    int64_t convertPositionToTime(int64_t framePosition) const;
 
     /**
      * Calculate an estimated position where the stream will be at the specified time.
@@ -73,19 +73,19 @@
      * @param nanoTime time of interest
      * @return position in frames
      */
-    aaudio_position_frames_t convertTimeToPosition(aaudio_nanoseconds_t nanoTime) const;
+    int64_t convertTimeToPosition(int64_t nanoTime) const;
 
     /**
      * @param framesDelta difference in frames
      * @return duration in nanoseconds
      */
-    aaudio_nanoseconds_t convertDeltaPositionToTime(aaudio_position_frames_t framesDelta) const;
+    int64_t convertDeltaPositionToTime(int64_t framesDelta) const;
 
     /**
      * @param nanosDelta duration in nanoseconds
      * @return frames that stream will advance in that time
      */
-    aaudio_position_frames_t convertDeltaTimeToPosition(aaudio_nanoseconds_t nanosDelta) const;
+    int64_t convertDeltaTimeToPosition(int64_t nanosDelta) const;
 
 private:
     enum clock_model_state_t {
@@ -95,13 +95,12 @@
         STATE_RUNNING
     };
 
-    aaudio_sample_rate_t     mSampleRate;
-    aaudio_size_frames_t     mFramesPerBurst;
-    int32_t                mMaxLatenessInNanos;
-    aaudio_position_frames_t mMarkerFramePosition;
-    aaudio_nanoseconds_t     mMarkerNanoTime;
-    int32_t                mTimestampCount;
-    clock_model_state_t     mState;
+    int64_t             mMarkerFramePosition;
+    int64_t             mMarkerNanoTime;
+    int32_t             mSampleRate;
+    int32_t             mFramesPerBurst;
+    int32_t             mMaxLatenessInNanos;
+    clock_model_state_t mState;
 
     void update();
 };
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 04dbda1..1208f66 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -32,18 +32,11 @@
 
 using namespace aaudio;
 
-// This is not the maximum theoretic possible number of handles that the HandlerTracker
-// class could support; instead it is the maximum number of handles that we are configuring
-// for our HandleTracker instance (sHandleTracker).
-#define AAUDIO_MAX_HANDLES  64
 
 // Macros for common code that includes a return.
 // TODO Consider using do{}while(0) construct. I tried but it hung AndroidStudio
 #define CONVERT_BUILDER_HANDLE_OR_RETURN() \
-    convertAAudioBuilderToStreamBuilder(builder); \
-    if (streamBuilder == nullptr) { \
-        return AAUDIO_ERROR_INVALID_HANDLE; \
-    }
+    convertAAudioBuilderToStreamBuilder(builder);
 
 #define COMMON_GET_FROM_BUILDER_OR_RETURN(resultPtr) \
     CONVERT_BUILDER_HANDLE_OR_RETURN() \
@@ -51,31 +44,6 @@
         return AAUDIO_ERROR_NULL; \
     }
 
-#define CONVERT_STREAM_HANDLE_OR_RETURN() \
-    convertAAudioStreamToAudioStream(stream); \
-    if (audioStream == nullptr) { \
-        return AAUDIO_ERROR_INVALID_HANDLE; \
-    }
-
-#define COMMON_GET_FROM_STREAM_OR_RETURN(resultPtr) \
-    CONVERT_STREAM_HANDLE_OR_RETURN(); \
-    if ((resultPtr) == nullptr) { \
-        return AAUDIO_ERROR_NULL; \
-    }
-
-// Static data.
-// TODO static constructors are discouraged, alternatives?
-static HandleTracker sHandleTracker(AAUDIO_MAX_HANDLES);
-
-typedef enum
-{
-    AAUDIO_HANDLE_TYPE_STREAM,
-    AAUDIO_HANDLE_TYPE_STREAM_BUILDER,
-    AAUDIO_HANDLE_TYPE_COUNT
-} aaudio_handle_type_t;
-static_assert(AAUDIO_HANDLE_TYPE_COUNT <= HANDLE_TRACKER_MAX_TYPES, "Too many handle types.");
-
-
 #define AAUDIO_CASE_ENUM(name) case name: return #name
 
 AAUDIO_API const char * AAudio_convertResultToText(aaudio_result_t returnCode) {
@@ -104,6 +72,7 @@
 AAUDIO_API const char * AAudio_convertStreamStateToText(aaudio_stream_state_t state) {
     switch (state) {
         AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_UNINITIALIZED);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_UNKNOWN);
         AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_OPEN);
         AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STARTING);
         AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STARTED);
@@ -123,157 +92,73 @@
 
 static AudioStream *convertAAudioStreamToAudioStream(AAudioStream stream)
 {
-    return (AudioStream *) sHandleTracker.get(AAUDIO_HANDLE_TYPE_STREAM,
-                                              (aaudio_handle_t) stream);
+    return (AudioStream *) stream;
 }
 
 static AudioStreamBuilder *convertAAudioBuilderToStreamBuilder(AAudioStreamBuilder builder)
 {
-    return (AudioStreamBuilder *) sHandleTracker.get(AAUDIO_HANDLE_TYPE_STREAM_BUILDER,
-                                                     (aaudio_handle_t) builder);
+    return (AudioStreamBuilder *) builder;
 }
 
 AAUDIO_API aaudio_result_t AAudio_createStreamBuilder(AAudioStreamBuilder *builder)
 {
     ALOGD("AAudio_createStreamBuilder(): check sHandleTracker.isInitialized ()");
-    if (!sHandleTracker.isInitialized()) {
-        return AAUDIO_ERROR_NO_MEMORY;
-    }
     AudioStreamBuilder *audioStreamBuilder =  new AudioStreamBuilder();
     if (audioStreamBuilder == nullptr) {
         return AAUDIO_ERROR_NO_MEMORY;
     }
-    ALOGD("AAudio_createStreamBuilder(): created AudioStreamBuilder = %p", audioStreamBuilder);
-    // TODO protect the put() with a Mutex
-    AAudioStreamBuilder handle = sHandleTracker.put(AAUDIO_HANDLE_TYPE_STREAM_BUILDER,
-            audioStreamBuilder);
-    if (handle < 0) {
-        delete audioStreamBuilder;
-        return static_cast<aaudio_result_t>(handle);
-    } else {
-        *builder = handle;
-    }
+    *builder = (AAudioStreamBuilder) audioStreamBuilder;
     return AAUDIO_OK;
 }
 
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setDeviceId(AAudioStreamBuilder builder,
-                                                     aaudio_device_id_t deviceId)
+AAUDIO_API void AAudioStreamBuilder_setDeviceId(AAudioStreamBuilder builder,
+                                                     int32_t deviceId)
 {
-    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setDeviceId(deviceId);
-    return AAUDIO_OK;
 }
 
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getDeviceId(AAudioStreamBuilder builder,
-                                              aaudio_device_id_t *deviceId)
+AAUDIO_API void AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder builder,
+                                              int32_t sampleRate)
 {
-    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(deviceId);
-    *deviceId = streamBuilder->getDeviceId();
-    return AAUDIO_OK;
-}
-
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder builder,
-                                              aaudio_sample_rate_t sampleRate)
-{
-    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setSampleRate(sampleRate);
-    return AAUDIO_OK;
 }
 
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSampleRate(AAudioStreamBuilder builder,
-                                              aaudio_sample_rate_t *sampleRate)
-{
-    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(sampleRate);
-    *sampleRate = streamBuilder->getSampleRate();
-    return AAUDIO_OK;
-}
-
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder builder,
+AAUDIO_API void AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder builder,
                                                    int32_t samplesPerFrame)
 {
-    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setSamplesPerFrame(samplesPerFrame);
-    return AAUDIO_OK;
 }
 
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSamplesPerFrame(AAudioStreamBuilder builder,
-                                                   int32_t *samplesPerFrame)
-{
-    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(samplesPerFrame);
-    *samplesPerFrame = streamBuilder->getSamplesPerFrame();
-    return AAUDIO_OK;
-}
-
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setDirection(AAudioStreamBuilder builder,
+AAUDIO_API void AAudioStreamBuilder_setDirection(AAudioStreamBuilder builder,
                                              aaudio_direction_t direction)
 {
-    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setDirection(direction);
-    return AAUDIO_OK;
 }
 
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getDirection(AAudioStreamBuilder builder,
-                                             aaudio_direction_t *direction)
-{
-    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(direction);
-    *direction = streamBuilder->getDirection();
-    return AAUDIO_OK;
-}
 
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setFormat(AAudioStreamBuilder builder,
+AAUDIO_API void AAudioStreamBuilder_setFormat(AAudioStreamBuilder builder,
                                                    aaudio_audio_format_t format)
 {
-    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     streamBuilder->setFormat(format);
-    return AAUDIO_OK;
 }
 
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getFormat(AAudioStreamBuilder builder,
-                                                   aaudio_audio_format_t *format)
-{
-    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(format);
-    *format = streamBuilder->getFormat();
-    return AAUDIO_OK;
-}
-
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder builder,
+AAUDIO_API void AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder builder,
                                                         aaudio_sharing_mode_t sharingMode)
 {
-    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
-    if ((sharingMode < 0) || (sharingMode >= AAUDIO_SHARING_MODE_COUNT)) {
-        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
-    } else {
-        streamBuilder->setSharingMode(sharingMode);
-        return AAUDIO_OK;
-    }
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+    streamBuilder->setSharingMode(sharingMode);
 }
 
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSharingMode(AAudioStreamBuilder builder,
-                                                        aaudio_sharing_mode_t *sharingMode)
+AAUDIO_API void AAudioStreamBuilder_setBufferCapacityInFrames(AAudioStreamBuilder builder,
+                                                        int32_t frames)
 {
-    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(sharingMode);
-    *sharingMode = streamBuilder->getSharingMode();
-    return AAUDIO_OK;
-}
-
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_setBufferCapacity(AAudioStreamBuilder builder,
-                                                        aaudio_size_frames_t frames)
-{
-    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
-    if (frames < 0) {
-        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
-    } else {
-        streamBuilder->setBufferCapacity(frames);
-        return AAUDIO_OK;
-    }
-}
-
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_getBufferCapacity(AAudioStreamBuilder builder,
-                                                        aaudio_size_frames_t *frames)
-{
-    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(frames);
-    *frames = streamBuilder->getBufferCapacity();
-    return AAUDIO_OK;
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+    streamBuilder->setBufferCapacity(frames);
 }
 
 static aaudio_result_t  AAudioInternal_openStream(AudioStreamBuilder *streamBuilder,
@@ -284,14 +169,7 @@
     if (result != AAUDIO_OK) {
         return result;
     } else {
-        // Create a handle for referencing the object.
-        // TODO protect the put() with a Mutex
-        AAudioStream handle = sHandleTracker.put(AAUDIO_HANDLE_TYPE_STREAM, audioStream);
-        if (handle < 0) {
-            delete audioStream;
-            return static_cast<aaudio_result_t>(handle);
-        }
-        *streamPtr = handle;
+        *streamPtr = (AAudioStream) audioStream;
         return AAUDIO_OK;
     }
 }
@@ -299,15 +177,14 @@
 AAUDIO_API aaudio_result_t  AAudioStreamBuilder_openStream(AAudioStreamBuilder builder,
                                                      AAudioStream *streamPtr)
 {
-    ALOGD("AAudioStreamBuilder_openStream(): builder = 0x%08X", builder);
+    ALOGD("AAudioStreamBuilder_openStream(): builder = %p", builder);
     AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(streamPtr);
     return AAudioInternal_openStream(streamBuilder, streamPtr);
 }
 
 AAUDIO_API aaudio_result_t  AAudioStreamBuilder_delete(AAudioStreamBuilder builder)
 {
-    AudioStreamBuilder *streamBuilder = (AudioStreamBuilder *)
-            sHandleTracker.remove(AAUDIO_HANDLE_TYPE_STREAM_BUILDER, builder);
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
     if (streamBuilder != nullptr) {
         delete streamBuilder;
         return AAUDIO_OK;
@@ -317,9 +194,8 @@
 
 AAUDIO_API aaudio_result_t  AAudioStream_close(AAudioStream stream)
 {
-    AudioStream *audioStream = (AudioStream *)
-            sHandleTracker.remove(AAUDIO_HANDLE_TYPE_STREAM, (aaudio_handle_t)stream);
-    ALOGD("AAudioStream_close(0x%08X), audioStream = %p", stream, audioStream);
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    ALOGD("AAudioStream_close(%p)", stream);
     if (audioStream != nullptr) {
         audioStream->close();
         delete audioStream;
@@ -330,39 +206,39 @@
 
 AAUDIO_API aaudio_result_t  AAudioStream_requestStart(AAudioStream stream)
 {
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
-    ALOGD("AAudioStream_requestStart(0x%08X), audioStream = %p", stream, audioStream);
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    ALOGD("AAudioStream_requestStart(%p)", stream);
     return audioStream->requestStart();
 }
 
 AAUDIO_API aaudio_result_t  AAudioStream_requestPause(AAudioStream stream)
 {
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
-    ALOGD("AAudioStream_requestPause(0x%08X), audioStream = %p", stream, audioStream);
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    ALOGD("AAudioStream_requestPause(%p)", stream);
     return audioStream->requestPause();
 }
 
 AAUDIO_API aaudio_result_t  AAudioStream_requestFlush(AAudioStream stream)
 {
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
-    ALOGD("AAudioStream_requestFlush(0x%08X), audioStream = %p", stream, audioStream);
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    ALOGD("AAudioStream_requestFlush(%p)", stream);
     return audioStream->requestFlush();
 }
 
 AAUDIO_API aaudio_result_t  AAudioStream_requestStop(AAudioStream stream)
 {
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
-    ALOGD("AAudioStream_requestStop(0x%08X), audioStream = %p", stream, audioStream);
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    ALOGD("AAudioStream_requestStop(%p)", stream);
     return audioStream->requestStop();
 }
 
 AAUDIO_API aaudio_result_t AAudioStream_waitForStateChange(AAudioStream stream,
                                             aaudio_stream_state_t inputState,
                                             aaudio_stream_state_t *nextState,
-                                            aaudio_nanoseconds_t timeoutNanoseconds)
+                                            int64_t timeoutNanoseconds)
 {
 
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
     return audioStream->waitForStateChange(inputState, nextState, timeoutNanoseconds);
 }
 
@@ -372,10 +248,10 @@
 
 AAUDIO_API aaudio_result_t AAudioStream_read(AAudioStream stream,
                                void *buffer,
-                               aaudio_size_frames_t numFrames,
-                               aaudio_nanoseconds_t timeoutNanoseconds)
+                               int32_t numFrames,
+                               int64_t timeoutNanoseconds)
 {
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
     if (buffer == nullptr) {
         return AAUDIO_ERROR_NULL;
     }
@@ -393,10 +269,10 @@
 
 AAUDIO_API aaudio_result_t AAudioStream_write(AAudioStream stream,
                                const void *buffer,
-                               aaudio_size_frames_t numFrames,
-                               aaudio_nanoseconds_t timeoutNanoseconds)
+                               int32_t numFrames,
+                               int64_t timeoutNanoseconds)
 {
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
     if (buffer == nullptr) {
         return AAUDIO_ERROR_NULL;
     }
@@ -417,18 +293,18 @@
 // ============================================================
 
 AAUDIO_API aaudio_result_t AAudioStream_createThread(AAudioStream stream,
-                                     aaudio_nanoseconds_t periodNanoseconds,
+                                     int64_t periodNanoseconds,
                                      aaudio_audio_thread_proc_t *threadProc, void *arg)
 {
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
     return audioStream->createThread(periodNanoseconds, threadProc, arg);
 }
 
 AAUDIO_API aaudio_result_t AAudioStream_joinThread(AAudioStream stream,
                                    void **returnArg,
-                                   aaudio_nanoseconds_t timeoutNanoseconds)
+                                   int64_t timeoutNanoseconds)
 {
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
     return audioStream->joinThread(returnArg, timeoutNanoseconds);
 }
 
@@ -436,147 +312,104 @@
 // Stream - queries
 // ============================================================
 
-// TODO Use aaudio_clockid_t all the way down through the C++ streams.
-static clockid_t AAudioConvert_fromAAudioClockId(aaudio_clockid_t clockid)
+AAUDIO_API int32_t AAudioStream_getSampleRate(AAudioStream stream)
 {
-    clockid_t hostClockId;
-    switch (clockid) {
-        case AAUDIO_CLOCK_MONOTONIC:
-            hostClockId = CLOCK_MONOTONIC;
-            break;
-        case AAUDIO_CLOCK_BOOTTIME:
-            hostClockId = CLOCK_BOOTTIME;
-            break;
-        default:
-            hostClockId = 0; // TODO review
-    }
-    return hostClockId;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getSampleRate();
 }
 
-aaudio_nanoseconds_t AAudio_getNanoseconds(aaudio_clockid_t clockid)
+AAUDIO_API int32_t AAudioStream_getSamplesPerFrame(AAudioStream stream)
 {
-    clockid_t hostClockId = AAudioConvert_fromAAudioClockId(clockid);
-   return AudioClock::getNanoseconds(hostClockId);
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getSamplesPerFrame();
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_getSampleRate(AAudioStream stream, aaudio_sample_rate_t *sampleRate)
+AAUDIO_API aaudio_stream_state_t AAudioStream_getState(AAudioStream stream)
 {
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(sampleRate);
-    *sampleRate = audioStream->getSampleRate();
-    return AAUDIO_OK;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getState();
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_getSamplesPerFrame(AAudioStream stream, int32_t *samplesPerFrame)
+AAUDIO_API aaudio_audio_format_t AAudioStream_getFormat(AAudioStream stream)
 {
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(samplesPerFrame);
-    *samplesPerFrame = audioStream->getSamplesPerFrame();
-    return AAUDIO_OK;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getFormat();
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_getState(AAudioStream stream, aaudio_stream_state_t *state)
+AAUDIO_API aaudio_result_t AAudioStream_setBufferSizeInFrames(AAudioStream stream,
+                                                int32_t requestedFrames)
 {
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(state);
-    *state = audioStream->getState();
-    return AAUDIO_OK;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->setBufferSize(requestedFrames);
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_getFormat(AAudioStream stream, aaudio_audio_format_t *format)
+AAUDIO_API int32_t AAudioStream_getBufferSizeInFrames(AAudioStream stream)
 {
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(format);
-    *format = audioStream->getFormat();
-    return AAUDIO_OK;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getBufferSize();
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_setBufferSize(AAudioStream stream,
-                                                aaudio_size_frames_t requestedFrames,
-                                                aaudio_size_frames_t *actualFrames)
+AAUDIO_API aaudio_direction_t AAudioStream_getDirection(AAudioStream stream)
 {
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
-    return audioStream->setBufferSize(requestedFrames, actualFrames);
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getDirection();
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_getBufferSize(AAudioStream stream, aaudio_size_frames_t *frames)
+AAUDIO_API int32_t AAudioStream_getFramesPerBurst(AAudioStream stream)
 {
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(frames);
-    *frames = audioStream->getBufferSize();
-    return AAUDIO_OK;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getFramesPerBurst();
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_getDirection(AAudioStream stream, int32_t *direction)
+AAUDIO_API int32_t AAudioStream_getBufferCapacityInFrames(AAudioStream stream)
 {
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(direction);
-    *direction = audioStream->getDirection();
-    return AAUDIO_OK;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getBufferCapacity();
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_getFramesPerBurst(AAudioStream stream,
-                                                    aaudio_size_frames_t *framesPerBurst)
+AAUDIO_API int32_t AAudioStream_getXRunCount(AAudioStream stream)
 {
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(framesPerBurst);
-    *framesPerBurst = audioStream->getFramesPerBurst();
-    return AAUDIO_OK;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getXRunCount();
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_getBufferCapacity(AAudioStream stream,
-                                           aaudio_size_frames_t *capacity)
+AAUDIO_API int32_t AAudioStream_getDeviceId(AAudioStream stream)
 {
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(capacity);
-    *capacity = audioStream->getBufferCapacity();
-    return AAUDIO_OK;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getDeviceId();
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_getXRunCount(AAudioStream stream, int32_t *xRunCount)
+AAUDIO_API aaudio_sharing_mode_t AAudioStream_getSharingMode(AAudioStream stream)
 {
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(xRunCount);
-    *xRunCount = audioStream->getXRunCount();
-    return AAUDIO_OK;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getSharingMode();
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_getDeviceId(AAudioStream stream,
-                                                 aaudio_device_id_t *deviceId)
+AAUDIO_API int64_t AAudioStream_getFramesWritten(AAudioStream stream)
 {
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(deviceId);
-    *deviceId = audioStream->getDeviceId();
-    return AAUDIO_OK;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getFramesWritten();
 }
 
-AAUDIO_API aaudio_result_t AAudioStream_getSharingMode(AAudioStream stream,
-                                                 aaudio_sharing_mode_t *sharingMode)
+AAUDIO_API int64_t AAudioStream_getFramesRead(AAudioStream stream)
 {
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(sharingMode);
-    *sharingMode = audioStream->getSharingMode();
-    return AAUDIO_OK;
-}
-
-AAUDIO_API aaudio_result_t AAudioStream_getFramesWritten(AAudioStream stream,
-                                                   aaudio_position_frames_t *frames)
-{
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(frames);
-    *frames = audioStream->getFramesWritten();
-    return AAUDIO_OK;
-}
-
-AAUDIO_API aaudio_result_t AAudioStream_getFramesRead(AAudioStream stream, aaudio_position_frames_t *frames)
-{
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(frames);
-    *frames = audioStream->getFramesRead();
-    return AAUDIO_OK;
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getFramesRead();
 }
 
 AAUDIO_API aaudio_result_t AAudioStream_getTimestamp(AAudioStream stream,
-                                      aaudio_clockid_t clockid,
-                                      aaudio_position_frames_t *framePosition,
-                                      aaudio_nanoseconds_t *timeNanoseconds)
+                                      clockid_t clockid,
+                                      int64_t *framePosition,
+                                      int64_t *timeNanoseconds)
 {
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
     if (framePosition == nullptr) {
         return AAUDIO_ERROR_NULL;
     } else if (timeNanoseconds == nullptr) {
         return AAUDIO_ERROR_NULL;
-    } else if (clockid != AAUDIO_CLOCK_MONOTONIC && clockid != AAUDIO_CLOCK_BOOTTIME) {
+    } else if (clockid != CLOCK_MONOTONIC && clockid != CLOCK_BOOTTIME) {
         return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
     }
 
-    clockid_t hostClockId = AAudioConvert_fromAAudioClockId(clockid);
-    return audioStream->getTimestamp(hostClockId, framePosition, timeNanoseconds);
+    return audioStream->getTimestamp(clockid, framePosition, timeNanoseconds);
 }
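With handles gone, the stream queries above return their values directly instead of writing through out-parameters. A brief sketch of the new call style, assuming 'stream' is an AAudioStream that was opened successfully:

    int32_t framesPerBurst = AAudioStream_getFramesPerBurst(stream);
    aaudio_result_t actualSize = AAudioStream_setBufferSizeInFrames(stream, 2 * framesPerBurst);
    int32_t bufferSize = AAudioStream_getBufferSizeInFrames(stream);
    int64_t framesRead = AAudioStream_getFramesRead(stream);

    int64_t position = 0;
    int64_t timeNanos = 0;
    aaudio_result_t result = AAudioStream_getTimestamp(stream, CLOCK_MONOTONIC,
                                                       &position, &timeNanos);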
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 77d3cc0..c4962ee 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -52,7 +52,7 @@
 
 aaudio_result_t AudioStream::waitForStateTransition(aaudio_stream_state_t startingState,
                                                aaudio_stream_state_t endingState,
-                                               aaudio_nanoseconds_t timeoutNanoseconds)
+                                               int64_t timeoutNanoseconds)
 {
     aaudio_stream_state_t state = getState();
     aaudio_stream_state_t nextState = state;
@@ -73,10 +73,10 @@
 
 aaudio_result_t AudioStream::waitForStateChange(aaudio_stream_state_t currentState,
                                                 aaudio_stream_state_t *nextState,
-                                                aaudio_nanoseconds_t timeoutNanoseconds)
+                                                int64_t timeoutNanoseconds)
 {
     // TODO replace this when similar functionality added to AudioTrack.cpp
-    aaudio_nanoseconds_t durationNanos = 20 * AAUDIO_NANOS_PER_MILLISECOND;
+    int64_t durationNanos = 20 * AAUDIO_NANOS_PER_MILLISECOND;
     aaudio_stream_state_t state = getState();
     while (state == currentState && timeoutNanoseconds > 0) {
         if (durationNanos > timeoutNanoseconds) {
@@ -120,7 +120,7 @@
     return audioStream->wrapUserThread();
 }
 
-aaudio_result_t AudioStream::createThread(aaudio_nanoseconds_t periodNanoseconds,
+aaudio_result_t AudioStream::createThread(int64_t periodNanoseconds,
                                      aaudio_audio_thread_proc_t *threadProc,
                                      void* threadArg)
 {
@@ -144,7 +144,7 @@
     }
 }
 
-aaudio_result_t AudioStream::joinThread(void** returnArg, aaudio_nanoseconds_t timeoutNanoseconds)
+aaudio_result_t AudioStream::joinThread(void** returnArg, int64_t timeoutNanoseconds)
 {
     if (!mHasThread) {
         return AAUDIO_ERROR_INVALID_STATE;
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index acfed97..f5f9d28 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -52,8 +52,8 @@
 
     // TODO use aaudio_clockid_t all the way down to AudioClock
     virtual aaudio_result_t getTimestamp(clockid_t clockId,
-                                       aaudio_position_frames_t *framePosition,
-                                       aaudio_nanoseconds_t *timeNanoseconds) = 0;
+                                       int64_t *framePosition,
+                                       int64_t *timeNanoseconds) = 0;
 
 
     virtual aaudio_result_t updateState() = 0;
@@ -63,7 +63,7 @@
 
     virtual aaudio_result_t waitForStateChange(aaudio_stream_state_t currentState,
                                           aaudio_stream_state_t *nextState,
-                                          aaudio_nanoseconds_t timeoutNanoseconds);
+                                          int64_t timeoutNanoseconds);
 
     /**
      * Open the stream using the parameters in the builder.
@@ -79,16 +79,15 @@
         return AAUDIO_OK;
     }
 
-    virtual aaudio_result_t setBufferSize(aaudio_size_frames_t requestedFrames,
-                                        aaudio_size_frames_t *actualFrames) {
+    virtual aaudio_result_t setBufferSize(int32_t requestedFrames) {
         return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
-    virtual aaudio_result_t createThread(aaudio_nanoseconds_t periodNanoseconds,
+    virtual aaudio_result_t createThread(int64_t periodNanoseconds,
                                        aaudio_audio_thread_proc_t *threadProc,
                                        void *threadArg);
 
-    virtual aaudio_result_t joinThread(void **returnArg, aaudio_nanoseconds_t timeoutNanoseconds);
+    virtual aaudio_result_t joinThread(void **returnArg, int64_t timeoutNanoseconds);
 
     virtual aaudio_result_t registerThread() {
         return AAUDIO_OK;
@@ -106,19 +105,19 @@
 
     // ============== Queries ===========================
 
-    virtual aaudio_stream_state_t getState() const {
+    aaudio_stream_state_t getState() const {
         return mState;
     }
 
-    virtual aaudio_size_frames_t getBufferSize() const {
+    virtual int32_t getBufferSize() const {
         return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
-    virtual aaudio_size_frames_t getBufferCapacity() const {
+    virtual int32_t getBufferCapacity() const {
         return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
-    virtual aaudio_size_frames_t getFramesPerBurst() const {
+    virtual int32_t getFramesPerBurst() const {
         return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
@@ -142,7 +141,7 @@
         return mSamplesPerFrame;
     }
 
-    aaudio_device_id_t getDeviceId() const {
+    int32_t getDeviceId() const {
         return mDeviceId;
     }
 
@@ -154,19 +153,19 @@
         return mDirection;
     }
 
-    aaudio_size_bytes_t getBytesPerFrame() const {
+    int32_t getBytesPerFrame() const {
         return mSamplesPerFrame * getBytesPerSample();
     }
 
-    aaudio_size_bytes_t getBytesPerSample() const {
+    int32_t getBytesPerSample() const {
         return AAudioConvert_formatToSizeInBytes(mFormat);
     }
 
-    virtual aaudio_position_frames_t getFramesWritten() {
+    virtual int64_t getFramesWritten() {
         return mFramesWritten.get();
     }
 
-    virtual aaudio_position_frames_t getFramesRead() {
+    virtual int64_t getFramesRead() {
         return mFramesRead.get();
     }
 
@@ -174,25 +173,25 @@
     // ============== I/O ===========================
     // A Stream will only implement read() or write() depending on its direction.
     virtual aaudio_result_t write(const void *buffer,
-                             aaudio_size_frames_t numFrames,
-                             aaudio_nanoseconds_t timeoutNanoseconds) {
+                             int32_t numFrames,
+                             int64_t timeoutNanoseconds) {
         return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
     virtual aaudio_result_t read(void *buffer,
-                            aaudio_size_frames_t numFrames,
-                            aaudio_nanoseconds_t timeoutNanoseconds) {
+                            int32_t numFrames,
+                            int64_t timeoutNanoseconds) {
         return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
 protected:
 
-    virtual aaudio_position_frames_t incrementFramesWritten(aaudio_size_frames_t frames) {
-        return static_cast<aaudio_position_frames_t>(mFramesWritten.increment(frames));
+    virtual int64_t incrementFramesWritten(int32_t frames) {
+        return static_cast<int64_t>(mFramesWritten.increment(frames));
     }
 
-    virtual aaudio_position_frames_t incrementFramesRead(aaudio_size_frames_t frames) {
-        return static_cast<aaudio_position_frames_t>(mFramesRead.increment(frames));
+    virtual int64_t incrementFramesRead(int32_t frames) {
+        return static_cast<int64_t>(mFramesRead.increment(frames));
     }
 
     /**
@@ -202,13 +201,13 @@
      *   or AAUDIO_ERROR_TIMEOUT
      */
     virtual aaudio_result_t waitForStateTransition(aaudio_stream_state_t startingState,
-                                              aaudio_stream_state_t endingState,
-                                              aaudio_nanoseconds_t timeoutNanoseconds);
+                                                   aaudio_stream_state_t endingState,
+                                                   int64_t timeoutNanoseconds);
 
     /**
      * This should not be called after the open() call.
      */
-    void setSampleRate(aaudio_sample_rate_t sampleRate) {
+    void setSampleRate(int32_t sampleRate) {
         mSampleRate = sampleRate;
     }
 
@@ -243,33 +242,33 @@
     MonotonicCounter     mFramesWritten;
     MonotonicCounter     mFramesRead;
 
-    void setPeriodNanoseconds(aaudio_nanoseconds_t periodNanoseconds) {
+    void setPeriodNanoseconds(int64_t periodNanoseconds) {
         mPeriodNanoseconds.store(periodNanoseconds, std::memory_order_release);
     }
 
-    aaudio_nanoseconds_t getPeriodNanoseconds() {
+    int64_t getPeriodNanoseconds() {
         return mPeriodNanoseconds.load(std::memory_order_acquire);
     }
 
 private:
     // These do not change after open().
-    int32_t              mSamplesPerFrame = AAUDIO_UNSPECIFIED;
-    aaudio_sample_rate_t   mSampleRate = AAUDIO_UNSPECIFIED;
-    aaudio_stream_state_t  mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
-    aaudio_device_id_t     mDeviceId = AAUDIO_UNSPECIFIED;
+    int32_t                mSamplesPerFrame = AAUDIO_UNSPECIFIED;
+    int32_t                mSampleRate = AAUDIO_UNSPECIFIED;
+    int32_t                mDeviceId = AAUDIO_UNSPECIFIED;
     aaudio_sharing_mode_t  mSharingMode = AAUDIO_SHARING_MODE_SHARED;
     aaudio_audio_format_t  mFormat = AAUDIO_FORMAT_UNSPECIFIED;
     aaudio_direction_t     mDirection = AAUDIO_DIRECTION_OUTPUT;
+    aaudio_stream_state_t  mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
 
     // background thread ----------------------------------
-    bool                 mHasThread = false;
-    pthread_t            mThread; // initialized in constructor
+    bool                   mHasThread = false;
+    pthread_t              mThread; // initialized in constructor
 
     // These are set by the application thread and then read by the audio pthread.
-    std::atomic<aaudio_nanoseconds_t>  mPeriodNanoseconds; // for tuning SCHED_FIFO threads
+    std::atomic<int64_t>   mPeriodNanoseconds; // for tuning SCHED_FIFO threads
     // TODO make atomic?
     aaudio_audio_thread_proc_t* mThreadProc = nullptr;
-    void*                mThreadArg = nullptr;
+    void*                  mThreadArg = nullptr;
     aaudio_result_t        mThreadRegistrationResult = AAUDIO_OK;
 
 
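With the frame and size typedefs gone, the AudioStream getters above return plain
int32_t / int64_t values, and negative values double as aaudio_result_t error codes
(the base-class getBufferSize() still returns AAUDIO_ERROR_UNIMPLEMENTED). A minimal
caller sketch, assuming "stream" is a pointer to some concrete AudioStream subclass:

    int32_t sizeOrError = stream->getBufferSize();
    if (sizeOrError < 0) {
        // Negative results are aaudio_result_t codes, e.g. AAUDIO_ERROR_UNIMPLEMENTED.
        ALOGW("getBufferSize() unsupported: %d", sizeOrError);
    } else {
        int32_t bufferSizeInFrames = sizeOrError;  // valid size in frames
    }
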
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.h b/media/libaaudio/src/core/AudioStreamBuilder.h
index f366688..7b5f35c 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.h
+++ b/media/libaaudio/src/core/AudioStreamBuilder.h
@@ -56,11 +56,11 @@
         return this;
     }
 
-    aaudio_sample_rate_t getSampleRate() const {
+    int32_t getSampleRate() const {
         return mSampleRate;
     }
 
-    AudioStreamBuilder* setSampleRate(aaudio_sample_rate_t sampleRate) {
+    AudioStreamBuilder* setSampleRate(int32_t sampleRate) {
         mSampleRate = sampleRate;
         return this;
     }
@@ -83,20 +83,20 @@
         return this;
     }
 
-    aaudio_size_frames_t getBufferCapacity() const {
+    int32_t getBufferCapacity() const {
         return mBufferCapacity;
     }
 
-    AudioStreamBuilder* setBufferCapacity(aaudio_size_frames_t frames) {
+    AudioStreamBuilder* setBufferCapacity(int32_t frames) {
         mBufferCapacity = frames;
         return this;
     }
 
-    aaudio_device_id_t getDeviceId() const {
+    int32_t getDeviceId() const {
         return mDeviceId;
     }
 
-    AudioStreamBuilder* setDeviceId(aaudio_device_id_t deviceId) {
+    AudioStreamBuilder* setDeviceId(int32_t deviceId) {
         mDeviceId = deviceId;
         return this;
     }
@@ -105,12 +105,12 @@
 
 private:
     int32_t                mSamplesPerFrame = AAUDIO_UNSPECIFIED;
-    aaudio_sample_rate_t   mSampleRate = AAUDIO_UNSPECIFIED;
-    aaudio_device_id_t     mDeviceId = AAUDIO_DEVICE_UNSPECIFIED;
+    int32_t                mSampleRate = AAUDIO_UNSPECIFIED;
+    int32_t                mDeviceId = AAUDIO_DEVICE_UNSPECIFIED;
     aaudio_sharing_mode_t  mSharingMode = AAUDIO_SHARING_MODE_SHARED;
     aaudio_audio_format_t  mFormat = AAUDIO_FORMAT_UNSPECIFIED;
     aaudio_direction_t     mDirection = AAUDIO_DIRECTION_OUTPUT;
-    aaudio_size_frames_t   mBufferCapacity = AAUDIO_UNSPECIFIED;
+    int32_t                mBufferCapacity = AAUDIO_UNSPECIFIED;
 };
 
 } /* namespace aaudio */
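
The Builder setters keep returning "this", so requests can still be chained after the
typedef removal. A minimal sketch, assuming aaudio::AudioStreamBuilder is
default-constructible in this internal code:

    aaudio::AudioStreamBuilder builder;
    builder.setSampleRate(48000)
           ->setDeviceId(AAUDIO_DEVICE_UNSPECIFIED)
           ->setBufferCapacity(AAUDIO_UNSPECIFIED);
    int32_t requestedRate = builder.getSampleRate();  // 48000
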
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index 17d0a54..dd040a0 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -177,11 +177,11 @@
 }
 
 aaudio_result_t AudioStreamRecord::read(void *buffer,
-                                      aaudio_size_frames_t numFrames,
-                                      aaudio_nanoseconds_t timeoutNanoseconds)
+                                      int32_t numFrames,
+                                      int64_t timeoutNanoseconds)
 {
-    aaudio_size_frames_t bytesPerFrame = getBytesPerFrame();
-    aaudio_size_bytes_t numBytes;
+    int32_t bytesPerFrame = getBytesPerFrame();
+    int32_t numBytes;
     aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
     if (result != AAUDIO_OK) {
         return result;
@@ -195,25 +195,23 @@
     } else if (bytesRead < 0) {
         return AAudioConvert_androidToAAudioResult(bytesRead);
     }
-    aaudio_size_frames_t framesRead = (aaudio_size_frames_t)(bytesRead / bytesPerFrame);
+    int32_t framesRead = (int32_t)(bytesRead / bytesPerFrame);
     return (aaudio_result_t) framesRead;
 }
 
-aaudio_result_t AudioStreamRecord::setBufferSize(aaudio_size_frames_t requestedFrames,
-                                             aaudio_size_frames_t *actualFrames)
+aaudio_result_t AudioStreamRecord::setBufferSize(int32_t requestedFrames)
 {
-    *actualFrames = getBufferCapacity();
-    return AAUDIO_OK;
+    return getBufferSize(); // TODO use requestedFrames; for now just report the current size
 }
 
-aaudio_size_frames_t AudioStreamRecord::getBufferSize() const
+int32_t AudioStreamRecord::getBufferSize() const
 {
     return getBufferCapacity(); // TODO implement in AudioRecord?
 }
 
-aaudio_size_frames_t AudioStreamRecord::getBufferCapacity() const
+int32_t AudioStreamRecord::getBufferCapacity() const
 {
-    return static_cast<aaudio_size_frames_t>(mAudioRecord->frameCount());
+    return static_cast<int32_t>(mAudioRecord->frameCount());
 }
 
 int32_t AudioStreamRecord::getXRunCount() const
@@ -221,7 +219,7 @@
     return AAUDIO_ERROR_UNIMPLEMENTED; // TODO implement when AudioRecord supports it
 }
 
-aaudio_size_frames_t AudioStreamRecord::getFramesPerBurst() const
+int32_t AudioStreamRecord::getFramesPerBurst() const
 {
     return 192; // TODO add query to AudioRecord.cpp
 }
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.h b/media/libaaudio/src/legacy/AudioStreamRecord.h
index a2ac9f3..c8d389b 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.h
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.h
@@ -44,25 +44,24 @@
     virtual aaudio_result_t requestStop() override;
 
     virtual aaudio_result_t getTimestamp(clockid_t clockId,
-                                       aaudio_position_frames_t *framePosition,
-                                       aaudio_nanoseconds_t *timeNanoseconds) override {
+                                       int64_t *framePosition,
+                                       int64_t *timeNanoseconds) override {
         return AAUDIO_ERROR_UNIMPLEMENTED; // TODO
     }
 
     virtual aaudio_result_t read(void *buffer,
-                             aaudio_size_frames_t numFrames,
-                             aaudio_nanoseconds_t timeoutNanoseconds) override;
+                             int32_t numFrames,
+                             int64_t timeoutNanoseconds) override;
 
-    virtual aaudio_result_t setBufferSize(aaudio_size_frames_t requestedFrames,
-                                             aaudio_size_frames_t *actualFrames) override;
+    virtual aaudio_result_t setBufferSize(int32_t requestedFrames) override;
 
-    virtual aaudio_size_frames_t getBufferSize() const override;
+    virtual int32_t getBufferSize() const override;
 
-    virtual aaudio_size_frames_t getBufferCapacity() const override;
+    virtual int32_t getBufferCapacity() const override;
 
     virtual int32_t getXRunCount() const override;
 
-    virtual aaudio_size_frames_t getFramesPerBurst() const override;
+    virtual int32_t getFramesPerBurst() const override;
 
     virtual aaudio_result_t updateState() override;
 
@@ -70,7 +69,7 @@
     android::sp<android::AudioRecord> mAudioRecord;
     // TODO add 64-bit position reporting to AudioRecord and use it.
     aaudio_wrapping_frames_t   mPositionWhenStarting = 0;
-    android::String16        mOpPackageName;
+    android::String16          mOpPackageName;
 };
 
 } /* namespace aaudio */
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index b7d8664..e0a04c3 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -221,11 +221,11 @@
 }
 
 aaudio_result_t AudioStreamTrack::write(const void *buffer,
-                                      aaudio_size_frames_t numFrames,
-                                      aaudio_nanoseconds_t timeoutNanoseconds)
+                                      int32_t numFrames,
+                                      int64_t timeoutNanoseconds)
 {
-    aaudio_size_frames_t bytesPerFrame = getBytesPerFrame();
-    aaudio_size_bytes_t numBytes;
+    int32_t bytesPerFrame = getBytesPerFrame();
+    int32_t numBytes;
     aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
     if (result != AAUDIO_OK) {
         return result;
@@ -240,31 +240,29 @@
         ALOGE("invalid write, returned %d", (int)bytesWritten);
         return AAudioConvert_androidToAAudioResult(bytesWritten);
     }
-    aaudio_size_frames_t framesWritten = (aaudio_size_frames_t)(bytesWritten / bytesPerFrame);
+    int32_t framesWritten = (int32_t)(bytesWritten / bytesPerFrame);
     incrementFramesWritten(framesWritten);
     return framesWritten;
 }
 
-aaudio_result_t AudioStreamTrack::setBufferSize(aaudio_size_frames_t requestedFrames,
-                                             aaudio_size_frames_t *actualFrames)
+aaudio_result_t AudioStreamTrack::setBufferSize(int32_t requestedFrames)
 {
     ssize_t result = mAudioTrack->setBufferSizeInFrames(requestedFrames);
-    if (result != OK) {
+    if (result < 0) {
         return AAudioConvert_androidToAAudioResult(result);
     } else {
-        *actualFrames = result;
-        return AAUDIO_OK;
+        return result;
     }
 }
 
-aaudio_size_frames_t AudioStreamTrack::getBufferSize() const
+int32_t AudioStreamTrack::getBufferSize() const
 {
-    return static_cast<aaudio_size_frames_t>(mAudioTrack->getBufferSizeInFrames());
+    return static_cast<int32_t>(mAudioTrack->getBufferSizeInFrames());
 }
 
-aaudio_size_frames_t AudioStreamTrack::getBufferCapacity() const
+int32_t AudioStreamTrack::getBufferCapacity() const
 {
-    return static_cast<aaudio_size_frames_t>(mAudioTrack->frameCount());
+    return static_cast<int32_t>(mAudioTrack->frameCount());
 }
 
 int32_t AudioStreamTrack::getXRunCount() const
@@ -277,7 +275,7 @@
     return 192; // TODO add query to AudioTrack.cpp
 }
 
-aaudio_position_frames_t AudioStreamTrack::getFramesRead() {
+int64_t AudioStreamTrack::getFramesRead() {
     aaudio_wrapping_frames_t position;
     status_t result;
     switch (getState()) {
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.h b/media/libaaudio/src/legacy/AudioStreamTrack.h
index 73d0cac..1de07ce 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.h
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.h
@@ -46,31 +46,30 @@
     virtual aaudio_result_t requestStop() override;
 
     virtual aaudio_result_t getTimestamp(clockid_t clockId,
-                                       aaudio_position_frames_t *framePosition,
-                                       aaudio_nanoseconds_t *timeNanoseconds) override {
+                                       int64_t *framePosition,
+                                       int64_t *timeNanoseconds) override {
         return AAUDIO_ERROR_UNIMPLEMENTED; // TODO call getTimestamp(ExtendedTimestamp *timestamp);
     }
 
     virtual aaudio_result_t write(const void *buffer,
-                             aaudio_size_frames_t numFrames,
-                             aaudio_nanoseconds_t timeoutNanoseconds) override;
+                             int32_t numFrames,
+                             int64_t timeoutNanoseconds) override;
 
-    virtual aaudio_result_t setBufferSize(aaudio_size_frames_t requestedFrames,
-                                             aaudio_size_frames_t *actualFrames) override;
-    virtual aaudio_size_frames_t getBufferSize() const override;
-    virtual aaudio_size_frames_t getBufferCapacity() const override;
-    virtual aaudio_size_frames_t getFramesPerBurst()const  override;
+    virtual aaudio_result_t setBufferSize(int32_t requestedFrames) override;
+    virtual int32_t getBufferSize() const override;
+    virtual int32_t getBufferCapacity() const override;
+    virtual int32_t getFramesPerBurst() const override;
     virtual int32_t getXRunCount() const override;
 
-    virtual aaudio_position_frames_t getFramesRead() override;
+    virtual int64_t getFramesRead() override;
 
     virtual aaudio_result_t updateState() override;
 
 private:
     android::sp<android::AudioTrack> mAudioTrack;
     // TODO add 64-bit position reporting to AudioRecord and use it.
-    aaudio_wrapping_frames_t           mPositionWhenStarting = 0;
-    aaudio_wrapping_frames_t           mPositionWhenPausing = 0;
+    aaudio_wrapping_frames_t         mPositionWhenStarting = 0;
+    aaudio_wrapping_frames_t         mPositionWhenPausing = 0;
 };
 
 } /* namespace aaudio */
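
setBufferSize() no longer reports the granted size through an out-parameter: the return
value is either the actual size in frames (>= 0) or a negative aaudio_result_t. A minimal
caller sketch ("streamTrack" and "requestedFrames" are placeholder names):

    aaudio_result_t sizeOrError = streamTrack->setBufferSize(requestedFrames);
    if (sizeOrError < 0) {
        ALOGE("setBufferSize() failed: %d", sizeOrError);
    } else {
        int32_t actualFrames = sizeOrError;  // size actually granted by AudioTrack
    }
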
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index 34c1ae4..26fa75d 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -27,8 +27,8 @@
 
 using namespace android;
 
-aaudio_size_bytes_t AAudioConvert_formatToSizeInBytes(aaudio_audio_format_t format) {
-    aaudio_size_bytes_t size = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+int32_t AAudioConvert_formatToSizeInBytes(aaudio_audio_format_t format) {
+    int32_t size = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
     switch (format) {
         case AAUDIO_FORMAT_PCM_I16:
             size = sizeof(int16_t);
@@ -172,12 +172,12 @@
     return aaudioFormat;
 }
 
-aaudio_size_bytes_t AAudioConvert_framesToBytes(aaudio_size_frames_t numFrames,
-                                            aaudio_size_bytes_t bytesPerFrame,
-                                            aaudio_size_bytes_t *sizeInBytes) {
+int32_t AAudioConvert_framesToBytes(int32_t numFrames,
+                                    int32_t bytesPerFrame,
+                                    int32_t *sizeInBytes) {
     // TODO implement more elegantly
     const int32_t maxChannels = 256; // ridiculously large
-    const aaudio_size_frames_t maxBytesPerFrame = maxChannels * sizeof(float);
+    const int32_t maxBytesPerFrame = maxChannels * sizeof(float);
     // Prevent overflow by limiting multiplicands.
     if (bytesPerFrame > maxBytesPerFrame || numFrames > (0x3FFFFFFF / maxBytesPerFrame)) {
         ALOGE("size overflow, numFrames = %d, frameSize = %zd", numFrames, bytesPerFrame);
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index 38696df..d3b5ffe 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -46,9 +46,9 @@
  * @param sizeInBytes total size in bytes
  * @return AAUDIO_OK or negative error, eg. AAUDIO_ERROR_OUT_OF_RANGE
  */
-aaudio_size_bytes_t AAudioConvert_framesToBytes(aaudio_size_frames_t numFrames,
-                                            aaudio_size_bytes_t bytesPerFrame,
-                                            aaudio_size_bytes_t *sizeInBytes);
+int32_t AAudioConvert_framesToBytes(int32_t numFrames,
+                                    int32_t bytesPerFrame,
+                                    int32_t *sizeInBytes);
 
 audio_format_t AAudioConvert_aaudioToAndroidDataFormat(aaudio_audio_format_t aaudio_format);
 
@@ -57,6 +57,6 @@
 /**
  * @return the size of a sample of the given format in bytes or AAUDIO_ERROR_ILLEGAL_ARGUMENT
  */
-aaudio_size_bytes_t AAudioConvert_formatToSizeInBytes(aaudio_audio_format_t format);
+int32_t AAudioConvert_formatToSizeInBytes(aaudio_audio_format_t format);
 
 #endif //UTILITY_AAUDIO_UTILITIES_H
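
AAudioConvert_framesToBytes() keeps its out-parameter but now traffics in plain int32_t;
it returns AAUDIO_OK or a negative error such as AAUDIO_ERROR_OUT_OF_RANGE when the
multiplication would overflow. A minimal sketch ("numFrames" and "bytesPerFrame" are
placeholder values):

    int32_t numBytes = 0;
    aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
    if (result != AAUDIO_OK) {
        return result;  // conversion failed, e.g. size overflow
    }
    // numBytes is now safe to pass to byte-oriented read()/write() calls.
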
diff --git a/media/libaaudio/src/utility/AudioClock.h b/media/libaaudio/src/utility/AudioClock.h
index 9ac21d3..952c7b8 100644
--- a/media/libaaudio/src/utility/AudioClock.h
+++ b/media/libaaudio/src/utility/AudioClock.h
@@ -22,9 +22,15 @@
 
 #include <aaudio/AAudioDefinitions.h>
 
+// Time conversion constants.
+#define AAUDIO_NANOS_PER_MICROSECOND ((int64_t)1000)
+#define AAUDIO_NANOS_PER_MILLISECOND (AAUDIO_NANOS_PER_MICROSECOND * 1000)
+#define AAUDIO_MILLIS_PER_SECOND     1000
+#define AAUDIO_NANOS_PER_SECOND      (AAUDIO_NANOS_PER_MILLISECOND * AAUDIO_MILLIS_PER_SECOND)
+
 class AudioClock {
 public:
-    static aaudio_nanoseconds_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
+    static int64_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
         struct timespec time;
         int result = clock_gettime(clockId, &time);
         if (result < 0) {
@@ -42,7 +48,7 @@
      * @param clockId CLOCK_MONOTONIC is default
      * @return 0, a negative error, or 1 if the call is interrupted by a signal handler (EINTR)
      */
-    static int sleepUntilNanoTime(aaudio_nanoseconds_t nanoTime,
+    static int sleepUntilNanoTime(int64_t nanoTime,
                                   clockid_t clockId = CLOCK_MONOTONIC) {
         if (nanoTime > 0) {
             struct timespec time;
@@ -72,7 +78,7 @@
      * @param clockId CLOCK_MONOTONIC is default
      * @return 0, a negative error, or 1 if the call is interrupted by a signal handler (EINTR)
      */
-    static int sleepForNanos(aaudio_nanoseconds_t nanoseconds, clockid_t clockId = CLOCK_MONOTONIC) {
+    static int sleepForNanos(int64_t nanoseconds, clockid_t clockId = CLOCK_MONOTONIC) {
         if (nanoseconds > 0) {
             struct timespec time;
             time.tv_sec = nanoseconds / AAUDIO_NANOS_PER_SECOND;
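
The clock helpers now take raw int64_t nanoseconds, and AudioClock.h defines its own
conversion constants. A small usage sketch:

    int64_t now = AudioClock::getNanoseconds();                // CLOCK_MONOTONIC by default
    AudioClock::sleepForNanos(10 * AAUDIO_NANOS_PER_MILLISECOND);
    AudioClock::sleepUntilNanoTime(now + (2 * AAUDIO_NANOS_PER_SECOND));
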
diff --git a/media/libaaudio/src/utility/HandleTracker.h b/media/libaaudio/src/utility/HandleTracker.h
index c80860c..23a73ed 100644
--- a/media/libaaudio/src/utility/HandleTracker.h
+++ b/media/libaaudio/src/utility/HandleTracker.h
@@ -20,6 +20,7 @@
 #include <stdint.h>
 #include <utils/Mutex.h>
 
+typedef int32_t  aaudio_handle_t;
 typedef int32_t  handle_tracker_type_t;       // what kind of handle
 typedef int32_t  handle_tracker_slot_t;       // index in allocation table
 typedef int32_t  handle_tracker_generation_t; // incremented when slot used
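
aaudio_handle_t moves here as a plain int32_t so the service can keep passing integer
handles over Binder while the public API hands out opaque pointers. Usage mirrors the
AAudioService.cpp hunk below (names are taken from that hunk; the rest of the
HandleTracker API is assumed unchanged):

    aaudio_handle_t handle = mHandleTracker.put(AAUDIO_HANDLE_TYPE_STREAM, serviceStream);
    if (handle < 0) {
        // Negative handles are aaudio_result_t error codes.
        delete serviceStream;
    }
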
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index dfa9753..99b0b4d 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -57,7 +57,7 @@
         ALOGE("AAudioService::openStream(): open returned %d", result);
         return result;
     } else {
-        AAudioStream handle = mHandleTracker.put(AAUDIO_HANDLE_TYPE_STREAM, serviceStream);
+        aaudio_handle_t handle = mHandleTracker.put(AAUDIO_HANDLE_TYPE_STREAM, serviceStream);
         ALOGD("AAudioService::openStream(): handle = 0x%08X", handle);
         if (handle < 0) {
             delete serviceStream;
@@ -127,7 +127,7 @@
 
 aaudio_result_t AAudioService::registerAudioThread(aaudio_handle_t streamHandle,
                                                          pid_t clientThreadId,
-                                                         aaudio_nanoseconds_t periodNanoseconds) {
+                                                         int64_t periodNanoseconds) {
     AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
     ALOGD("AAudioService::registerAudioThread(), serviceStream = %p", serviceStream);
     if (serviceStream == nullptr) {
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
index e9625b2..a520d7a 100644
--- a/services/oboeservice/AAudioService.h
+++ b/services/oboeservice/AAudioService.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef AAUDIO_AAUDIO_AUDIO_SERVICE_H
-#define AAUDIO_AAUDIO_AUDIO_SERVICE_H
+#ifndef AAUDIO_AAUDIO_SERVICE_H
+#define AAUDIO_AAUDIO_SERVICE_H
 
 #include <time.h>
 #include <pthread.h>
@@ -58,7 +58,7 @@
     virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle);
 
     virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
-                                              pid_t pid, aaudio_nanoseconds_t periodNanoseconds) ;
+                                              pid_t pid, int64_t periodNanoseconds);
 
     virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle, pid_t pid);
 
@@ -72,4 +72,4 @@
 
 } /* namespace android */
 
-#endif //AAUDIO_AAUDIO_AUDIO_SERVICE_H
+#endif //AAUDIO_AAUDIO_SERVICE_H
diff --git a/services/oboeservice/AAudioServiceDefinitions.h b/services/oboeservice/AAudioServiceDefinitions.h
index ee9aaa7..f98acbf 100644
--- a/services/oboeservice/AAudioServiceDefinitions.h
+++ b/services/oboeservice/AAudioServiceDefinitions.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef AAUDIO_AAUDIO_SERVICE_H
-#define AAUDIO_AAUDIO_SERVICE_H
+#ifndef AAUDIO_AAUDIO_SERVICE_DEFINITIONS_H
+#define AAUDIO_AAUDIO_SERVICE_DEFINITIONS_H
 
 #include <stdint.h>
 
@@ -28,9 +28,9 @@
 // TODO move this an "include" folder for the service.
 
 struct AAudioMessageTimestamp {
-    aaudio_position_frames_t position;
+    int64_t                position;
     int64_t                deviceOffset; // add to client position to get device position
-    aaudio_nanoseconds_t     timestamp;
+    int64_t                timestamp;
 };
 
 typedef enum aaudio_service_event_e : uint32_t {
@@ -61,7 +61,6 @@
     };
 } AAudioServiceMessage;
 
-
 } /* namespace aaudio */
 
-#endif //AAUDIO_AAUDIO_SERVICE_H
+#endif //AAUDIO_AAUDIO_SERVICE_DEFINITIONS_H
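
With the typedefs removed, the timestamp message is three plain int64_t fields. A minimal
sketch of filling one in before it is queued ("framesRead" is a placeholder for the
stream's current position):

    AAudioMessageTimestamp timestampMessage;
    timestampMessage.position = framesRead;                     // client frame position
    timestampMessage.deviceOffset = 0;                          // add to get device position
    timestampMessage.timestamp = AudioClock::getNanoseconds();  // when the position was sampled
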
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 4a59253..7a812f9 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -72,7 +72,7 @@
 
     virtual void sendCurrentTimestamp() = 0;
 
-    aaudio_size_frames_t getFramesPerBurst() {
+    int32_t getFramesPerBurst() {
         return mFramesPerBurst;
     }
 
@@ -90,17 +90,17 @@
 
 protected:
 
-    pid_t                    mRegisteredClientThread = ILLEGAL_THREAD_ID;
+    pid_t              mRegisteredClientThread = ILLEGAL_THREAD_ID;
 
-    SharedRingBuffer *       mUpMessageQueue;
+    SharedRingBuffer*  mUpMessageQueue;
 
-    aaudio_sample_rate_t       mSampleRate = 0;
-    aaudio_size_bytes_t        mBytesPerFrame = 0;
-    aaudio_size_frames_t       mFramesPerBurst = 0;
-    aaudio_size_frames_t       mCapacityInFrames = 0;
-    aaudio_size_bytes_t        mCapacityInBytes = 0;
+    int32_t            mSampleRate = 0;
+    int32_t            mBytesPerFrame = 0;
+    int32_t            mFramesPerBurst = 0;
+    int32_t            mCapacityInFrames = 0;
+    int32_t            mCapacityInBytes = 0;
 
-    android::Mutex           mLockUpMessageQueue;
+    android::Mutex     mLockUpMessageQueue;
 };
 
 } /* namespace aaudio */
diff --git a/services/oboeservice/AAudioServiceStreamFakeHal.cpp b/services/oboeservice/AAudioServiceStreamFakeHal.cpp
index 1caeb3f..71d3542 100644
--- a/services/oboeservice/AAudioServiceStreamFakeHal.cpp
+++ b/services/oboeservice/AAudioServiceStreamFakeHal.cpp
@@ -191,7 +191,7 @@
     timestampScheduler.setBurstPeriod(mFramesPerBurst, mSampleRate);
     timestampScheduler.start(AudioClock::getNanoseconds());
     while(mThreadEnabled.load()) {
-        aaudio_nanoseconds_t nextTime = timestampScheduler.nextAbsoluteTime();
+        int64_t nextTime = timestampScheduler.nextAbsoluteTime();
         if (AudioClock::getNanoseconds() >= nextTime) {
             sendCurrentTimestamp();
         } else  {
diff --git a/services/oboeservice/AAudioThread.h b/services/oboeservice/AAudioThread.h
index 1f676dc..a5d43a4 100644
--- a/services/oboeservice/AAudioThread.h
+++ b/services/oboeservice/AAudioThread.h
@@ -62,9 +62,9 @@
     void dispatch(); // called internally from 'C' thread wrapper
 
 private:
-    Runnable*                mRunnable = nullptr; // TODO make atomic with memory barrier?
-    bool                     mHasThread = false;
-    pthread_t                mThread; // initialized in constructor
+    Runnable*          mRunnable = nullptr; // TODO make atomic with memory barrier?
+    bool               mHasThread = false;
+    pthread_t          mThread; // initialized in constructor
 
 };
 
diff --git a/services/oboeservice/TimestampScheduler.cpp b/services/oboeservice/TimestampScheduler.cpp
index 5875909..d54996f 100644
--- a/services/oboeservice/TimestampScheduler.cpp
+++ b/services/oboeservice/TimestampScheduler.cpp
@@ -21,12 +21,12 @@
 
 using namespace aaudio;
 
-void TimestampScheduler::start(aaudio_nanoseconds_t startTime) {
+void TimestampScheduler::start(int64_t startTime) {
     mStartTime = startTime;
     mLastTime = startTime;
 }
 
-aaudio_nanoseconds_t TimestampScheduler::nextAbsoluteTime() {
+int64_t TimestampScheduler::nextAbsoluteTime() {
     int64_t periodsElapsed = (mLastTime - mStartTime) / mBurstPeriod;
     // This is an arbitrary schedule that could probably be improved.
     // It starts out sending a timestamp on every period because we want to
@@ -35,10 +35,10 @@
     int64_t minPeriodsToDelay = (periodsElapsed < 10) ? 1 :
         (periodsElapsed < 100) ? 3 :
         (periodsElapsed < 1000) ? 10 : 50;
-    aaudio_nanoseconds_t sleepTime = minPeriodsToDelay * mBurstPeriod;
+    int64_t sleepTime = minPeriodsToDelay * mBurstPeriod;
     // Generate a random rectangular distribution one burst wide so that we get
     // an uncorrelated sampling of the MMAP pointer.
-    sleepTime += (aaudio_nanoseconds_t)(random() * mBurstPeriod / RAND_MAX);
+    sleepTime += (int64_t)(random() * mBurstPeriod / RAND_MAX);
     mLastTime += sleepTime;
     return mLastTime;
 }
diff --git a/services/oboeservice/TimestampScheduler.h b/services/oboeservice/TimestampScheduler.h
index efc9c5f..91a2477 100644
--- a/services/oboeservice/TimestampScheduler.h
+++ b/services/oboeservice/TimestampScheduler.h
@@ -17,7 +17,7 @@
 #ifndef AAUDIO_TIMESTAMP_SCHEDULER_H
 #define AAUDIO_TIMESTAMP_SCHEDULER_H
 
-//#include <stdlib.h> // random()
+
 
 #include "IAAudioService.h"
 #include "AAudioServiceDefinitions.h"
@@ -25,6 +25,7 @@
 #include "fifo/FifoBuffer.h"
 #include "SharedRingBuffer.h"
 #include "AudioEndpointParcelable.h"
+#include "utility/AudioClock.h"
 
 namespace aaudio {
 
@@ -43,32 +44,32 @@
     /**
      * Start the schedule at the given time.
      */
-    void start(aaudio_nanoseconds_t startTime);
+    void start(int64_t startTime);
 
     /**
      * Calculate the next time that the read position should be
      * measured.
      */
-    aaudio_nanoseconds_t nextAbsoluteTime();
+    int64_t nextAbsoluteTime();
 
-    void setBurstPeriod(aaudio_nanoseconds_t burstPeriod) {
+    void setBurstPeriod(int64_t burstPeriod) {
         mBurstPeriod = burstPeriod;
     }
 
-    void setBurstPeriod(aaudio_size_frames_t framesPerBurst,
-                        aaudio_sample_rate_t sampleRate) {
+    void setBurstPeriod(int32_t framesPerBurst,
+                        int32_t sampleRate) {
         mBurstPeriod = AAUDIO_NANOS_PER_SECOND * framesPerBurst / sampleRate;
     }
 
-    aaudio_nanoseconds_t getBurstPeriod() {
+    int64_t getBurstPeriod() {
         return mBurstPeriod;
     }
 
 private:
     // Start with an arbitrary default so we do not divide by zero.
-    aaudio_nanoseconds_t mBurstPeriod = AAUDIO_NANOS_PER_MILLISECOND;
-    aaudio_nanoseconds_t mStartTime;
-    aaudio_nanoseconds_t mLastTime;
+    int64_t mBurstPeriod = AAUDIO_NANOS_PER_MILLISECOND;
+    int64_t mStartTime;
+    int64_t mLastTime;
 };
 
 } /* namespace aaudio */
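
Only the time type changes in the scheduler; its loop shape stays the same, as the
AAudioServiceStreamFakeHal hunk above already shows. A compact sketch ("keepRunning" and
sendCurrentTimestamp() are placeholders for the owning stream's state and callback):

    TimestampScheduler timestampScheduler;
    timestampScheduler.setBurstPeriod(framesPerBurst, sampleRate);  // int32_t frames and rate
    timestampScheduler.start(AudioClock::getNanoseconds());
    while (keepRunning) {
        int64_t nextTime = timestampScheduler.nextAbsoluteTime();
        if (AudioClock::getNanoseconds() >= nextTime) {
            sendCurrentTimestamp();
        } else {
            AudioClock::sleepUntilNanoTime(nextTime);
        }
    }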