refactor AudioTrack and AudioFlinger createTrack()

Refactor the mechanism used by audio tracks to query and attach
to an output mixer/stream in AudioFlinger. This will:
- reduce the number of binder transactions needed to create a track
  (illustrated by the sketch after this list)
- move sample rate, frame count and flags validation to the audioserver side
- move audio session allocation to the audioserver side
- prepare for restricting certain binder transactions to audioserver only
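
For illustration only (not part of this change; identifiers such as
audioFlinger, attributes, sampleRate, requestedFrameCount and
notificationFrames are placeholders for values the client already
holds), the client-side call sequence becomes roughly:

    IAudioFlinger::CreateTrackInput input;
    input.attr = attributes;
    input.config = AUDIO_CONFIG_INITIALIZER;
    input.config.sample_rate = sampleRate;     // 0 lets the server choose
    input.config.channel_mask = channelMask;
    input.config.format = format;
    input.clientInfo.clientUid = clientUid;
    input.clientInfo.clientPid = clientPid;
    input.sharedBuffer = sharedBuffer;         // 0 for streaming tracks
    input.flags = flags;
    input.frameCount = requestedFrameCount;    // 0 lets the server compute it
    input.notificationFrameCount = notificationFrames;
    input.selectedDeviceId = selectedDeviceId;
    input.sessionId = AUDIO_SESSION_ALLOCATE;  // session allocated by server

    IAudioFlinger::CreateTrackOutput output;
    status_t status;
    sp<IAudioTrack> track = audioFlinger->createTrack(input, output, &status);
    // On success, output carries the validated flags, frame counts, sample
    // rate, session id, routed device id and output handle, all returned in
    // a single binder transaction.

AudioFlinger owns the output handle once the IAudioTrack is returned,
so the client no longer calls AudioSystem::releaseOutput() itself.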

Test: CTS tests for AudioTrack

Change-Id: If4369aad6c080a56c0b42fbfcc97c8ade17a7439
diff --git a/include/media/AudioClient.h b/include/media/AudioClient.h
deleted file mode 100644
index 9efd76d..0000000
--- a/include/media/AudioClient.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#ifndef ANDROID_AUDIO_CLIENT_H
-#define ANDROID_AUDIO_CLIENT_H
-
-#include <system/audio.h>
-#include <utils/String16.h>
-
-namespace android {
-
-class AudioClient {
- public:
-    AudioClient() :
-        clientUid(-1), clientPid(-1), packageName("") {}
-
-    uid_t clientUid;
-    pid_t clientPid;
-    String16 packageName;
-};
-
-}; // namespace android
-
-#endif  // ANDROID_AUDIO_CLIENT_H
diff --git a/include/media/AudioClient.h b/include/media/AudioClient.h
new file mode 120000
index 0000000..feac9b9
--- /dev/null
+++ b/include/media/AudioClient.h
@@ -0,0 +1 @@
+media/libaudioclient/include/media/AudioClient.h
\ No newline at end of file
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 58330ae..c284f73 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -20,6 +20,7 @@
 #include <utils/Log.h>
 #include <binder/IServiceManager.h>
 #include <binder/ProcessState.h>
+#include <media/AudioResamplerPublic.h>
 #include <media/AudioSystem.h>
 #include <media/IAudioFlinger.h>
 #include <media/IAudioPolicyService.h>
@@ -253,6 +254,31 @@
     return volume ? 100 - int(dBConvertInverse * log(volume) + 0.5) : 0;
 }
 
+/* static */ size_t AudioSystem::calculateMinFrameCount(
+        uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
+        uint32_t sampleRate, float speed /*, uint32_t notificationsPerBufferReq*/)
+{
+    // Ensure that buffer depth covers at least audio hardware latency
+    uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
+    if (minBufCount < 2) {
+        minBufCount = 2;
+    }
+#if 0
+    // The notificationsPerBufferReq parameter is not yet used for non-fast tracks,
+    // but keeping the code here to make it easier to add later.
+    if (minBufCount < notificationsPerBufferReq) {
+        minBufCount = notificationsPerBufferReq;
+    }
+#endif
+    ALOGV("calculateMinFrameCount afLatency %u  afFrameCount %u  afSampleRate %u  "
+            "sampleRate %u  speed %f  minBufCount: %u" /*"  notificationsPerBufferReq %u"*/,
+            afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount
+            /*, notificationsPerBufferReq*/);
+    return minBufCount * sourceFramesNeededWithTimestretch(
+            sampleRate, afFrameCount, afSampleRate, speed);
+}
+
+
 status_t AudioSystem::getOutputSamplingRate(uint32_t* samplingRate, audio_stream_type_t streamType)
 {
     audio_io_handle_t output;
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 356b321..36961d6 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -99,32 +99,6 @@
     return kFixPitch ? AUDIO_TIMESTRETCH_PITCH_NORMAL : pitch;
 }
 
-// Must match similar computation in createTrack_l in Threads.cpp.
-// TODO: Move to a common library
-static size_t calculateMinFrameCount(
-        uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
-        uint32_t sampleRate, float speed /*, uint32_t notificationsPerBufferReq*/)
-{
-    // Ensure that buffer depth covers at least audio hardware latency
-    uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
-    if (minBufCount < 2) {
-        minBufCount = 2;
-    }
-#if 0
-    // The notificationsPerBufferReq parameter is not yet used for non-fast tracks,
-    // but keeping the code here to make it easier to add later.
-    if (minBufCount < notificationsPerBufferReq) {
-        minBufCount = notificationsPerBufferReq;
-    }
-#endif
-    ALOGV("calculateMinFrameCount afLatency %u  afFrameCount %u  afSampleRate %u  "
-            "sampleRate %u  speed %f  minBufCount: %u" /*"  notificationsPerBufferReq %u"*/,
-            afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount
-            /*, notificationsPerBufferReq*/);
-    return minBufCount * sourceFramesNeededWithTimestretch(
-            sampleRate, afFrameCount, afSampleRate, speed);
-}
-
 // static
 status_t AudioTrack::getMinFrameCount(
         size_t* frameCount,
@@ -165,8 +139,8 @@
 
     // When called from createTrack, speed is 1.0f (normal speed).
     // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
-    *frameCount = calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f
-            /*, 0 notificationsPerBufferReq*/);
+    *frameCount = AudioSystem::calculateMinFrameCount(afLatency, afFrameCount, afSampleRate,
+                                              sampleRate, 1.0f /*, 0 notificationsPerBufferReq*/);
 
     // The formula above should always produce a non-zero value under normal circumstances:
     // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
@@ -190,8 +164,7 @@
       mPreviousSchedulingGroup(SP_DEFAULT),
       mPausedPosition(0),
       mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
-      mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
-      mPortId(AUDIO_PORT_HANDLE_NONE)
+      mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE)
 {
     mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
     mAttributes.usage = AUDIO_USAGE_UNKNOWN;
@@ -222,8 +195,7 @@
       mState(STATE_STOPPED),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
       mPreviousSchedulingGroup(SP_DEFAULT),
-      mPausedPosition(0),
-      mPortId(AUDIO_PORT_HANDLE_NONE)
+      mPausedPosition(0)
 {
     mStatus = set(streamType, sampleRate, format, channelMask,
             frameCount, flags, cbf, user, notificationFrames,
@@ -254,8 +226,7 @@
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
       mPreviousSchedulingGroup(SP_DEFAULT),
       mPausedPosition(0),
-      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
-      mPortId(AUDIO_PORT_HANDLE_NONE)
+      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
 {
     mStatus = set(streamType, sampleRate, format, channelMask,
             0 /*frameCount*/, flags, cbf, user, notificationFrames,
@@ -320,6 +291,7 @@
 
     mThreadCanCallJava = threadCanCallJava;
     mSelectedDeviceId = selectedDeviceId;
+    mSessionId = sessionId;
 
     switch (transferType) {
     case TRANSFER_DEFAULT:
@@ -500,11 +472,6 @@
                 notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
     }
     mNotificationFramesAct = 0;
-    if (sessionId == AUDIO_SESSION_ALLOCATE) {
-        mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
-    } else {
-        mSessionId = sessionId;
-    }
     int callingpid = IPCThreadState::self()->getCallingPid();
     int mypid = getpid();
     if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
@@ -1317,70 +1284,12 @@
         return NO_INIT;
     }
 
-    audio_io_handle_t output;
-    audio_stream_type_t streamType = mStreamType;
-    audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;
+    status_t status;
     bool callbackAdded = false;
 
+    {
     // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
     // After fast request is denied, we will request again if IAudioTrack is re-created.
-
-    status_t status;
-    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
-    config.sample_rate = mSampleRate;
-    config.channel_mask = mChannelMask;
-    config.format = mFormat;
-    config.offload_info = mOffloadInfoCopy;
-    mRoutedDeviceId = mSelectedDeviceId;
-    status = AudioSystem::getOutputForAttr(attr, &output,
-                                           mSessionId, &streamType, mClientUid,
-                                           &config,
-                                           mFlags, &mRoutedDeviceId, &mPortId);
-
-    if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
-        ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u,"
-              " format %#x, channel mask %#x, flags %#x",
-              mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask,
-              mFlags);
-        return BAD_VALUE;
-    }
-    {
-    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
-    // we must release it ourselves if anything goes wrong.
-
-    // Not all of these values are needed under all conditions, but it is easier to get them all
-    status = AudioSystem::getLatency(output, &mAfLatency);
-    if (status != NO_ERROR) {
-        ALOGE("getLatency(%d) failed status %d", output, status);
-        goto release;
-    }
-    ALOGV("createTrack_l() output %d afLatency %u", output, mAfLatency);
-
-    status = AudioSystem::getFrameCount(output, &mAfFrameCount);
-    if (status != NO_ERROR) {
-        ALOGE("getFrameCount(output=%d) status %d", output, status);
-        goto release;
-    }
-
-    // TODO consider making this a member variable if there are other uses for it later
-    size_t afFrameCountHAL;
-    status = AudioSystem::getFrameCountHAL(output, &afFrameCountHAL);
-    if (status != NO_ERROR) {
-        ALOGE("getFrameCountHAL(output=%d) status %d", output, status);
-        goto release;
-    }
-    ALOG_ASSERT(afFrameCountHAL > 0);
-
-    status = AudioSystem::getSamplingRate(output, &mAfSampleRate);
-    if (status != NO_ERROR) {
-        ALOGE("getSamplingRate(output=%d) status %d", output, status);
-        goto release;
-    }
-    if (mSampleRate == 0) {
-        mSampleRate = mAfSampleRate;
-        mOriginalSampleRate = mAfSampleRate;
-    }
-
     // Client can only express a preference for FAST.  Server will perform additional tests.
     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
         // either of these use cases:
@@ -1394,130 +1303,78 @@
             // use case 4: synchronous write
             ((mTransfer == TRANSFER_SYNC) && mThreadCanCallJava);
 
-        bool useCaseAllowed = sharedBuffer || transferAllowed;
-        if (!useCaseAllowed) {
+        bool fastAllowed = sharedBuffer || transferAllowed;
+        if (!fastAllowed) {
             ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client, not shared buffer and transfer = %s",
                   convertTransferToText(mTransfer));
-        }
-
-        // sample rates must also match
-        bool sampleRateAllowed = mSampleRate == mAfSampleRate;
-        if (!sampleRateAllowed) {
-            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client, sample rate %u Hz but HAL needs %u Hz",
-                  mSampleRate, mAfSampleRate);
-        }
-
-        bool fastAllowed = useCaseAllowed && sampleRateAllowed;
-        if (!fastAllowed) {
             mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
         }
     }
 
-    mNotificationFramesAct = mNotificationFramesReq;
-
-    size_t frameCount = mReqFrameCount;
-    if (!audio_has_proportional_frames(mFormat)) {
-
-        if (mSharedBuffer != 0) {
-            // Same comment as below about ignoring frameCount parameter for set()
-            frameCount = mSharedBuffer->size();
-        } else if (frameCount == 0) {
-            frameCount = mAfFrameCount;
-        }
-        if (mNotificationFramesAct != frameCount) {
-            mNotificationFramesAct = frameCount;
-        }
-    } else if (mSharedBuffer != 0) {
-        // FIXME: Ensure client side memory buffers need
-        // not have additional alignment beyond sample
-        // (e.g. 16 bit stereo accessed as 32 bit frame).
-        size_t alignment = audio_bytes_per_sample(mFormat);
-        if (alignment & 1) {
-            // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
-            alignment = 1;
-        }
-        if (mChannelCount > 1) {
-            // More than 2 channels does not require stronger alignment than stereo
-            alignment <<= 1;
-        }
-        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
-            ALOGE("Invalid buffer alignment: address %p, channel count %u",
-                    mSharedBuffer->pointer(), mChannelCount);
-            status = BAD_VALUE;
-            goto release;
-        }
-
-        // When initializing a shared buffer AudioTrack via constructors,
-        // there's no frameCount parameter.
-        // But when initializing a shared buffer AudioTrack via set(),
-        // there _is_ a frameCount parameter.  We silently ignore it.
-        frameCount = mSharedBuffer->size() / mFrameSize;
+    IAudioFlinger::CreateTrackInput input;
+    if (mStreamType != AUDIO_STREAM_DEFAULT) {
+        stream_type_to_audio_attributes(mStreamType, &input.attr);
     } else {
-        size_t minFrameCount = 0;
-        // For fast tracks the frame count calculations and checks are mostly done by server,
-        // but we try to respect the application's request for notifications per buffer.
-        if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
-            if (mNotificationsPerBufferReq > 0) {
-                // Avoid possible arithmetic overflow during multiplication.
-                // mNotificationsPerBuffer is clamped to a small integer earlier, so it is unlikely.
-                if (mNotificationsPerBufferReq > SIZE_MAX / afFrameCountHAL) {
-                    ALOGE("Requested notificationPerBuffer=%u ignored for HAL frameCount=%zu",
-                            mNotificationsPerBufferReq, afFrameCountHAL);
-                } else {
-                    minFrameCount = afFrameCountHAL * mNotificationsPerBufferReq;
-                }
-            }
-        } else {
-            // for normal tracks precompute the frame count based on speed.
-            const float speed = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
-                            max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
-            minFrameCount = calculateMinFrameCount(
-                    mAfLatency, mAfFrameCount, mAfSampleRate, mSampleRate,
-                    speed /*, 0 mNotificationsPerBufferReq*/);
-        }
-        if (frameCount < minFrameCount) {
-            frameCount = minFrameCount;
-        }
+        input.attr = mAttributes;
     }
-
-    audio_output_flags_t flags = mFlags;
-
-    pid_t tid = -1;
+    input.config = AUDIO_CONFIG_INITIALIZER;
+    input.config.sample_rate = mSampleRate;
+    input.config.channel_mask = mChannelMask;
+    input.config.format = mFormat;
+    input.config.offload_info = mOffloadInfoCopy;
+    input.clientInfo.clientUid = mClientUid;
+    input.clientInfo.clientPid = mClientPid;
+    input.clientInfo.clientTid = -1;
     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
         // It is currently meaningless to request SCHED_FIFO for a Java thread.  Even if the
         // application-level code follows all non-blocking design rules, the language runtime
         // doesn't also follow those rules, so the thread will not benefit overall.
         if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
-            tid = mAudioTrackThread->getTid();
+            input.clientInfo.clientTid = mAudioTrackThread->getTid();
         }
     }
+    input.sharedBuffer = mSharedBuffer;
+    input.notificationsPerBuffer = mNotificationsPerBufferReq;
+    input.speed = 1.0;
+    if (audio_has_proportional_frames(mFormat) && mSharedBuffer == 0 &&
+            (mFlags & AUDIO_OUTPUT_FLAG_FAST) == 0) {
+        input.speed  = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
+                        max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
+    }
+    input.flags = mFlags;
+    input.frameCount = mReqFrameCount;
+    input.notificationFrameCount = mNotificationFramesReq;
+    input.selectedDeviceId = mSelectedDeviceId;
+    input.sessionId = mSessionId;
 
-    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
-                                // but we will still need the original value also
-    audio_session_t originalSessionId = mSessionId;
-    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
-                                                      mSampleRate,
-                                                      mFormat,
-                                                      mChannelMask,
-                                                      &temp,
-                                                      &flags,
-                                                      mSharedBuffer,
+    IAudioFlinger::CreateTrackOutput output;
+
+    sp<IAudioTrack> track = audioFlinger->createTrack(input,
                                                       output,
-                                                      mClientPid,
-                                                      tid,
-                                                      &mSessionId,
-                                                      mClientUid,
-                                                      &status,
-                                                      mPortId);
-    ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
-            "session ID changed from %d to %d", originalSessionId, mSessionId);
+                                                      &status);
 
-    if (status != NO_ERROR) {
-        ALOGE("AudioFlinger could not create track, status: %d", status);
-        goto release;
+    if (status != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
+        ALOGE("AudioFlinger could not create track, status: %d output %d", status, output.outputId);
+        goto error;
     }
     ALOG_ASSERT(track != 0);
 
+    mFrameCount = output.frameCount;
+    mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
+    mRoutedDeviceId = output.selectedDeviceId;
+    mSessionId = output.sessionId;
+
+    mSampleRate = output.sampleRate;
+    if (mOriginalSampleRate == 0) {
+        mOriginalSampleRate = mSampleRate;
+    }
+
+    mAfFrameCount = output.afFrameCount;
+    mAfSampleRate = output.afSampleRate;
+    mAfLatency = output.afLatencyMs;
+
+    mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;
+
     // AudioFlinger now owns the reference to the I/O handle,
     // so we are no longer responsible for releasing it.
 
@@ -1526,13 +1383,13 @@
     if (iMem == 0) {
         ALOGE("Could not get control block");
         status = NO_INIT;
-        goto release;
+        goto error;
     }
     void *iMemPointer = iMem->pointer();
     if (iMemPointer == NULL) {
         ALOGE("Could not get control block pointer");
         status = NO_INIT;
-        goto release;
+        goto error;
     }
     // invariant that mAudioTrack != 0 is true only after set() returns successfully
     if (mAudioTrack != 0) {
@@ -1545,75 +1402,33 @@
 
     audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
     mCblk = cblk;
-    // note that temp is the (possibly revised) value of frameCount
-    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
-        // In current design, AudioTrack client checks and ensures frame count validity before
-        // passing it to AudioFlinger so AudioFlinger should not return a different value except
-        // for fast track as it uses a special method of assigning frame count.
-        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
-    }
-    frameCount = temp;
 
     mAwaitBoost = false;
     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
-        if (flags & AUDIO_OUTPUT_FLAG_FAST) {
-            ALOGI("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu -> %zu", frameCount, temp);
+        if (output.flags & AUDIO_OUTPUT_FLAG_FAST) {
+            ALOGI("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu -> %zu",
+                  mReqFrameCount, mFrameCount);
             if (!mThreadCanCallJava) {
                 mAwaitBoost = true;
             }
         } else {
-            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu", frameCount,
-                    temp);
+            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu", mReqFrameCount,
+                  mFrameCount);
         }
     }
-    mFlags = flags;
-
-    // Make sure that application is notified with sufficient margin before underrun.
-    // The client can divide the AudioTrack buffer into sub-buffers,
-    // and expresses its desire to server as the notification frame count.
-    if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
-        size_t maxNotificationFrames;
-        if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
-            // notify every HAL buffer, regardless of the size of the track buffer
-            maxNotificationFrames = afFrameCountHAL;
-        } else {
-            // For normal tracks, use at least double-buffering if no sample rate conversion,
-            // or at least triple-buffering if there is sample rate conversion
-            const int nBuffering = mOriginalSampleRate == mAfSampleRate ? 2 : 3;
-            maxNotificationFrames = frameCount / nBuffering;
-            // If client requested a fast track but this was denied, then use the smaller maximum.
-            // FMS_20 is the minimum task wakeup period in ms for which CFS operates reliably.
-#define FMS_20 20   // FIXME share a common declaration with the same symbol in Threads.cpp
-            if (mOrigFlags & AUDIO_OUTPUT_FLAG_FAST) {
-                size_t maxNotificationFramesFastDenied = FMS_20 * mSampleRate / 1000;
-                if (maxNotificationFrames > maxNotificationFramesFastDenied) {
-                    maxNotificationFrames = maxNotificationFramesFastDenied;
-                }
-            }
-        }
-        if (mNotificationFramesAct == 0 || mNotificationFramesAct > maxNotificationFrames) {
-            if (mNotificationFramesAct == 0) {
-                ALOGD("Client defaulted notificationFrames to %zu for frameCount %zu",
-                    maxNotificationFrames, frameCount);
-            } else {
-                ALOGW("Client adjusted notificationFrames from %u to %zu for frameCount %zu",
-                    mNotificationFramesAct, maxNotificationFrames, frameCount);
-            }
-            mNotificationFramesAct = (uint32_t) maxNotificationFrames;
-        }
-    }
+    mFlags = output.flags;
 
     //mOutput != output includes the case where mOutput == AUDIO_IO_HANDLE_NONE for first creation
-    if (mDeviceCallback != 0 && mOutput != output) {
+    if (mDeviceCallback != 0 && mOutput != output.outputId) {
         if (mOutput != AUDIO_IO_HANDLE_NONE) {
             AudioSystem::removeAudioDeviceCallback(this, mOutput);
         }
-        AudioSystem::addAudioDeviceCallback(this, output);
+        AudioSystem::addAudioDeviceCallback(this, output.outputId);
         callbackAdded = true;
     }
 
     // We retain a copy of the I/O handle, but don't own the reference
-    mOutput = output;
+    mOutput = output.outputId;
     mRefreshRemaining = true;
 
     // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
@@ -1628,18 +1443,16 @@
         if (buffers == NULL) {
             ALOGE("Could not get buffer pointer");
             status = NO_INIT;
-            goto release;
+            goto error;
         }
     }
 
     mAudioTrack->attachAuxEffect(mAuxEffectId);
-    mFrameCount = frameCount;
-    updateLatency_l();  // this refetches mAfLatency and sets mLatency
 
     // If IAudioTrack is re-created, don't let the requested frameCount
     // decrease.  This can confuse clients that cache frameCount().
-    if (frameCount > mReqFrameCount) {
-        mReqFrameCount = frameCount;
+    if (mFrameCount > mReqFrameCount) {
+        mReqFrameCount = mFrameCount;
     }
 
     // reset server position to 0 as we have new cblk.
@@ -1648,9 +1461,9 @@
     // update proxy
     if (mSharedBuffer == 0) {
         mStaticProxy.clear();
-        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
+        mProxy = new AudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
     } else {
-        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
+        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
         mProxy = mStaticProxy;
     }
 
@@ -1676,8 +1489,7 @@
     return NO_ERROR;
     }
 
-release:
-    AudioSystem::releaseOutput(output, streamType, mSessionId);
+error:
     if (callbackAdded) {
         // note: mOutput is always valid is callbackAdded is true
         AudioSystem::removeAudioDeviceCallback(this, mOutput);
@@ -1685,6 +1497,8 @@
     if (status == NO_ERROR) {
         status = NO_INIT;
     }
+
+    // sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
     return status;
 }
 
@@ -2420,8 +2234,8 @@
         return true; // static tracks do not have issues with buffer sizing.
     }
     const size_t minFrameCount =
-            calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed
-                /*, 0 mNotificationsPerBufferReq*/);
+            AudioSystem::calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate,
+                                            sampleRate, speed /*, 0 mNotificationsPerBufferReq*/);
     const bool allowed = mFrameCount >= minFrameCount;
     ALOGD_IF(!allowed,
             "isSampleRateSpeedAllowed_l denied "
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index fc8c11a..5cf2bdb 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -95,83 +95,38 @@
     {
     }
 
-    virtual sp<IAudioTrack> createTrack(
-                                audio_stream_type_t streamType,
-                                uint32_t sampleRate,
-                                audio_format_t format,
-                                audio_channel_mask_t channelMask,
-                                size_t *pFrameCount,
-                                audio_output_flags_t *flags,
-                                const sp<IMemory>& sharedBuffer,
-                                audio_io_handle_t output,
-                                pid_t pid,
-                                pid_t tid,
-                                audio_session_t *sessionId,
-                                int clientUid,
-                                status_t *status,
-                                audio_port_handle_t portId)
+    virtual sp<IAudioTrack> createTrack(const CreateTrackInput& input,
+                                        CreateTrackOutput& output,
+                                        status_t *status)
     {
         Parcel data, reply;
         sp<IAudioTrack> track;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) streamType);
-        data.writeInt32(sampleRate);
-        data.writeInt32(format);
-        data.writeInt32(channelMask);
-        size_t frameCount = pFrameCount != NULL ? *pFrameCount : 0;
-        data.writeInt64(frameCount);
-        audio_output_flags_t lFlags = flags != NULL ? *flags : AUDIO_OUTPUT_FLAG_NONE;
-        data.writeInt32(lFlags);
-        // haveSharedBuffer
-        if (sharedBuffer != 0) {
-            data.writeInt32(true);
-            data.writeStrongBinder(IInterface::asBinder(sharedBuffer));
-        } else {
-            data.writeInt32(false);
+
+        if (status == nullptr) {
+            return track;
         }
-        data.writeInt32((int32_t) output);
-        data.writeInt32((int32_t) pid);
-        data.writeInt32((int32_t) tid);
-        audio_session_t lSessionId = AUDIO_SESSION_ALLOCATE;
-        if (sessionId != NULL) {
-            lSessionId = *sessionId;
-        }
-        data.writeInt32(lSessionId);
-        data.writeInt32(clientUid);
-        data.writeInt32(portId);
+
+        input.writeToParcel(&data);
+
         status_t lStatus = remote()->transact(CREATE_TRACK, data, &reply);
         if (lStatus != NO_ERROR) {
-            ALOGE("createTrack error: %s", strerror(-lStatus));
-        } else {
-            frameCount = reply.readInt64();
-            if (pFrameCount != NULL) {
-                *pFrameCount = frameCount;
-            }
-            lFlags = (audio_output_flags_t)reply.readInt32();
-            if (flags != NULL) {
-                *flags = lFlags;
-            }
-            lSessionId = (audio_session_t) reply.readInt32();
-            if (sessionId != NULL) {
-                *sessionId = lSessionId;
-            }
-            lStatus = reply.readInt32();
-            track = interface_cast<IAudioTrack>(reply.readStrongBinder());
-            if (lStatus == NO_ERROR) {
-                if (track == 0) {
-                    ALOGE("createTrack should have returned an IAudioTrack");
-                    lStatus = UNKNOWN_ERROR;
-                }
-            } else {
-                if (track != 0) {
-                    ALOGE("createTrack returned an IAudioTrack but with status %d", lStatus);
-                    track.clear();
-                }
-            }
+            ALOGE("createTrack transaction error %d", lStatus);
+            *status = DEAD_OBJECT;
+            return track;
         }
-        if (status != NULL) {
-            *status = lStatus;
+        *status = reply.readInt32();
+        if (*status != NO_ERROR) {
+            ALOGE("createTrack returned error %d", *status);
+            return track;
         }
+        track = interface_cast<IAudioTrack>(reply.readStrongBinder());
+        if (track == 0) {
+            ALOGE("createTrack returned an NULL IAudioTrack with status OK");
+            *status = DEAD_OBJECT;
+            return track;
+        }
+        output.readFromParcel(&reply);
         return track;
     }
 
@@ -970,41 +925,27 @@
     switch (code) {
         case CREATE_TRACK: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            int streamType = data.readInt32();
-            uint32_t sampleRate = data.readInt32();
-            audio_format_t format = (audio_format_t) data.readInt32();
-            audio_channel_mask_t channelMask = data.readInt32();
-            size_t frameCount = data.readInt64();
-            audio_output_flags_t flags = (audio_output_flags_t) data.readInt32();
-            bool haveSharedBuffer = data.readInt32() != 0;
-            sp<IMemory> buffer;
-            if (haveSharedBuffer) {
-                buffer = interface_cast<IMemory>(data.readStrongBinder());
+
+            CreateTrackInput input;
+            if (input.readFromParcel((Parcel*)&data) != NO_ERROR) {
+                reply->writeInt32(DEAD_OBJECT);
+                return NO_ERROR;
             }
-            audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
-            pid_t pid = (pid_t) data.readInt32();
-            pid_t tid = (pid_t) data.readInt32();
-            audio_session_t sessionId = (audio_session_t) data.readInt32();
-            int clientUid = data.readInt32();
-            audio_port_handle_t portId = (audio_port_handle_t) data.readInt32();
-            status_t status = NO_ERROR;
-            sp<IAudioTrack> track;
-            if ((haveSharedBuffer && (buffer == 0)) ||
-                    ((buffer != 0) && (buffer->pointer() == NULL))) {
-                ALOGW("CREATE_TRACK: cannot retrieve shared memory");
-                status = DEAD_OBJECT;
-            } else {
-                track = createTrack(
-                        (audio_stream_type_t) streamType, sampleRate, format,
-                        channelMask, &frameCount, &flags, buffer, output, pid, tid,
-                        &sessionId, clientUid, &status, portId);
-                LOG_ALWAYS_FATAL_IF((track != 0) != (status == NO_ERROR));
-            }
-            reply->writeInt64(frameCount);
-            reply->writeInt32(flags);
-            reply->writeInt32(sessionId);
+
+            status_t status;
+            CreateTrackOutput output;
+
+            sp<IAudioTrack> track = createTrack(input,
+                                                output,
+                                                &status);
+
+            LOG_ALWAYS_FATAL_IF((track != 0) != (status == NO_ERROR));
             reply->writeInt32(status);
+            if (status != NO_ERROR) {
+                return NO_ERROR;
+            }
             reply->writeStrongBinder(IInterface::asBinder(track));
+            output.writeToParcel(reply);
             return NO_ERROR;
         } break;
         case OPEN_RECORD: {
diff --git a/media/libaudioclient/include/media/AudioClient.h b/media/libaudioclient/include/media/AudioClient.h
index 9efd76d..108e326 100644
--- a/media/libaudioclient/include/media/AudioClient.h
+++ b/media/libaudioclient/include/media/AudioClient.h
@@ -18,6 +18,7 @@
 #ifndef ANDROID_AUDIO_CLIENT_H
 #define ANDROID_AUDIO_CLIENT_H
 
+#include <binder/Parcel.h>
 #include <system/audio.h>
 #include <utils/String16.h>
 
@@ -26,11 +27,28 @@
 class AudioClient {
  public:
     AudioClient() :
-        clientUid(-1), clientPid(-1), packageName("") {}
+        clientUid(-1), clientPid(-1), clientTid(-1), packageName("") {}
 
     uid_t clientUid;
     pid_t clientPid;
+    pid_t clientTid;
     String16 packageName;
+
+    status_t readFromParcel(Parcel *parcel) {
+        clientUid = parcel->readInt32();
+        clientPid = parcel->readInt32();
+        clientTid = parcel->readInt32();
+        packageName = parcel->readString16();
+        return NO_ERROR;
+    }
+
+    status_t writeToParcel(Parcel *parcel) const {
+        parcel->writeInt32(clientUid);
+        parcel->writeInt32(clientPid);
+        parcel->writeInt32(clientTid);
+        parcel->writeString16(packageName);
+        return NO_ERROR;
+    }
 };
 
 }; // namespace android
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 327eba8..66601da 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -106,6 +106,9 @@
 
     static float linearToLog(int volume);
     static int logToLinear(float volume);
+    static size_t calculateMinFrameCount(
+            uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
+            uint32_t sampleRate, float speed /*, uint32_t notificationsPerBufferReq*/);
 
     // Returned samplingRate and frameCount output values are guaranteed
     // to be non-zero if status == NO_ERROR
@@ -209,8 +212,6 @@
     static status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
     static audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
 
-    // Client must successfully hand off the handle reference to AudioFlinger via createTrack(),
-    // or release it with releaseOutput().
     static status_t getOutputForAttr(const audio_attributes_t *attr,
                                      audio_io_handle_t *output,
                                      audio_session_t session,
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index 8973133..9fbd04b 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -1182,7 +1182,6 @@
     pid_t                   mClientPid;
 
     wp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
-    audio_port_handle_t     mPortId;  // unique ID allocated by audio policy
 };
 
 }; // namespace android
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index 133d6c9..9061c26 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -24,6 +24,8 @@
 #include <utils/RefBase.h>
 #include <utils/Errors.h>
 #include <binder/IInterface.h>
+#include <binder/Parcel.h>
+#include <media/AudioClient.h>
 #include <media/IAudioTrack.h>
 #include <media/IAudioFlingerClient.h>
 #include <system/audio.h>
@@ -44,6 +46,135 @@
 public:
     DECLARE_META_INTERFACE(AudioFlinger);
 
+    /* CreateTrackInput contains all input arguments sent by AudioTrack to AudioFlinger
+     * when calling createTrack(), including arguments that will be updated by AudioFlinger
+     * and returned in the CreateTrackOutput object.
+     */
+    class CreateTrackInput {
+    public:
+        status_t readFromParcel(Parcel *parcel) {
+            /* input arguments*/
+            memset(&attr, 0, sizeof(audio_attributes_t));
+            if (parcel->read(&attr, sizeof(audio_attributes_t)) != NO_ERROR) {
+                return DEAD_OBJECT;
+            }
+            attr.tags[AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1] = '\0';
+            memset(&config, 0, sizeof(audio_config_t));
+            if (parcel->read(&config, sizeof(audio_config_t)) != NO_ERROR) {
+                return DEAD_OBJECT;
+            }
+            (void)clientInfo.readFromParcel(parcel);
+            if (parcel->readInt32() != 0) {
+                sharedBuffer = interface_cast<IMemory>(parcel->readStrongBinder());
+                if (sharedBuffer == 0 || sharedBuffer->pointer() == NULL) {
+                    return BAD_VALUE;
+                }
+            }
+            notificationsPerBuffer = parcel->readInt32();
+            speed = parcel->readFloat();
+
+            /* input/output arguments*/
+            (void)parcel->read(&flags, sizeof(audio_output_flags_t));
+            frameCount = parcel->readInt64();
+            notificationFrameCount = parcel->readInt64();
+            (void)parcel->read(&selectedDeviceId, sizeof(audio_port_handle_t));
+            (void)parcel->read(&sessionId, sizeof(audio_session_t));
+            return NO_ERROR;
+        }
+
+        status_t writeToParcel(Parcel *parcel) const {
+            /* input arguments*/
+            (void)parcel->write(&attr, sizeof(audio_attributes_t));
+            (void)parcel->write(&config, sizeof(audio_config_t));
+            (void)clientInfo.writeToParcel(parcel);
+            if (sharedBuffer != 0) {
+                (void)parcel->writeInt32(1);
+                (void)parcel->writeStrongBinder(IInterface::asBinder(sharedBuffer));
+            } else {
+                (void)parcel->writeInt32(0);
+            }
+            (void)parcel->writeInt32(notificationsPerBuffer);
+            (void)parcel->writeFloat(speed);
+
+            /* input/output arguments*/
+            (void)parcel->write(&flags, sizeof(audio_output_flags_t));
+            (void)parcel->writeInt64(frameCount);
+            (void)parcel->writeInt64(notificationFrameCount);
+            (void)parcel->write(&selectedDeviceId, sizeof(audio_port_handle_t));
+            (void)parcel->write(&sessionId, sizeof(audio_session_t));
+            return NO_ERROR;
+        }
+
+        /* input */
+        audio_attributes_t attr;
+        audio_config_t config;
+        AudioClient clientInfo;
+        sp<IMemory> sharedBuffer;
+        uint32_t notificationsPerBuffer;
+        float speed;
+
+        /* input/output */
+        audio_output_flags_t flags;
+        size_t frameCount;
+        size_t notificationFrameCount;
+        audio_port_handle_t selectedDeviceId;
+        audio_session_t sessionId;
+    };
+
+    /* CreateTrackOutput contains all output arguments returned by AudioFlinger to AudioTrack
+     * when calling createTrack(), including the input/output arguments from CreateTrackInput
+     * after they have been updated by AudioFlinger.
+     */
+    class CreateTrackOutput {
+    public:
+        status_t readFromParcel(Parcel *parcel) {
+            /* input/output arguments*/
+            (void)parcel->read(&flags, sizeof(audio_output_flags_t));
+            frameCount = parcel->readInt64();
+            notificationFrameCount = parcel->readInt64();
+            (void)parcel->read(&selectedDeviceId, sizeof(audio_port_handle_t));
+            (void)parcel->read(&sessionId, sizeof(audio_session_t));
+
+            /* output arguments*/
+            sampleRate = parcel->readUint32();
+            afFrameCount = parcel->readInt64();
+            afSampleRate = parcel->readInt64();
+            afLatencyMs = parcel->readInt32();
+            (void)parcel->read(&outputId, sizeof(audio_io_handle_t));
+            return NO_ERROR;
+        }
+
+        status_t writeToParcel(Parcel *parcel) const {
+            /* input/output arguments*/
+            (void)parcel->write(&flags, sizeof(audio_output_flags_t));
+            (void)parcel->writeInt64(frameCount);
+            (void)parcel->writeInt64(notificationFrameCount);
+            (void)parcel->write(&selectedDeviceId, sizeof(audio_port_handle_t));
+            (void)parcel->write(&sessionId, sizeof(audio_session_t));
+
+            /* output arguments*/
+            (void)parcel->writeUint32(sampleRate);
+            (void)parcel->writeInt64(afFrameCount);
+            (void)parcel->writeInt64(afSampleRate);
+            (void)parcel->writeInt32(afLatencyMs);
+            (void)parcel->write(&outputId, sizeof(audio_io_handle_t));
+            return NO_ERROR;
+        }
+
+        /* input/output */
+        audio_output_flags_t flags;
+        size_t frameCount;
+        size_t notificationFrameCount;
+        audio_port_handle_t selectedDeviceId;
+        audio_session_t sessionId;
+
+        /* output */
+        uint32_t sampleRate;
+        size_t   afFrameCount;
+        uint32_t afSampleRate;
+        uint32_t afLatencyMs;
+        audio_io_handle_t outputId;
+    };
 
     // invariant on exit for all APIs that return an sp<>:
     //   (return value != 0) == (*status == NO_ERROR)
@@ -51,24 +182,9 @@
     /* create an audio track and registers it with AudioFlinger.
      * return null if the track cannot be created.
      */
-    virtual sp<IAudioTrack> createTrack(
-                                audio_stream_type_t streamType,
-                                uint32_t sampleRate,
-                                audio_format_t format,
-                                audio_channel_mask_t channelMask,
-                                size_t *pFrameCount,
-                                audio_output_flags_t *flags,
-                                const sp<IMemory>& sharedBuffer,
-                                // On successful return, AudioFlinger takes over the handle
-                                // reference and will release it when the track is destroyed.
-                                // However on failure, the client is responsible for release.
-                                audio_io_handle_t output,
-                                pid_t pid,
-                                pid_t tid,  // -1 means unused, otherwise must be valid non-0
-                                audio_session_t *sessionId,
-                                int clientUid,
-                                status_t *status,
-                                audio_port_handle_t portId) = 0;
+    virtual sp<IAudioTrack> createTrack(const CreateTrackInput& input,
+                                        CreateTrackOutput& output,
+                                        status_t *status) = 0;
 
     virtual sp<media::IAudioRecord> openRecord(
                                 // On successful return, AudioFlinger takes over the handle
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 79e540a..9cb0357 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -641,38 +641,52 @@
 
 // IAudioFlinger interface
 
-
-sp<IAudioTrack> AudioFlinger::createTrack(
-        audio_stream_type_t streamType,
-        uint32_t sampleRate,
-        audio_format_t format,
-        audio_channel_mask_t channelMask,
-        size_t *frameCount,
-        audio_output_flags_t *flags,
-        const sp<IMemory>& sharedBuffer,
-        audio_io_handle_t output,
-        pid_t pid,
-        pid_t tid,
-        audio_session_t *sessionId,
-        int clientUid,
-        status_t *status,
-        audio_port_handle_t portId)
+sp<IAudioTrack> AudioFlinger::createTrack(const CreateTrackInput& input,
+                                          CreateTrackOutput& output,
+                                          status_t *status)
 {
     sp<PlaybackThread::Track> track;
     sp<TrackHandle> trackHandle;
     sp<Client> client;
     status_t lStatus;
-    audio_session_t lSessionId;
+    audio_stream_type_t streamType;
+    audio_port_handle_t portId;
 
+    bool updatePid = (input.clientInfo.clientPid == -1);
     const uid_t callingUid = IPCThreadState::self()->getCallingUid();
-    if (pid == -1 || !isTrustedCallingUid(callingUid)) {
+    uid_t clientUid = input.clientInfo.clientUid;
+    if (!isTrustedCallingUid(callingUid)) {
+        ALOGW_IF(clientUid != callingUid,
+                "%s uid %d tried to pass itself off as %d",
+                __FUNCTION__, callingUid, clientUid);
+        clientUid = callingUid;
+        updatePid = true;
+    }
+    pid_t clientPid = input.clientInfo.clientPid;
+    if (updatePid) {
         const pid_t callingPid = IPCThreadState::self()->getCallingPid();
-        ALOGW_IF(pid != -1 && pid != callingPid,
+        ALOGW_IF(clientPid != -1 && clientPid != callingPid,
                  "%s uid %d pid %d tried to pass itself off as pid %d",
-                 __func__, callingUid, callingPid, pid);
-        pid = callingPid;
+                 __func__, callingUid, callingPid, clientPid);
+        clientPid = callingPid;
     }
 
+    audio_session_t sessionId = input.sessionId;
+    if (sessionId == AUDIO_SESSION_ALLOCATE) {
+        sessionId = (audio_session_t) newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+    }
+    output.sessionId = sessionId;
+    output.outputId = AUDIO_IO_HANDLE_NONE;
+    output.selectedDeviceId = input.selectedDeviceId;
+
+    lStatus = AudioSystem::getOutputForAttr(&input.attr, &output.outputId, sessionId, &streamType,
+                                            clientUid, &input.config, input.flags,
+                                            &output.selectedDeviceId, &portId);
+
+    if (lStatus != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
+        ALOGE("createTrack() getOutputForAttr() return error %d or invalid output handle", lStatus);
+        goto Exit;
+    }
     // client AudioTrack::set already implements AUDIO_STREAM_DEFAULT => AUDIO_STREAM_MUSIC,
     // but if someone uses binder directly they could bypass that and cause us to crash
     if (uint32_t(streamType) >= AUDIO_STREAM_CNT) {
@@ -681,91 +695,76 @@
         goto Exit;
     }
 
-    // further sample rate checks are performed by createTrack_l() depending on the thread type
-    if (sampleRate == 0) {
-        ALOGE("createTrack() invalid sample rate %u", sampleRate);
-        lStatus = BAD_VALUE;
-        goto Exit;
-    }
-
     // further channel mask checks are performed by createTrack_l() depending on the thread type
-    if (!audio_is_output_channel(channelMask)) {
-        ALOGE("createTrack() invalid channel mask %#x", channelMask);
+    if (!audio_is_output_channel(input.config.channel_mask)) {
+        ALOGE("createTrack() invalid channel mask %#x", input.config.channel_mask);
         lStatus = BAD_VALUE;
         goto Exit;
     }
 
     // further format checks are performed by createTrack_l() depending on the thread type
-    if (!audio_is_valid_format(format)) {
-        ALOGE("createTrack() invalid format %#x", format);
-        lStatus = BAD_VALUE;
-        goto Exit;
-    }
-
-    if (sharedBuffer != 0 && sharedBuffer->pointer() == NULL) {
-        ALOGE("createTrack() sharedBuffer is non-0 but has NULL pointer()");
+    if (!audio_is_valid_format(input.config.format)) {
+        ALOGE("createTrack() invalid format %#x", input.config.format);
         lStatus = BAD_VALUE;
         goto Exit;
     }
 
     {
         Mutex::Autolock _l(mLock);
-        PlaybackThread *thread = checkPlaybackThread_l(output);
+        PlaybackThread *thread = checkPlaybackThread_l(output.outputId);
         if (thread == NULL) {
-            ALOGE("no playback thread found for output handle %d", output);
+            ALOGE("no playback thread found for output handle %d", output.outputId);
             lStatus = BAD_VALUE;
             goto Exit;
         }
 
-        client = registerPid(pid);
+        client = registerPid(clientPid);
 
         PlaybackThread *effectThread = NULL;
-        if (sessionId != NULL && *sessionId != AUDIO_SESSION_ALLOCATE) {
-            if (audio_unique_id_get_use(*sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
-                ALOGE("createTrack() invalid session ID %d", *sessionId);
-                lStatus = BAD_VALUE;
-                goto Exit;
-            }
-            lSessionId = *sessionId;
-            // check if an effect chain with the same session ID is present on another
-            // output thread and move it here.
-            for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
-                sp<PlaybackThread> t = mPlaybackThreads.valueAt(i);
-                if (mPlaybackThreads.keyAt(i) != output) {
-                    uint32_t sessions = t->hasAudioSession(lSessionId);
-                    if (sessions & ThreadBase::EFFECT_SESSION) {
-                        effectThread = t.get();
-                        break;
-                    }
+        // check if an effect chain with the same session ID is present on another
+        // output thread and move it here.
+        for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+            sp<PlaybackThread> t = mPlaybackThreads.valueAt(i);
+            if (mPlaybackThreads.keyAt(i) != output.outputId) {
+                uint32_t sessions = t->hasAudioSession(sessionId);
+                if (sessions & ThreadBase::EFFECT_SESSION) {
+                    effectThread = t.get();
+                    break;
                 }
             }
-        } else {
-            // if no audio session id is provided, create one here
-            lSessionId = (audio_session_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
-            if (sessionId != NULL) {
-                *sessionId = lSessionId;
-            }
         }
-        ALOGV("createTrack() lSessionId: %d", lSessionId);
+        ALOGV("createTrack() sessionId: %d", sessionId);
 
-        track = thread->createTrack_l(client, streamType, sampleRate, format,
-                channelMask, frameCount, sharedBuffer, lSessionId, flags, tid,
-                clientUid, &lStatus, portId);
+        output.sampleRate = input.config.sample_rate;
+        output.frameCount = input.frameCount;
+        output.notificationFrameCount = input.notificationFrameCount;
+        output.flags = input.flags;
+
+        track = thread->createTrack_l(client, streamType, &output.sampleRate, input.config.format,
+                                      input.config.channel_mask,
+                                      &output.frameCount, &output.notificationFrameCount,
+                                      input.notificationsPerBuffer, input.speed,
+                                      input.sharedBuffer, sessionId, &output.flags,
+                                      input.clientInfo.clientTid, clientUid, &lStatus, portId);
         LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (track == 0));
         // we don't abort yet if lStatus != NO_ERROR; there is still work to be done regardless
 
+        output.afFrameCount = thread->frameCount();
+        output.afSampleRate = thread->sampleRate();
+        output.afLatencyMs = thread->latency();
+
         // move effect chain to this output thread if an effect on same session was waiting
         // for a track to be created
         if (lStatus == NO_ERROR && effectThread != NULL) {
             // no risk of deadlock because AudioFlinger::mLock is held
             Mutex::Autolock _dl(thread->mLock);
             Mutex::Autolock _sl(effectThread->mLock);
-            moveEffectChain_l(lSessionId, effectThread, thread, true);
+            moveEffectChain_l(sessionId, effectThread, thread, true);
         }
 
         // Look for sync events awaiting for a session to be used.
         for (size_t i = 0; i < mPendingSyncEvents.size(); i++) {
-            if (mPendingSyncEvents[i]->triggerSession() == lSessionId) {
+            if (mPendingSyncEvents[i]->triggerSession() == sessionId) {
                 if (thread->isValidSyncEvent(mPendingSyncEvents[i])) {
                     if (lStatus == NO_ERROR) {
                         (void) track->setSyncEvent(mPendingSyncEvents[i]);
@@ -778,7 +777,7 @@
             }
         }
 
-        setAudioHwSyncForSession_l(thread, lSessionId);
+        setAudioHwSyncForSession_l(thread, sessionId);
     }
 
     if (lStatus != NO_ERROR) {
@@ -798,6 +797,9 @@
     trackHandle = new TrackHandle(track);
 
 Exit:
+    if (lStatus != NO_ERROR && output.outputId != AUDIO_IO_HANDLE_NONE) {
+        AudioSystem::releaseOutput(output.outputId, streamType, sessionId);
+    }
     *status = lStatus;
     return trackHandle;
 }
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index dff94d2..7e9ef26 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -114,21 +114,9 @@
     virtual     status_t    dump(int fd, const Vector<String16>& args);
 
     // IAudioFlinger interface, in binder opcode order
-    virtual sp<IAudioTrack> createTrack(
-                                audio_stream_type_t streamType,
-                                uint32_t sampleRate,
-                                audio_format_t format,
-                                audio_channel_mask_t channelMask,
-                                size_t *pFrameCount,
-                                audio_output_flags_t *flags,
-                                const sp<IMemory>& sharedBuffer,
-                                audio_io_handle_t output,
-                                pid_t pid,
-                                pid_t tid,
-                                audio_session_t *sessionId,
-                                int clientUid,
-                                status_t *status /*non-NULL*/,
-                                audio_port_handle_t portId);
+    virtual sp<IAudioTrack> createTrack(const CreateTrackInput& input,
+                                        CreateTrackOutput& output,
+                                        status_t *status);
 
     virtual sp<media::IAudioRecord> openRecord(
                                 audio_io_handle_t input,
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 8c7c830..8e6c720 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1837,10 +1837,13 @@
 sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l(
         const sp<AudioFlinger::Client>& client,
         audio_stream_type_t streamType,
-        uint32_t sampleRate,
+        uint32_t *pSampleRate,
         audio_format_t format,
         audio_channel_mask_t channelMask,
         size_t *pFrameCount,
+        size_t *pNotificationFrameCount,
+        uint32_t notificationsPerBuffer,
+        float speed,
         const sp<IMemory>& sharedBuffer,
         audio_session_t sessionId,
         audio_output_flags_t *flags,
@@ -1850,9 +1853,16 @@
         audio_port_handle_t portId)
 {
     size_t frameCount = *pFrameCount;
+    size_t notificationFrameCount = *pNotificationFrameCount;
     sp<Track> track;
     status_t lStatus;
     audio_output_flags_t outputFlags = mOutput->flags;
+    audio_output_flags_t requestedFlags = *flags;
+
+    if (*pSampleRate == 0) {
+        *pSampleRate = mSampleRate;
+    }
+    uint32_t sampleRate = *pSampleRate;
 
     // special case for FAST flag considered OK if fast mixer is present
     if (hasFastMixer()) {
@@ -1929,36 +1939,114 @@
         *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_FAST);
       }
     }
-    // For normal PCM streaming tracks, update minimum frame count.
-    // For compatibility with AudioTrack calculation, buffer depth is forced
-    // to be at least 2 x the normal mixer frame count and cover audio hardware latency.
-    // This is probably too conservative, but legacy application code may depend on it.
-    // If you change this calculation, also review the start threshold which is related.
-    if (!(*flags & AUDIO_OUTPUT_FLAG_FAST)
-            && audio_has_proportional_frames(format) && sharedBuffer == 0) {
-        // this must match AudioTrack.cpp calculateMinFrameCount().
-        // TODO: Move to a common library
-        uint32_t latencyMs = 0;
-        lStatus = mOutput->stream->getLatency(&latencyMs);
-        if (lStatus != OK) {
-            ALOGE("Error when retrieving output stream latency: %d", lStatus);
+
+    if (!audio_has_proportional_frames(format)) {
+        if (sharedBuffer != 0) {
+            // Same comment as below about ignoring frameCount parameter for set()
+            frameCount = sharedBuffer->size();
+        } else if (frameCount == 0) {
+            frameCount = mNormalFrameCount;
+        }
+        if (notificationFrameCount != frameCount) {
+            notificationFrameCount = frameCount;
+        }
+    } else if (sharedBuffer != 0) {
+        // FIXME: Ensure that client-side memory buffers do not require
+        // stronger alignment than one sample
+        // (e.g. 16 bit stereo accessed as a 32 bit frame).
+        size_t alignment = audio_bytes_per_sample(format);
+        if (alignment & 1) {
+            // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
+            alignment = 1;
+        }
+        uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
+        size_t frameSize = channelCount * audio_bytes_per_sample(format);
+        if (channelCount > 1) {
+            // More than 2 channels does not require stronger alignment than stereo
+            alignment <<= 1;
+        }
+        if (((uintptr_t)sharedBuffer->pointer() & (alignment - 1)) != 0) {
+            ALOGE("Invalid buffer alignment: address %p, channel count %u",
+                  sharedBuffer->pointer(), channelCount);
+            lStatus = BAD_VALUE;
             goto Exit;
         }
-        uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate);
-        if (minBufCount < 2) {
-            minBufCount = 2;
+
+        // When initializing a shared buffer AudioTrack via constructors,
+        // there's no frameCount parameter.
+        // But when initializing a shared buffer AudioTrack via set(),
+        // there _is_ a frameCount parameter.  We silently ignore it.
+        frameCount = sharedBuffer->size() / frameSize;
+    } else {
+        size_t minFrameCount = 0;
+        // For fast tracks we try to respect the application's request for notifications per buffer.
+        if (*flags & AUDIO_OUTPUT_FLAG_FAST) {
+            if (notificationsPerBuffer > 0) {
+                // Avoid possible arithmetic overflow during multiplication.
+                if (notificationsPerBuffer > SIZE_MAX / mFrameCount) {
+                    ALOGE("Requested notificationsPerBuffer=%u ignored for HAL frameCount=%zu",
+                          notificationsPerBuffer, mFrameCount);
+                } else {
+                    minFrameCount = mFrameCount * notificationsPerBuffer;
+                }
+            }
+        } else {
+            // For normal PCM streaming tracks, update minimum frame count.
+            // Buffer depth is forced to be at least 2 x the normal mixer frame count and
+            // cover audio hardware latency.
+            // This is probably too conservative, but legacy application code may depend on it.
+            // If you change this calculation, also review the start threshold which is related.
+            uint32_t latencyMs = latency_l();
+            if (latencyMs == 0) {
+                ALOGE("Error when retrieving output stream latency");
+                lStatus = UNKNOWN_ERROR;
+                goto Exit;
+            }
+
+            minFrameCount = AudioSystem::calculateMinFrameCount(latencyMs, mNormalFrameCount,
+                                mSampleRate, sampleRate, speed /*, 0 mNotificationsPerBufferReq*/);
+
         }
-        // For normal mixing tracks, if speed is > 1.0f (normal), AudioTrack
-        // or the client should compute and pass in a larger buffer request.
-        size_t minFrameCount =
-                minBufCount * sourceFramesNeededWithTimestretch(
-                        sampleRate, mNormalFrameCount,
-                        mSampleRate, AUDIO_TIMESTRETCH_SPEED_NORMAL /*speed*/);
-        if (frameCount < minFrameCount) { // including frameCount == 0
+        if (frameCount < minFrameCount) {
             frameCount = minFrameCount;
         }
     }
+
+    // Make sure that the application is notified with sufficient margin before underrun.
+    // The client may divide the AudioTrack buffer into sub-buffers and communicates the
+    // desired sub-buffer size to the server as the notification frame count.
+    if (sharedBuffer == 0 && audio_is_linear_pcm(format)) {
+        size_t maxNotificationFrames;
+        if (*flags & AUDIO_OUTPUT_FLAG_FAST) {
+            // notify every HAL buffer, regardless of the size of the track buffer
+            maxNotificationFrames = mFrameCount;
+        } else {
+            // For normal tracks, use at least double-buffering if no sample rate conversion,
+            // or at least triple-buffering if there is sample rate conversion
+            const int nBuffering = sampleRate == mSampleRate ? 2 : 3;
+            maxNotificationFrames = frameCount / nBuffering;
+            // If client requested a fast track but this was denied, then use the smaller maximum.
+            if (requestedFlags & AUDIO_OUTPUT_FLAG_FAST) {
+                size_t maxNotificationFramesFastDenied = FMS_20 * sampleRate / 1000;
+                if (maxNotificationFrames > maxNotificationFramesFastDenied) {
+                    maxNotificationFrames = maxNotificationFramesFastDenied;
+                }
+            }
+        }
+        if (notificationFrameCount == 0 || notificationFrameCount > maxNotificationFrames) {
+            if (notificationFrameCount == 0) {
+                ALOGD("Client defaulted notificationFrames to %zu for frameCount %zu",
+                      maxNotificationFrames, frameCount);
+            } else {
+                ALOGW("Client adjusted notificationFrames from %zu to %zu for frameCount %zu",
+                      notificationFrameCount, maxNotificationFrames, frameCount);
+            }
+            notificationFrameCount = maxNotificationFrames;
+        }
+    }
+
     *pFrameCount = frameCount;
+    *pNotificationFrameCount = notificationFrameCount;
 
     switch (mType) {
 
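
For non-fast streaming tracks, the code above now defers the minimum-buffer calculation to AudioSystem::calculateMinFrameCount() and then clamps the notification size against the final frame count. The standalone sketch below recomputes both steps for one example configuration; the rounding inside calculateMinFrameCount() and sourceFramesNeededWithTimestretch() may differ slightly, so the numbers are illustrative only.

    // Standalone illustration of the buffer sizing above; treat results as approximate.
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // Example mixer ("af") parameters and a hypothetical client request.
        const uint32_t afLatencyMs  = 64;     // output stream latency
        const uint32_t afFrameCount = 960;    // normal mixer frame count
        const uint32_t afSampleRate = 48000;  // mixer sample rate
        const uint32_t sampleRate   = 44100;  // client sample rate
        const double   speed        = 1.0;    // timestretch speed

        // Buffer depth must cover the hardware latency, with a floor of 2 mixer buffers.
        uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
        if (minBufCount < 2) {
            minBufCount = 2;
        }

        // Approximate source frames consumed per mixer buffer after resampling and
        // timestretch, rounded up conservatively.
        const size_t srcFramesPerBuf =
                static_cast<size_t>(afFrameCount * speed * sampleRate / afSampleRate) + 1;
        const size_t minFrameCount = minBufCount * srcFramesPerBuf;

        // Notification clamp: double-buffering without resampling, triple-buffering with it.
        // Here the track buffer is assumed to be exactly the computed minimum.
        const int nBuffering = (sampleRate == afSampleRate) ? 2 : 3;
        const size_t maxNotificationFrames = minFrameCount / nBuffering;

        std::printf("minBufCount=%u minFrameCount=%zu maxNotificationFrames=%zu\n",
                    static_cast<unsigned>(minBufCount), minFrameCount, maxNotificationFrames);
        return 0;
    }

With a 64 ms latency and 20 ms mixer buffers this gives minBufCount = 3, and the 44.1 kHz client against a 48 kHz mixer selects triple-buffering for the notification clamp.
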
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index b685e1b..2ca273f 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -706,10 +706,13 @@
                 sp<Track>   createTrack_l(
                                 const sp<AudioFlinger::Client>& client,
                                 audio_stream_type_t streamType,
-                                uint32_t sampleRate,
+                                uint32_t *sampleRate,
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
                                 size_t *pFrameCount,
+                                size_t *pNotificationFrameCount,
+                                uint32_t notificationsPerBuffer,
+                                float speed,
                                 const sp<IMemory>& sharedBuffer,
                                 audio_session_t sessionId,
                                 audio_output_flags_t *flags,