Merge "Change AudioTrack resampling buffers from 3 to 2"
diff --git a/include/media/AudioResamplerPublic.h b/include/media/AudioResamplerPublic.h
index 97847a0..b705efa 100644
--- a/include/media/AudioResamplerPublic.h
+++ b/include/media/AudioResamplerPublic.h
@@ -26,4 +26,17 @@
 // TODO: replace with an API
 #define AUDIO_RESAMPLER_DOWN_RATIO_MAX 256
 
+// Returns the source frames needed to resample to destination frames.  This is not a precise
+// value and depends on the resampler (and possibly how it handles rounding internally).
+// Nevertheless, this should be an upper bound on the requirements of the resampler.
+// If srcSampleRate and dstSampleRate are equal, then it returns destination frames, which
+// may not be true if the resampler is asynchronous.
+static inline size_t sourceFramesNeeded(
+        uint32_t srcSampleRate, size_t dstFramesRequired, uint32_t dstSampleRate) {
+    // +1 for rounding - always do this even if matched ratio (resampler may use phases not ratio)
+    // +1 for additional sample needed for interpolation
+    return srcSampleRate == dstSampleRate ? dstFramesRequired :
+            size_t((uint64_t)dstFramesRequired * srcSampleRate / dstSampleRate + 1 + 1);
+}
+
 #endif // ANDROID_AUDIO_RESAMPLER_PUBLIC_H
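For reference (not part of the patch), a minimal sketch exercising the new helper, assuming AudioResamplerPublic.h is on the include path: producing 960 frames at 48 kHz from a 44.1 kHz source needs at most 44100 * 960 / 48000 + 1 + 1 = 884 source frames, while matched rates simply return the destination count.

    // Illustrative only -- exercises sourceFramesNeeded() with a few rate pairs.
    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <media/AudioResamplerPublic.h>

    int main() {
        // Source 44.1 kHz, sink 48 kHz: 960 destination frames need at most
        // 44100 * 960 / 48000 + 1 + 1 = 884 source frames.
        assert(sourceFramesNeeded(44100, 960, 48000) == 884);
        // Matched rates short-circuit to the destination frame count.
        assert(sourceFramesNeeded(48000, 960, 48000) == 960);
        // Source 48 kHz, sink 44.1 kHz: 882 destination frames need at most
        // 48000 * 882 / 44100 + 2 = 962 source frames.
        assert(sourceFramesNeeded(48000, 882, 44100) == 962);
        printf("sourceFramesNeeded sanity checks passed\n");
        return 0;
    }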
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index d4bacc0..1d5fc95 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -66,12 +66,11 @@
         return BAD_VALUE;
     }
 
-    // FIXME merge with similar code in createTrack_l(), except we're missing
-    //       some information here that is available in createTrack_l():
+    // FIXME handle in server, like createTrack_l(); possibly missing info:
     //          audio_io_handle_t output
     //          audio_format_t format
     //          audio_channel_mask_t channelMask
-    //          audio_output_flags_t flags
+    //          audio_output_flags_t flags (FAST)
     uint32_t afSampleRate;
     status_t status;
     status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
@@ -101,16 +100,16 @@
         minBufCount = 2;
     }
 
-    *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
-            afFrameCount * minBufCount * uint64_t(sampleRate) / afSampleRate;
-    // The formula above should always produce a non-zero value, but return an error
-    // in the unlikely event that it does not, as that's part of the API contract.
+    *frameCount = minBufCount * sourceFramesNeeded(sampleRate, afFrameCount, afSampleRate);
+    // The formula above should always produce a non-zero value under normal circumstances:
+    // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
+    // Return error in the unlikely event that it does not, as that's part of the API contract.
     if (*frameCount == 0) {
-        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %d",
+        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
                 streamType, sampleRate);
         return BAD_VALUE;
     }
-    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, minBufCount=%d, afSampleRate=%d, afLatency=%d",
+    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, minBufCount=%u, afSampleRate=%u, afLatency=%u",
             *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
     return NO_ERROR;
 }
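A worked example of the revised getMinFrameCount() arithmetic (illustrative only; the mixer parameters below are hypothetical stand-ins for the values queried from AudioSystem):

    // Illustrative only: the new minimum-buffer math with hypothetical mixer
    // parameters (afFrameCount = 1024, afSampleRate = 48000, afLatency = 64 ms)
    // and a 44.1 kHz client track.
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <media/AudioResamplerPublic.h>

    int main() {
        const size_t afFrameCount = 1024;     // hypothetical mixer period
        const uint32_t afSampleRate = 48000;  // hypothetical mixer rate
        const uint32_t afLatency = 64;        // hypothetical HAL latency, ms
        const uint32_t sampleRate = 44100;    // client track rate

        // Ensure the buffer depth covers at least the hardware latency.
        uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
        if (minBufCount < 2) {
            minBufCount = 2;
        }
        // New formula: per-buffer source frames come from sourceFramesNeeded().
        const size_t frameCount =
                minBufCount * sourceFramesNeeded(sampleRate, afFrameCount, afSampleRate);
        // With the numbers above: minBufCount = 3, frameCount = 3 * 942 = 2826.
        printf("minBufCount=%u minFrameCount=%zu\n", minBufCount, frameCount);
        return 0;
    }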
@@ -1015,11 +1014,9 @@
     // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
     //  n = 1   fast track with single buffering; nBuffering is ignored
     //  n = 2   fast track with double buffering
-    //  n = 2   normal track, no sample rate conversion
-    //  n = 3   normal track, with sample rate conversion
-    //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
-    //  n > 3   very high latency or very small notification interval; nBuffering is ignored
-    const uint32_t nBuffering = (mSampleRate == afSampleRate) ? 2 : 3;
+    //  n = 2   normal track (including those with sample rate conversion)
+    //  n >= 3  very high latency or very small notification interval (unused).
+    const uint32_t nBuffering = 2;
 
     mNotificationFramesAct = mNotificationFramesReq;
 
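To make the double-buffering policy concrete, a small sketch of the wakeup cadence implied by nBuffering = 2 (hypothetical numbers, reusing the 2826-frame buffer from the example above):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    int main() {
        const uint32_t nBuffering = 2;        // double buffering for streaming tracks
        const size_t frameCount = 2826;       // hypothetical client buffer (see above)
        const uint32_t sampleRate = 44100;    // hypothetical track rate

        // The server wakes the client each time one of the nBuffering parts drains.
        const size_t notificationFrames = frameCount / nBuffering;              // 1413
        const uint32_t periodMs = (uint32_t)(notificationFrames * 1000 / sampleRate);
        printf("wakeup every %zu frames (~%u ms)\n", notificationFrames, periodMs);
        return 0;
    }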
@@ -1060,39 +1057,9 @@
         // But when initializing a shared buffer AudioTrack via set(),
         // there _is_ a frameCount parameter.  We silently ignore it.
         frameCount = mSharedBuffer->size() / mFrameSize;
-
-    } else if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
-
-        // FIXME move these calculations and associated checks to server
-
-        // Ensure that buffer depth covers at least audio hardware latency
-        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
-        ALOGV("afFrameCount=%zu, minBufCount=%d, afSampleRate=%u, afLatency=%d",
-                afFrameCount, minBufCount, afSampleRate, afLatency);
-        if (minBufCount <= nBuffering) {
-            minBufCount = nBuffering;
-        }
-
-        size_t minFrameCount = afFrameCount * minBufCount * uint64_t(mSampleRate) / afSampleRate;
-        ALOGV("minFrameCount: %zu, afFrameCount=%zu, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
-                ", afLatency=%d",
-                minFrameCount, afFrameCount, minBufCount, mSampleRate, afSampleRate, afLatency);
-
-        if (frameCount == 0) {
-            frameCount = minFrameCount;
-        } else if (frameCount < minFrameCount) {
-            // not ALOGW because it happens all the time when playing key clicks over A2DP
-            ALOGV("Minimum buffer size corrected from %zu to %zu",
-                     frameCount, minFrameCount);
-            frameCount = minFrameCount;
-        }
-        // Make sure that application is notified with sufficient margin before underrun
-        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
-            mNotificationFramesAct = frameCount/nBuffering;
-        }
-
     } else {
-        // For fast tracks, the frame count calculations and checks are done by server
+        // For fast and normal streaming tracks,
+        // the frame count calculations and checks are done by server
     }
 
     IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
@@ -1175,23 +1142,10 @@
         if (trackFlags & IAudioFlinger::TRACK_FAST) {
             ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
             mAwaitBoost = true;
-            if (mSharedBuffer == 0) {
-                // Theoretically double-buffering is not required for fast tracks,
-                // due to tighter scheduling.  But in practice, to accommodate kernels with
-                // scheduling jitter, and apps with computation jitter, we use double-buffering.
-                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
-                    mNotificationFramesAct = frameCount/nBuffering;
-                }
-            }
         } else {
             ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
             // once denied, do not request again if IAudioTrack is re-created
             mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
-            if (mSharedBuffer == 0) {
-                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
-                    mNotificationFramesAct = frameCount/nBuffering;
-                }
-            }
         }
     }
     if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
@@ -1214,6 +1168,16 @@
             //return NO_INIT;
         }
     }
+    // Make sure that application is notified with sufficient margin before underrun
+    if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
+        // Theoretically double-buffering is not required for fast tracks,
+        // due to tighter scheduling.  But in practice, to accommodate kernels with
+        // scheduling jitter, and apps with computation jitter, we use double-buffering
+        // for fast tracks just like normal streaming tracks.
+        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount / nBuffering) {
+            mNotificationFramesAct = frameCount / nBuffering;
+        }
+    }
 
     // We retain a copy of the I/O handle, but don't own the reference
     mOutput = output;
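A sketch of the clamp that now applies uniformly to fast and normal streaming tracks (hypothetical request and buffer size; the real code operates on mNotificationFramesAct and the negotiated frameCount):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    int main() {
        const uint32_t nBuffering = 2;
        const size_t frameCount = 2826;            // hypothetical track buffer
        uint32_t notificationFramesAct = 2000;     // hypothetical app request, too large

        // A request of 0 or of more than one buffer part is reduced so the app
        // is always notified with at least one part of margin before underrun.
        if (notificationFramesAct == 0 || notificationFramesAct > frameCount / nBuffering) {
            notificationFramesAct = frameCount / nBuffering;
        }
        printf("notificationFramesAct clamped to %u frames\n", notificationFramesAct);  // 1413
        return 0;
    }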
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 384bd25..40ab0af 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -174,18 +174,6 @@
 // and that all "fast" AudioRecord clients read from.  In either case, the size can be small.
 static const size_t kRecordThreadReadOnlyHeapSize = 0x2000;
 
-// Returns the source frames needed to resample to destination frames.  This is not a precise
-// value and depends on the resampler (and possibly how it handles rounding internally).
-// If srcSampleRate and dstSampleRate are equal, then it returns destination frames, which
-// may not be a true if the resampler is asynchronous.
-static inline size_t sourceFramesNeeded(
-        uint32_t srcSampleRate, size_t dstFramesRequired, uint32_t dstSampleRate) {
-    // +1 for rounding - always do this even if matched ratio
-    // +1 for additional sample needed for interpolation
-    return srcSampleRate == dstSampleRate ? dstFramesRequired :
-            size_t((uint64_t)dstFramesRequired * srcSampleRate / dstSampleRate + 1 + 1);
-}
-
 // ----------------------------------------------------------------------------
 
 static pthread_once_t sFastTrackMultiplierOnce = PTHREAD_ONCE_INIT;
@@ -1497,20 +1485,25 @@
                 audio_is_linear_pcm(format),
                 channelMask, sampleRate, mSampleRate, hasFastMixer(), tid, mFastTrackAvailMask);
         *flags &= ~IAudioFlinger::TRACK_FAST;
-        // For compatibility with AudioTrack calculation, buffer depth is forced
-        // to be at least 2 x the normal mixer frame count and cover audio hardware latency.
-        // This is probably too conservative, but legacy application code may depend on it.
-        // If you change this calculation, also review the start threshold which is related.
+      }
+    }
+    // For normal PCM streaming tracks, update minimum frame count.
+    // For compatibility with AudioTrack calculation, buffer depth is forced
+    // to be at least 2 x the normal mixer frame count and cover audio hardware latency.
+    // This is probably too conservative, but legacy application code may depend on it.
+    // If you change this calculation, also review the start threshold which is related.
+    if (!(*flags & IAudioFlinger::TRACK_FAST)
+            && audio_is_linear_pcm(format) && sharedBuffer == 0) {
         uint32_t latencyMs = mOutput->stream->get_latency(mOutput->stream);
         uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate);
         if (minBufCount < 2) {
             minBufCount = 2;
         }
-        size_t minFrameCount = mNormalFrameCount * minBufCount;
-        if (frameCount < minFrameCount) {
+        size_t minFrameCount =
+                minBufCount * sourceFramesNeeded(sampleRate, mNormalFrameCount, mSampleRate);
+        if (frameCount < minFrameCount) { // including frameCount == 0
             frameCount = minFrameCount;
         }
-      }
     }
     *pFrameCount = frameCount;
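For comparison with the client-side change, a sketch of the server-side minimum with hypothetical values standing in for the thread members mNormalFrameCount and mSampleRate and for the HAL latency:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <media/AudioResamplerPublic.h>

    int main() {
        const size_t mNormalFrameCount = 960;   // hypothetical normal mixer period
        const uint32_t mSampleRate = 48000;     // mixer (destination) rate
        const uint32_t sampleRate = 44100;      // client track (source) rate
        const uint32_t latencyMs = 40;          // hypothetical HAL latency

        uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate);
        if (minBufCount < 2) {
            minBufCount = 2;
        }
        // The server-side minimum now scales the mixer period by the resampling
        // ratio as well, matching the client-side getMinFrameCount() calculation.
        const size_t minFrameCount =
                minBufCount * sourceFramesNeeded(sampleRate, mNormalFrameCount, mSampleRate);
        printf("minBufCount=%u minFrameCount=%zu\n", minBufCount, minFrameCount);  // 2, 1768
        return 0;
    }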