TimedAudio: Fix a cause of audio popping.
This is a manual merge from ics-aah.
> TimedAudio: Fix a cause of audio popping.
>
> Fix an issue with buffer lifecycle management that could cause audio
> pops on timed outputs. There were two issues at work here.
>
> 1) During trim operations for the queued timed audio data, buffers
> were being trimmed based on their starting PTS instead of when the
> chunk of audio data actually ended. This meant that a very large
> chunk of audio data (larger than the mixer lead time) at the head of
> the queue could become eligible to be trimmed before its data had
> been completely mixed into the output stream, even though the output
> stream was fully buffered and in no danger of underflow.
> 2) The implementations of getNextBuffer and releaseBuffer for timed
> audio tracks kept no reference to the data they handed out to the
> mixer. The original architecture here seems to have expected a
> ring-buffer design, but timed audio tracks use a packet-based design.
> Pieces of packets are handed out to the mixer, which then frequently
> holds onto a chunk of data across two mix operations, using the first
> part of the chunk to finish one mix buffer and the end of the chunk
> to start the next. If the buffer the mixer was holding a piece of
> got trimmed before the start of the next mix operation, it would
> return to its heap and could be overwritten with arbitrary data by
> the time it actually got mixed. On debug builds, buffers appear to
> be zeroed as they return to the heap, causing obvious pops in
> presentation.
>
> This change addresses both issues. Trim operations are now based on
> the ending presentation time of a chunk of audio, not its start.
> Also, when the head of the queue is in flight to the mixer, it is no
> longer trimmed immediately; instead it is flagged for trimming and is
> actually removed when the mixer finally calls releaseBuffer.
>
> Signed-off-by: John Grossman <johngro@google.com>
> Change-Id: Ia1ba08cb9dea35a698723ab2d9bcbf804f1682fe
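
For reference, a minimal sketch of the end-of-chunk calculation from
point 1 above. This is illustrative only, not the literal patch: the
standalone function, its name, and its parameters are invented here;
only the LinearTransform usage mirrors the mMediaTimeToSampleTransform
added in the diff below (media time in microseconds on the "a" side,
sample count on the "b" side).

    #include <stdint.h>
    #include <stddef.h>
    #include <utils/LinearTransform.h>

    // Sketch: compute the media time at which a queued chunk of audio ends,
    // given its starting PTS and payload size.  A chunk becomes eligible for
    // trimming only once this end time has passed, not once its start has.
    static bool chunkEndMediaTime(const android::LinearTransform& mediaTimeToSample,
                                  int64_t startPtsUsec,   // chunk start, media time
                                  size_t payloadBytes,    // bytes queued for the chunk
                                  size_t frameSize,       // bytes per audio frame
                                  int64_t* endPtsUsec) {
        int64_t frames = payloadBytes / frameSize;
        int64_t durationUsec;
        // The reverse transform maps samples ("b") back to microseconds ("a");
        // it fails on overflow, in which case the caller should stop trimming.
        if (!mediaTimeToSample.doReverseTransform(frames, &durationUsec))
            return false;
        *endPtsUsec = startPtsUsec + durationUsec;
        return true;
    }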
Change-Id: I2c5e2f0375c410f0de075886aac56ff6317b144c
Signed-off-by: John Grossman <johngro@google.com>
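
A toy model of the queue-head handshake from point 2 follows. The class
and member names (PacketQueue, acquire, release, and so on) are invented
for illustration and are not AudioFlinger APIs; the point is only the
protocol: the head packet is never freed while the mixer still holds a
piece of it, and a trim requested during that window is deferred until
the mixer's releaseBuffer.

    #include <stdint.h>
    #include <stddef.h>
    #include <cassert>
    #include <deque>
    #include <vector>

    // Toy model of the queue-head protocol (names are illustrative only).
    struct Packet {
        std::vector<uint8_t> data;   // the queued audio payload
        size_t position = 0;         // bytes already consumed by the mixer
        int64_t endPtsUsec = 0;      // media time at which the packet's audio ends
    };

    class PacketQueue {
      public:
        // Hand the unconsumed remainder of the head packet to the mixer.
        // While that piece is outstanding, the head packet must stay alive.
        const uint8_t* acquire(size_t* len) {
            assert(!mHeadInFlight && !mQueue.empty());
            Packet& head = mQueue.front();
            mHeadInFlight = true;
            *len = head.data.size() - head.position;
            return head.data.data() + head.position;
        }

        // The mixer is done with the piece it was handed.  Only now may a
        // trim requested while the piece was in flight actually happen.
        void release(size_t consumedBytes) {
            assert(mHeadInFlight && !mQueue.empty());
            Packet& head = mQueue.front();
            head.position += consumedBytes;
            mHeadInFlight = false;
            if (head.position >= head.data.size() || mTrimHeadOnRelease) {
                mQueue.pop_front();
                mTrimHeadOnRelease = false;
            }
        }

        // Drop packets whose ending media time is already in the past.  If
        // the head is currently in flight it is only flagged, never freed
        // out from under the mixer.
        void trim(int64_t mediaTimeNowUsec) {
            while (!mQueue.empty() && mQueue.front().endPtsUsec <= mediaTimeNowUsec) {
                if (mHeadInFlight) {
                    mTrimHeadOnRelease = true;
                    break;
                }
                mQueue.pop_front();
            }
        }

      private:
        std::deque<Packet> mQueue;
        bool mHeadInFlight = false;
        bool mTrimHeadOnRelease = false;
    };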
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index b6ac359..ecc6147 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -3896,6 +3896,8 @@
int sessionId)
: Track(thread, client, streamType, sampleRate, format, channelMask,
frameCount, sharedBuffer, sessionId, IAudioFlinger::TRACK_TIMED),
+ mQueueHeadInFlight(false),
+ mTrimQueueHeadOnRelease(false),
mTimedSilenceBuffer(NULL),
mTimedSilenceBufferSize(0),
mTimedAudioOutputOnTime(false),
@@ -3910,6 +3912,13 @@
mLocalTimeToSampleTransform.a_to_b_denom = mLocalTimeFreq;
LinearTransform::reduce(&mLocalTimeToSampleTransform.a_to_b_numer,
&mLocalTimeToSampleTransform.a_to_b_denom);
+
+ mMediaTimeToSampleTransform.a_zero = 0;
+ mMediaTimeToSampleTransform.b_zero = 0;
+ mMediaTimeToSampleTransform.a_to_b_numer = sampleRate;
+ mMediaTimeToSampleTransform.a_to_b_denom = 1000000;
+ LinearTransform::reduce(&mMediaTimeToSampleTransform.a_to_b_numer,
+ &mMediaTimeToSampleTransform.a_to_b_denom);
}
AudioFlinger::PlaybackThread::TimedTrack::~TimedTrack() {
@@ -3969,12 +3978,35 @@
size_t trimIndex;
for (trimIndex = 0; trimIndex < mTimedBufferQueue.size(); trimIndex++) {
- if (mTimedBufferQueue[trimIndex].pts() > mediaTimeNow)
+ int64_t frameCount = mTimedBufferQueue[trimIndex].buffer()->size()
+ / mCblk->frameSize;
+ int64_t bufEnd;
+
+ if (!mMediaTimeToSampleTransform.doReverseTransform(frameCount,
+ &bufEnd)) {
+ ALOGE("Failed to convert frame count of %lld to media time duration"
+ " (scale factor %d/%u) in %s", frameCount,
+ mMediaTimeToSampleTransform.a_to_b_numer,
+ mMediaTimeToSampleTransform.a_to_b_denom,
+ __PRETTY_FUNCTION__);
break;
+ }
+ bufEnd += mTimedBufferQueue[trimIndex].pts();
+
+ if (bufEnd > mediaTimeNow)
+ break;
+
+ // Is the buffer we want to use in the middle of a mix operation right
+ // now? If so, don't actually trim it. Just wait for the releaseBuffer
+ // from the mixer which should be coming back shortly.
+ if (!trimIndex && mQueueHeadInFlight) {
+ mTrimQueueHeadOnRelease = true;
+ }
}
- if (trimIndex) {
- mTimedBufferQueue.removeItemsAt(0, trimIndex);
+ size_t trimStart = mTrimQueueHeadOnRelease ? 1 : 0;
+ if (trimStart < trimIndex) {
+ mTimedBufferQueue.removeItemsAt(trimStart, trimIndex);
}
}
@@ -4028,6 +4060,9 @@
Mutex::Autolock _l(mTimedBufferQueueLock);
+ ALOG_ASSERT(!mQueueHeadInFlight,
+ "getNextBuffer called without releaseBuffer!");
+
while (true) {
// if we have no timed buffers, then fail
@@ -4049,7 +4084,7 @@
if (mMediaTimeTransform.a_to_b_denom == 0) {
// the transform represents a pause, so yield silence
- timedYieldSilence(buffer->frameCount, buffer);
+ timedYieldSilence_l(buffer->frameCount, buffer);
return NO_ERROR;
}
@@ -4119,7 +4154,7 @@
(!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) {
// the next input is close enough to being on time, so concatenate it
// with the last output
- timedYieldSamples(buffer);
+ timedYieldSamples_l(buffer);
ALOGV("*** on time: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
return NO_ERROR;
@@ -4128,7 +4163,7 @@
// the next input sample is too big, so fill it with silence
uint32_t framesUntilNextInput = (sampleDelta + 0x80000000) >> 32;
- timedYieldSilence(framesUntilNextInput, buffer);
+ timedYieldSilence_l(framesUntilNextInput, buffer);
ALOGV("*** silence: frameCount=%u", buffer->frameCount);
return NO_ERROR;
} else {
@@ -4148,7 +4183,7 @@
head.setPosition(onTimeSamplePosition);
// yield the available samples
- timedYieldSamples(buffer);
+ timedYieldSamples_l(buffer);
ALOGV("*** late: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
return NO_ERROR;
@@ -4161,7 +4196,7 @@
// buffer's capacity.
//
// Caller must hold mTimedBufferQueueLock
-void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSamples(
+void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSamples_l(
AudioBufferProvider::Buffer* buffer) {
const TimedBuffer& head = mTimedBufferQueue[0];
@@ -4174,13 +4209,14 @@
size_t framesRequested = buffer->frameCount;
buffer->frameCount = min(framesLeftInHead, framesRequested);
+ mQueueHeadInFlight = true;
mTimedAudioOutputOnTime = true;
}
// Yield samples of silence up to the given output buffer's capacity
//
// Caller must hold mTimedBufferQueueLock
-void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSilence(
+void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSilence_l(
uint32_t numFrames, AudioBufferProvider::Buffer* buffer) {
// lazily allocate a buffer filled with silence
@@ -4210,21 +4246,44 @@
// queue, it's either because the buffer is part of the silence buffer, or
// because the head of the timed queue was trimmed after the mixer called
// getNextBuffer but before the mixer called releaseBuffer.
- if ((buffer->raw != mTimedSilenceBuffer) && mTimedBufferQueue.size()) {
+ if (buffer->raw == mTimedSilenceBuffer) {
+ ALOG_ASSERT(!mQueueHeadInFlight,
+ "Queue head in flight during release of silence buffer!");
+ goto done;
+ }
+
+ ALOG_ASSERT(mQueueHeadInFlight,
+ "TimedTrack::releaseBuffer of non-silence buffer, but no queue"
+ " head in flight.");
+
+ if (mTimedBufferQueue.size()) {
TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
void* start = head.buffer()->pointer();
- void* end = (char *) head.buffer()->pointer() + head.buffer()->size();
+ void* end = reinterpret_cast<void*>(
+ reinterpret_cast<uint8_t*>(head.buffer()->pointer())
+ + head.buffer()->size());
- if ((buffer->raw >= start) && (buffer->raw <= end)) {
- head.setPosition(head.position() +
- (buffer->frameCount * mCblk->frameSize));
- if (static_cast<size_t>(head.position()) >= head.buffer()->size()) {
- mTimedBufferQueue.removeAt(0);
- }
+ ALOG_ASSERT((buffer->raw >= start) && (buffer->raw < end),
+ "released buffer not within the head of the timed buffer"
+ " queue; qHead = [%p, %p], released buffer = %p",
+ start, end, buffer->raw);
+
+ head.setPosition(head.position() +
+ (buffer->frameCount * mCblk->frameSize));
+ mQueueHeadInFlight = false;
+
+ if ((static_cast<size_t>(head.position()) >= head.buffer()->size())
+ || mTrimQueueHeadOnRelease) {
+ mTimedBufferQueue.removeAt(0);
+ mTrimQueueHeadOnRelease = false;
}
+ } else {
+ LOG_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no"
+ " buffers in the timed buffer queue");
}
+done:
buffer->raw = 0;
buffer->frameCount = 0;
}
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 8a787e1..2812e27 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -786,10 +786,9 @@
// AudioBufferProvider interface
virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts);
virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
-
- void timedYieldSamples(AudioBufferProvider::Buffer* buffer);
- void timedYieldSilence(uint32_t numFrames,
- AudioBufferProvider::Buffer* buffer);
+ void timedYieldSamples_l(AudioBufferProvider::Buffer* buffer);
+ void timedYieldSilence_l(uint32_t numFrames,
+ AudioBufferProvider::Buffer* buffer);
status_t allocateTimedBuffer(size_t size,
sp<IMemory>* buffer);
@@ -812,8 +811,13 @@
uint64_t mLocalTimeFreq;
LinearTransform mLocalTimeToSampleTransform;
+ LinearTransform mMediaTimeToSampleTransform;
sp<MemoryDealer> mTimedMemoryDealer;
+
Vector<TimedBuffer> mTimedBufferQueue;
+ bool mQueueHeadInFlight;
+ bool mTrimQueueHeadOnRelease;
+
uint8_t* mTimedSilenceBuffer;
uint32_t mTimedSilenceBufferSize;
mutable Mutex mTimedBufferQueueLock;