/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG (mInService ? "AudioStreamInternalPlay_Service" \
                            : "AudioStreamInternalPlay_Client")
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#define ATRACE_TAG ATRACE_TAG_AUDIO

#include <utils/Trace.h>

#include "client/AudioStreamInternalPlay.h"
#include "utility/AudioClock.h"

using android::WrappingBuffer;

using namespace aaudio;

AudioStreamInternalPlay::AudioStreamInternalPlay(AAudioServiceInterface &serviceInterface,
                                                 bool inService)
        : AudioStreamInternal(serviceInterface, inService) {

}

AudioStreamInternalPlay::~AudioStreamInternalPlay() {}

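// Pause the stream on the service side.
// Stops the local clock model so the timing model no longer advances,
// then forwards the pause request to the AAudio service.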
aaudio_result_t AudioStreamInternalPlay::requestPauseInternal()
{
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGE("AudioStreamInternal::requestPauseInternal() mServiceStreamHandle invalid = 0x%08X",
              mServiceStreamHandle);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    mClockModel.stop(AudioClock::getNanoseconds());
    setState(AAUDIO_STREAM_STATE_PAUSING);
    mAtomicTimestamp.clear();
    return mServiceInterface.pauseStream(mServiceStreamHandle);
}

aaudio_result_t AudioStreamInternalPlay::requestPause()
{
    aaudio_result_t result = stopCallback();
    if (result != AAUDIO_OK) {
        return result;
    }
    result = requestPauseInternal();
    return result;
}

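// Tell the service to flush the stream, discarding any buffered data
// that has not yet been played.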
aaudio_result_t AudioStreamInternalPlay::requestFlush() {
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGE("AudioStreamInternal::requestFlush() mServiceStreamHandle invalid = 0x%08X",
              mServiceStreamHandle);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    setState(AAUDIO_STREAM_STATE_FLUSHING);
    return mServiceInterface.flushStream(mServiceStreamHandle);
}

void AudioStreamInternalPlay::advanceClientToMatchServerPosition() {
    int64_t readCounter = mAudioEndpoint.getDataReadCounter();
    int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();

    // Bump offset so caller does not see the retrograde motion in getFramesRead().
    int64_t offset = writeCounter - readCounter;
    mFramesOffsetFromService += offset;
    ALOGD("advanceClientToMatchServerPosition() readN = %lld, writeN = %lld, offset = %lld",
          (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);

    // Force writeCounter to match readCounter.
    // This is because we cannot change the read counter in the hardware.
    mAudioEndpoint.setDataWriteCounter(readCounter);
}

void AudioStreamInternalPlay::onFlushFromServer() {
    advanceClientToMatchServerPosition();
}

// Write the data, block if needed and timeoutNanoseconds > 0
aaudio_result_t AudioStreamInternalPlay::write(const void *buffer, int32_t numFrames,
                                               int64_t timeoutNanoseconds)
{
    return processData((void *)buffer, numFrames, timeoutNanoseconds);
}

// Write as much data as we can without blocking.
aaudio_result_t AudioStreamInternalPlay::processDataNow(void *buffer, int32_t numFrames,
                                                        int64_t currentNanoTime, int64_t *wakeTimePtr) {
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    const char *traceName = "aaWrNow";
    ATRACE_BEGIN(traceName);

    if (mClockModel.isStarting()) {
        // Still haven't got any timestamps from server.
        // Keep waiting until we get some valid timestamps then start writing to the
        // current buffer position.
        ALOGD("processDataNow() wait for valid timestamps");
        // Sleep very briefly and hope we get a timestamp soon.
        *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
        ATRACE_END();
        return 0;
    }
    // If we have gotten this far then we have at least one timestamp from server.

    // If a DMA channel or DSP is reading the other end then we have to update the readCounter.
    if (mAudioEndpoint.isFreeRunning()) {
        // Update data queue based on the timing model.
        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        // ALOGD("AudioStreamInternal::processDataNow() - estimatedReadCounter = %d", (int)estimatedReadCounter);
        mAudioEndpoint.setDataReadCounter(estimatedReadCounter);
    }

    if (mNeedCatchUp.isRequested()) {
        // Catch an MMAP pointer that is already advancing.
        // This will avoid initial underruns caused by a slow cold start.
        advanceClientToMatchServerPosition();
        mNeedCatchUp.acknowledge();
    }

    // If the read index passed the write index then consider it an underrun.
    if (mAudioEndpoint.getFullFramesAvailable() < 0) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaUnderRuns", mXRunCount);
        }
    }

    // Write some data to the buffer.
    //ALOGD("AudioStreamInternal::processDataNow() - writeNowWithConversion(%d)", numFrames);
    int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternal::processDataNow() - tried to write %d frames, wrote %d",
    //      numFrames, framesWritten);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaWrote", framesWritten);
    }

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesWritten >= 0) {
        // By default wake up a few milliseconds from now. // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternal::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                if (framesWritten != 0) {
                    // Don't wait to write more data. Just prime the buffer.
                    wakeTime = currentNanoTime;
                }
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            {
                // When do we expect the next read burst to occur?

                // Calculate frame position based off of the writeCounter because
                // the readCounter might have just advanced in the background,
                // causing us to sleep until a later burst.
                int64_t nextPosition = mAudioEndpoint.getDataWriteCounter() + mFramesPerBurst
                        - mAudioEndpoint.getBufferSizeInFrames();
                wakeTime = mClockModel.convertPositionToTime(nextPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;
    }

    ATRACE_END();
    return framesWritten;
}

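// Copy application frames into the shared endpoint buffer.
// Handles the wrap-around of the circular buffer (one or two parts),
// converts between the application format and the device format,
// and applies the current volume ramp segment.
// Returns the number of frames actually written.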
aaudio_result_t AudioStreamInternalPlay::writeNowWithConversion(const void *buffer,
                                                                int32_t numFrames) {
    // ALOGD("AudioStreamInternal::writeNowWithConversion(%p, %d)",
    //       buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *source = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    mAudioEndpoint.getEmptyFramesAvailable(&wrappingBuffer);

    // Write data in one or two parts.
    int partIndex = 0;
    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
        int32_t framesToWrite = framesLeft;
        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable > 0) {
            if (framesToWrite > framesAvailable) {
                framesToWrite = framesAvailable;
            }
            int32_t numBytes = getBytesPerFrame() * framesToWrite;
            int32_t numSamples = framesToWrite * getSamplesPerFrame();
            // Data conversion.
            float levelFrom;
            float levelTo;
            bool ramping = mVolumeRamp.nextSegment(framesToWrite * getSamplesPerFrame(),
                                                   &levelFrom, &levelTo);
            // The formats are validated when the stream is opened so we do not have to
            // check for illegal combinations here.
            // TODO factor this out into a utility function
            if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                    AAudio_linearRamp(
                            (const float *) source,
                            (float *) wrappingBuffer.data[partIndex],
                            framesToWrite,
                            getSamplesPerFrame(),
                            levelFrom,
                            levelTo);
                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                    if (ramping) {
                        AAudioConvert_floatToPcm16(
                                (const float *) source,
                                (int16_t *) wrappingBuffer.data[partIndex],
                                framesToWrite,
                                getSamplesPerFrame(),
                                levelFrom,
                                levelTo);
                    } else {
                        AAudioConvert_floatToPcm16(
                                (const float *) source,
                                (int16_t *) wrappingBuffer.data[partIndex],
                                numSamples,
                                levelTo);
                    }
                }
            } else if (getFormat() == AAUDIO_FORMAT_PCM_I16) {
                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                    if (ramping) {
                        AAudioConvert_pcm16ToFloat(
                                (const int16_t *) source,
                                (float *) wrappingBuffer.data[partIndex],
                                framesToWrite,
                                getSamplesPerFrame(),
                                levelFrom,
                                levelTo);
                    } else {
                        AAudioConvert_pcm16ToFloat(
                                (const int16_t *) source,
                                (float *) wrappingBuffer.data[partIndex],
                                numSamples,
                                levelTo);
                    }
                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                    AAudio_linearRamp(
                            (const int16_t *) source,
                            (int16_t *) wrappingBuffer.data[partIndex],
                            framesToWrite,
                            getSamplesPerFrame(),
                            levelFrom,
                            levelTo);
                }
            }
            source += numBytes;
            framesLeft -= framesToWrite;
        } else {
            break;
        }
        partIndex++;
    }
    int32_t framesWritten = numFrames - framesLeft;
    mAudioEndpoint.advanceWriteIndex(framesWritten);

    // ALOGD("AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
    return framesWritten;
}

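// Return the stream's read position in frames, as seen by the application.
// While the stream is active the position is interpolated from the timing model;
// otherwise the endpoint's read counter is used. The result is offset by
// mFramesOffsetFromService and never allowed to move backwards.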
int64_t AudioStreamInternalPlay::getFramesRead()
{
    int64_t framesReadHardware;
    if (isActive()) {
        framesReadHardware = mClockModel.convertTimeToPosition(AudioClock::getNanoseconds());
    } else {
        framesReadHardware = mAudioEndpoint.getDataReadCounter();
    }
    int64_t framesRead = framesReadHardware + mFramesOffsetFromService;
    // Prevent retrograde motion.
    if (framesRead < mLastFramesRead) {
        framesRead = mLastFramesRead;
    } else {
        mLastFramesRead = framesRead;
    }
    //ALOGD("AudioStreamInternalPlay::getFramesRead() returns %lld", (long long)framesRead);
    return framesRead;
}

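// Return the stream's write position in frames, in the same frame of
// reference as getFramesRead().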
int64_t AudioStreamInternalPlay::getFramesWritten()
{
    int64_t framesWritten = mAudioEndpoint.getDataWriteCounter()
            + mFramesOffsetFromService;
    //ALOGD("AudioStreamInternalPlay::getFramesWritten() returns %lld", (long long)framesWritten);
    return framesWritten;
}


// Render audio in the application callback and then write the data to the stream.
void *AudioStreamInternalPlay::callbackLoop() {
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    AAudioStream_dataCallback appCallback = getDataCallbackProc();
    if (appCallback == nullptr) return NULL;
    int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

    // result might be a frame count
    while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
        // Call application using the AAudio callback interface.
        callbackResult = (*appCallback)(
                (AAudioStream *) this,
                getDataCallbackUserData(),
                mCallbackBuffer,
                mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
            // Write audio data to stream. This is a BLOCKING WRITE!
            result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
            if ((result != mCallbackFrames)) {
                ALOGE("AudioStreamInternalPlay(): callbackLoop: write() returned %d", result);
                if (result >= 0) {
                    // Only wrote some of the frames requested. Must have timed out.
                    result = AAUDIO_ERROR_TIMEOUT;
                }
                AAudioStream_errorCallback errorCallback = getErrorCallbackProc();
                if (errorCallback != nullptr) {
                    (*errorCallback)(
                            (AAudioStream *) this,
                            getErrorCallbackUserData(),
                            result);
                }
                break;
            }
        } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("AudioStreamInternalPlay(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
            break;
        }
    }

    ALOGD("AudioStreamInternalPlay(): callbackLoop() exiting, result = %d, isActive() = %d",
          result, (int) isActive());
    return NULL;
}

//------------------------------------------------------------------------------
// Implementation of PlayerBase
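// PlayerBase volume hook: route the new effective volume (stream volume
// combined with duck/mute) into the ramp consumed by writeNowWithConversion().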
status_t AudioStreamInternalPlay::doSetVolume() {
    mVolumeRamp.setTarget(mStreamVolume * getDuckAndMuteVolume());
    return android::NO_ERROR;
}