/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG (mInService ? "AAudioService" : "AAudio")
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#define ATRACE_TAG ATRACE_TAG_AUDIO

#include <utils/Trace.h>

#include "client/AudioStreamInternalPlay.h"
#include "utility/AudioClock.h"

using android::WrappingBuffer;

using namespace aaudio;

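// Client-side playback implementation of an AAudio stream. Application audio is written
// into a shared ring buffer that is drained by the AAudio service, or directly by the
// hardware (DMA/DSP) when the endpoint is free-running.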
AudioStreamInternalPlay::AudioStreamInternalPlay(AAudioServiceInterface &serviceInterface,
                                                 bool inService)
        : AudioStreamInternal(serviceInterface, inService) {

}

AudioStreamInternalPlay::~AudioStreamInternalPlay() {}

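// Stop the timing model, invalidate the cached timestamp, and forward the pause to the
// underlying stream. The callback thread is handled separately in requestPause() below.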
aaudio_result_t AudioStreamInternalPlay::requestPauseInternal()
{
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGE("AudioStreamInternal::requestPauseInternal() mServiceStreamHandle invalid = 0x%08X",
              mServiceStreamHandle);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    mClockModel.stop(AudioClock::getNanoseconds());
    setState(AAUDIO_STREAM_STATE_PAUSING);
    mAtomicTimestamp.clear();
    return AAudioConvert_androidToAAudioResult(pauseWithStatus());
}

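// Stop the callback thread first so that no more blocking writes are issued,
// then pause the stream itself.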
aaudio_result_t AudioStreamInternalPlay::requestPause()
{
    aaudio_result_t result = stopCallback();
    if (result != AAUDIO_OK) {
        return result;
    }
    result = requestPauseInternal();
    return result;
}

aaudio_result_t AudioStreamInternalPlay::requestFlush() {
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGE("AudioStreamInternal::requestFlush() mServiceStreamHandle invalid = 0x%08X",
              mServiceStreamHandle);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    setState(AAUDIO_STREAM_STATE_FLUSHING);
    return mServiceInterface.flushStream(mServiceStreamHandle);
}

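// After a flush, or a slow cold start, the server's read position has moved without the
// client. Adjust the client-side counters so that the application does not observe the
// frame positions jumping backwards.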
void AudioStreamInternalPlay::advanceClientToMatchServerPosition() {
    int64_t readCounter = mAudioEndpoint.getDataReadCounter();
    int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();

    // Bump offset so caller does not see the retrograde motion in getFramesRead().
    int64_t offset = writeCounter - readCounter;
    mFramesOffsetFromService += offset;
    ALOGD("advanceClientToMatchServerPosition() readN = %lld, writeN = %lld, offset = %lld",
          (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);

    // Force writeCounter to match readCounter.
    // This is because we cannot change the read counter in the hardware.
    mAudioEndpoint.setDataWriteCounter(readCounter);
}

void AudioStreamInternalPlay::onFlushFromServer() {
    advanceClientToMatchServerPosition();
}

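// write() delegates to the shared processData() loop in the base class, which repeatedly
// calls processDataNow() below and, when no progress can be made, sleeps until the
// wake-up time that processDataNow() reports.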
// Write the data, blocking if needed and timeoutNanoseconds > 0
aaudio_result_t AudioStreamInternalPlay::write(const void *buffer, int32_t numFrames,
                                               int64_t timeoutNanoseconds)
{
    return processData((void *)buffer, numFrames, timeoutNanoseconds);
}

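// The service-side read pointer is not directly visible while a DSP or DMA channel is
// consuming the buffer, so this step estimates the read position from the timing model,
// counts underruns, and derives a suggested wake-up time for the next write attempt.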
// Write as much data as we can without blocking.
aaudio_result_t AudioStreamInternalPlay::processDataNow(void *buffer, int32_t numFrames,
                                                        int64_t currentNanoTime, int64_t *wakeTimePtr) {
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    const char *traceName = "aaWrNow";
    ATRACE_BEGIN(traceName);

    if (mClockModel.isStarting()) {
        // Still haven't got any timestamps from server.
        // Keep waiting until we get some valid timestamps then start writing to the
        // current buffer position.
        ALOGD("processDataNow() wait for valid timestamps");
        // Sleep very briefly and hope we get a timestamp soon.
        *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
        ATRACE_END();
        return 0;
    }
    // If we have gotten this far then we have at least one timestamp from the server.

    // If a DMA channel or DSP is reading the other end then we have to update the readCounter.
    if (mAudioEndpoint.isFreeRunning()) {
        // Update data queue based on the timing model.
        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        // ALOGD("AudioStreamInternal::processDataNow() - estimatedReadCounter = %d", (int)estimatedReadCounter);
        mAudioEndpoint.setDataReadCounter(estimatedReadCounter);
    }

    if (mNeedCatchUp.isRequested()) {
        // Catch an MMAP pointer that is already advancing.
        // This will avoid initial underruns caused by a slow cold start.
        advanceClientToMatchServerPosition();
        mNeedCatchUp.acknowledge();
    }

    // If the read index passed the write index then consider it an underrun.
    if (mAudioEndpoint.getFullFramesAvailable() < 0) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaUnderRuns", mXRunCount);
        }
    }

    // Write some data to the buffer.
    //ALOGD("AudioStreamInternal::processDataNow() - writeNowWithConversion(%d)", numFrames);
    int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternal::processDataNow() - tried to write %d frames, wrote %d",
    //      numFrames, framesWritten);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaWrote", framesWritten);
    }

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesWritten >= 0) {
        // By default wake up a few milliseconds from now. // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternal::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                if (framesWritten != 0) {
                    // Don't wait to write more data. Just prime the buffer.
                    wakeTime = currentNanoTime;
                }
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            {
                // When do we expect the next read burst to occur?

                // Calculate frame position based off of the writeCounter because
                // the readCounter might have just advanced in the background,
                // causing us to sleep until a later burst.
                int64_t nextPosition = mAudioEndpoint.getDataWriteCounter() + mFramesPerBurst
                        - mAudioEndpoint.getBufferSizeInFrames();
                wakeTime = mClockModel.convertPositionToTime(nextPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;
    }

    ATRACE_END();
    return framesWritten;
}

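// The empty region of the ring buffer may wrap around the end of the underlying storage,
// so it is returned as up to two contiguous parts. Each part is filled after converting
// from the application format to the device format and applying the volume ramp.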
aaudio_result_t AudioStreamInternalPlay::writeNowWithConversion(const void *buffer,
                                                                int32_t numFrames) {
    // ALOGD("AudioStreamInternal::writeNowWithConversion(%p, %d)",
    //       buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *source = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    mAudioEndpoint.getEmptyFramesAvailable(&wrappingBuffer);

    // Write data in one or two parts.
    int partIndex = 0;
    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
        int32_t framesToWrite = framesLeft;
        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable > 0) {
            if (framesToWrite > framesAvailable) {
                framesToWrite = framesAvailable;
            }
            int32_t numBytes = getBytesPerFrame() * framesToWrite;
            int32_t numSamples = framesToWrite * getSamplesPerFrame();
            // Data conversion.
            float levelFrom;
            float levelTo;
            bool ramping = mVolumeRamp.nextSegment(framesToWrite * getSamplesPerFrame(),
                                                   &levelFrom, &levelTo);
            // The formats are validated when the stream is opened so we do not have to
            // check for illegal combinations here.
            // TODO factor this out into a utility function
            if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                    AAudio_linearRamp(
                            (const float *) source,
                            (float *) wrappingBuffer.data[partIndex],
                            framesToWrite,
                            getSamplesPerFrame(),
                            levelFrom,
                            levelTo);
                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                    if (ramping) {
                        AAudioConvert_floatToPcm16(
                                (const float *) source,
                                (int16_t *) wrappingBuffer.data[partIndex],
                                framesToWrite,
                                getSamplesPerFrame(),
                                levelFrom,
                                levelTo);
                    } else {
                        AAudioConvert_floatToPcm16(
                                (const float *) source,
                                (int16_t *) wrappingBuffer.data[partIndex],
                                numSamples,
                                levelTo);
                    }
                }
            } else if (getFormat() == AAUDIO_FORMAT_PCM_I16) {
                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                    if (ramping) {
                        AAudioConvert_pcm16ToFloat(
                                (const int16_t *) source,
                                (float *) wrappingBuffer.data[partIndex],
                                framesToWrite,
                                getSamplesPerFrame(),
                                levelFrom,
                                levelTo);
                    } else {
                        AAudioConvert_pcm16ToFloat(
                                (const int16_t *) source,
                                (float *) wrappingBuffer.data[partIndex],
                                numSamples,
                                levelTo);
                    }
                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                    AAudio_linearRamp(
                            (const int16_t *) source,
                            (int16_t *) wrappingBuffer.data[partIndex],
                            framesToWrite,
                            getSamplesPerFrame(),
                            levelFrom,
                            levelTo);
                }
            }
            source += numBytes;
            framesLeft -= framesToWrite;
        } else {
            break;
        }
        partIndex++;
    }
    int32_t framesWritten = numFrames - framesLeft;
    mAudioEndpoint.advanceWriteIndex(framesWritten);

    // ALOGD("AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
    return framesWritten;
}

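// While the stream is active the hardware read position is estimated from the timing
// model; otherwise the last known read counter is used. The result is clamped so that
// it never moves backwards between calls.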
int64_t AudioStreamInternalPlay::getFramesRead()
{
    int64_t framesReadHardware;
    if (isActive()) {
        framesReadHardware = mClockModel.convertTimeToPosition(AudioClock::getNanoseconds());
    } else {
        framesReadHardware = mAudioEndpoint.getDataReadCounter();
    }
    int64_t framesRead = framesReadHardware + mFramesOffsetFromService;
    // Prevent retrograde motion.
    if (framesRead < mLastFramesRead) {
        framesRead = mLastFramesRead;
    } else {
        mLastFramesRead = framesRead;
    }
    //ALOGD("AudioStreamInternalPlay::getFramesRead() returns %lld", (long long)framesRead);
    return framesRead;
}

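// The write position is the endpoint's write counter plus the offset accumulated in
// advanceClientToMatchServerPosition().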
int64_t AudioStreamInternalPlay::getFramesWritten()
{
    int64_t framesWritten = mAudioEndpoint.getDataWriteCounter()
            + mFramesOffsetFromService;
    //ALOGD("AudioStreamInternalPlay::getFramesWritten() returns %lld", (long long)framesWritten);
    return framesWritten;
}

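// Body of the stream's callback thread. It keeps invoking the application's data callback
// and blocking-writing the rendered audio until the callback returns
// AAUDIO_CALLBACK_RESULT_STOP, the stream becomes inactive, or a write fails.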
// Render audio in the application callback and then write the data to the stream.
void *AudioStreamInternalPlay::callbackLoop() {
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    AAudioStream_dataCallback appCallback = getDataCallbackProc();
    if (appCallback == nullptr) return NULL;
    int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

    // result might be a frame count
    while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
        // Call application using the AAudio callback interface.
        callbackResult = (*appCallback)(
                (AAudioStream *) this,
                getDataCallbackUserData(),
                mCallbackBuffer,
                mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
            // Write audio data to stream. This is a BLOCKING WRITE!
            result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
            if ((result != mCallbackFrames)) {
                ALOGE("AudioStreamInternalPlay(): callbackLoop: write() returned %d", result);
                if (result >= 0) {
                    // Only wrote some of the frames requested. Must have timed out.
                    result = AAUDIO_ERROR_TIMEOUT;
                }
                AAudioStream_errorCallback errorCallback = getErrorCallbackProc();
                if (errorCallback != nullptr) {
                    (*errorCallback)(
                            (AAudioStream *) this,
                            getErrorCallbackUserData(),
                            result);
                }
                break;
            }
        } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("AudioStreamInternalPlay(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
            break;
        }
    }

    ALOGD("AudioStreamInternalPlay(): callbackLoop() exiting, result = %d, isActive() = %d",
          result, (int) isActive());
    return NULL;
}