/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG (mInService ? "AAudioService" : "AAudio")
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#define ATRACE_TAG ATRACE_TAG_AUDIO

#include <utils/Trace.h>

#include "client/AudioStreamInternalPlay.h"
#include "utility/AudioClock.h"

using android::WrappingBuffer;

using namespace aaudio;

32AudioStreamInternalPlay::AudioStreamInternalPlay(AAudioServiceInterface &serviceInterface,
33 bool inService)
34 : AudioStreamInternal(serviceInterface, inService) {
35
36}
37
38AudioStreamInternalPlay::~AudioStreamInternalPlay() {}
39
40
Phil Burkb336e892017-07-05 15:35:43 -070041aaudio_result_t AudioStreamInternalPlay::requestPauseInternal()
42{
43 if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
44 ALOGE("AudioStreamInternal::requestPauseInternal() mServiceStreamHandle invalid = 0x%08X",
45 mServiceStreamHandle);
46 return AAUDIO_ERROR_INVALID_STATE;
47 }
48
49 mClockModel.stop(AudioClock::getNanoseconds());
50 setState(AAUDIO_STREAM_STATE_PAUSING);
51 return AAudioConvert_androidToAAudioResult(pauseWithStatus());
52}
53
54aaudio_result_t AudioStreamInternalPlay::requestPause()
55{
56 aaudio_result_t result = stopCallback();
57 if (result != AAUDIO_OK) {
58 return result;
59 }
60 result = requestPauseInternal();
61 return result;
62}
63
64aaudio_result_t AudioStreamInternalPlay::requestFlush() {
65 if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
66 ALOGE("AudioStreamInternal::requestFlush() mServiceStreamHandle invalid = 0x%08X",
67 mServiceStreamHandle);
68 return AAUDIO_ERROR_INVALID_STATE;
69 }
70
71 setState(AAUDIO_STREAM_STATE_FLUSHING);
72 return mServiceInterface.flushStream(mServiceStreamHandle);
73}
74
75void AudioStreamInternalPlay::onFlushFromServer() {
76 int64_t readCounter = mAudioEndpoint.getDataReadCounter();
77 int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();
78
79 // Bump offset so caller does not see the retrograde motion in getFramesRead().
80 int64_t framesFlushed = writeCounter - readCounter;
81 mFramesOffsetFromService += framesFlushed;
82 ALOGD("AudioStreamInternal::onFlushFromServer() readN = %lld, writeN = %lld, offset = %lld",
83 (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);
84
85 // Flush written frames by forcing writeCounter to readCounter.
86 // This is because we cannot move the read counter in the hardware.
87 mAudioEndpoint.setDataWriteCounter(readCounter);
88}
89
Phil Burk87c9f642017-05-17 07:22:39 -070090// Write the data, block if needed and timeoutMillis > 0
91aaudio_result_t AudioStreamInternalPlay::write(const void *buffer, int32_t numFrames,
92 int64_t timeoutNanoseconds)
93
94{
95 return processData((void *)buffer, numFrames, timeoutNanoseconds);
96}
97
// Write as much data as we can without blocking.
// Returns the number of frames actually written (>= 0) or a negative AAudio
// error code. If wakeTimePtr is non-null and the write succeeded, it is set to
// an ideal time for the caller to try again.
aaudio_result_t AudioStreamInternalPlay::processDataNow(void *buffer, int32_t numFrames,
                                              int64_t currentNanoTime, int64_t *wakeTimePtr) {
    // Handle any pending commands from the service (state changes etc.) first.
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    const char *traceName = "aaWrNow";
    ATRACE_BEGIN(traceName);

    // If a DMA channel or DSP is reading the other end then we have to update the readCounter.
    if (mAudioEndpoint.isFreeRunning()) {
        // Update data queue based on the timing model.
        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        // ALOGD("AudioStreamInternal::processDataNow() - estimatedReadCounter = %d", (int)estimatedReadCounter);
        mAudioEndpoint.setDataReadCounter(estimatedReadCounter);
    }

    // If the read index passed the write index then consider it an underrun.
    if (mAudioEndpoint.getFullFramesAvailable() < 0) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaUnderRuns", mXRunCount);
        }
    }

    // Write some data to the buffer.
    //ALOGD("AudioStreamInternal::processDataNow() - writeNowWithConversion(%d)", numFrames);
    int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternal::processDataNow() - tried to write %d frames, wrote %d",
    //    numFrames, framesWritten);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaWrote", framesWritten);
    }

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesWritten >= 0) {
        // By default wake up a few milliseconds from now. // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternal::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                // While priming the buffer, keep writing without sleeping.
                if (framesWritten != 0) {
                    // Don't wait to write more data. Just prime the buffer.
                    wakeTime = currentNanoTime;
                }
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            {
                // When do we expect the next read burst to occur?

                // Calculate frame position based off of the writeCounter because
                // the readCounter might have just advanced in the background,
                // causing us to sleep until a later burst.
                int64_t nextReadPosition = mAudioEndpoint.getDataWriteCounter() + mFramesPerBurst
                        - mAudioEndpoint.getBufferSizeInFrames();
                wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;

    }

    ATRACE_END();
    return framesWritten;
}
171
172
// Copy up to numFrames from the caller's buffer into the shared endpoint FIFO,
// applying sample-format conversion (float <-> int16) and volume ramping as
// needed. Returns the number of frames actually written (may be fewer than
// requested if the FIFO is nearly full).
aaudio_result_t AudioStreamInternalPlay::writeNowWithConversion(const void *buffer,
                                       int32_t numFrames) {
    // ALOGD("AudioStreamInternal::writeNowWithConversion(%p, %d)",
    //              buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *source = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    // The FIFO may expose its empty space as up to two contiguous regions
    // (because the buffer wraps); wrappingBuffer describes both.
    mAudioEndpoint.getEmptyFramesAvailable(&wrappingBuffer);

    // Write data in one or two parts.
    int partIndex = 0;
    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
        int32_t framesToWrite = framesLeft;
        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable > 0) {
            // Clamp this part's write to the contiguous space available.
            if (framesToWrite > framesAvailable) {
                framesToWrite = framesAvailable;
            }
            int32_t numBytes = getBytesPerFrame() * framesToWrite;
            int32_t numSamples = framesToWrite * getSamplesPerFrame();
            // Data conversion.
            float levelFrom;
            float levelTo;
            // Advance the volume ramp; 'ramping' is true when the level is
            // still changing across this segment (levelFrom != levelTo).
            bool ramping = mVolumeRamp.nextSegment(framesToWrite * getSamplesPerFrame(),
                                                   &levelFrom, &levelTo);
            // The formats are validated when the stream is opened so we do not have to
            // check for illegal combinations here.
            // TODO factor this out into a utility function
            if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                    // float -> float: ramped gain copy.
                    AAudio_linearRamp(
                            (const float *) source,
                            (float *) wrappingBuffer.data[partIndex],
                            framesToWrite,
                            getSamplesPerFrame(),
                            levelFrom,
                            levelTo);
                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                    if (ramping) {
                        // float -> int16 with a per-sample gain ramp.
                        AAudioConvert_floatToPcm16(
                                (const float *) source,
                                (int16_t *) wrappingBuffer.data[partIndex],
                                framesToWrite,
                                getSamplesPerFrame(),
                                levelFrom,
                                levelTo);
                    } else {
                        // float -> int16 at a constant gain (cheaper overload).
                        AAudioConvert_floatToPcm16(
                                (const float *) source,
                                (int16_t *) wrappingBuffer.data[partIndex],
                                numSamples,
                                levelTo);
                    }
                }
            } else if (getFormat() == AAUDIO_FORMAT_PCM_I16) {
                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                    if (ramping) {
                        // int16 -> float with a per-sample gain ramp.
                        AAudioConvert_pcm16ToFloat(
                                (const int16_t *) source,
                                (float *) wrappingBuffer.data[partIndex],
                                framesToWrite,
                                getSamplesPerFrame(),
                                levelFrom,
                                levelTo);
                    } else {
                        // int16 -> float at a constant gain.
                        AAudioConvert_pcm16ToFloat(
                                (const int16_t *) source,
                                (float *) wrappingBuffer.data[partIndex],
                                numSamples,
                                levelTo);
                    }
                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                    // int16 -> int16: ramped gain copy.
                    AAudio_linearRamp(
                            (const int16_t *) source,
                            (int16_t *) wrappingBuffer.data[partIndex],
                            framesToWrite,
                            getSamplesPerFrame(),
                            levelFrom,
                            levelTo);
                }
            }
            source += numBytes;
            framesLeft -= framesToWrite;
        } else {
            // No space in this part; the second part cannot have space either.
            break;
        }
        partIndex++;
    }
    int32_t framesWritten = numFrames - framesLeft;
    // Publish the new write position so the reader can see the data.
    mAudioEndpoint.advanceWriteIndex(framesWritten);

    // ALOGD("AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
    return framesWritten;
}
268
269
270int64_t AudioStreamInternalPlay::getFramesRead()
271{
Phil Burkec89b2e2017-06-20 15:05:06 -0700272 int64_t framesReadHardware;
273 if (isActive()) {
274 framesReadHardware = mClockModel.convertTimeToPosition(AudioClock::getNanoseconds());
275 } else {
276 framesReadHardware = mAudioEndpoint.getDataReadCounter();
277 }
278 int64_t framesRead = framesReadHardware + mFramesOffsetFromService;
Phil Burk87c9f642017-05-17 07:22:39 -0700279 // Prevent retrograde motion.
280 if (framesRead < mLastFramesRead) {
281 framesRead = mLastFramesRead;
282 } else {
283 mLastFramesRead = framesRead;
284 }
Phil Burkec89b2e2017-06-20 15:05:06 -0700285 //ALOGD("AudioStreamInternalPlay::getFramesRead() returns %lld", (long long)framesRead);
Phil Burk87c9f642017-05-17 07:22:39 -0700286 return framesRead;
287}
288
289int64_t AudioStreamInternalPlay::getFramesWritten()
290{
Phil Burkec89b2e2017-06-20 15:05:06 -0700291 int64_t framesWritten = mAudioEndpoint.getDataWriteCounter()
Phil Burk87c9f642017-05-17 07:22:39 -0700292 + mFramesOffsetFromService;
Phil Burkec89b2e2017-06-20 15:05:06 -0700293 //ALOGD("AudioStreamInternalPlay::getFramesWritten() returns %lld", (long long)framesWritten);
294 return framesWritten;
Phil Burk87c9f642017-05-17 07:22:39 -0700295}
296
297
298// Render audio in the application callback and then write the data to the stream.
299void *AudioStreamInternalPlay::callbackLoop() {
300 aaudio_result_t result = AAUDIO_OK;
301 aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
302 AAudioStream_dataCallback appCallback = getDataCallbackProc();
303 if (appCallback == nullptr) return NULL;
Phil Burkfd34a932017-07-19 07:03:52 -0700304 int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
Phil Burk87c9f642017-05-17 07:22:39 -0700305
306 // result might be a frame count
307 while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
308 // Call application using the AAudio callback interface.
309 callbackResult = (*appCallback)(
310 (AAudioStream *) this,
311 getDataCallbackUserData(),
312 mCallbackBuffer,
313 mCallbackFrames);
314
315 if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
Phil Burkfd34a932017-07-19 07:03:52 -0700316 // Write audio data to stream. This is a BLOCKING WRITE!
Phil Burk87c9f642017-05-17 07:22:39 -0700317 result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
318 if ((result != mCallbackFrames)) {
319 ALOGE("AudioStreamInternalPlay(): callbackLoop: write() returned %d", result);
320 if (result >= 0) {
321 // Only wrote some of the frames requested. Must have timed out.
322 result = AAUDIO_ERROR_TIMEOUT;
323 }
324 AAudioStream_errorCallback errorCallback = getErrorCallbackProc();
325 if (errorCallback != nullptr) {
326 (*errorCallback)(
327 (AAudioStream *) this,
328 getErrorCallbackUserData(),
329 result);
330 }
331 break;
332 }
333 } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
334 ALOGD("AudioStreamInternalPlay(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
335 break;
336 }
337 }
338
339 ALOGD("AudioStreamInternalPlay(): callbackLoop() exiting, result = %d, isActive() = %d",
340 result, (int) isActive());
341 return NULL;
342}