blob: 8d7a01ea856e6bcf6643110e760642880332730e [file] [log] [blame]
Phil Burk87c9f642017-05-17 07:22:39 -07001/*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Phil Burkfbf031e2017-10-12 15:58:31 -070017#define LOG_TAG (mInService ? "AudioStreamInternalPlay_Service" \
18 : "AudioStreamInternalPlay_Client")
Phil Burk87c9f642017-05-17 07:22:39 -070019//#define LOG_NDEBUG 0
20#include <utils/Log.h>
21
Phil Burkfd34a932017-07-19 07:03:52 -070022#define ATRACE_TAG ATRACE_TAG_AUDIO
23
24#include <utils/Trace.h>
25
Phil Burk87c9f642017-05-17 07:22:39 -070026#include "client/AudioStreamInternalPlay.h"
27#include "utility/AudioClock.h"
28
29using android::WrappingBuffer;
30
31using namespace aaudio;
32
33AudioStreamInternalPlay::AudioStreamInternalPlay(AAudioServiceInterface &serviceInterface,
34 bool inService)
35 : AudioStreamInternal(serviceInterface, inService) {
36
37}
38
39AudioStreamInternalPlay::~AudioStreamInternalPlay() {}
40
41
Phil Burkb336e892017-07-05 15:35:43 -070042aaudio_result_t AudioStreamInternalPlay::requestPauseInternal()
43{
44 if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
45 ALOGE("AudioStreamInternal::requestPauseInternal() mServiceStreamHandle invalid = 0x%08X",
46 mServiceStreamHandle);
47 return AAUDIO_ERROR_INVALID_STATE;
48 }
49
50 mClockModel.stop(AudioClock::getNanoseconds());
51 setState(AAUDIO_STREAM_STATE_PAUSING);
Phil Burkbcc36742017-08-31 17:24:51 -070052 mAtomicTimestamp.clear();
Phil Burk965650e2017-09-07 21:00:09 -070053 return mServiceInterface.pauseStream(mServiceStreamHandle);
Phil Burkb336e892017-07-05 15:35:43 -070054}
55
56aaudio_result_t AudioStreamInternalPlay::requestPause()
57{
58 aaudio_result_t result = stopCallback();
59 if (result != AAUDIO_OK) {
60 return result;
61 }
62 result = requestPauseInternal();
63 return result;
64}
65
66aaudio_result_t AudioStreamInternalPlay::requestFlush() {
67 if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
68 ALOGE("AudioStreamInternal::requestFlush() mServiceStreamHandle invalid = 0x%08X",
69 mServiceStreamHandle);
70 return AAUDIO_ERROR_INVALID_STATE;
71 }
72
73 setState(AAUDIO_STREAM_STATE_FLUSHING);
74 return mServiceInterface.flushStream(mServiceStreamHandle);
75}
76
Phil Burkbcc36742017-08-31 17:24:51 -070077void AudioStreamInternalPlay::advanceClientToMatchServerPosition() {
Phil Burkb336e892017-07-05 15:35:43 -070078 int64_t readCounter = mAudioEndpoint.getDataReadCounter();
79 int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();
80
81 // Bump offset so caller does not see the retrograde motion in getFramesRead().
Phil Burkbcc36742017-08-31 17:24:51 -070082 int64_t offset = writeCounter - readCounter;
83 mFramesOffsetFromService += offset;
84 ALOGD("advanceClientToMatchServerPosition() readN = %lld, writeN = %lld, offset = %lld",
Phil Burkb336e892017-07-05 15:35:43 -070085 (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);
86
Phil Burkbcc36742017-08-31 17:24:51 -070087 // Force writeCounter to match readCounter.
88 // This is because we cannot change the read counter in the hardware.
Phil Burkb336e892017-07-05 15:35:43 -070089 mAudioEndpoint.setDataWriteCounter(readCounter);
90}
91
Phil Burkbcc36742017-08-31 17:24:51 -070092void AudioStreamInternalPlay::onFlushFromServer() {
93 advanceClientToMatchServerPosition();
94}
95
Phil Burk87c9f642017-05-17 07:22:39 -070096// Write the data, block if needed and timeoutMillis > 0
97aaudio_result_t AudioStreamInternalPlay::write(const void *buffer, int32_t numFrames,
98 int64_t timeoutNanoseconds)
99
100{
101 return processData((void *)buffer, numFrames, timeoutNanoseconds);
102}
103
// Write as much data as we can without blocking.
//
// Called from the processData() loop. Returns the number of frames written
// (possibly 0) or a negative AAudio error. If wakeTimePtr is non-null it is
// set to a suggested time for the caller to try again.
aaudio_result_t AudioStreamInternalPlay::processDataNow(void *buffer, int32_t numFrames,
                                          int64_t currentNanoTime, int64_t *wakeTimePtr) {
    // Service commands (state changes, etc.) are handled before touching data.
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    const char *traceName = "aaWrNow";
    ATRACE_BEGIN(traceName);

    if (mClockModel.isStarting()) {
        // Still haven't got any timestamps from server.
        // Keep waiting until we get some valid timestamps then start writing to the
        // current buffer position.
        ALOGD("processDataNow() wait for valid timestamps");
        // Sleep very briefly and hope we get a timestamp soon.
        *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
        ATRACE_END();
        return 0;   // zero frames written; caller will retry at *wakeTimePtr
    }
    // If we have gotten this far then we have at least one timestamp from server.

    // If a DMA channel or DSP is reading the other end then we have to update the readCounter.
    if (mAudioEndpoint.isFreeRunning()) {
        // Update data queue based on the timing model.
        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        // ALOGD("AudioStreamInternal::processDataNow() - estimatedReadCounter = %d", (int)estimatedReadCounter);
        mAudioEndpoint.setDataReadCounter(estimatedReadCounter);
    }

    if (mNeedCatchUp.isRequested()) {
        // Catch an MMAP pointer that is already advancing.
        // This will avoid initial underruns caused by a slow cold start.
        advanceClientToMatchServerPosition();
        mNeedCatchUp.acknowledge();
    }

    // If the read index passed the write index then consider it an underrun.
    if (mAudioEndpoint.getFullFramesAvailable() < 0) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaUnderRuns", mXRunCount);
        }
    }

    // Write some data to the buffer.
    //ALOGD("AudioStreamInternal::processDataNow() - writeNowWithConversion(%d)", numFrames);
    int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternal::processDataNow() - tried to write %d frames, wrote %d",
    //    numFrames, framesWritten);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaWrote", framesWritten);
    }

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesWritten >= 0) {
        // By default wake up a few milliseconds from now. // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternal::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                if (framesWritten != 0) {
                    // Don't wait to write more data. Just prime the buffer.
                    wakeTime = currentNanoTime;
                }
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            {
                // When do we expect the next read burst to occur?

                // Calculate frame position based off of the writeCounter because
                // the readCounter might have just advanced in the background,
                // causing us to sleep until a later burst.
                int64_t nextPosition = mAudioEndpoint.getDataWriteCounter() + mFramesPerBurst
                        - mAudioEndpoint.getBufferSizeInFrames();
                wakeTime = mClockModel.convertPositionToTime(nextPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;

    }

    ATRACE_END();
    return framesWritten;
}
196
197
// Copy up to numFrames from the caller's buffer into the endpoint's wrapping
// buffer, converting sample format and applying the volume ramp as needed.
//
// Returns the number of frames actually written (may be less than numFrames
// if the endpoint buffer fills up). Format combinations are validated at
// stream open, so only the pairs handled below can occur here.
aaudio_result_t AudioStreamInternalPlay::writeNowWithConversion(const void *buffer,
                                       int32_t numFrames) {
    // ALOGD("AudioStreamInternal::writeNowWithConversion(%p, %d)",
    //              buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *source = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    mAudioEndpoint.getEmptyFramesAvailable(&wrappingBuffer);

    // Write data in one or two parts.
    // The wrapping buffer exposes the empty region as up to two contiguous parts.
    int partIndex = 0;
    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
        int32_t framesToWrite = framesLeft;
        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable > 0) {
            // Clamp this pass to the contiguous space in the current part.
            if (framesToWrite > framesAvailable) {
                framesToWrite = framesAvailable;
            }
            int32_t numBytes = getBytesPerFrame() * framesToWrite;
            int32_t numSamples = framesToWrite * getSamplesPerFrame();
            // Data conversion.
            // levelFrom/levelTo are the gain at the start and end of this segment;
            // ramping is true while the volume is still sliding toward its target.
            float levelFrom;
            float levelTo;
            bool ramping = mVolumeRamp.nextSegment(framesToWrite, &levelFrom, &levelTo);
            // The formats are validated when the stream is opened so we do not have to
            // check for illegal combinations here.
            // TODO factor this out into a utility function
            if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                    // float -> float: copy with a linear gain ramp.
                    AAudio_linearRamp(
                            (const float *) source,
                            (float *) wrappingBuffer.data[partIndex],
                            framesToWrite,
                            getSamplesPerFrame(),
                            levelFrom,
                            levelTo);
                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                    // float -> int16: ramping variant takes frames, flat variant
                    // takes samples with a single gain.
                    if (ramping) {
                        AAudioConvert_floatToPcm16(
                                (const float *) source,
                                (int16_t *) wrappingBuffer.data[partIndex],
                                framesToWrite,
                                getSamplesPerFrame(),
                                levelFrom,
                                levelTo);
                    } else {
                        AAudioConvert_floatToPcm16(
                                (const float *) source,
                                (int16_t *) wrappingBuffer.data[partIndex],
                                numSamples,
                                levelTo);
                    }
                }
            } else if (getFormat() == AAUDIO_FORMAT_PCM_I16) {
                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                    // int16 -> float: same ramping/flat split as above.
                    if (ramping) {
                        AAudioConvert_pcm16ToFloat(
                                (const int16_t *) source,
                                (float *) wrappingBuffer.data[partIndex],
                                framesToWrite,
                                getSamplesPerFrame(),
                                levelFrom,
                                levelTo);
                    } else {
                        AAudioConvert_pcm16ToFloat(
                                (const int16_t *) source,
                                (float *) wrappingBuffer.data[partIndex],
                                numSamples,
                                levelTo);
                    }
                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                    // int16 -> int16: copy with a linear gain ramp.
                    AAudio_linearRamp(
                            (const int16_t *) source,
                            (int16_t *) wrappingBuffer.data[partIndex],
                            framesToWrite,
                            getSamplesPerFrame(),
                            levelFrom,
                            levelTo);
                }
            }
            source += numBytes;
            framesLeft -= framesToWrite;
        } else {
            break;  // no space in this part; the second part cannot help either
        }
        partIndex++;
    }
    int32_t framesWritten = numFrames - framesLeft;
    mAudioEndpoint.advanceWriteIndex(framesWritten);

    // ALOGD("AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
    return framesWritten;
}
292
Phil Burk87c9f642017-05-17 07:22:39 -0700293int64_t AudioStreamInternalPlay::getFramesRead()
294{
Phil Burkec89b2e2017-06-20 15:05:06 -0700295 int64_t framesReadHardware;
296 if (isActive()) {
297 framesReadHardware = mClockModel.convertTimeToPosition(AudioClock::getNanoseconds());
298 } else {
299 framesReadHardware = mAudioEndpoint.getDataReadCounter();
300 }
301 int64_t framesRead = framesReadHardware + mFramesOffsetFromService;
Phil Burk87c9f642017-05-17 07:22:39 -0700302 // Prevent retrograde motion.
303 if (framesRead < mLastFramesRead) {
304 framesRead = mLastFramesRead;
305 } else {
306 mLastFramesRead = framesRead;
307 }
Phil Burkec89b2e2017-06-20 15:05:06 -0700308 //ALOGD("AudioStreamInternalPlay::getFramesRead() returns %lld", (long long)framesRead);
Phil Burk87c9f642017-05-17 07:22:39 -0700309 return framesRead;
310}
311
312int64_t AudioStreamInternalPlay::getFramesWritten()
313{
Phil Burkec89b2e2017-06-20 15:05:06 -0700314 int64_t framesWritten = mAudioEndpoint.getDataWriteCounter()
Phil Burk87c9f642017-05-17 07:22:39 -0700315 + mFramesOffsetFromService;
Phil Burkec89b2e2017-06-20 15:05:06 -0700316 //ALOGD("AudioStreamInternalPlay::getFramesWritten() returns %lld", (long long)framesWritten);
317 return framesWritten;
Phil Burk87c9f642017-05-17 07:22:39 -0700318}
319
320
321// Render audio in the application callback and then write the data to the stream.
322void *AudioStreamInternalPlay::callbackLoop() {
323 aaudio_result_t result = AAUDIO_OK;
324 aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
325 AAudioStream_dataCallback appCallback = getDataCallbackProc();
326 if (appCallback == nullptr) return NULL;
Phil Burkfd34a932017-07-19 07:03:52 -0700327 int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
Phil Burk87c9f642017-05-17 07:22:39 -0700328
329 // result might be a frame count
330 while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
331 // Call application using the AAudio callback interface.
332 callbackResult = (*appCallback)(
333 (AAudioStream *) this,
334 getDataCallbackUserData(),
335 mCallbackBuffer,
336 mCallbackFrames);
337
338 if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
Phil Burkfd34a932017-07-19 07:03:52 -0700339 // Write audio data to stream. This is a BLOCKING WRITE!
Phil Burk87c9f642017-05-17 07:22:39 -0700340 result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
341 if ((result != mCallbackFrames)) {
342 ALOGE("AudioStreamInternalPlay(): callbackLoop: write() returned %d", result);
343 if (result >= 0) {
344 // Only wrote some of the frames requested. Must have timed out.
345 result = AAUDIO_ERROR_TIMEOUT;
346 }
347 AAudioStream_errorCallback errorCallback = getErrorCallbackProc();
348 if (errorCallback != nullptr) {
349 (*errorCallback)(
350 (AAudioStream *) this,
351 getErrorCallbackUserData(),
352 result);
353 }
354 break;
355 }
356 } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
357 ALOGD("AudioStreamInternalPlay(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
358 break;
359 }
360 }
361
362 ALOGD("AudioStreamInternalPlay(): callbackLoop() exiting, result = %d, isActive() = %d",
363 result, (int) isActive());
364 return NULL;
365}
Phil Burk965650e2017-09-07 21:00:09 -0700366
367//------------------------------------------------------------------------------
368// Implementation of PlayerBase
369status_t AudioStreamInternalPlay::doSetVolume() {
370 mVolumeRamp.setTarget(mStreamVolume * getDuckAndMuteVolume());
371 return android::NO_ERROR;
372}