/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG (mInService ? "AudioStreamInternalPlay_Service" \
                            : "AudioStreamInternalPlay_Client")
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#define ATRACE_TAG ATRACE_TAG_AUDIO

#include <utils/Trace.h>

#include "client/AudioStreamInternalPlay.h"
#include "utility/AudioClock.h"

using android::WrappingBuffer;

using namespace aaudio;

AudioStreamInternalPlay::AudioStreamInternalPlay(AAudioServiceInterface &serviceInterface,
                                                 bool inService)
        : AudioStreamInternal(serviceInterface, inService) {
}

AudioStreamInternalPlay::~AudioStreamInternalPlay() {}

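// Stop the data callback (if one is running), then ask the service to pause the stream.
// The clock model is stopped, the cached timestamp is cleared, and the state moves to
// PAUSING; an invalid service handle yields AAUDIO_ERROR_INVALID_STATE.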
aaudio_result_t AudioStreamInternalPlay::requestPause()
{
    aaudio_result_t result = stopCallback();
    if (result != AAUDIO_OK) {
        return result;
    }
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGE("AudioStreamInternal::requestPause() mServiceStreamHandle invalid = 0x%08X",
              mServiceStreamHandle);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    mClockModel.stop(AudioClock::getNanoseconds());
    setState(AAUDIO_STREAM_STATE_PAUSING);
    mAtomicTimestamp.clear();
    return mServiceInterface.pauseStream(mServiceStreamHandle);
}

aaudio_result_t AudioStreamInternalPlay::requestFlush() {
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGE("AudioStreamInternal::requestFlush() mServiceStreamHandle invalid = 0x%08X",
              mServiceStreamHandle);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    setState(AAUDIO_STREAM_STATE_FLUSHING);
    return mServiceInterface.flushStream(mServiceStreamHandle);
}

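// Re-base the client's frame counters onto the server's current read position.
// The difference is folded into mFramesOffsetFromService so getFramesRead() does not
// appear to jump backward, then the local write counter is snapped to the read counter.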
void AudioStreamInternalPlay::advanceClientToMatchServerPosition() {
    int64_t readCounter = mAudioEndpoint.getDataReadCounter();
    int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();

    // Bump offset so caller does not see the retrograde motion in getFramesRead().
    int64_t offset = writeCounter - readCounter;
    mFramesOffsetFromService += offset;
    ALOGD("advanceClientToMatchServerPosition() readN = %lld, writeN = %lld, offset = %lld",
          (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);

    // Force writeCounter to match readCounter.
    // This is because we cannot change the read counter in the hardware.
    mAudioEndpoint.setDataWriteCounter(readCounter);
}

void AudioStreamInternalPlay::onFlushFromServer() {
    advanceClientToMatchServerPosition();
}

// Write the data, block if needed and timeoutNanoseconds > 0
aaudio_result_t AudioStreamInternalPlay::write(const void *buffer, int32_t numFrames,
                                               int64_t timeoutNanoseconds)
{
    return processData((void *)buffer, numFrames, timeoutNanoseconds);
}

// Write as much data as we can without blocking.
aaudio_result_t AudioStreamInternalPlay::processDataNow(void *buffer, int32_t numFrames,
                                                        int64_t currentNanoTime,
                                                        int64_t *wakeTimePtr) {
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    const char *traceName = "aaWrNow";
    ATRACE_BEGIN(traceName);

    if (mClockModel.isStarting()) {
        // Still haven't got any timestamps from server.
        // Keep waiting until we get some valid timestamps then start writing to the
        // current buffer position.
        ALOGD("processDataNow() wait for valid timestamps");
        // Sleep very briefly and hope we get a timestamp soon.
        *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
        ATRACE_END();
        return 0;
    }
    // If we have gotten this far then we have at least one timestamp from server.

    // If a DMA channel or DSP is reading the other end then we have to update the readCounter.
    if (mAudioEndpoint.isFreeRunning()) {
        // Update data queue based on the timing model.
        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        // ALOGD("AudioStreamInternal::processDataNow() - estimatedReadCounter = %d", (int)estimatedReadCounter);
        mAudioEndpoint.setDataReadCounter(estimatedReadCounter);
    }

    if (mNeedCatchUp.isRequested()) {
        // Catch an MMAP pointer that is already advancing.
        // This will avoid initial underruns caused by a slow cold start.
        advanceClientToMatchServerPosition();
        mNeedCatchUp.acknowledge();
    }

    // If the read index passed the write index then consider it an underrun.
    // For shared streams, the xRunCount is passed up from the service.
    if (mAudioEndpoint.isFreeRunning() && mAudioEndpoint.getFullFramesAvailable() < 0) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaUnderRuns", mXRunCount);
        }
    }

    // Write some data to the buffer.
    //ALOGD("AudioStreamInternal::processDataNow() - writeNowWithConversion(%d)", numFrames);
    int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternal::processDataNow() - tried to write %d frames, wrote %d",
    //    numFrames, framesWritten);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaWrote", framesWritten);
    }

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesWritten >= 0) {
        // By default wake up a few milliseconds from now.  // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternal::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                if (framesWritten != 0) {
                    // Don't wait to write more data. Just prime the buffer.
                    wakeTime = currentNanoTime;
                }
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            {
                // When do we expect the next read burst to occur?

                // Calculate frame position based off of the writeCounter because
                // the readCounter might have just advanced in the background,
                // causing us to sleep until a later burst.
                int64_t nextPosition = mAudioEndpoint.getDataWriteCounter() + mFramesPerBurst
                        - mAudioEndpoint.getBufferSizeInFrames();
                wakeTime = mClockModel.convertPositionToTime(nextPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;
    }

    ATRACE_END();
    return framesWritten;
}

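// Copy up to numFrames from the caller's buffer into the shared endpoint buffer,
// converting between PCM float and PCM I16 as needed and applying the volume ramp.
// Returns the number of frames actually written, which may be less than numFrames.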
aaudio_result_t AudioStreamInternalPlay::writeNowWithConversion(const void *buffer,
                                                                int32_t numFrames) {
    // ALOGD("AudioStreamInternal::writeNowWithConversion(%p, %d)",
    //       buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *source = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    mAudioEndpoint.getEmptyFramesAvailable(&wrappingBuffer);

    // Write data in one or two parts.
    int partIndex = 0;
    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
        int32_t framesToWrite = framesLeft;
        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable > 0) {
            if (framesToWrite > framesAvailable) {
                framesToWrite = framesAvailable;
            }
            int32_t numBytes = getBytesPerFrame() * framesToWrite;
            int32_t numSamples = framesToWrite * getSamplesPerFrame();
            // Data conversion.
            float levelFrom;
            float levelTo;
            bool ramping = mVolumeRamp.nextSegment(framesToWrite, &levelFrom, &levelTo);
            // The formats are validated when the stream is opened so we do not have to
            // check for illegal combinations here.
            // TODO factor this out into a utility function
            if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                    AAudio_linearRamp(
                            (const float *) source,
                            (float *) wrappingBuffer.data[partIndex],
                            framesToWrite,
                            getSamplesPerFrame(),
                            levelFrom,
                            levelTo);
                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                    if (ramping) {
                        AAudioConvert_floatToPcm16(
                                (const float *) source,
                                (int16_t *) wrappingBuffer.data[partIndex],
                                framesToWrite,
                                getSamplesPerFrame(),
                                levelFrom,
                                levelTo);
                    } else {
                        AAudioConvert_floatToPcm16(
                                (const float *) source,
                                (int16_t *) wrappingBuffer.data[partIndex],
                                numSamples,
                                levelTo);
                    }
                }
            } else if (getFormat() == AAUDIO_FORMAT_PCM_I16) {
                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                    if (ramping) {
                        AAudioConvert_pcm16ToFloat(
                                (const int16_t *) source,
                                (float *) wrappingBuffer.data[partIndex],
                                framesToWrite,
                                getSamplesPerFrame(),
                                levelFrom,
                                levelTo);
                    } else {
                        AAudioConvert_pcm16ToFloat(
                                (const int16_t *) source,
                                (float *) wrappingBuffer.data[partIndex],
                                numSamples,
                                levelTo);
                    }
                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                    AAudio_linearRamp(
                            (const int16_t *) source,
                            (int16_t *) wrappingBuffer.data[partIndex],
                            framesToWrite,
                            getSamplesPerFrame(),
                            levelFrom,
                            levelTo);
                }
            }
            source += numBytes;
            framesLeft -= framesToWrite;
        } else {
            break;
        }
        partIndex++;
    }
    int32_t framesWritten = numFrames - framesLeft;
    mAudioEndpoint.advanceWriteIndex(framesWritten);

    // ALOGD("AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
    return framesWritten;
}

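// While the stream is active the hardware read position is estimated from the clock
// model; otherwise the last known read counter is used. The service offset is added
// and the result is clamped so it never moves backward between calls.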
int64_t AudioStreamInternalPlay::getFramesRead()
{
    int64_t framesReadHardware;
    if (isActive()) {
        framesReadHardware = mClockModel.convertTimeToPosition(AudioClock::getNanoseconds());
    } else {
        framesReadHardware = mAudioEndpoint.getDataReadCounter();
    }
    int64_t framesRead = framesReadHardware + mFramesOffsetFromService;
    // Prevent retrograde motion.
    if (framesRead < mLastFramesRead) {
        framesRead = mLastFramesRead;
    } else {
        mLastFramesRead = framesRead;
    }
    //ALOGD("AudioStreamInternalPlay::getFramesRead() returns %lld", (long long)framesRead);
    return framesRead;
}

int64_t AudioStreamInternalPlay::getFramesWritten()
{
    int64_t framesWritten = mAudioEndpoint.getDataWriteCounter()
            + mFramesOffsetFromService;
    //ALOGD("AudioStreamInternalPlay::getFramesWritten() returns %lld", (long long)framesWritten);
    return framesWritten;
}

// Render audio in the application callback and then write the data to the stream.
void *AudioStreamInternalPlay::callbackLoop() {
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    if (!isDataCallbackSet()) return NULL;
    int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

    // result might be a frame count
    while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
        // Call application using the AAudio callback interface.
        callbackResult = maybeCallDataCallback(mCallbackBuffer, mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
            // Write audio data to stream. This is a BLOCKING WRITE!
            result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
            if (result != mCallbackFrames) {
                ALOGE("AudioStreamInternalPlay(): callbackLoop: write() returned %d", result);
                if (result >= 0) {
                    // Only wrote some of the frames requested. Must have timed out.
                    result = AAUDIO_ERROR_TIMEOUT;
                }
                maybeCallErrorCallback(result);
                break;
            }
        } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("AudioStreamInternalPlay(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
            break;
        }
    }

    ALOGD("AudioStreamInternalPlay(): callbackLoop() exiting, result = %d, isActive() = %d",
          result, (int) isActive());
    return NULL;
}

//------------------------------------------------------------------------------
// Implementation of PlayerBase
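// doSetVolume() feeds the product of the stream volume and the duck/mute gain into
// mVolumeRamp; writeNowWithConversion() then applies that target as a linear ramp.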
status_t AudioStreamInternalPlay::doSetVolume() {
    mVolumeRamp.setTarget(mStreamVolume * getDuckAndMuteVolume());
    return android::NO_ERROR;
}