/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG (mInService ? "AudioStreamInternalCapture_Service" \
                            : "AudioStreamInternalCapture_Client")
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <algorithm>
#include <aaudio/AAudio.h>

#include "client/AudioStreamInternalCapture.h"
#include "utility/AudioClock.h"

#define ATRACE_TAG ATRACE_TAG_AUDIO
#include <utils/Trace.h>

using android::WrappingBuffer;

using namespace aaudio;

AudioStreamInternalCapture::AudioStreamInternalCapture(AAudioServiceInterface &serviceInterface,
                                                       bool inService)
        : AudioStreamInternal(serviceInterface, inService) {

}

AudioStreamInternalCapture::~AudioStreamInternalCapture() {}

void AudioStreamInternalCapture::advanceClientToMatchServerPosition() {
    int64_t readCounter = mAudioEndpoint.getDataReadCounter();
    int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();

    // Bump offset so caller does not see the retrograde motion in getFramesRead().
    int64_t offset = readCounter - writeCounter;
    mFramesOffsetFromService += offset;
    ALOGD("advanceClientToMatchServerPosition() readN = %lld, writeN = %lld, offset = %lld",
          (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);

    // Force readCounter to match writeCounter.
    // This is because we cannot change the write counter in the hardware.
    mAudioEndpoint.setDataReadCounter(writeCounter);
}

// Read the data, blocking if needed when timeoutNanoseconds > 0.
aaudio_result_t AudioStreamInternalCapture::read(void *buffer, int32_t numFrames,
                                                 int64_t timeoutNanoseconds)
{
    return processData(buffer, numFrames, timeoutNanoseconds);
}

// Read as much data as we can without blocking.
aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t numFrames,
                                                           int64_t currentNanoTime,
                                                           int64_t *wakeTimePtr) {
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    const char *traceName = "aaRdNow";
    ATRACE_BEGIN(traceName);

    if (mClockModel.isStarting()) {
        // Still haven't got any timestamps from the server.
        // Keep waiting until we get some valid timestamps, then start reading from the
        // current buffer position.
        ALOGD("processDataNow() wait for valid timestamps");
        // Sleep very briefly and hope we get a timestamp soon.
        *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
        ATRACE_END();
        return 0;
    }
    // If we have gotten this far then we have at least one timestamp from the server.

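    // A free-running endpoint does not keep the shared write counter up to date,
    // so the server-side position is estimated below from the clock model.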
    if (mAudioEndpoint.isFreeRunning()) {
        //ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
        // Update data queue based on the timing model.
        int64_t estimatedRemoteCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        // TODO refactor, maybe use setRemoteCounter()
        mAudioEndpoint.setDataWriteCounter(estimatedRemoteCounter);
    }

    // This code assumes that we have already received valid timestamps.
    if (mNeedCatchUp.isRequested()) {
        // Catch an MMAP pointer that is already advancing.
        // This will avoid initial underruns caused by a slow cold start.
        advanceClientToMatchServerPosition();
        mNeedCatchUp.acknowledge();
    }

    // If the write index has advanced past the read index by more than the buffer can hold,
    // the client has fallen behind, so consider it an overrun.
    if (mAudioEndpoint.getEmptyFramesAvailable() < 0) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaOverRuns", mXRunCount);
        }
    }

    // Read some data from the buffer.
    //ALOGD("AudioStreamInternalCapture::processDataNow() - readNowWithConversion(%d)", numFrames);
    int32_t framesProcessed = readNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternalCapture::processDataNow() - tried to read %d frames, read %d",
    //      numFrames, framesProcessed);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaRead", framesProcessed);
    }

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesProcessed >= 0) {
        // By default wake up about a millisecond from now. // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternalCapture::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            {
                // When do we expect the next write burst to occur?

                // Calculate frame position based off of the readCounter because
                // the writeCounter might have just advanced in the background,
                // causing us to sleep until a later burst.
                int64_t nextPosition = mAudioEndpoint.getDataReadCounter() + mFramesPerBurst;
                wakeTime = mClockModel.convertPositionToTime(nextPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;

    }

    ATRACE_END();
    return framesProcessed;
}

aaudio_result_t AudioStreamInternalCapture::readNowWithConversion(void *buffer,
                                                                  int32_t numFrames) {
    // ALOGD("readNowWithConversion(%p, %d)",
    //       buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *destination = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    mAudioEndpoint.getFullFramesAvailable(&wrappingBuffer);

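    // The shared FIFO is circular, so the readable data may wrap around the end of the
    // buffer; WrappingBuffer exposes it as up to two contiguous regions.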
    // Read data in one or two parts.
    for (int partIndex = 0; framesLeft > 0 && partIndex < WrappingBuffer::SIZE; partIndex++) {
        int32_t framesToProcess = framesLeft;
        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable <= 0) break;

        if (framesToProcess > framesAvailable) {
            framesToProcess = framesAvailable;
        }

        int32_t numBytes = getBytesPerFrame() * framesToProcess;
        int32_t numSamples = framesToProcess * getSamplesPerFrame();

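        // Copy directly when the device format matches the application format;
        // otherwise convert between 16-bit PCM and float (the 1.0f argument appears
        // to be a unity scale factor, i.e. no gain change).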
        // TODO factor this out into a utility function
        if (mDeviceFormat == getFormat()) {
            memcpy(destination, wrappingBuffer.data[partIndex], numBytes);
        } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16
                   && getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
            AAudioConvert_pcm16ToFloat(
                    (const int16_t *) wrappingBuffer.data[partIndex],
                    (float *) destination,
                    numSamples,
                    1.0f);
        } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT
                   && getFormat() == AAUDIO_FORMAT_PCM_I16) {
            AAudioConvert_floatToPcm16(
                    (const float *) wrappingBuffer.data[partIndex],
                    (int16_t *) destination,
                    numSamples,
                    1.0f);
        } else {
            ALOGE("Format conversion not supported!");
            return AAUDIO_ERROR_INVALID_FORMAT;
        }
        destination += numBytes;
        framesLeft -= framesToProcess;
    }

    int32_t framesProcessed = numFrames - framesLeft;
    mAudioEndpoint.advanceReadIndex(framesProcessed);

    //ALOGD("readNowWithConversion() returns %d", framesProcessed);
    return framesProcessed;
}

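// For a capture stream, "frames written" means frames produced by the device/service side.
// While the stream is active that position is estimated from the clock model; otherwise the
// last known write counter is used. The result is clamped to prevent retrograde motion.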
int64_t AudioStreamInternalCapture::getFramesWritten() {
    int64_t framesWrittenHardware;
    if (isActive()) {
        framesWrittenHardware = mClockModel.convertTimeToPosition(AudioClock::getNanoseconds());
    } else {
        framesWrittenHardware = mAudioEndpoint.getDataWriteCounter();
    }
    // Prevent retrograde motion.
    mLastFramesWritten = std::max(mLastFramesWritten,
                                  framesWrittenHardware + mFramesOffsetFromService);
    //ALOGD("getFramesWritten() returns %lld",
    //      (long long)mLastFramesWritten);
    return mLastFramesWritten;
}

int64_t AudioStreamInternalCapture::getFramesRead() {
    int64_t frames = mAudioEndpoint.getDataReadCounter() + mFramesOffsetFromService;
    //ALOGD("getFramesRead() returns %lld", (long long)frames);
    return frames;
}

// Read data from the stream and pass it to the callback for processing.
void *AudioStreamInternalCapture::callbackLoop() {
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    AAudioStream_dataCallback appCallback = getDataCallbackProc();
    if (appCallback == nullptr) return NULL;

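    // Loop until the stream stops, the callback requests a stop, or an error occurs.
    // Each pass does one blocking read of mCallbackFrames and then invokes the
    // application's data callback; a short read is treated as a timeout.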
    // result might be a frame count
    while (mCallbackEnabled.load() && isActive() && (result >= 0)) {

        // Read audio data from stream.
        int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

        // This is a BLOCKING READ!
        result = read(mCallbackBuffer, mCallbackFrames, timeoutNanos);
        if (result != mCallbackFrames) {
            ALOGE("callbackLoop: read() returned %d", result);
            if (result >= 0) {
                // Only read some of the frames requested. Must have timed out.
                result = AAUDIO_ERROR_TIMEOUT;
            }
            AAudioStream_errorCallback errorCallback = getErrorCallbackProc();
            if (errorCallback != nullptr) {
                (*errorCallback)(
                        (AAudioStream *) this,
                        getErrorCallbackUserData(),
                        result);
            }
            break;
        }

        // Call application using the AAudio callback interface.
        callbackResult = (*appCallback)(
                (AAudioStream *) this,
                getDataCallbackUserData(),
                mCallbackBuffer,
                mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("callback returned AAUDIO_CALLBACK_RESULT_STOP");
            break;
        }
    }

    ALOGD("callbackLoop() exiting, result = %d, isActive() = %d",
          result, (int) isActive());
    return NULL;
}