blob: 5d311fcbb15aa64eee4f4e657ad86e77a96def14 [file] [log] [blame]
Phil Burk87c9f642017-05-17 07:22:39 -07001/*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Phil Burk87c9f642017-05-17 07:22:39 -070017//#define LOG_NDEBUG 0
18#include <utils/Log.h>
19
Phil Burkec89b2e2017-06-20 15:05:06 -070020#include <algorithm>
Phil Burk0127c1b2018-03-29 13:48:06 -070021#include <audio_utils/primitives.h>
Phil Burk87c9f642017-05-17 07:22:39 -070022#include <aaudio/AAudio.h>
23
24#include "client/AudioStreamInternalCapture.h"
25#include "utility/AudioClock.h"
26
Phil Burkfd34a932017-07-19 07:03:52 -070027#define ATRACE_TAG ATRACE_TAG_AUDIO
28#include <utils/Trace.h>
29
// We do this after the #includes because if a header used ALOG,
// it would fail on the reference to mInService.
32#undef LOG_TAG
33// This file is used in both client and server processes.
34// This is needed to make sense of the logs more easily.
35#define LOG_TAG (mInService ? "AudioStreamInternalCapture_Service" \
36 : "AudioStreamInternalCapture_Client")
37
Phil Burk87c9f642017-05-17 07:22:39 -070038using android::WrappingBuffer;
39
40using namespace aaudio;
41
42AudioStreamInternalCapture::AudioStreamInternalCapture(AAudioServiceInterface &serviceInterface,
43 bool inService)
44 : AudioStreamInternal(serviceInterface, inService) {
45
46}
47
48AudioStreamInternalCapture::~AudioStreamInternalCapture() {}
49
Phil Burkec8ca522020-05-19 10:05:58 -070050void AudioStreamInternalCapture::advanceClientToMatchServerPosition(int32_t serverMargin) {
Phil Burk5edc4ea2020-04-17 08:15:42 -070051 int64_t readCounter = mAudioEndpoint->getDataReadCounter();
Phil Burkec8ca522020-05-19 10:05:58 -070052 int64_t writeCounter = mAudioEndpoint->getDataWriteCounter() + serverMargin;
Phil Burkbcc36742017-08-31 17:24:51 -070053
54 // Bump offset so caller does not see the retrograde motion in getFramesRead().
55 int64_t offset = readCounter - writeCounter;
56 mFramesOffsetFromService += offset;
57 ALOGD("advanceClientToMatchServerPosition() readN = %lld, writeN = %lld, offset = %lld",
58 (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);
59
60 // Force readCounter to match writeCounter.
61 // This is because we cannot change the write counter in the hardware.
Phil Burk5edc4ea2020-04-17 08:15:42 -070062 mAudioEndpoint->setDataReadCounter(writeCounter);
Phil Burkbcc36742017-08-31 17:24:51 -070063}
64
// Read the data, blocking if needed when timeoutNanoseconds > 0.
// Returns the number of frames read, or a negative AAudio error code.
aaudio_result_t AudioStreamInternalCapture::read(void *buffer, int32_t numFrames,
                                               int64_t timeoutNanoseconds)
{
    // The base class drives the timing loop and calls back into
    // processDataNow() to move the actual frames.
    return processData(buffer, numFrames, timeoutNanoseconds);
}
71
// Read as much data as we can without blocking.
// Returns the number of frames read (>= 0) or a negative AAudio error code.
// On return, *wakeTimePtr (if non-null) suggests when the caller should retry.
aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t numFrames,
                                                  int64_t currentNanoTime, int64_t *wakeTimePtr) {
    // Service commands (state changes, etc.) are handled first; any error aborts.
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    const char *traceName = "aaRdNow";
    ATRACE_BEGIN(traceName);

    if (mClockModel.isStarting()) {
        // Still haven't got any timestamps from server.
        // Keep waiting until we get some valid timestamps then start writing to the
        // current buffer position.
        ALOGD("processDataNow() wait for valid timestamps");
        // Sleep very briefly and hope we get a timestamp soon.
        *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
        ATRACE_END();
        return 0;
    }
    // If we have gotten this far then we have at least one timestamp from server.

    if (mAudioEndpoint->isFreeRunning()) {
        //ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
        // Update data queue based on the timing model.
        // Jitter in the DSP can cause late writes to the FIFO.
        // This might be caused by resampling.
        // We want to read the FIFO after the latest possible time
        // that the DSP could have written the data.
        int64_t estimatedRemoteCounter = mClockModel.convertLatestTimeToPosition(currentNanoTime);
        // TODO refactor, maybe use setRemoteCounter()
        mAudioEndpoint->setDataWriteCounter(estimatedRemoteCounter);
    }

    // This code assumes that we have already received valid timestamps.
    if (mNeedCatchUp.isRequested()) {
        // Catch an MMAP pointer that is already advancing.
        // This will avoid initial underruns caused by a slow cold start.
        advanceClientToMatchServerPosition();
        mNeedCatchUp.acknowledge();
    }

    // If the capture buffer is full beyond capacity then consider it an overrun.
    // For shared streams, the xRunCount is passed up from the service.
    if (mAudioEndpoint->isFreeRunning()
            && mAudioEndpoint->getFullFramesAvailable() > mAudioEndpoint->getBufferCapacityInFrames()) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaOverRuns", mXRunCount);
        }
    }

    // Read some data from the buffer.
    //ALOGD("AudioStreamInternalCapture::processDataNow() - readNowWithConversion(%d)", numFrames);
    int32_t framesProcessed = readNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternalCapture::processDataNow() - tried to read %d frames, read %d",
    //    numFrames, framesProcessed);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaRead", framesProcessed);
    }

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesProcessed >= 0) {
        // By default wake up a few milliseconds from now. // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternalCapture::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            {
                // When do we expect the next write burst to occur?

                // Calculate frame position based off of the readCounter because
                // the writeCounter might have just advanced in the background,
                // causing us to sleep until a later burst.
                int64_t nextPosition = mAudioEndpoint->getDataReadCounter() + getFramesPerBurst();
                wakeTime = mClockModel.convertPositionToLatestTime(nextPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;

    }

    ATRACE_END();
    return framesProcessed;
}
166
// Copy up to numFrames from the endpoint FIFO into 'buffer', converting
// sample format from the device format to the app format when they differ.
// Returns the number of frames actually read, or AAUDIO_ERROR_INVALID_FORMAT
// if the required conversion is not supported.
aaudio_result_t AudioStreamInternalCapture::readNowWithConversion(void *buffer,
                                                                int32_t numFrames) {
    // ALOGD("readNowWithConversion(%p, %d)",
    //              buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *destination = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    // The circular FIFO may expose the readable data as up to two
    // contiguous regions (it wraps at the end of the buffer).
    mAudioEndpoint->getFullFramesAvailable(&wrappingBuffer);

    // Read data in one or two parts.
    for (int partIndex = 0; framesLeft > 0 && partIndex < WrappingBuffer::SIZE; partIndex++) {
        int32_t framesToProcess = framesLeft;
        const int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable <= 0) break;

        // Clamp to what this region actually holds.
        if (framesToProcess > framesAvailable) {
            framesToProcess = framesAvailable;
        }

        const int32_t numBytes = getBytesPerFrame() * framesToProcess;
        const int32_t numSamples = framesToProcess * getSamplesPerFrame();

        const audio_format_t sourceFormat = getDeviceFormat();
        const audio_format_t destinationFormat = getFormat();
        // TODO factor this out into a utility function
        if (sourceFormat == destinationFormat) {
            // Same format on both sides: plain byte copy.
            memcpy(destination, wrappingBuffer.data[partIndex], numBytes);
        } else if (sourceFormat == AUDIO_FORMAT_PCM_16_BIT
                   && destinationFormat == AUDIO_FORMAT_PCM_FLOAT) {
            memcpy_to_float_from_i16(
                    (float *) destination,
                    (const int16_t *) wrappingBuffer.data[partIndex],
                    numSamples);
        } else if (sourceFormat == AUDIO_FORMAT_PCM_FLOAT
                   && destinationFormat == AUDIO_FORMAT_PCM_16_BIT) {
            memcpy_to_i16_from_float(
                    (int16_t *) destination,
                    (const float *) wrappingBuffer.data[partIndex],
                    numSamples);
        } else {
            // Only i16 <-> float conversions are supported here.
            ALOGE("%s() - Format conversion not supported! audio_format_t source = %u, dest = %u",
                  __func__, sourceFormat, destinationFormat);
            return AAUDIO_ERROR_INVALID_FORMAT;
        }
        destination += numBytes;
        framesLeft -= framesToProcess;
    }

    int32_t framesProcessed = numFrames - framesLeft;
    // Consume the frames we copied so the FIFO can be refilled.
    mAudioEndpoint->advanceReadIndex(framesProcessed);

    //ALOGD("readNowWithConversion() returns %d", framesProcessed);
    return framesProcessed;
}
222
Phil Burkec89b2e2017-06-20 15:05:06 -0700223int64_t AudioStreamInternalCapture::getFramesWritten() {
Phil Burk5edc4ea2020-04-17 08:15:42 -0700224 if (mAudioEndpoint) {
225 const int64_t framesWrittenHardware = isClockModelInControl()
226 ? mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
227 : mAudioEndpoint->getDataWriteCounter();
228 // Add service offset and prevent retrograde motion.
229 mLastFramesWritten = std::max(mLastFramesWritten,
230 framesWrittenHardware + mFramesOffsetFromService);
231 }
Phil Burkec89b2e2017-06-20 15:05:06 -0700232 return mLastFramesWritten;
Phil Burk87c9f642017-05-17 07:22:39 -0700233}
234
Phil Burkec89b2e2017-06-20 15:05:06 -0700235int64_t AudioStreamInternalCapture::getFramesRead() {
Phil Burk5edc4ea2020-04-17 08:15:42 -0700236 if (mAudioEndpoint) {
237 mLastFramesRead = mAudioEndpoint->getDataReadCounter() + mFramesOffsetFromService;
238 }
239 return mLastFramesRead;
Phil Burk87c9f642017-05-17 07:22:39 -0700240}
241
// Read data from the stream and pass it to the callback for processing.
// Runs on the callback thread until the stream stops, an error occurs,
// or the app's data callback returns AAUDIO_CALLBACK_RESULT_STOP.
// Always returns NULL (pthread-style entry point).
void *AudioStreamInternalCapture::callbackLoop() {
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    if (!isDataCallbackSet()) return NULL;

    // result might be a frame count
    while (mCallbackEnabled.load() && isActive() && (result >= 0)) {

        // Read audio data from stream.
        int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

        // This is a BLOCKING READ!
        result = read(mCallbackBuffer.get(), mCallbackFrames, timeoutNanos);
        if ((result != mCallbackFrames)) {
            ALOGE("callbackLoop: read() returned %d", result);
            if (result >= 0) {
                // Only read some of the frames requested. Must have timed out.
                result = AAUDIO_ERROR_TIMEOUT;
            }
            // Report the failure to the app's error callback, then exit the loop.
            maybeCallErrorCallback(result);
            break;
        }

        // Call application using the AAudio callback interface.
        callbackResult = maybeCallDataCallback(mCallbackBuffer.get(), mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
            // App requested a stop; stop the stream from within the callback thread.
            result = systemStopFromCallback();
            break;
        }
    }

    ALOGD("callbackLoop() exiting, result = %d, isActive() = %d",
          result, (int) isActive());
    return NULL;
}