/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG (mInService ? "AudioStreamInternalCapture_Service" \
                            : "AudioStreamInternalCapture_Client")
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <algorithm>
#include <audio_utils/primitives.h>
#include <aaudio/AAudio.h>

#include "client/AudioStreamInternalCapture.h"
#include "utility/AudioClock.h"

#define ATRACE_TAG ATRACE_TAG_AUDIO
#include <utils/Trace.h>

using android::WrappingBuffer;

using namespace aaudio;

AudioStreamInternalCapture::AudioStreamInternalCapture(AAudioServiceInterface &serviceInterface,
                                                       bool inService)
    : AudioStreamInternal(serviceInterface, inService) {

}

AudioStreamInternalCapture::~AudioStreamInternalCapture() {}

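// Jump the client's read counter forward to match the server's write counter so capture
// resumes from the data most recently written by the service/hardware. The difference is
// folded into mFramesOffsetFromService so the position reported by getFramesRead() stays
// continuous instead of jumping.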
void AudioStreamInternalCapture::advanceClientToMatchServerPosition() {
    int64_t readCounter = mAudioEndpoint.getDataReadCounter();
    int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();

    // Bump offset so caller does not see the retrograde motion in getFramesRead().
    int64_t offset = readCounter - writeCounter;
    mFramesOffsetFromService += offset;
    ALOGD("advanceClientToMatchServerPosition() readN = %lld, writeN = %lld, offset = %lld",
          (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);

    // Force readCounter to match writeCounter.
    // This is because we cannot change the write counter in the hardware.
    mAudioEndpoint.setDataReadCounter(writeCounter);
}

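// Blocking and timeout handling are done by processData() in the AudioStreamInternal base
// class, which repeatedly calls processDataNow() below and sleeps until the wake time that
// processDataNow() suggests.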
// Read the data, blocking if needed when timeoutNanoseconds > 0.
aaudio_result_t AudioStreamInternalCapture::read(void *buffer, int32_t numFrames,
                                                 int64_t timeoutNanoseconds)
{
    return processData(buffer, numFrames, timeoutNanoseconds);
}

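// processDataNow() is the non-blocking worker: it returns the number of frames actually
// read (or a negative AAudio error code) and writes a suggested wake-up time into
// *wakeTimePtr so the caller knows when the next burst of data is expected.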
// Read as much data as we can without blocking.
aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t numFrames,
                                                  int64_t currentNanoTime, int64_t *wakeTimePtr) {
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    const char *traceName = "aaRdNow";
    ATRACE_BEGIN(traceName);

    if (mClockModel.isStarting()) {
        // Still haven't got any timestamps from server.
        // Keep waiting until we get some valid timestamps then start reading from the
        // current buffer position.
        ALOGD("processDataNow() wait for valid timestamps");
        // Sleep very briefly and hope we get a timestamp soon.
        *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
        ATRACE_END();
        return 0;
    }
    // If we have gotten this far then we have at least one timestamp from server.

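    // "Free running" means the MMAP hardware pointer advances on its own without the
    // service updating the shared write counter, so estimate the current write position
    // from the clock model instead.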
    if (mAudioEndpoint.isFreeRunning()) {
        //ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
        // Update data queue based on the timing model.
        int64_t estimatedRemoteCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        // TODO refactor, maybe use setRemoteCounter()
        mAudioEndpoint.setDataWriteCounter(estimatedRemoteCounter);
    }

    // This code assumes that we have already received valid timestamps.
    if (mNeedCatchUp.isRequested()) {
        // Catch an MMAP pointer that is already advancing.
        // This will avoid initial underruns caused by a slow cold start.
        advanceClientToMatchServerPosition();
        mNeedCatchUp.acknowledge();
    }

    // If the write index passed the read index then consider it an overrun.
    // For shared streams, the xRunCount is passed up from the service.
    if (mAudioEndpoint.isFreeRunning() && mAudioEndpoint.getEmptyFramesAvailable() < 0) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaOverRuns", mXRunCount);
        }
    }

    // Read some data from the buffer.
    //ALOGD("AudioStreamInternalCapture::processDataNow() - readNowWithConversion(%d)", numFrames);
    int32_t framesProcessed = readNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternalCapture::processDataNow() - tried to read %d frames, read %d",
    //      numFrames, framesProcessed);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaRead", framesProcessed);
    }

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesProcessed >= 0) {
        // By default wake up a few milliseconds from now.  // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternalCapture::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            {
                // When do we expect the next write burst to occur?

                // Calculate frame position based off of the readCounter because
                // the writeCounter might have just advanced in the background,
                // causing us to sleep until a later burst.
                int64_t nextPosition = mAudioEndpoint.getDataReadCounter() + mFramesPerBurst;
                wakeTime = mClockModel.convertPositionToTime(nextPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;
    }

    ATRACE_END();
    return framesProcessed;
}

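// Copy up to numFrames from the shared ring buffer into the caller's buffer, converting
// between the device format and the application format (PCM 16-bit <-> float) when they
// differ. Returns the number of frames actually copied.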
aaudio_result_t AudioStreamInternalCapture::readNowWithConversion(void *buffer,
                                                                  int32_t numFrames) {
    // ALOGD("readNowWithConversion(%p, %d)",
    //       buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *destination = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    mAudioEndpoint.getFullFramesAvailable(&wrappingBuffer);

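    // The ring buffer can wrap around, so the readable data is described as up to
    // WrappingBuffer::SIZE contiguous regions, each with its own pointer and frame count.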
    // Read data in one or two parts.
    for (int partIndex = 0; framesLeft > 0 && partIndex < WrappingBuffer::SIZE; partIndex++) {
        int32_t framesToProcess = framesLeft;
        const int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable <= 0) break;

        if (framesToProcess > framesAvailable) {
            framesToProcess = framesAvailable;
        }

        const int32_t numBytes = getBytesPerFrame() * framesToProcess;
        const int32_t numSamples = framesToProcess * getSamplesPerFrame();

        const audio_format_t sourceFormat = getDeviceFormat();
        const audio_format_t destinationFormat = getFormat();
        // TODO factor this out into a utility function
        if (sourceFormat == destinationFormat) {
            memcpy(destination, wrappingBuffer.data[partIndex], numBytes);
        } else if (sourceFormat == AUDIO_FORMAT_PCM_16_BIT
                   && destinationFormat == AUDIO_FORMAT_PCM_FLOAT) {
            memcpy_to_float_from_i16(
                    (float *) destination,
                    (const int16_t *) wrappingBuffer.data[partIndex],
                    numSamples);
        } else if (sourceFormat == AUDIO_FORMAT_PCM_FLOAT
                   && destinationFormat == AUDIO_FORMAT_PCM_16_BIT) {
            memcpy_to_i16_from_float(
                    (int16_t *) destination,
                    (const float *) wrappingBuffer.data[partIndex],
                    numSamples);
        } else {
            ALOGE("%s() - Format conversion not supported! audio_format_t source = %u, dest = %u",
                  __func__, sourceFormat, destinationFormat);
            return AAUDIO_ERROR_INVALID_FORMAT;
        }
        destination += numBytes;
        framesLeft -= framesToProcess;
    }

    int32_t framesProcessed = numFrames - framesLeft;
    mAudioEndpoint.advanceReadIndex(framesProcessed);

    //ALOGD("readNowWithConversion() returns %d", framesProcessed);
    return framesProcessed;
}

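// For a capture stream, "frames written" is the number of frames the device/service has
// written into the shared buffer. While the stream is active this is estimated from the
// clock model; otherwise the endpoint's write counter is used directly.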
int64_t AudioStreamInternalCapture::getFramesWritten() {
    int64_t framesWrittenHardware;
    if (isActive()) {
        framesWrittenHardware = mClockModel.convertTimeToPosition(AudioClock::getNanoseconds());
    } else {
        framesWrittenHardware = mAudioEndpoint.getDataWriteCounter();
    }
    // Prevent retrograde motion.
    mLastFramesWritten = std::max(mLastFramesWritten,
                                  framesWrittenHardware + mFramesOffsetFromService);
    //ALOGD("getFramesWritten() returns %lld",
    //      (long long)mLastFramesWritten);
    return mLastFramesWritten;
}

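// For a capture stream, "frames read" is the application-side consumer position, adjusted
// by mFramesOffsetFromService so it lines up with the service's frame numbering.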
int64_t AudioStreamInternalCapture::getFramesRead() {
    int64_t frames = mAudioEndpoint.getDataReadCounter() + mFramesOffsetFromService;
    //ALOGD("getFramesRead() returns %lld", (long long)frames);
    return frames;
}

// Read data from the stream and pass it to the callback for processing.
void *AudioStreamInternalCapture::callbackLoop() {
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    if (!isDataCallbackSet()) return NULL;

    // result might be a frame count
    while (mCallbackEnabled.load() && isActive() && (result >= 0)) {

        // Read audio data from stream.
        int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

        // This is a BLOCKING READ!
        result = read(mCallbackBuffer, mCallbackFrames, timeoutNanos);
        if (result != mCallbackFrames) {
            ALOGE("callbackLoop: read() returned %d", result);
            if (result >= 0) {
                // Only read some of the frames requested. Must have timed out.
                result = AAUDIO_ERROR_TIMEOUT;
            }
            maybeCallErrorCallback(result);
            break;
        }

        // Call application using the AAudio callback interface.
        callbackResult = maybeCallDataCallback(mCallbackBuffer, mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("callback returned AAUDIO_CALLBACK_RESULT_STOP");
            break;
        }
    }

    ALOGD("callbackLoop() exiting, result = %d, isActive() = %d",
          result, (int) isActive());
    return NULL;
}