/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
Phil Burkec89b2e2017-06-20 15:05:06 -070017#define LOG_TAG (mInService ? "AAudioService" : "AAudio")
Phil Burk87c9f642017-05-17 07:22:39 -070018//#define LOG_NDEBUG 0
19#include <utils/Log.h>
20
Phil Burkec89b2e2017-06-20 15:05:06 -070021#include <algorithm>
Phil Burk87c9f642017-05-17 07:22:39 -070022#include <aaudio/AAudio.h>
23
24#include "client/AudioStreamInternalCapture.h"
25#include "utility/AudioClock.h"
26
Phil Burkfd34a932017-07-19 07:03:52 -070027#define ATRACE_TAG ATRACE_TAG_AUDIO
28#include <utils/Trace.h>
29
Phil Burk87c9f642017-05-17 07:22:39 -070030using android::WrappingBuffer;
31
32using namespace aaudio;
33
34AudioStreamInternalCapture::AudioStreamInternalCapture(AAudioServiceInterface &serviceInterface,
35 bool inService)
36 : AudioStreamInternal(serviceInterface, inService) {
37
38}
39
40AudioStreamInternalCapture::~AudioStreamInternalCapture() {}
41
// Read the data, blocking if needed when timeoutNanoseconds > 0.
// Thin wrapper: the transfer loop is the shared processData() helper
// (defined outside this file), which repeatedly calls processDataNow().
aaudio_result_t AudioStreamInternalCapture::read(void *buffer, int32_t numFrames,
                                                 int64_t timeoutNanoseconds)
{
    return processData(buffer, numFrames, timeoutNanoseconds);
}
48
// Read as much data as we can without blocking.
// Returns the number of frames actually read (>= 0) or a negative AAudio
// error code. If wakeTimePtr is non-null, *wakeTimePtr receives a suggested
// time for the caller to wake up and try again.
aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t numFrames,
                                                  int64_t currentNanoTime, int64_t *wakeTimePtr) {
    // Handle any pending service commands before moving data.
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    const char *traceName = "aaRdNow";
    ATRACE_BEGIN(traceName);

    if (mAudioEndpoint.isFreeRunning()) {
        //ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
        // Update data queue based on the timing model.
        // A free-running endpoint does not publish its position, so estimate
        // the remote write counter from the clock model.
        int64_t estimatedRemoteCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        // TODO refactor, maybe use setRemoteCounter()
        mAudioEndpoint.setDataWriteCounter(estimatedRemoteCounter);
    }

    // If the write index passed the read index then consider it an overrun.
    // (Negative empty-frame count means the writer lapped the reader.)
    if (mAudioEndpoint.getEmptyFramesAvailable() < 0) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaOverRuns", mXRunCount);
        }
    }

    // Read some data from the buffer.
    //ALOGD("AudioStreamInternalCapture::processDataNow() - readNowWithConversion(%d)", numFrames);
    int32_t framesProcessed = readNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternalCapture::processDataNow() - tried to read %d frames, read %d",
    //    numFrames, framesProcessed);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaRead", framesProcessed);
    }

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesProcessed >= 0) {
        // By default wake up a few milliseconds from now. // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternalCapture::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                // Not running yet: keep the short default poll interval.
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            {
                // When do we expect the next write burst to occur?

                // Calculate frame position based off of the readCounter because
                // the writeCounter might have just advanced in the background,
                // causing us to sleep until a later burst.
                int64_t nextReadPosition = mAudioEndpoint.getDataReadCounter() + mFramesPerBurst;
                wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;

    }

    ATRACE_END();
    return framesProcessed;
}
117
118aaudio_result_t AudioStreamInternalCapture::readNowWithConversion(void *buffer,
119 int32_t numFrames) {
120 // ALOGD("AudioStreamInternalCapture::readNowWithConversion(%p, %d)",
121 // buffer, numFrames);
122 WrappingBuffer wrappingBuffer;
123 uint8_t *destination = (uint8_t *) buffer;
124 int32_t framesLeft = numFrames;
125
126 mAudioEndpoint.getFullFramesAvailable(&wrappingBuffer);
127
128 // Read data in one or two parts.
129 for (int partIndex = 0; framesLeft > 0 && partIndex < WrappingBuffer::SIZE; partIndex++) {
130 int32_t framesToProcess = framesLeft;
131 int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
132 if (framesAvailable <= 0) break;
133
134 if (framesToProcess > framesAvailable) {
135 framesToProcess = framesAvailable;
136 }
137
138 int32_t numBytes = getBytesPerFrame() * framesToProcess;
139 int32_t numSamples = framesToProcess * getSamplesPerFrame();
140
141 // TODO factor this out into a utility function
142 if (mDeviceFormat == getFormat()) {
143 memcpy(destination, wrappingBuffer.data[partIndex], numBytes);
144 } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16
145 && getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
146 AAudioConvert_pcm16ToFloat(
147 (const int16_t *) wrappingBuffer.data[partIndex],
148 (float *) destination,
149 numSamples,
150 1.0f);
151 } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT
152 && getFormat() == AAUDIO_FORMAT_PCM_I16) {
153 AAudioConvert_floatToPcm16(
154 (const float *) wrappingBuffer.data[partIndex],
155 (int16_t *) destination,
156 numSamples,
157 1.0f);
158 } else {
159 ALOGE("Format conversion not supported!");
160 return AAUDIO_ERROR_INVALID_FORMAT;
161 }
162 destination += numBytes;
163 framesLeft -= framesToProcess;
164 }
165
166 int32_t framesProcessed = numFrames - framesLeft;
167 mAudioEndpoint.advanceReadIndex(framesProcessed);
Phil Burk87c9f642017-05-17 07:22:39 -0700168
169 //ALOGD("AudioStreamInternalCapture::readNowWithConversion() returns %d", framesProcessed);
170 return framesProcessed;
171}
172
Phil Burkec89b2e2017-06-20 15:05:06 -0700173int64_t AudioStreamInternalCapture::getFramesWritten() {
174 int64_t framesWrittenHardware;
175 if (isActive()) {
176 framesWrittenHardware = mClockModel.convertTimeToPosition(AudioClock::getNanoseconds());
Phil Burk87c9f642017-05-17 07:22:39 -0700177 } else {
Phil Burkec89b2e2017-06-20 15:05:06 -0700178 framesWrittenHardware = mAudioEndpoint.getDataWriteCounter();
Phil Burk87c9f642017-05-17 07:22:39 -0700179 }
Phil Burkec89b2e2017-06-20 15:05:06 -0700180 // Prevent retrograde motion.
181 mLastFramesWritten = std::max(mLastFramesWritten,
182 framesWrittenHardware + mFramesOffsetFromService);
183 //ALOGD("AudioStreamInternalCapture::getFramesWritten() returns %lld",
184 // (long long)mLastFramesWritten);
185 return mLastFramesWritten;
Phil Burk87c9f642017-05-17 07:22:39 -0700186}
187
Phil Burkec89b2e2017-06-20 15:05:06 -0700188int64_t AudioStreamInternalCapture::getFramesRead() {
Phil Burk87c9f642017-05-17 07:22:39 -0700189 int64_t frames = mAudioEndpoint.getDataWriteCounter()
190 + mFramesOffsetFromService;
191 //ALOGD("AudioStreamInternalCapture::getFramesRead() returns %lld", (long long)frames);
192 return frames;
193}
194
195// Read data from the stream and pass it to the callback for processing.
196void *AudioStreamInternalCapture::callbackLoop() {
197 aaudio_result_t result = AAUDIO_OK;
198 aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
199 AAudioStream_dataCallback appCallback = getDataCallbackProc();
200 if (appCallback == nullptr) return NULL;
201
202 // result might be a frame count
203 while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
204
205 // Read audio data from stream.
206 int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
207
208 // This is a BLOCKING READ!
209 result = read(mCallbackBuffer, mCallbackFrames, timeoutNanos);
210 if ((result != mCallbackFrames)) {
211 ALOGE("AudioStreamInternalCapture(): callbackLoop: read() returned %d", result);
212 if (result >= 0) {
213 // Only read some of the frames requested. Must have timed out.
214 result = AAUDIO_ERROR_TIMEOUT;
215 }
216 AAudioStream_errorCallback errorCallback = getErrorCallbackProc();
217 if (errorCallback != nullptr) {
218 (*errorCallback)(
219 (AAudioStream *) this,
220 getErrorCallbackUserData(),
221 result);
222 }
223 break;
224 }
225
226 // Call application using the AAudio callback interface.
227 callbackResult = (*appCallback)(
228 (AAudioStream *) this,
229 getDataCallbackUserData(),
230 mCallbackBuffer,
231 mCallbackFrames);
232
233 if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
234 ALOGD("AudioStreamInternalCapture(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
235 break;
236 }
237 }
238
239 ALOGD("AudioStreamInternalCapture(): callbackLoop() exiting, result = %d, isActive() = %d",
240 result, (int) isActive());
241 return NULL;
242}