/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// The log tag is chosen at run time because this object can run either
// inside the AAudio service or in a client process.
#define LOG_TAG (mInService ? "AAudioService" : "AAudio")
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <algorithm>
#include <aaudio/AAudio.h>

#include "client/AudioStreamInternalCapture.h"
#include "utility/AudioClock.h"

using android::WrappingBuffer;

using namespace aaudio;

AudioStreamInternalCapture::AudioStreamInternalCapture(AAudioServiceInterface &serviceInterface,
                                                       bool inService)
        : AudioStreamInternal(serviceInterface, inService) {

}

AudioStreamInternalCapture::~AudioStreamInternalCapture() {}


// Read the data, blocking if needed when timeoutNanoseconds > 0.
aaudio_result_t AudioStreamInternalCapture::read(void *buffer, int32_t numFrames,
                                                 int64_t timeoutNanoseconds)
{
    return processData(buffer, numFrames, timeoutNanoseconds);
}

// Read as much data as we can without blocking.
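// Returns the number of frames actually read (possibly zero) or a negative
// AAudio error code. If wakeTimePtr is not null, it receives a suggested time
// for the caller to poll again.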
aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t numFrames,
                                                            int64_t currentNanoTime, int64_t *wakeTimePtr) {
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    if (mAudioEndpoint.isFreeRunning()) {
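        // A free-running endpoint does not push its write position to us,
        // so estimate how far it has advanced from the clock model.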
        //ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
        // Update data queue based on the timing model.
        int64_t estimatedRemoteCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        // TODO refactor, maybe use setRemoteCounter()
        mAudioEndpoint.setDataWriteCounter(estimatedRemoteCounter);
    }

    // If the write index passed the read index then consider it an overrun.
    if (mAudioEndpoint.getEmptyFramesAvailable() < 0) {
        mXRunCount++;
    }

    // Read some data from the buffer.
    //ALOGD("AudioStreamInternalCapture::processDataNow() - readNowWithConversion(%d)", numFrames);
    int32_t framesProcessed = readNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternalCapture::processDataNow() - tried to read %d frames, read %d",
    //      numFrames, framesProcessed);

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesProcessed >= 0) {
        // By default, wake up one millisecond from now.  // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternalCapture::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                break;
            case AAUDIO_STREAM_STATE_STARTED:   // When do we expect the next read burst to occur?
                {
                    uint32_t burstSize = mFramesPerBurst;
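                    // Clamp to a minimum burst so that very small bursts do
                    // not cause excessively frequent wake-ups.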
                    if (burstSize < 32) {
                        burstSize = 32; // TODO review
                    }

                    uint64_t nextReadPosition = mAudioEndpoint.getDataWriteCounter() + burstSize;
                    wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
                }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;

    }
// ALOGD("AudioStreamInternalCapture::readNow finished: now = %llu, read# = %llu, wrote# = %llu",
//      (unsigned long long)currentNanoTime,
//      (unsigned long long)mAudioEndpoint.getDataReadCounter(),
//      (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
    return framesProcessed;
}

aaudio_result_t AudioStreamInternalCapture::readNowWithConversion(void *buffer,
                                                                  int32_t numFrames) {
    // ALOGD("AudioStreamInternalCapture::readNowWithConversion(%p, %d)",
    //       buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *destination = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    mAudioEndpoint.getFullFramesAvailable(&wrappingBuffer);

    // Read data in one or two parts.
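    // The FIFO is circular, so the readable region may wrap around the end of
    // the buffer and be returned as up to two contiguous parts.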
    for (int partIndex = 0; framesLeft > 0 && partIndex < WrappingBuffer::SIZE; partIndex++) {
        int32_t framesToProcess = framesLeft;
        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable <= 0) break;

        if (framesToProcess > framesAvailable) {
            framesToProcess = framesAvailable;
        }

        int32_t numBytes = getBytesPerFrame() * framesToProcess;
        int32_t numSamples = framesToProcess * getSamplesPerFrame();

        // TODO factor this out into a utility function
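        // Copy this part, converting the sample format if the device format
        // differs from the format requested by the application.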
        if (mDeviceFormat == getFormat()) {
            memcpy(destination, wrappingBuffer.data[partIndex], numBytes);
        } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16
                   && getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
            AAudioConvert_pcm16ToFloat(
                    (const int16_t *) wrappingBuffer.data[partIndex],
                    (float *) destination,
                    numSamples,
                    1.0f);
        } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT
                   && getFormat() == AAUDIO_FORMAT_PCM_I16) {
            AAudioConvert_floatToPcm16(
                    (const float *) wrappingBuffer.data[partIndex],
                    (int16_t *) destination,
                    numSamples,
                    1.0f);
        } else {
            ALOGE("Format conversion not supported!");
            return AAUDIO_ERROR_INVALID_FORMAT;
        }
        destination += numBytes;
        framesLeft -= framesToProcess;
    }

    int32_t framesProcessed = numFrames - framesLeft;
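    // Release the frames we consumed so the endpoint can reuse that space.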
    mAudioEndpoint.advanceReadIndex(framesProcessed);

    //ALOGD("AudioStreamInternalCapture::readNowWithConversion() returns %d", framesProcessed);
    return framesProcessed;
}

int64_t AudioStreamInternalCapture::getFramesWritten() {
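    // For a capture stream the endpoint is the writer, so "frames written" is
    // the number of frames it has put into the shared buffer. While the stream
    // is running, that position is estimated from the clock model.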
    int64_t framesWrittenHardware;
    if (isActive()) {
        framesWrittenHardware = mClockModel.convertTimeToPosition(AudioClock::getNanoseconds());
    } else {
        framesWrittenHardware = mAudioEndpoint.getDataWriteCounter();
    }
    // Prevent retrograde motion.
    mLastFramesWritten = std::max(mLastFramesWritten,
                                  framesWrittenHardware + mFramesOffsetFromService);
    //ALOGD("AudioStreamInternalCapture::getFramesWritten() returns %lld",
    //      (long long)mLastFramesWritten);
    return mLastFramesWritten;
}

int64_t AudioStreamInternalCapture::getFramesRead() {
    int64_t frames = mAudioEndpoint.getDataWriteCounter()
                     + mFramesOffsetFromService;
    //ALOGD("AudioStreamInternalCapture::getFramesRead() returns %lld", (long long)frames);
    return frames;
}

// Read data from the stream and pass it to the callback for processing.
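// Runs on the callback thread and keeps looping until the stream stops,
// the callback asks to stop, or an error occurs.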
void *AudioStreamInternalCapture::callbackLoop() {
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    AAudioStream_dataCallback appCallback = getDataCallbackProc();
    if (appCallback == nullptr) return NULL;

    // result holds a frame count after a successful read, so any non-negative value means keep going.
    while (mCallbackEnabled.load() && isActive() && (result >= 0)) {

        // Read audio data from stream.
        int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

        // This is a BLOCKING READ!
        result = read(mCallbackBuffer, mCallbackFrames, timeoutNanos);
        if (result != mCallbackFrames) {
            ALOGE("AudioStreamInternalCapture(): callbackLoop: read() returned %d", result);
            if (result >= 0) {
                // Only read some of the frames requested. Must have timed out.
                result = AAUDIO_ERROR_TIMEOUT;
            }
            AAudioStream_errorCallback errorCallback = getErrorCallbackProc();
            if (errorCallback != nullptr) {
                (*errorCallback)(
                        (AAudioStream *) this,
                        getErrorCallbackUserData(),
                        result);
            }
            break;
        }

        // Call application using the AAudio callback interface.
        callbackResult = (*appCallback)(
                (AAudioStream *) this,
                getDataCallbackUserData(),
                mCallbackBuffer,
                mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("AudioStreamInternalCapture(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
            break;
        }
    }

    ALOGD("AudioStreamInternalCapture(): callbackLoop() exiting, result = %d, isActive() = %d",
          result, (int) isActive());
    return NULL;
}