/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AAudio"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <stdint.h>
#include <assert.h>

#include <binder/IServiceManager.h>

#include <aaudio/AAudio.h>
#include <utils/String16.h>

#include "AudioClock.h"
#include "AudioEndpointParcelable.h"
#include "binding/AAudioStreamRequest.h"
#include "binding/AAudioStreamConfiguration.h"
#include "binding/IAAudioService.h"
#include "binding/AAudioServiceMessage.h"
#include "fifo/FifoBuffer.h"

#include "core/AudioStreamBuilder.h"
#include "AudioStreamInternal.h"

#define LOG_TIMESTAMPS   0

using android::String16;
using android::IServiceManager;
using android::defaultServiceManager;
using android::interface_cast;
using android::Mutex;
using android::WrappingBuffer;

using namespace aaudio;

#define MIN_TIMEOUT_NANOS        (1000 * AAUDIO_NANOS_PER_MILLISECOND)

// Wait at least this many times longer than the operation should take.
#define MIN_TIMEOUT_OPERATIONS   4

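// Log verbosely only when this client is running in an app, not inside the AAudio service itself.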
#define ALOG_CONDITION   (mInService == false)

AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface &serviceInterface, bool inService)
        : AudioStream()
        , mClockModel()
        , mAudioEndpoint()
        , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
        , mFramesPerBurst(16)
        , mServiceInterface(serviceInterface)
        , mInService(inService)
{
}

AudioStreamInternal::~AudioStreamInternal() {
}

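// Open a stream on the AAudio service: build an AAudioStreamRequest from the builder,
// send it to the service, then resolve the returned shared memory (AudioEndpointParcelable)
// into the endpoint descriptor used for the data path. Also allocates the buffer used to
// pass audio between the app's data callback and the stream.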
aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {

    aaudio_result_t result = AAUDIO_OK;
    AAudioStreamRequest request;
    AAudioStreamConfiguration configuration;

    result = AudioStream::open(builder);
    if (result < 0) {
        return result;
    }

    // We have to do volume scaling. So we prefer FLOAT format.
    if (getFormat() == AAUDIO_UNSPECIFIED) {
        setFormat(AAUDIO_FORMAT_PCM_FLOAT);
    }

    // Build the request to send to the server.
    request.setUserId(getuid());
    request.setProcessId(getpid());
    request.setDirection(getDirection());

    request.getConfiguration().setDeviceId(getDeviceId());
    request.getConfiguration().setSampleRate(getSampleRate());
    request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
    request.getConfiguration().setAudioFormat(getFormat());
    aaudio_sharing_mode_t sharingMode = getSharingMode();
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.open(): sharingMode %d", sharingMode);
    request.getConfiguration().setSharingMode(sharingMode);
    request.getConfiguration().setBufferCapacity(builder.getBufferCapacity());

    mServiceStreamHandle = mServiceInterface.openStream(request, configuration);
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.open(): openStream returned mServiceStreamHandle = 0x%08X",
             (unsigned int)mServiceStreamHandle);
    if (mServiceStreamHandle < 0) {
        result = mServiceStreamHandle;
        ALOGE("AudioStreamInternal.open(): openStream() returned %d", result);
    } else {
        result = configuration.validate();
        if (result != AAUDIO_OK) {
            close();
            return result;
        }
        // Save results of the open.
        setSampleRate(configuration.getSampleRate());
        setSamplesPerFrame(configuration.getSamplesPerFrame());
        setDeviceId(configuration.getDeviceId());

        // Save device format so we can do format conversion and volume scaling together.
        mDeviceFormat = configuration.getAudioFormat();

        result = mServiceInterface.getStreamDescription(mServiceStreamHandle, mEndPointParcelable);
        ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.open(): getStreamDescriptor(0x%08X) returns %d",
                 mServiceStreamHandle, result);
        if (result != AAUDIO_OK) {
            ALOGE("AudioStreamInternal.open(): getStreamDescriptor returns %d", result);
            mServiceInterface.closeStream(mServiceStreamHandle);
            return result;
        }

        // Resolve parcelable into a descriptor.
        result = mEndPointParcelable.resolve(&mEndpointDescriptor);
        if (result != AAUDIO_OK) {
            ALOGE("AudioStreamInternal.open(): resolve() returns %d", result);
            mServiceInterface.closeStream(mServiceStreamHandle);
            return result;
        }

        // Configure endpoint based on descriptor.
        mAudioEndpoint.configure(&mEndpointDescriptor);

        mFramesPerBurst = mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
        assert(mFramesPerBurst >= 16);
        assert(mEndpointDescriptor.downDataQueueDescriptor.capacityInFrames < 10 * 1024);

        mClockModel.setSampleRate(getSampleRate());
        mClockModel.setFramesPerBurst(mFramesPerBurst);

        if (getDataCallbackProc()) {
            mCallbackFrames = builder.getFramesPerDataCallback();
            if (mCallbackFrames > getBufferCapacity() / 2) {
                ALOGE("AudioStreamInternal.open(): framesPerCallback too large");
                mServiceInterface.closeStream(mServiceStreamHandle);
                return AAUDIO_ERROR_OUT_OF_RANGE;

            } else if (mCallbackFrames < 0) {
                ALOGE("AudioStreamInternal.open(): framesPerCallback negative");
                mServiceInterface.closeStream(mServiceStreamHandle);
                return AAUDIO_ERROR_OUT_OF_RANGE;

            }
            if (mCallbackFrames == AAUDIO_UNSPECIFIED) {
                mCallbackFrames = mFramesPerBurst;
            }

            int32_t bytesPerFrame = getSamplesPerFrame()
                                  * AAudioConvert_formatToSizeInBytes(getFormat());
            int32_t callbackBufferSize = mCallbackFrames * bytesPerFrame;
            mCallbackBuffer = new uint8_t[callbackBufferSize];
        }

        setState(AAUDIO_STREAM_STATE_OPEN);
    }
    return result;
}

aaudio_result_t AudioStreamInternal::close() {
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X", mServiceStreamHandle);
    if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
        aaudio_handle_t serviceStreamHandle = mServiceStreamHandle;
        mServiceStreamHandle = AAUDIO_HANDLE_INVALID;

        mServiceInterface.closeStream(serviceStreamHandle);
        delete[] mCallbackBuffer;
        return mEndPointParcelable.close();
    } else {
        return AAUDIO_ERROR_INVALID_HANDLE;
    }
}

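// The callback loop runs on the thread launched by requestStart() and keeps calling the
// app's data callback until the callback is disabled, the stream stops playing, or a
// write fails.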
// Render audio in the application callback and then write the data to the stream.
void *AudioStreamInternal::callbackLoop() {
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    AAudioStream_dataCallback appCallback = getDataCallbackProc();
    if (appCallback == nullptr) return NULL;

    while (mCallbackEnabled.load() && isPlaying() && (result >= 0)) { // result might be a frame count
        // Call application using the AAudio callback interface.
        callbackResult = (*appCallback)(
                (AAudioStream *) this,
                getDataCallbackUserData(),
                mCallbackBuffer,
                mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
            // Write audio data to the stream.
            int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
            result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
            if (result == AAUDIO_ERROR_DISCONNECTED) {
                if (getErrorCallbackProc() != nullptr) {
                    ALOGD("AudioStreamAAudio(): callbackLoop() stream disconnected");
                    (*getErrorCallbackProc())(
                            (AAudioStream *) this,
                            getErrorCallbackUserData(),
                            AAUDIO_ERROR_DISCONNECTED);
                }
                break;
            } else if (result != mCallbackFrames) {
                ALOGE("AudioStreamAAudio(): callbackLoop() wrote %d / %d",
                      (int) result, mCallbackFrames);
                break;
            }
        } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("AudioStreamAAudio(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
            break;
        }
    }

    ALOGD("AudioStreamAAudio(): callbackLoop() exiting, result = %d, isPlaying() = %d",
          result, (int) isPlaying());
    return NULL; // TODO review
}

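// Thread entry point passed to createThread(); it just forwards to the stream's callbackLoop().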
static void *aaudio_callback_thread_proc(void *context)
{
    AudioStreamInternal *stream = (AudioStreamInternal *)context;
    //LOGD("AudioStreamAAudio(): oboe_callback_thread, stream = %p", stream);
    if (stream != NULL) {
        return stream->callbackLoop();
    } else {
        return NULL;
    }
}

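// Start the clock model at the current time, ask the service to start the stream,
// and, if a data callback is registered, launch the callback thread.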
aaudio_result_t AudioStreamInternal::requestStart()
{
    int64_t startTime;
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): start()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }

    startTime = AudioClock::getNanoseconds();
    mClockModel.start(startTime);
    processTimestamp(0, startTime);
    setState(AAUDIO_STREAM_STATE_STARTING);
    aaudio_result_t result = mServiceInterface.startStream(mServiceStreamHandle);

    if (result == AAUDIO_OK && getDataCallbackProc() != nullptr) {
        // Launch the callback loop thread.
        int64_t periodNanos = mCallbackFrames
                              * AAUDIO_NANOS_PER_SECOND
                              / getSampleRate();
        mCallbackEnabled.store(true);
        result = createThread(periodNanos, aaudio_callback_thread_proc, this);
    }
    return result;
}

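// Timeout = MIN_TIMEOUT_OPERATIONS times the nominal duration of the operation,
// but never less than MIN_TIMEOUT_NANOS.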
int64_t AudioStreamInternal::calculateReasonableTimeout(int32_t framesPerOperation) {

    // Wait at least one second, or MIN_TIMEOUT_OPERATIONS times as long as the operation should take.
    int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS * framesPerOperation * AAUDIO_NANOS_PER_SECOND)
                                 / getSampleRate();
    if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) { // arbitrary number of seconds
        timeoutNanoseconds = MIN_TIMEOUT_NANOS;
    }
    return timeoutNanoseconds;
}

aaudio_result_t AudioStreamInternal::stopCallback()
{
    if (isDataCallbackActive()) {
        mCallbackEnabled.store(false);
        return joinThread(NULL, calculateReasonableTimeout(mCallbackFrames));
    } else {
        return AAUDIO_OK;
    }
}

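// Pause the stream on the service side. Callers should stop the callback thread first,
// which is what requestPause() does.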
aaudio_result_t AudioStreamInternal::requestPauseInternal()
{
    ALOGD("AudioStreamInternal(): pause()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }

    mClockModel.stop(AudioClock::getNanoseconds());
    setState(AAUDIO_STREAM_STATE_PAUSING);
    return mServiceInterface.pauseStream(mServiceStreamHandle);
}

aaudio_result_t AudioStreamInternal::requestPause()
{
    aaudio_result_t result = stopCallback();
    if (result != AAUDIO_OK) {
        return result;
    }
    return requestPauseInternal();
}

aaudio_result_t AudioStreamInternal::requestFlush() {
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): flush()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }

    setState(AAUDIO_STREAM_STATE_FLUSHING);
    return mServiceInterface.flushStream(mServiceStreamHandle);
}

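// Called when the service reports AAUDIO_SERVICE_EVENT_FLUSHED. Account for the frames
// that were discarded so the app-visible counters stay consistent.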
void AudioStreamInternal::onFlushFromServer() {
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): onFlushFromServer()");
    int64_t readCounter = mAudioEndpoint.getDownDataReadCounter();
    int64_t writeCounter = mAudioEndpoint.getDownDataWriteCounter();
    // Bump offset so caller does not see the retrograde motion in getFramesRead().
    int64_t framesFlushed = writeCounter - readCounter;
    mFramesOffsetFromService += framesFlushed;
    // Flush written frames by forcing writeCounter to readCounter.
    // This is because we cannot move the read counter in the hardware.
    mAudioEndpoint.setDownDataWriteCounter(readCounter);
}

aaudio_result_t AudioStreamInternal::requestStop()
{
    // TODO better implementation of requestStop()
    aaudio_result_t result = requestPause();
    if (result == AAUDIO_OK) {
        aaudio_stream_state_t state;
        result = waitForStateChange(AAUDIO_STREAM_STATE_PAUSING,
                                    &state,
                                    500 * AAUDIO_NANOS_PER_MILLISECOND); // TODO temporary code
        if (result == AAUDIO_OK) {
            result = requestFlush();
        }
    }
    return result;
}

aaudio_result_t AudioStreamInternal::registerThread() {
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): registerThread()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    return mServiceInterface.registerAudioThread(mServiceStreamHandle,
                                                 getpid(),
                                                 gettid(),
                                                 getPeriodNanoseconds());
}

aaudio_result_t AudioStreamInternal::unregisterThread() {
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): unregisterThread()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    return mServiceInterface.unregisterAudioThread(mServiceStreamHandle, getpid(), gettid());
}

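// For now the timestamp is extrapolated from the isochronous clock model with a fake
// 10 millisecond hardware delay, rather than coming from the HAL.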
aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
                                                  int64_t *framePosition,
                                                  int64_t *timeNanoseconds) {
    // TODO implement using real HAL
    int64_t time = AudioClock::getNanoseconds();
    *framePosition = mClockModel.convertTimeToPosition(time);
    *timeNanoseconds = time + (10 * AAUDIO_NANOS_PER_MILLISECOND); // Fake hardware delay
    return AAUDIO_OK;
}

aaudio_result_t AudioStreamInternal::updateStateWhileWaiting() {
    if (isDataCallbackActive()) {
        return AAUDIO_OK; // state is getting updated by the callback thread read/write call
    }
    return processCommands();
}

#if LOG_TIMESTAMPS
static void AudioStreamInternal_LogTimestamp(AAudioServiceMessage &command) {
    static int64_t oldPosition = 0;
    static int64_t oldTime = 0;
    int64_t framePosition = command.timestamp.position;
    int64_t nanoTime = command.timestamp.timestamp;
    ALOGD("AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu",
          (long long) framePosition,
          (long long) nanoTime);
    int64_t nanosDelta = nanoTime - oldTime;
    if (nanosDelta > 0 && oldTime > 0) {
        int64_t framesDelta = framePosition - oldPosition;
        int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
        ALOGD("AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
        ALOGD("AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
        ALOGD("AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate);
    }
    oldPosition = framePosition;
    oldTime = nanoTime;
}
#endif

aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) {
    int64_t framePosition = 0;
#if LOG_TIMESTAMPS
    AudioStreamInternal_LogTimestamp(*message);
#endif
    framePosition = message->timestamp.position;
    processTimestamp(framePosition, message->timestamp.timestamp);
    return AAUDIO_OK;
}

aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) {
    aaudio_result_t result = AAUDIO_OK;
    ALOGD_IF(ALOG_CONDITION, "processCommands() got event %d", message->event.event);
    switch (message->event.event) {
        case AAUDIO_SERVICE_EVENT_STARTED:
            ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
            setState(AAUDIO_STREAM_STATE_STARTED);
            break;
        case AAUDIO_SERVICE_EVENT_PAUSED:
            ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
            setState(AAUDIO_STREAM_STATE_PAUSED);
            break;
        case AAUDIO_SERVICE_EVENT_FLUSHED:
            ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
            setState(AAUDIO_STREAM_STATE_FLUSHED);
            onFlushFromServer();
            break;
        case AAUDIO_SERVICE_EVENT_CLOSED:
            ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
            setState(AAUDIO_STREAM_STATE_CLOSED);
            break;
        case AAUDIO_SERVICE_EVENT_DISCONNECTED:
            result = AAUDIO_ERROR_DISCONNECTED;
            setState(AAUDIO_STREAM_STATE_DISCONNECTED);
            ALOGW("WARNING - processCommands() AAUDIO_SERVICE_EVENT_DISCONNECTED");
            break;
        case AAUDIO_SERVICE_EVENT_VOLUME:
            mVolume = message->event.dataDouble;
            ALOGD_IF(ALOG_CONDITION, "processCommands() AAUDIO_SERVICE_EVENT_VOLUME %f", mVolume);
            break;
        default:
            ALOGW("WARNING - processCommands() Unrecognized event = %d",
                  (int) message->event.event);
            break;
    }
    return result;
}

// Process all the commands coming from the server.
aaudio_result_t AudioStreamInternal::processCommands() {
    aaudio_result_t result = AAUDIO_OK;

    while (result == AAUDIO_OK) {
        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::processCommands() - looping, %d", result);
        AAudioServiceMessage message;
        if (mAudioEndpoint.readUpCommand(&message) != 1) {
            break; // no command this time, no problem
        }
        switch (message.what) {
        case AAudioServiceMessage::code::TIMESTAMP:
            result = onTimestampFromServer(&message);
            break;

        case AAudioServiceMessage::code::EVENT:
            result = onEventFromServer(&message);
            break;

        default:
            ALOGW("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
                  (int) message.what);
            result = AAUDIO_ERROR_UNEXPECTED_VALUE;
            break;
        }
    }
    return result;
}

// Write the data, block if needed and timeoutNanoseconds > 0
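// Repeatedly calls the non-blocking writeNow() and, if frames remain, sleeps until the
// clock model expects more room in the buffer, giving up when the deadline passes.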
aaudio_result_t AudioStreamInternal::write(const void *buffer, int32_t numFrames,
                                           int64_t timeoutNanoseconds)
{
    aaudio_result_t result = AAUDIO_OK;
    int32_t loopCount = 0;
    uint8_t* source = (uint8_t*)buffer;
    int64_t currentTimeNanos = AudioClock::getNanoseconds();
    int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
    int32_t framesLeft = numFrames;
    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write(%p, %d) at time %08llu , mState = %s",
    //         buffer, numFrames, (unsigned long long) currentTimeNanos,
    //         AAudio_convertStreamStateToText(getState()));

    // Write until all the data has been written or until a timeout occurs.
    while (framesLeft > 0) {
        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write() loop: framesLeft = %d, loopCount = %d =====",
        //         framesLeft, loopCount++);
        // The call to writeNow() will not block. It will just write as much as it can.
        int64_t wakeTimeNanos = 0;
        aaudio_result_t framesWritten = writeNow(source, framesLeft,
                                                 currentTimeNanos, &wakeTimeNanos);
        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write() loop: framesWritten = %d", framesWritten);
        if (framesWritten < 0) {
            ALOGE("AudioStreamInternal::write() loop: writeNow returned %d", framesWritten);
            result = framesWritten;
            break;
        }
        framesLeft -= (int32_t) framesWritten;
        source += framesWritten * getBytesPerFrame();

        // Should we block?
        if (timeoutNanoseconds == 0) {
            break; // don't block
        } else if (framesLeft > 0) {
            //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal:: original wakeTimeNanos %lld", (long long) wakeTimeNanos);
            // clip the wake time to something reasonable
            if (wakeTimeNanos < currentTimeNanos) {
                wakeTimeNanos = currentTimeNanos;
            }
            if (wakeTimeNanos > deadlineNanos) {
                // If we time out, just return the framesWritten so far.
                ALOGE("AudioStreamInternal::write(): timed out after %lld nanos",
                      (long long) timeoutNanoseconds);
                break;
            }

            //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal:: sleep until %lld, dur = %lld", (long long) wakeTimeNanos,
            //         (long long) (wakeTimeNanos - currentTimeNanos));
            AudioClock::sleepForNanos(wakeTimeNanos - currentTimeNanos);
            currentTimeNanos = AudioClock::getNanoseconds();
        }
    }

    // return error or framesWritten
    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write() result = %d, framesLeft = %d, #%d",
    //         result, framesLeft, loopCount);
    (void) loopCount;
    return (result < 0) ? result : numFrames - framesLeft;
}

// Write as much data as we can without blocking.
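// Also processes pending commands from the service, advances the estimated read counter
// when the output is free running, and suggests a wake-up time based on when the next
// burst is expected to be read.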
aaudio_result_t AudioStreamInternal::writeNow(const void *buffer, int32_t numFrames,
                                              int64_t currentNanoTime, int64_t *wakeTimePtr) {

    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow(%p) - enter", buffer);
    {
        aaudio_result_t result = processCommands();
        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - processCommands() returned %d", result);
        if (result != AAUDIO_OK) {
            return result;
        }
    }

    if (mAudioEndpoint.isOutputFreeRunning()) {
        ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - update read counter");
        // Update data queue based on the timing model.
        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        mAudioEndpoint.setDownDataReadCounter(estimatedReadCounter);
    }
    // TODO else query the read counter from the endpoint, since it is set by the actual reader.

    // If the read index passed the write index then consider it an underrun.
    if (mAudioEndpoint.getFullFramesAvailable() < 0) {
        mXRunCount++;
    }

    // Write some data to the buffer.
    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - writeNowWithConversion(%d)", numFrames);
    int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
    //         numFrames, framesWritten);

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesWritten >= 0) {
        // By default wake up a few milliseconds from now. // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - wakeTime based on %s",
        //         AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                if (framesWritten != 0) {
                    // Don't wait to write more data. Just prime the buffer.
                    wakeTime = currentNanoTime;
                }
                break;
            case AAUDIO_STREAM_STATE_STARTED:   // When do we expect the next read burst to occur?
                {
                    uint32_t burstSize = mFramesPerBurst;
                    if (burstSize < 32) {
                        burstSize = 32; // TODO review
                    }

                    uint64_t nextReadPosition = mAudioEndpoint.getDownDataReadCounter() + burstSize;
                    wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
                }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;

    }
//    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
//            (unsigned long long)currentNanoTime,
//            (unsigned long long)mAudioEndpoint.getDownDataReadCounter(),
//            (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
    return framesWritten;
}


// TODO this function needs a major cleanup.
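// Copies the app data into the shared FIFO in up to two contiguous regions of the
// wrapping buffer, converting FLOAT to I16 when the device format differs.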
aaudio_result_t AudioStreamInternal::writeNowWithConversion(const void *buffer,
                                                            int32_t numFrames) {
    // ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNowWithConversion(%p, %d)", buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *source = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    mAudioEndpoint.getEmptyRoomAvailable(&wrappingBuffer);

    // Write the data in one or two parts.
    int partIndex = 0;
    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
        int32_t framesToWrite = framesLeft;
        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable > 0) {
            if (framesToWrite > framesAvailable) {
                framesToWrite = framesAvailable;
            }
            int32_t numBytes = framesToWrite * getBytesPerFrame();
            // TODO handle volume scaling
            if (getFormat() == mDeviceFormat) {
                // Copy straight through.
                memcpy(wrappingBuffer.data[partIndex], source, numBytes);
            } else if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT
                       && mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                // Data conversion.
                AAudioConvert_floatToPcm16(
                        (const float *) source,
                        framesToWrite * getSamplesPerFrame(),
                        (int16_t *) wrappingBuffer.data[partIndex]);
            } else {
                // TODO handle more conversions
                ALOGE("AudioStreamInternal::writeNowWithConversion() unsupported formats: %d, %d",
                      getFormat(), mDeviceFormat);
                return AAUDIO_ERROR_UNEXPECTED_VALUE;
            }

            source += numBytes;
            framesLeft -= framesToWrite;
        }
        partIndex++;
    }
    int32_t framesWritten = numFrames - framesLeft;
    mAudioEndpoint.advanceWriteIndex(framesWritten);

    if (framesWritten > 0) {
        incrementFramesWritten(framesWritten);
    }
    // ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
    return framesWritten;
}

void AudioStreamInternal::processTimestamp(uint64_t position, int64_t time) {
    mClockModel.processTimestamp(position, time);
}

aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
    int32_t actualFrames = 0;
    aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(requestedFrames, &actualFrames);
    if (result < 0) {
        return result;
    } else {
        return (aaudio_result_t) actualFrames;
    }
}

int32_t AudioStreamInternal::getBufferSize() const
{
    return mAudioEndpoint.getBufferSizeInFrames();
}

int32_t AudioStreamInternal::getBufferCapacity() const
{
    return mAudioEndpoint.getBufferCapacityInFrames();
}

int32_t AudioStreamInternal::getFramesPerBurst() const
{
    return mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
}

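// The read position is extrapolated from the clock model plus the offset accumulated by
// flushes, and clamped so it never moves backwards.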
int64_t AudioStreamInternal::getFramesRead()
{
    int64_t framesRead =
            mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
            + mFramesOffsetFromService;
    // Prevent retrograde motion.
    if (framesRead < mLastFramesRead) {
        framesRead = mLastFramesRead;
    } else {
        mLastFramesRead = framesRead;
    }
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
    return framesRead;
}

// TODO implement getTimestamp