/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AAudio"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <stdint.h>
#include <assert.h>

#include <binder/IServiceManager.h>

#include <aaudio/AAudio.h>
#include <utils/String16.h>

#include "AudioClock.h"
#include "AudioEndpointParcelable.h"
#include "binding/AAudioStreamRequest.h"
#include "binding/AAudioStreamConfiguration.h"
#include "binding/IAAudioService.h"
#include "binding/AAudioServiceMessage.h"
#include "fifo/FifoBuffer.h"

#include "core/AudioStreamBuilder.h"
#include "AudioStreamInternal.h"

#define LOG_TIMESTAMPS 0

using android::String16;
using android::IServiceManager;
using android::defaultServiceManager;
using android::interface_cast;
using android::Mutex;
using android::WrappingBuffer;

using namespace aaudio;

#define MIN_TIMEOUT_NANOS (1000 * AAUDIO_NANOS_PER_MILLISECOND)

// Wait at least this many times longer than the operation should take.
#define MIN_TIMEOUT_OPERATIONS 4

#define ALOG_CONDITION (mInService == false)

AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface &serviceInterface, bool inService)
        : AudioStream()
        , mClockModel()
        , mAudioEndpoint()
        , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
        , mFramesPerBurst(16)
        , mServiceInterface(serviceInterface)
        , mInService(inService)
{
}

AudioStreamInternal::~AudioStreamInternal() {
}

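// Open a stream in the AAudio service:
//  - send an AAudioStreamRequest describing the desired stream,
//  - receive a stream handle plus the actual configuration,
//  - fetch and resolve the shared-memory endpoint description,
//  - and, if a data callback was supplied, allocate the callback buffer.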
aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {

    aaudio_result_t result = AAUDIO_OK;
    AAudioStreamRequest request;
    AAudioStreamConfiguration configuration;

    result = AudioStream::open(builder);
    if (result < 0) {
        return result;
    }

    // We have to do volume scaling. So we prefer FLOAT format.
    if (getFormat() == AAUDIO_UNSPECIFIED) {
        setFormat(AAUDIO_FORMAT_PCM_FLOAT);
    }

    // Build the request to send to the server.
    request.setUserId(getuid());
    request.setProcessId(getpid());
    request.setDirection(getDirection());

    request.getConfiguration().setDeviceId(getDeviceId());
    request.getConfiguration().setSampleRate(getSampleRate());
    request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
    request.getConfiguration().setAudioFormat(getFormat());
    aaudio_sharing_mode_t sharingMode = getSharingMode();
    ALOGE("AudioStreamInternal.open(): sharingMode %d", sharingMode);
    request.getConfiguration().setSharingMode(sharingMode);
    request.getConfiguration().setBufferCapacity(builder.getBufferCapacity());

    mServiceStreamHandle = mServiceInterface.openStream(request, configuration);
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.open(): openStream returned mServiceStreamHandle = 0x%08X",
             (unsigned int)mServiceStreamHandle);
    if (mServiceStreamHandle < 0) {
        result = mServiceStreamHandle;
        ALOGE("AudioStreamInternal.open(): openStream() returned %d", result);
    } else {
        result = configuration.validate();
        if (result != AAUDIO_OK) {
            close();
            return result;
        }
        // Save results of the open.
        setSampleRate(configuration.getSampleRate());
        setSamplesPerFrame(configuration.getSamplesPerFrame());
        setDeviceId(configuration.getDeviceId());

        // Save device format so we can do format conversion and volume scaling together.
        mDeviceFormat = configuration.getAudioFormat();

        result = mServiceInterface.getStreamDescription(mServiceStreamHandle, mEndPointParcelable);
        ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.open(): getStreamDescriptor(0x%08X) returns %d",
                 mServiceStreamHandle, result);
        if (result != AAUDIO_OK) {
            ALOGE("AudioStreamInternal.open(): getStreamDescriptor returns %d", result);
            mServiceInterface.closeStream(mServiceStreamHandle);
            return result;
        }

        // resolve parcelable into a descriptor
        result = mEndPointParcelable.resolve(&mEndpointDescriptor);
        if (result != AAUDIO_OK) {
            ALOGE("AudioStreamInternal.open(): resolve() returns %d", result);
            mServiceInterface.closeStream(mServiceStreamHandle);
            return result;
        }

        // Configure endpoint based on descriptor.
        mAudioEndpoint.configure(&mEndpointDescriptor);

        mFramesPerBurst = mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
        assert(mFramesPerBurst >= 16);
        assert(mEndpointDescriptor.downDataQueueDescriptor.capacityInFrames < 10 * 1024);

        mClockModel.setSampleRate(getSampleRate());
        mClockModel.setFramesPerBurst(mFramesPerBurst);

        if (getDataCallbackProc()) {
            mCallbackFrames = builder.getFramesPerDataCallback();
            if (mCallbackFrames > getBufferCapacity() / 2) {
                ALOGE("AudioStreamInternal.open(): framesPerCallback too large");
                mServiceInterface.closeStream(mServiceStreamHandle);
                return AAUDIO_ERROR_OUT_OF_RANGE;

            } else if (mCallbackFrames < 0) {
                ALOGE("AudioStreamInternal.open(): framesPerCallback negative");
                mServiceInterface.closeStream(mServiceStreamHandle);
                return AAUDIO_ERROR_OUT_OF_RANGE;

            }
            if (mCallbackFrames == AAUDIO_UNSPECIFIED) {
                mCallbackFrames = mFramesPerBurst;
            }

            int32_t bytesPerFrame = getSamplesPerFrame()
                                  * AAudioConvert_formatToSizeInBytes(getFormat());
            int32_t callbackBufferSize = mCallbackFrames * bytesPerFrame;
            mCallbackBuffer = new uint8_t[callbackBufferSize];
        }

        setState(AAUDIO_STREAM_STATE_OPEN);
    }
    return result;
}

aaudio_result_t AudioStreamInternal::close() {
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X", mServiceStreamHandle);
    if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
        aaudio_handle_t serviceStreamHandle = mServiceStreamHandle;
        mServiceStreamHandle = AAUDIO_HANDLE_INVALID;

        mServiceInterface.closeStream(serviceStreamHandle);
        delete[] mCallbackBuffer;
        return mEndPointParcelable.close();
    } else {
        return AAUDIO_ERROR_INVALID_HANDLE;
    }
}

// Render audio in the application callback and then write the data to the stream.
void *AudioStreamInternal::callbackLoop() {
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    AAudioStream_dataCallback appCallback = getDataCallbackProc();
    if (appCallback == nullptr) return NULL;

    // result might be a frame count
    while (mCallbackEnabled.load() && isPlaying() && (result >= 0)) {
        // Call application using the AAudio callback interface.
        callbackResult = (*appCallback)(
                (AAudioStream *) this,
                getDataCallbackUserData(),
                mCallbackBuffer,
                mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
            // Write audio data to stream.
            int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

            // This is a BLOCKING WRITE!
            result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
            if (result != mCallbackFrames) {
                ALOGE("AudioStreamInternal(): callbackLoop: write() returned %d", result);
                if (result >= 0) {
                    // Only wrote some of the frames requested. Must have timed out.
                    result = AAUDIO_ERROR_TIMEOUT;
                }
                if (getErrorCallbackProc() != nullptr) {
                    (*getErrorCallbackProc())(
                            (AAudioStream *) this,
                            getErrorCallbackUserData(),
                            result);
                }
                break;
            }
        } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("AudioStreamInternal(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
            break;
        }
    }

    ALOGD("AudioStreamInternal(): callbackLoop() exiting, result = %d, isPlaying() = %d",
          result, (int) isPlaying());
    return NULL; // TODO review
}

static void *aaudio_callback_thread_proc(void *context)
{
    AudioStreamInternal *stream = (AudioStreamInternal *)context;
    //LOGD("AudioStreamInternal(): oboe_callback_thread, stream = %p", stream);
    if (stream != NULL) {
        return stream->callbackLoop();
    } else {
        return NULL;
    }
}

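// Start the stream: seed the clock model with the current time, ask the
// service to start the endpoint, and launch the callback thread if the
// application registered a data callback.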
aaudio_result_t AudioStreamInternal::requestStart()
{
    int64_t startTime;
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): start()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }

    startTime = AudioClock::getNanoseconds();
    mClockModel.start(startTime);
    processTimestamp(0, startTime);
    setState(AAUDIO_STREAM_STATE_STARTING);
    aaudio_result_t result = mServiceInterface.startStream(mServiceStreamHandle);

    if (result == AAUDIO_OK && getDataCallbackProc() != nullptr) {
        // Launch the callback loop thread.
        int64_t periodNanos = mCallbackFrames
                              * AAUDIO_NANOS_PER_SECOND
                              / getSampleRate();
        mCallbackEnabled.store(true);
        result = createThread(periodNanos, aaudio_callback_thread_proc, this);
    }
    return result;
}

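// Scale the timeout with the size of the operation so that large writes or
// thread joins get proportionally more time, but never less than one second.
// For example, at 48000 Hz with a 192-frame burst this gives
// 4 * 192 * 1e9 / 48000 = 16 ms, which is then raised to MIN_TIMEOUT_NANOS.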
int64_t AudioStreamInternal::calculateReasonableTimeout(int32_t framesPerOperation) {

    // Wait for at least a second or some number of callbacks to join the thread.
    int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS * framesPerOperation * AAUDIO_NANOS_PER_SECOND)
                                 / getSampleRate();
    if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) { // arbitrary number of seconds
        timeoutNanoseconds = MIN_TIMEOUT_NANOS;
    }
    return timeoutNanoseconds;
}

aaudio_result_t AudioStreamInternal::stopCallback()
{
    if (isDataCallbackActive()) {
        mCallbackEnabled.store(false);
        return joinThread(NULL, calculateReasonableTimeout(mCallbackFrames));
    } else {
        return AAUDIO_OK;
    }
}

aaudio_result_t AudioStreamInternal::requestPauseInternal()
{
    ALOGD("AudioStreamInternal(): pause()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }

    mClockModel.stop(AudioClock::getNanoseconds());
    setState(AAUDIO_STREAM_STATE_PAUSING);
    // Tell the service to pause the stream.
    return mServiceInterface.pauseStream(mServiceStreamHandle);
}

aaudio_result_t AudioStreamInternal::requestPause()
{
    aaudio_result_t result = stopCallback();
    if (result != AAUDIO_OK) {
        return result;
    }
    return requestPauseInternal();
}

aaudio_result_t AudioStreamInternal::requestFlush() {
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): flush()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }

    setState(AAUDIO_STREAM_STATE_FLUSHING);
    return mServiceInterface.flushStream(mServiceStreamHandle);
}

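// Called when the server reports AAUDIO_SERVICE_EVENT_FLUSHED. Accounts for
// the frames that were discarded so getFramesRead() never appears to move
// backwards.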
void AudioStreamInternal::onFlushFromServer() {
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): onFlushFromServer()");
    int64_t readCounter = mAudioEndpoint.getDownDataReadCounter();
    int64_t writeCounter = mAudioEndpoint.getDownDataWriteCounter();
    // Bump offset so caller does not see the retrograde motion in getFramesRead().
    int64_t framesFlushed = writeCounter - readCounter;
    mFramesOffsetFromService += framesFlushed;
    // Flush written frames by forcing writeCounter to readCounter.
    // This is because we cannot move the read counter in the hardware.
    mAudioEndpoint.setDownDataWriteCounter(readCounter);
}

aaudio_result_t AudioStreamInternal::requestStop()
{
    // TODO better implementation of requestStop()
    aaudio_result_t result = requestPause();
    if (result == AAUDIO_OK) {
        aaudio_stream_state_t state;
        result = waitForStateChange(AAUDIO_STREAM_STATE_PAUSING,
                                    &state,
                                    500 * AAUDIO_NANOS_PER_MILLISECOND); // TODO temporary code
        if (result == AAUDIO_OK) {
            result = requestFlush();
        }
    }
    return result;
}

aaudio_result_t AudioStreamInternal::registerThread() {
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): registerThread()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    return mServiceInterface.registerAudioThread(mServiceStreamHandle,
                                                 getpid(),
                                                 gettid(),
                                                 getPeriodNanoseconds());
}

aaudio_result_t AudioStreamInternal::unregisterThread() {
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): unregisterThread()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    return mServiceInterface.unregisterAudioThread(mServiceStreamHandle, getpid(), gettid());
}

aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
                                                  int64_t *framePosition,
                                                  int64_t *timeNanoseconds) {
    // TODO implement using real HAL
    int64_t time = AudioClock::getNanoseconds();
    *framePosition = mClockModel.convertTimeToPosition(time);
    *timeNanoseconds = time + (10 * AAUDIO_NANOS_PER_MILLISECOND); // Fake hardware delay
    return AAUDIO_OK;
}

aaudio_result_t AudioStreamInternal::updateStateWhileWaiting() {
    if (isDataCallbackActive()) {
        return AAUDIO_OK; // state is getting updated by the callback thread read/write call
    }
    return processCommands();
}

#if LOG_TIMESTAMPS
static void AudioStreamInternal_LogTimestamp(AAudioServiceMessage &command) {
    static int64_t oldPosition = 0;
    static int64_t oldTime = 0;
    int64_t framePosition = command.timestamp.position;
    int64_t nanoTime = command.timestamp.timestamp;
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu",
             (long long) framePosition,
             (long long) nanoTime);
    int64_t nanosDelta = nanoTime - oldTime;
    if (nanosDelta > 0 && oldTime > 0) {
        int64_t framesDelta = framePosition - oldPosition;
        int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
        ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
        ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
        ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate);
    }
    oldPosition = framePosition;
    oldTime = nanoTime;
}
#endif

aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) {
    int64_t framePosition = 0;
#if LOG_TIMESTAMPS
    AudioStreamInternal_LogTimestamp(*message);
#endif
    framePosition = message->timestamp.position;
    processTimestamp(framePosition, message->timestamp.timestamp);
    return AAUDIO_OK;
}

aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) {
    aaudio_result_t result = AAUDIO_OK;
    ALOGD_IF(ALOG_CONDITION, "processCommands() got event %d", message->event.event);
    switch (message->event.event) {
        case AAUDIO_SERVICE_EVENT_STARTED:
            ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
            setState(AAUDIO_STREAM_STATE_STARTED);
            break;
        case AAUDIO_SERVICE_EVENT_PAUSED:
            ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
            setState(AAUDIO_STREAM_STATE_PAUSED);
            break;
        case AAUDIO_SERVICE_EVENT_FLUSHED:
            ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
            setState(AAUDIO_STREAM_STATE_FLUSHED);
            onFlushFromServer();
            break;
        case AAUDIO_SERVICE_EVENT_CLOSED:
            ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
            setState(AAUDIO_STREAM_STATE_CLOSED);
            break;
        case AAUDIO_SERVICE_EVENT_DISCONNECTED:
            result = AAUDIO_ERROR_DISCONNECTED;
            setState(AAUDIO_STREAM_STATE_DISCONNECTED);
            ALOGW("WARNING - processCommands() AAUDIO_SERVICE_EVENT_DISCONNECTED");
            break;
        case AAUDIO_SERVICE_EVENT_VOLUME:
            mVolume = message->event.dataDouble;
            ALOGD_IF(ALOG_CONDITION, "processCommands() AAUDIO_SERVICE_EVENT_VOLUME %f", mVolume);
            break;
        default:
            ALOGW("WARNING - processCommands() Unrecognized event = %d",
                  (int) message->event.event);
            break;
    }
    return result;
}

// Process all the commands coming from the server.
aaudio_result_t AudioStreamInternal::processCommands() {
    aaudio_result_t result = AAUDIO_OK;

    while (result == AAUDIO_OK) {
        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::processCommands() - looping, %d", result);
        AAudioServiceMessage message;
        if (mAudioEndpoint.readUpCommand(&message) != 1) {
            break; // no command this time, no problem
        }
        switch (message.what) {
            case AAudioServiceMessage::code::TIMESTAMP:
                result = onTimestampFromServer(&message);
                break;

            case AAudioServiceMessage::code::EVENT:
                result = onEventFromServer(&message);
                break;

            default:
                ALOGW("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
                      (int) message.what);
                result = AAUDIO_ERROR_UNEXPECTED_VALUE;
                break;
        }
    }
    return result;
}

// Write the data, block if needed and timeoutNanoseconds > 0.
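// Each pass calls the non-blocking writeNow(), which also suggests a wake-up
// time based on the timing model. That suggestion is clipped to the deadline,
// the thread sleeps until it arrives, and the loop repeats until all frames
// are written, the timeout expires, or an error is returned.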
aaudio_result_t AudioStreamInternal::write(const void *buffer, int32_t numFrames,
                                           int64_t timeoutNanoseconds)
{
    aaudio_result_t result = AAUDIO_OK;
    int32_t loopCount = 0;
    uint8_t* source = (uint8_t*) buffer;
    int64_t currentTimeNanos = AudioClock::getNanoseconds();
    int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
    int32_t framesLeft = numFrames;
    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write(%p, %d) at time %08llu , mState = %s",
    //         buffer, numFrames, (unsigned long long) currentTimeNanos,
    //         AAudio_convertStreamStateToText(getState()));

    // Write until all the data has been written or until a timeout occurs.
    while (framesLeft > 0) {
        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write() loop: framesLeft = %d, loopCount = %d =====",
        //         framesLeft, loopCount++);
        // The call to writeNow() will not block. It will just write as much as it can.
        int64_t wakeTimeNanos = 0;
        aaudio_result_t framesWritten = writeNow(source, framesLeft,
                                                 currentTimeNanos, &wakeTimeNanos);
        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write() loop: framesWritten = %d", framesWritten);
        if (framesWritten < 0) {
            ALOGE("AudioStreamInternal::write() loop: writeNow returned %d", framesWritten);
            result = framesWritten;
            break;
        }
        framesLeft -= (int32_t) framesWritten;
        source += framesWritten * getBytesPerFrame();

        // Should we block?
        if (timeoutNanoseconds == 0) {
            break; // don't block
        } else if (framesLeft > 0) {
            //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal:: original wakeTimeNanos %lld", (long long) wakeTimeNanos);
            // clip the wake time to something reasonable
            if (wakeTimeNanos < currentTimeNanos) {
                wakeTimeNanos = currentTimeNanos;
            }
            if (wakeTimeNanos > deadlineNanos) {
                // If we time out, just return the framesWritten so far.
                ALOGE("AudioStreamInternal::write(): timed out after %lld nanos",
                      (long long) timeoutNanoseconds);
                break;
            }

            //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal:: sleep until %lld, dur = %lld", (long long) wakeTimeNanos,
            //         (long long) (wakeTimeNanos - currentTimeNanos));
            AudioClock::sleepForNanos(wakeTimeNanos - currentTimeNanos);
            currentTimeNanos = AudioClock::getNanoseconds();
        }
    }

    // return error or framesWritten
    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write() result = %d, framesLeft = %d, #%d",
    //         result, framesLeft, loopCount);
    (void) loopCount;
    return (result < 0) ? result : numFrames - framesLeft;
}

// Write as much data as we can without blocking.
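// Because the output may be free-running (the read counter is not being
// updated by the actual reader), the read counter is first estimated from the
// clock model. The data is then written through writeNowWithConversion() and
// a wake-up time is suggested: immediately while priming the buffer,
// otherwise at the predicted time of the next read burst.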
aaudio_result_t AudioStreamInternal::writeNow(const void *buffer, int32_t numFrames,
                                              int64_t currentNanoTime, int64_t *wakeTimePtr) {

    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow(%p) - enter", buffer);
    {
        aaudio_result_t result = processCommands();
        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - processCommands() returned %d", result);
        if (result != AAUDIO_OK) {
            return result;
        }
    }

    if (mAudioEndpoint.isOutputFreeRunning()) {
        ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - update read counter");
        // Update data queue based on the timing model.
        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        mAudioEndpoint.setDownDataReadCounter(estimatedReadCounter);
    }
    // TODO else query from endpoint cuz set by actual reader, maybe

    // If the read index passed the write index then consider it an underrun.
    if (mAudioEndpoint.getFullFramesAvailable() < 0) {
        mXRunCount++;
    }

    // Write some data to the buffer.
    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - writeNowWithConversion(%d)", numFrames);
    int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
    //         numFrames, framesWritten);

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesWritten >= 0) {
        // By default wake up a few milliseconds from now. // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - wakeTime based on %s",
        //         AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                if (framesWritten != 0) {
                    // Don't wait to write more data. Just prime the buffer.
                    wakeTime = currentNanoTime;
                }
                break;
            case AAUDIO_STREAM_STATE_STARTED:   // When do we expect the next read burst to occur?
                {
                    uint32_t burstSize = mFramesPerBurst;
                    if (burstSize < 32) {
                        burstSize = 32; // TODO review
                    }

                    uint64_t nextReadPosition = mAudioEndpoint.getDownDataReadCounter() + burstSize;
                    wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
                }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;
    }
//    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
//             (unsigned long long)currentNanoTime,
//             (unsigned long long)mAudioEndpoint.getDownDataReadCounter(),
//             (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
    return framesWritten;
}

// TODO this function needs a major cleanup.
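// Copy frames into the shared FIFO. The empty region may wrap around the end
// of the buffer, so it is presented as up to two parts. Data is converted
// from the application format to the device format (currently only
// float -> int16_t) as it is copied.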
aaudio_result_t AudioStreamInternal::writeNowWithConversion(const void *buffer,
                                                            int32_t numFrames) {
    // ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNowWithConversion(%p, %d)", buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *source = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    mAudioEndpoint.getEmptyRoomAvailable(&wrappingBuffer);

    // Write data in one or two parts.
    int partIndex = 0;
    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
        int32_t framesToWrite = framesLeft;
        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable > 0) {
            if (framesToWrite > framesAvailable) {
                framesToWrite = framesAvailable;
            }
            // Number of bytes to consume from the source, in the application format.
            int32_t numBytes = getBytesPerFrame() * framesToWrite;
            // TODO handle volume scaling
            if (getFormat() == mDeviceFormat) {
                // Copy straight through.
                memcpy(wrappingBuffer.data[partIndex], source, numBytes);
            } else if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT
                       && mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                // Data conversion.
                AAudioConvert_floatToPcm16(
                        (const float *) source,
                        framesToWrite * getSamplesPerFrame(),
                        (int16_t *) wrappingBuffer.data[partIndex]);
            } else {
                // TODO handle more conversions
                ALOGE("AudioStreamInternal::writeNowWithConversion() unsupported formats: %d, %d",
                      getFormat(), mDeviceFormat);
                return AAUDIO_ERROR_UNEXPECTED_VALUE;
            }

            source += numBytes;
            framesLeft -= framesToWrite;
        }
        partIndex++;
    }
    int32_t framesWritten = numFrames - framesLeft;
    mAudioEndpoint.advanceWriteIndex(framesWritten);

    if (framesWritten > 0) {
        incrementFramesWritten(framesWritten);
    }
    // ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
    return framesWritten;
}

void AudioStreamInternal::processTimestamp(uint64_t position, int64_t time) {
    mClockModel.processTimestamp(position, time);
}

aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
    int32_t actualFrames = 0;
    aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(requestedFrames, &actualFrames);
    if (result < 0) {
        return result;
    } else {
        return (aaudio_result_t) actualFrames;
    }
}

int32_t AudioStreamInternal::getBufferSize() const
{
    return mAudioEndpoint.getBufferSizeInFrames();
}

int32_t AudioStreamInternal::getBufferCapacity() const
{
    return mAudioEndpoint.getBufferCapacityInFrames();
}

int32_t AudioStreamInternal::getFramesPerBurst() const
{
    return mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
}

int64_t AudioStreamInternal::getFramesRead()
{
    int64_t framesRead =
            mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
            + mFramesOffsetFromService;
    // Prevent retrograde motion.
    if (framesRead < mLastFramesRead) {
        framesRead = mLastFramesRead;
    } else {
        mLastFramesRead = framesRead;
    }
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
    return framesRead;
}

// TODO implement getTimestamp