/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AAudio"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <stdint.h>
#include <assert.h>

#include <binder/IServiceManager.h>

#include <aaudio/AAudio.h>
#include <utils/String16.h>

#include "AudioClock.h"
#include "AudioEndpointParcelable.h"
#include "binding/AAudioStreamRequest.h"
#include "binding/AAudioStreamConfiguration.h"
#include "binding/IAAudioService.h"
#include "binding/AAudioServiceMessage.h"
#include "fifo/FifoBuffer.h"

#include "core/AudioStreamBuilder.h"
#include "AudioStreamInternal.h"

#define LOG_TIMESTAMPS 0

using android::String16;
using android::IServiceManager;
using android::defaultServiceManager;
using android::interface_cast;
using android::Mutex;
using android::WrappingBuffer;

using namespace aaudio;

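// Minimum timeout (one second) for blocking stream operations, regardless of frame count.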
#define MIN_TIMEOUT_NANOS (1000 * AAUDIO_NANOS_PER_MILLISECOND)

// Wait at least this many times longer than the operation should take.
#define MIN_TIMEOUT_OPERATIONS 4

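// Log verbosely only from the client process; when this code runs inside the
// AAudio service (mInService == true) the ALOGD_IF messages below are suppressed.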
#define ALOG_CONDITION (mInService == false)

AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface &serviceInterface, bool inService)
        : AudioStream()
        , mClockModel()
        , mAudioEndpoint()
        , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
        , mFramesPerBurst(16)
        , mServiceInterface(serviceInterface)
        , mInService(inService)
{
}

AudioStreamInternal::~AudioStreamInternal() {
}

aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {

    aaudio_result_t result = AAUDIO_OK;
    AAudioStreamRequest request;
    AAudioStreamConfiguration configuration;

    result = AudioStream::open(builder);
    if (result < 0) {
        return result;
    }

    // We have to do volume scaling. So we prefer FLOAT format.
    if (getFormat() == AAUDIO_UNSPECIFIED) {
        setFormat(AAUDIO_FORMAT_PCM_FLOAT);
    }

    // Build the request to send to the server.
    request.setUserId(getuid());
    request.setProcessId(getpid());
    request.setDirection(getDirection());

    request.getConfiguration().setDeviceId(getDeviceId());
    request.getConfiguration().setSampleRate(getSampleRate());
    request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
    request.getConfiguration().setAudioFormat(getFormat());
    aaudio_sharing_mode_t sharingMode = getSharingMode();
    ALOGE("AudioStreamInternal.open(): sharingMode %d", sharingMode);
    request.getConfiguration().setSharingMode(sharingMode);
    request.getConfiguration().setBufferCapacity(builder.getBufferCapacity());

    mServiceStreamHandle = mServiceInterface.openStream(request, configuration);
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.open(): openStream returned mServiceStreamHandle = 0x%08X",
             (unsigned int)mServiceStreamHandle);
    if (mServiceStreamHandle < 0) {
        result = mServiceStreamHandle;
        ALOGE("AudioStreamInternal.open(): openStream() returned %d", result);
    } else {
        result = configuration.validate();
        if (result != AAUDIO_OK) {
            close();
            return result;
        }
        // Save results of the open.
        setSampleRate(configuration.getSampleRate());
        setSamplesPerFrame(configuration.getSamplesPerFrame());
        setDeviceId(configuration.getDeviceId());

        // Save device format so we can do format conversion and volume scaling together.
        mDeviceFormat = configuration.getAudioFormat();

        result = mServiceInterface.getStreamDescription(mServiceStreamHandle, mEndPointParcelable);
        ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.open(): getStreamDescriptor(0x%08X) returns %d",
                 mServiceStreamHandle, result);
        if (result != AAUDIO_OK) {
            ALOGE("AudioStreamInternal.open(): getStreamDescriptor returns %d", result);
            mServiceInterface.closeStream(mServiceStreamHandle);
            return result;
        }

        // Resolve parcelable into a descriptor.
        result = mEndPointParcelable.resolve(&mEndpointDescriptor);
        if (result != AAUDIO_OK) {
            ALOGE("AudioStreamInternal.open(): resolve() returns %d", result);
            mServiceInterface.closeStream(mServiceStreamHandle);
            return result;
        }

        // Configure endpoint based on descriptor.
        mAudioEndpoint.configure(&mEndpointDescriptor);

        mFramesPerBurst = mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
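        // Sanity-check the burst size and buffer capacity reported by the service.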
        assert(mFramesPerBurst >= 16);
        assert(mEndpointDescriptor.downDataQueueDescriptor.capacityInFrames < 10 * 1024);

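        // Prime the clock model that is used to estimate the service's read
        // position between timestamp messages.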
        mClockModel.setSampleRate(getSampleRate());
        mClockModel.setFramesPerBurst(mFramesPerBurst);

        if (getDataCallbackProc()) {
            mCallbackFrames = builder.getFramesPerDataCallback();
            if (mCallbackFrames > getBufferCapacity() / 2) {
                ALOGE("AudioStreamInternal.open(): framesPerCallback too large");
                mServiceInterface.closeStream(mServiceStreamHandle);
                return AAUDIO_ERROR_OUT_OF_RANGE;

            } else if (mCallbackFrames < 0) {
                ALOGE("AudioStreamInternal.open(): framesPerCallback negative");
                mServiceInterface.closeStream(mServiceStreamHandle);
                return AAUDIO_ERROR_OUT_OF_RANGE;

            }
            if (mCallbackFrames == AAUDIO_UNSPECIFIED) {
                mCallbackFrames = mFramesPerBurst;
            }

            int32_t bytesPerFrame = getSamplesPerFrame()
                    * AAudioConvert_formatToSizeInBytes(getFormat());
            int32_t callbackBufferSize = mCallbackFrames * bytesPerFrame;
            mCallbackBuffer = new uint8_t[callbackBufferSize];
        }

        setState(AAUDIO_STREAM_STATE_OPEN);
    }
    return result;
}

aaudio_result_t AudioStreamInternal::close() {
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X", mServiceStreamHandle);
    if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
        aaudio_handle_t serviceStreamHandle = mServiceStreamHandle;
        mServiceStreamHandle = AAUDIO_HANDLE_INVALID;

        mServiceInterface.closeStream(serviceStreamHandle);
        delete[] mCallbackBuffer;
        return mEndPointParcelable.close();
    } else {
        return AAUDIO_ERROR_INVALID_HANDLE;
    }
}


// Render audio in the application callback and then write the data to the stream.
void *AudioStreamInternal::callbackLoop() {
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    AAudioStream_dataCallback appCallback = getDataCallbackProc();
    if (appCallback == nullptr) return NULL;

    // result might be a frame count
    while (mCallbackEnabled.load() && isPlaying() && (result >= 0)) {
        // Call application using the AAudio callback interface.
        callbackResult = (*appCallback)(
                (AAudioStream *) this,
                getDataCallbackUserData(),
                mCallbackBuffer,
                mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
            // Write audio data to stream.
            int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

            // This is a BLOCKING WRITE!
            result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
            if (result != mCallbackFrames) {
                ALOGE("AudioStreamInternal(): callbackLoop: write() returned %d", result);
                if (result >= 0) {
                    // Only wrote some of the frames requested. Must have timed out.
                    result = AAUDIO_ERROR_TIMEOUT;
                }
                if (getErrorCallbackProc() != nullptr) {
                    (*getErrorCallbackProc())(
                            (AAudioStream *) this,
                            getErrorCallbackUserData(),
                            result);
                }
                break;
            }
        } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("AudioStreamInternal(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
            break;
        }
    }

    ALOGD("AudioStreamInternal(): callbackLoop() exiting, result = %d, isPlaying() = %d",
          result, (int) isPlaying());
    return NULL; // TODO review
}

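// Thread entry point passed to createThread(); it simply runs the stream's callback loop.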
static void *aaudio_callback_thread_proc(void *context)
{
    AudioStreamInternal *stream = (AudioStreamInternal *)context;
    //LOGD("AudioStreamInternal(): oboe_callback_thread, stream = %p", stream);
    if (stream != NULL) {
        return stream->callbackLoop();
    } else {
        return NULL;
    }
}

aaudio_result_t AudioStreamInternal::requestStart()
{
    int64_t startTime;
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): start()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }

    startTime = AudioClock::getNanoseconds();
    mClockModel.start(startTime);
    processTimestamp(0, startTime);
    setState(AAUDIO_STREAM_STATE_STARTING);
    aaudio_result_t result = mServiceInterface.startStream(mServiceStreamHandle);

    if (result == AAUDIO_OK && getDataCallbackProc() != nullptr) {
        // Launch the callback loop thread.
        int64_t periodNanos = mCallbackFrames
                              * AAUDIO_NANOS_PER_SECOND
                              / getSampleRate();
        mCallbackEnabled.store(true);
        result = createThread(periodNanos, aaudio_callback_thread_proc, this);
    }
    return result;
}

int64_t AudioStreamInternal::calculateReasonableTimeout(int32_t framesPerOperation) {

    // Wait for at least a second or some number of callbacks to join the thread.
    int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS * framesPerOperation * AAUDIO_NANOS_PER_SECOND)
                                 / getSampleRate();
    if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) { // arbitrary number of seconds
        timeoutNanoseconds = MIN_TIMEOUT_NANOS;
    }
    return timeoutNanoseconds;
}

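// Ask the callback thread to stop, then wait for it to exit,
// using calculateReasonableTimeout() as the join timeout.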
aaudio_result_t AudioStreamInternal::stopCallback()
{
    if (isDataCallbackActive()) {
        mCallbackEnabled.store(false);
        return joinThread(NULL, calculateReasonableTimeout(mCallbackFrames));
    } else {
        return AAUDIO_OK;
    }
}

aaudio_result_t AudioStreamInternal::requestPauseInternal()
{
    ALOGD("AudioStreamInternal(): pause()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }

    mClockModel.stop(AudioClock::getNanoseconds());
    setState(AAUDIO_STREAM_STATE_PAUSING);
    return mServiceInterface.pauseStream(mServiceStreamHandle);
}

aaudio_result_t AudioStreamInternal::requestPause()
{
    aaudio_result_t result = stopCallback();
    if (result != AAUDIO_OK) {
        return result;
    }
    return requestPauseInternal();
}

aaudio_result_t AudioStreamInternal::requestFlush() {
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): flush()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }

    setState(AAUDIO_STREAM_STATE_FLUSHING);
    return mServiceInterface.flushStream(mServiceStreamHandle);
}

void AudioStreamInternal::onFlushFromServer() {
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): onFlushFromServer()");
    int64_t readCounter = mAudioEndpoint.getDownDataReadCounter();
    int64_t writeCounter = mAudioEndpoint.getDownDataWriteCounter();
    // Bump offset so caller does not see the retrograde motion in getFramesRead().
    int64_t framesFlushed = writeCounter - readCounter;
    mFramesOffsetFromService += framesFlushed;
    // Flush written frames by forcing writeCounter to readCounter.
    // This is because we cannot move the read counter in the hardware.
    mAudioEndpoint.setDownDataWriteCounter(readCounter);
}

aaudio_result_t AudioStreamInternal::requestStop()
{
    // TODO better implementation of requestStop()
    aaudio_result_t result = requestPause();
    if (result == AAUDIO_OK) {
        aaudio_stream_state_t state;
        result = waitForStateChange(AAUDIO_STREAM_STATE_PAUSING,
                                    &state,
                                    500 * AAUDIO_NANOS_PER_MILLISECOND); // TODO temporary code
        if (result == AAUDIO_OK) {
            result = requestFlush();
        }
    }
    return result;
}

aaudio_result_t AudioStreamInternal::registerThread() {
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): registerThread()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    return mServiceInterface.registerAudioThread(mServiceStreamHandle,
                                                 getpid(),
                                                 gettid(),
                                                 getPeriodNanoseconds());
}

aaudio_result_t AudioStreamInternal::unregisterThread() {
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): unregisterThread()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    return mServiceInterface.unregisterAudioThread(mServiceStreamHandle, getpid(), gettid());
}

aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
                                                  int64_t *framePosition,
                                                  int64_t *timeNanoseconds) {
    // TODO implement using real HAL
    int64_t time = AudioClock::getNanoseconds();
    *framePosition = mClockModel.convertTimeToPosition(time);
    *timeNanoseconds = time + (10 * AAUDIO_NANOS_PER_MILLISECOND); // Fake hardware delay
    return AAUDIO_OK;
}

aaudio_result_t AudioStreamInternal::updateStateWhileWaiting() {
    if (isDataCallbackActive()) {
        return AAUDIO_OK; // state is getting updated by the callback thread read/write call
    }
    return processCommands();
}

#if LOG_TIMESTAMPS
static void AudioStreamInternal_LogTimestamp(AAudioServiceMessage &command) {
    static int64_t oldPosition = 0;
    static int64_t oldTime = 0;
    int64_t framePosition = command.timestamp.position;
    int64_t nanoTime = command.timestamp.timestamp;
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu",
             (long long) framePosition,
             (long long) nanoTime);
    int64_t nanosDelta = nanoTime - oldTime;
    if (nanosDelta > 0 && oldTime > 0) {
        int64_t framesDelta = framePosition - oldPosition;
        int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
        ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
        ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
        ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate);
    }
    oldPosition = framePosition;
    oldTime = nanoTime;
}
#endif

aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) {
    int64_t framePosition = 0;
#if LOG_TIMESTAMPS
    AudioStreamInternal_LogTimestamp(*message);
#endif
    framePosition = message->timestamp.position;
    processTimestamp(framePosition, message->timestamp.timestamp);
    return AAUDIO_OK;
}

aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) {
    aaudio_result_t result = AAUDIO_OK;
    ALOGD_IF(ALOG_CONDITION, "processCommands() got event %d", message->event.event);
    switch (message->event.event) {
        case AAUDIO_SERVICE_EVENT_STARTED:
            ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
            setState(AAUDIO_STREAM_STATE_STARTED);
            break;
        case AAUDIO_SERVICE_EVENT_PAUSED:
            ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
            setState(AAUDIO_STREAM_STATE_PAUSED);
            break;
        case AAUDIO_SERVICE_EVENT_FLUSHED:
            ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
            setState(AAUDIO_STREAM_STATE_FLUSHED);
            onFlushFromServer();
            break;
        case AAUDIO_SERVICE_EVENT_CLOSED:
            ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
            setState(AAUDIO_STREAM_STATE_CLOSED);
            break;
        case AAUDIO_SERVICE_EVENT_DISCONNECTED:
            result = AAUDIO_ERROR_DISCONNECTED;
            setState(AAUDIO_STREAM_STATE_DISCONNECTED);
            ALOGW("WARNING - processCommands() AAUDIO_SERVICE_EVENT_DISCONNECTED");
            break;
        case AAUDIO_SERVICE_EVENT_VOLUME:
            mVolume = message->event.dataDouble;
            ALOGD_IF(ALOG_CONDITION, "processCommands() AAUDIO_SERVICE_EVENT_VOLUME %f", mVolume);
            break;
        default:
            ALOGW("WARNING - processCommands() Unrecognized event = %d",
                  (int) message->event.event);
            break;
    }
    return result;
}

// Process all the commands coming from the server.
aaudio_result_t AudioStreamInternal::processCommands() {
    aaudio_result_t result = AAUDIO_OK;

    while (result == AAUDIO_OK) {
        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::processCommands() - looping, %d", result);
        AAudioServiceMessage message;
        if (mAudioEndpoint.readUpCommand(&message) != 1) {
            break; // no command this time, no problem
        }
        switch (message.what) {
        case AAudioServiceMessage::code::TIMESTAMP:
            result = onTimestampFromServer(&message);
            break;

        case AAudioServiceMessage::code::EVENT:
            result = onEventFromServer(&message);
            break;

        default:
            ALOGW("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
                  (int) message.what);
            result = AAUDIO_ERROR_UNEXPECTED_VALUE;
            break;
        }
    }
    return result;
}

// Write the data, block if needed and timeoutNanoseconds > 0
aaudio_result_t AudioStreamInternal::write(const void *buffer, int32_t numFrames,
                                           int64_t timeoutNanoseconds)
{
    aaudio_result_t result = AAUDIO_OK;
    int32_t loopCount = 0;
    uint8_t* source = (uint8_t*) buffer;
    int64_t currentTimeNanos = AudioClock::getNanoseconds();
    int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
    int32_t framesLeft = numFrames;
    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write(%p, %d) at time %08llu , mState = %s",
    //         buffer, numFrames, (unsigned long long) currentTimeNanos,
    //         AAudio_convertStreamStateToText(getState()));

    // Write until all the data has been written or until a timeout occurs.
    while (framesLeft > 0) {
        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write() loop: framesLeft = %d, loopCount = %d =====",
        //         framesLeft, loopCount++);
        // The call to writeNow() will not block. It will just write as much as it can.
        int64_t wakeTimeNanos = 0;
        aaudio_result_t framesWritten = writeNow(source, framesLeft,
                                                 currentTimeNanos, &wakeTimeNanos);
        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write() loop: framesWritten = %d", framesWritten);
        if (framesWritten < 0) {
            ALOGE("AudioStreamInternal::write() loop: writeNow returned %d", framesWritten);
            result = framesWritten;
            break;
        }
        framesLeft -= (int32_t) framesWritten;
        source += framesWritten * getBytesPerFrame();

        // Should we block?
        if (timeoutNanoseconds == 0) {
            break; // don't block
        } else if (framesLeft > 0) {
            //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal:: original wakeTimeNanos %lld", (long long) wakeTimeNanos);
            // clip the wake time to something reasonable
            if (wakeTimeNanos < currentTimeNanos) {
                wakeTimeNanos = currentTimeNanos;
            }
            if (wakeTimeNanos > deadlineNanos) {
                // If we time out, just return the framesWritten so far.
                ALOGE("AudioStreamInternal::write(): timed out after %lld nanos",
                      (long long) timeoutNanoseconds);
                break;
            }

            //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal:: sleep until %lld, dur = %lld", (long long) wakeTimeNanos,
            //         (long long) (wakeTimeNanos - currentTimeNanos));
            AudioClock::sleepForNanos(wakeTimeNanos - currentTimeNanos);
            currentTimeNanos = AudioClock::getNanoseconds();
        }
    }

    // return error or framesWritten
    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write() result = %d, framesLeft = %d, #%d",
    //         result, framesLeft, loopCount);
    (void) loopCount;
    return (result < 0) ? result : numFrames - framesLeft;
}

// Write as much data as we can without blocking.
aaudio_result_t AudioStreamInternal::writeNow(const void *buffer, int32_t numFrames,
                                              int64_t currentNanoTime, int64_t *wakeTimePtr) {

    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow(%p) - enter", buffer);
    {
        aaudio_result_t result = processCommands();
        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - processCommands() returned %d", result);
        if (result != AAUDIO_OK) {
            return result;
        }
    }

    if (mAudioEndpoint.isOutputFreeRunning()) {
        ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - update read counter");
        // Update data queue based on the timing model.
        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        mAudioEndpoint.setDownDataReadCounter(estimatedReadCounter);
    }
    // TODO else query from endpoint cuz set by actual reader, maybe

    // If the read index passed the write index then consider it an underrun.
    if (mAudioEndpoint.getFullFramesAvailable() < 0) {
        mXRunCount++;
    }

    // Write some data to the buffer.
    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - writeNowWithConversion(%d)", numFrames);
    int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
    //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
    //         numFrames, framesWritten);

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesWritten >= 0) {
        // By default wake up a few milliseconds from now. // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - wakeTime based on %s",
        //         AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                if (framesWritten != 0) {
                    // Don't wait to write more data. Just prime the buffer.
                    wakeTime = currentNanoTime;
                }
                break;
            case AAUDIO_STREAM_STATE_STARTED:   // When do we expect the next read burst to occur?
                {
                    uint32_t burstSize = mFramesPerBurst;
                    if (burstSize < 32) {
                        burstSize = 32; // TODO review
                    }

                    uint64_t nextReadPosition = mAudioEndpoint.getDownDataReadCounter() + burstSize;
                    wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
                }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;

    }
//    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
//            (unsigned long long)currentNanoTime,
//            (unsigned long long)mAudioEndpoint.getDownDataReadCounter(),
//            (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
    return framesWritten;
}


// TODO this function needs a major cleanup.
aaudio_result_t AudioStreamInternal::writeNowWithConversion(const void *buffer,
                                                            int32_t numFrames) {
    // ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNowWithConversion(%p, %d)", buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *source = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    // The empty space may be returned in two pieces because the FIFO wraps.
    mAudioEndpoint.getEmptyRoomAvailable(&wrappingBuffer);

    // Write the data in one or two parts.
    int partIndex = 0;
    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
        int32_t framesToWrite = framesLeft;
        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable > 0) {
            if (framesToWrite > framesAvailable) {
                framesToWrite = framesAvailable;
            }
            int32_t numBytes = getBytesPerFrame() * framesToWrite;
            // TODO handle volume scaling
            if (getFormat() == mDeviceFormat) {
                // Copy straight through.
                memcpy(wrappingBuffer.data[partIndex], source, numBytes);
            } else if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT
                       && mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                // Data conversion.
                AAudioConvert_floatToPcm16(
                        (const float *) source,
                        framesToWrite * getSamplesPerFrame(),
                        (int16_t *) wrappingBuffer.data[partIndex]);
            } else {
                // TODO handle more conversions
                ALOGE("AudioStreamInternal::writeNowWithConversion() unsupported formats: %d, %d",
                      getFormat(), mDeviceFormat);
                return AAUDIO_ERROR_UNEXPECTED_VALUE;
            }

            source += numBytes;
            framesLeft -= framesToWrite;
        }
        partIndex++;
    }
    int32_t framesWritten = numFrames - framesLeft;
    mAudioEndpoint.advanceWriteIndex(framesWritten);

    if (framesWritten > 0) {
        incrementFramesWritten(framesWritten);
    }
    // ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
    return framesWritten;
}

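// Feed a (position, time) pair reported by the service into the clock model.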
void AudioStreamInternal::processTimestamp(uint64_t position, int64_t time) {
    mClockModel.processTimestamp(position, time);
}

aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
    int32_t actualFrames = 0;
    aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(requestedFrames, &actualFrames);
    if (result < 0) {
        return result;
    } else {
        return (aaudio_result_t) actualFrames;
    }
}

int32_t AudioStreamInternal::getBufferSize() const
{
    return mAudioEndpoint.getBufferSizeInFrames();
}

int32_t AudioStreamInternal::getBufferCapacity() const
{
    return mAudioEndpoint.getBufferCapacityInFrames();
}

int32_t AudioStreamInternal::getFramesPerBurst() const
{
    return mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
}

int64_t AudioStreamInternal::getFramesRead()
{
    int64_t framesRead =
            mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
            + mFramesOffsetFromService;
    // Prevent retrograde motion.
    if (framesRead < mLastFramesRead) {
        framesRead = mLastFramesRead;
    } else {
        mLastFramesRead = framesRead;
    }
    ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
    return framesRead;
}

// TODO implement getTimestamp