/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AAudio"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#define ATRACE_TAG ATRACE_TAG_AUDIO

#include <stdint.h>
#include <assert.h>

#include <binder/IServiceManager.h>

#include <aaudio/AAudio.h>
#include <utils/String16.h>
#include <utils/Trace.h>

#include "AudioClock.h"
#include "AudioEndpointParcelable.h"
#include "binding/AAudioStreamRequest.h"
#include "binding/AAudioStreamConfiguration.h"
#include "binding/IAAudioService.h"
#include "binding/AAudioServiceMessage.h"
#include "core/AudioStreamBuilder.h"
#include "fifo/FifoBuffer.h"
#include "utility/LinearRamp.h"

#include "AudioStreamInternal.h"

#define LOG_TIMESTAMPS 0

using android::String16;
using android::Mutex;
using android::WrappingBuffer;

using namespace aaudio;

#define MIN_TIMEOUT_NANOS (1000 * AAUDIO_NANOS_PER_MILLISECOND)

// Wait at least this many times longer than the operation should take.
#define MIN_TIMEOUT_OPERATIONS 4

//static int64_t s_logCounter = 0;
//#define MYLOG_CONDITION (mInService == true && s_logCounter++ < 500)
//#define MYLOG_CONDITION (s_logCounter++ < 500000)
#define MYLOG_CONDITION (1)

AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface &serviceInterface, bool inService)
        : AudioStream()
        , mClockModel()
        , mAudioEndpoint()
        , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
        , mFramesPerBurst(16)
        , mServiceInterface(serviceInterface)
        , mInService(inService) {
}

AudioStreamInternal::~AudioStreamInternal() {
}

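// Overview of the open() flow below: the client builds an AAudioStreamRequest from the
// builder settings, sends it to the AAudio service via mServiceInterface.openStream(),
// then fetches the stream description, resolves it into an endpoint descriptor, and
// configures mAudioEndpoint (the shared data/command queues) from that descriptor.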
aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {

    aaudio_result_t result = AAUDIO_OK;
    AAudioStreamRequest request;
    AAudioStreamConfiguration configuration;

    result = AudioStream::open(builder);
    if (result < 0) {
        return result;
    }

    // We have to do volume scaling. So we prefer FLOAT format.
    if (getFormat() == AAUDIO_UNSPECIFIED) {
        setFormat(AAUDIO_FORMAT_PCM_FLOAT);
    }
    // Request FLOAT for the shared mixer.
    request.getConfiguration().setAudioFormat(AAUDIO_FORMAT_PCM_FLOAT);

    // Build the request to send to the server.
    request.setUserId(getuid());
    request.setProcessId(getpid());
    request.setDirection(getDirection());
    request.setSharingModeMatchRequired(isSharingModeMatchRequired());

    request.getConfiguration().setDeviceId(getDeviceId());
    request.getConfiguration().setSampleRate(getSampleRate());
    request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
    request.getConfiguration().setSharingMode(getSharingMode());

    request.getConfiguration().setBufferCapacity(builder.getBufferCapacity());

    mServiceStreamHandle = mServiceInterface.openStream(request, configuration);
    if (mServiceStreamHandle < 0) {
        result = mServiceStreamHandle;
        ALOGE("AudioStreamInternal.open(): %s openStream() returned %d", getLocationName(), result);
    } else {
        result = configuration.validate();
        if (result != AAUDIO_OK) {
            close();
            return result;
        }
        // Save results of the open.
        setSampleRate(configuration.getSampleRate());
        setSamplesPerFrame(configuration.getSamplesPerFrame());
        setDeviceId(configuration.getDeviceId());

        // Save device format so we can do format conversion and volume scaling together.
        mDeviceFormat = configuration.getAudioFormat();

        result = mServiceInterface.getStreamDescription(mServiceStreamHandle, mEndPointParcelable);
        if (result != AAUDIO_OK) {
            ALOGE("AudioStreamInternal.open(): %s getStreamDescription() returned %d",
                  getLocationName(), result);
            mServiceInterface.closeStream(mServiceStreamHandle);
            return result;
        }

        // Resolve parcelable into a descriptor.
        result = mEndPointParcelable.resolve(&mEndpointDescriptor);
        if (result != AAUDIO_OK) {
            ALOGE("AudioStreamInternal.open(): resolve() returned %d", result);
            mServiceInterface.closeStream(mServiceStreamHandle);
            return result;
        }

        // Configure endpoint based on descriptor.
        mAudioEndpoint.configure(&mEndpointDescriptor);

        mFramesPerBurst = mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
        int32_t capacity = mEndpointDescriptor.downDataQueueDescriptor.capacityInFrames;

        ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal.open() %s framesPerBurst = %d, capacity = %d",
                 getLocationName(), mFramesPerBurst, capacity);
        // Validate result from server.
        if (mFramesPerBurst < 16 || mFramesPerBurst > 16 * 1024) {
            ALOGE("AudioStreamInternal.open(): framesPerBurst out of range = %d", mFramesPerBurst);
            return AAUDIO_ERROR_OUT_OF_RANGE;
        }
        if (capacity < mFramesPerBurst || capacity > 32 * 1024) {
            ALOGE("AudioStreamInternal.open(): bufferCapacity out of range = %d", capacity);
            return AAUDIO_ERROR_OUT_OF_RANGE;
        }

        mClockModel.setSampleRate(getSampleRate());
        mClockModel.setFramesPerBurst(mFramesPerBurst);

        if (getDataCallbackProc()) {
            mCallbackFrames = builder.getFramesPerDataCallback();
            if (mCallbackFrames > getBufferCapacity() / 2) {
                ALOGE("AudioStreamInternal.open(): framesPerCallback too large = %d, capacity = %d",
                      mCallbackFrames, getBufferCapacity());
                mServiceInterface.closeStream(mServiceStreamHandle);
                return AAUDIO_ERROR_OUT_OF_RANGE;

            } else if (mCallbackFrames < 0) {
                ALOGE("AudioStreamInternal.open(): framesPerCallback negative");
                mServiceInterface.closeStream(mServiceStreamHandle);
                return AAUDIO_ERROR_OUT_OF_RANGE;

            }
            if (mCallbackFrames == AAUDIO_UNSPECIFIED) {
                mCallbackFrames = mFramesPerBurst;
            }

            int32_t bytesPerFrame = getSamplesPerFrame()
                                    * AAudioConvert_formatToSizeInBytes(getFormat());
            int32_t callbackBufferSize = mCallbackFrames * bytesPerFrame;
            mCallbackBuffer = new uint8_t[callbackBufferSize];
        }

        setState(AAUDIO_STREAM_STATE_OPEN);
    }
    return result;
}

aaudio_result_t AudioStreamInternal::close() {
    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X",
             mServiceStreamHandle);
    if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
        // Don't close a stream while it is running.
        aaudio_stream_state_t currentState = getState();
        if (isPlaying()) {
            requestStop();
            aaudio_stream_state_t nextState;
            int64_t timeoutNanoseconds = MIN_TIMEOUT_NANOS;
            aaudio_result_t result = waitForStateChange(currentState, &nextState,
                                                        timeoutNanoseconds);
            if (result != AAUDIO_OK) {
                ALOGE("AudioStreamInternal::close() waitForStateChange() returned %d %s",
                      result, AAudio_convertResultToText(result));
            }
        }
        aaudio_handle_t serviceStreamHandle = mServiceStreamHandle;
        mServiceStreamHandle = AAUDIO_HANDLE_INVALID;

        mServiceInterface.closeStream(serviceStreamHandle);
        delete[] mCallbackBuffer;
        mCallbackBuffer = nullptr;
        return mEndPointParcelable.close();
    } else {
        return AAUDIO_ERROR_INVALID_HANDLE;
    }
}

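// Callback streams use the loop below: the application callback renders into
// mCallbackBuffer, and the blocking write() call paces the loop at the rate the
// endpoint consumes data. The loop runs on a thread created in requestStart() and
// exits when the callback returns STOP, the stream stops playing, or a write fails.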

// Render audio in the application callback and then write the data to the stream.
void *AudioStreamInternal::callbackLoop() {
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    AAudioStream_dataCallback appCallback = getDataCallbackProc();
    if (appCallback == nullptr) return NULL;

    // result might be a frame count
    while (mCallbackEnabled.load() && isPlaying() && (result >= 0)) {
        // Call application using the AAudio callback interface.
        callbackResult = (*appCallback)(
                (AAudioStream *) this,
                getDataCallbackUserData(),
                mCallbackBuffer,
                mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
            // Write audio data to stream.
            int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

            // This is a BLOCKING WRITE!
            result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
            if (result != mCallbackFrames) {
                ALOGE("AudioStreamInternal(): callbackLoop: write() returned %d", result);
                if (result >= 0) {
                    // Only wrote some of the frames requested. Must have timed out.
                    result = AAUDIO_ERROR_TIMEOUT;
                }
                if (getErrorCallbackProc() != nullptr) {
                    (*getErrorCallbackProc())(
                            (AAudioStream *) this,
                            getErrorCallbackUserData(),
                            result);
                }
                break;
            }
        } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("AudioStreamInternal(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
            break;
        }
    }

    ALOGD("AudioStreamInternal(): callbackLoop() exiting, result = %d, isPlaying() = %d",
          result, (int) isPlaying());
    return NULL;
}

static void *aaudio_callback_thread_proc(void *context)
{
    AudioStreamInternal *stream = (AudioStreamInternal *)context;
    //LOGD("AudioStreamInternal(): oboe_callback_thread, stream = %p", stream);
    if (stream != NULL) {
        return stream->callbackLoop();
    } else {
        return NULL;
    }
}

aaudio_result_t AudioStreamInternal::requestStart()
{
    int64_t startTime;
    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): start()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }

    startTime = AudioClock::getNanoseconds();
    mClockModel.start(startTime);
    processTimestamp(0, startTime);
    setState(AAUDIO_STREAM_STATE_STARTING);
    aaudio_result_t result = mServiceInterface.startStream(mServiceStreamHandle);

    if (result == AAUDIO_OK && getDataCallbackProc() != nullptr) {
        // Launch the callback loop thread.
        int64_t periodNanos = mCallbackFrames
                              * AAUDIO_NANOS_PER_SECOND
                              / getSampleRate();
        mCallbackEnabled.store(true);
        result = createThread(periodNanos, aaudio_callback_thread_proc, this);
    }
    return result;
}

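// calculateReasonableTimeout() below allows MIN_TIMEOUT_OPERATIONS times the nominal
// duration of the operation, with a floor of MIN_TIMEOUT_NANOS. Worked example
// (illustrative values): 192 frames at 48000 Hz is 4 ms per operation, times 4
// operations is 16 ms, which is below the 1 second floor, so 1 second is returned.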
int64_t AudioStreamInternal::calculateReasonableTimeout(int32_t framesPerOperation) {

    // Wait for at least a second or some number of callbacks to join the thread.
    int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS
                                  * framesPerOperation
                                  * AAUDIO_NANOS_PER_SECOND)
                                  / getSampleRate();
    if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) { // arbitrary number of seconds
        timeoutNanoseconds = MIN_TIMEOUT_NANOS;
    }
    return timeoutNanoseconds;
}

aaudio_result_t AudioStreamInternal::stopCallback()
{
    if (isDataCallbackActive()) {
        mCallbackEnabled.store(false);
        return joinThread(NULL, calculateReasonableTimeout(mCallbackFrames));
    } else {
        return AAUDIO_OK;
    }
}

aaudio_result_t AudioStreamInternal::requestPauseInternal()
{
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGE("AudioStreamInternal(): requestPauseInternal() mServiceStreamHandle invalid = 0x%08X",
              mServiceStreamHandle);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    mClockModel.stop(AudioClock::getNanoseconds());
    setState(AAUDIO_STREAM_STATE_PAUSING);
    return mServiceInterface.pauseStream(mServiceStreamHandle);
}

aaudio_result_t AudioStreamInternal::requestPause()
{
    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): %s requestPause()", getLocationName());
    aaudio_result_t result = stopCallback();
    if (result != AAUDIO_OK) {
        return result;
    }
    result = requestPauseInternal();
    ALOGD("AudioStreamInternal(): requestPause() returns %d", result);
    return result;
}

aaudio_result_t AudioStreamInternal::requestFlush() {
    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): requestFlush()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGE("AudioStreamInternal(): requestFlush() mServiceStreamHandle invalid = 0x%08X",
              mServiceStreamHandle);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    setState(AAUDIO_STREAM_STATE_FLUSHING);
    return mServiceInterface.flushStream(mServiceStreamHandle);
}

void AudioStreamInternal::onFlushFromServer() {
    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): onFlushFromServer()");
    int64_t readCounter = mAudioEndpoint.getDownDataReadCounter();
    int64_t writeCounter = mAudioEndpoint.getDownDataWriteCounter();

    // Bump offset so caller does not see the retrograde motion in getFramesRead().
    int64_t framesFlushed = writeCounter - readCounter;
    mFramesOffsetFromService += framesFlushed;

    // Flush written frames by forcing writeCounter to readCounter.
    // This is because we cannot move the read counter in the hardware.
    mAudioEndpoint.setDownDataWriteCounter(readCounter);
}

aaudio_result_t AudioStreamInternal::requestStopInternal()
{
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGE("AudioStreamInternal(): requestStopInternal() mServiceStreamHandle invalid = 0x%08X",
              mServiceStreamHandle);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    mClockModel.stop(AudioClock::getNanoseconds());
    setState(AAUDIO_STREAM_STATE_STOPPING);
    return mServiceInterface.stopStream(mServiceStreamHandle);
}

aaudio_result_t AudioStreamInternal::requestStop()
{
    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): %s requestStop()", getLocationName());
    aaudio_result_t result = stopCallback();
    if (result != AAUDIO_OK) {
        return result;
    }
    result = requestStopInternal();
    ALOGD("AudioStreamInternal(): requestStop() returns %d", result);
    return result;
}

aaudio_result_t AudioStreamInternal::registerThread() {
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    return mServiceInterface.registerAudioThread(mServiceStreamHandle,
                                                 getpid(),
                                                 gettid(),
                                                 getPeriodNanoseconds());
}

aaudio_result_t AudioStreamInternal::unregisterThread() {
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    return mServiceInterface.unregisterAudioThread(mServiceStreamHandle, getpid(), gettid());
}

aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
                                                  int64_t *framePosition,
                                                  int64_t *timeNanoseconds) {
    // TODO Generate in server and pass to client. Return latest.
    int64_t time = AudioClock::getNanoseconds();
    *framePosition = mClockModel.convertTimeToPosition(time);
    *timeNanoseconds = time + (10 * AAUDIO_NANOS_PER_MILLISECOND); // Fake hardware delay
    return AAUDIO_OK;
}

aaudio_result_t AudioStreamInternal::updateStateWhileWaiting() {
    if (isDataCallbackActive()) {
        return AAUDIO_OK; // state is getting updated by the callback thread read/write call
    }
    return processCommands();
}

#if LOG_TIMESTAMPS
static void AudioStreamInternal_LogTimestamp(AAudioServiceMessage &command) {
    static int64_t oldPosition = 0;
    static int64_t oldTime = 0;
    int64_t framePosition = command.timestamp.position;
    int64_t nanoTime = command.timestamp.timestamp;
    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu",
             (long long) framePosition,
             (long long) nanoTime);
    int64_t nanosDelta = nanoTime - oldTime;
    if (nanosDelta > 0 && oldTime > 0) {
        int64_t framesDelta = framePosition - oldPosition;
        int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
        ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
        ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
        ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate);
    }
    oldPosition = framePosition;
    oldTime = nanoTime;
}
#endif

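// Timestamps sent by the service feed the clock model (mClockModel), which the client
// then uses to estimate the service-side read position in writeNow() and to pick
// wake-up times for the blocking write() loop.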
aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) {
    int64_t framePosition = 0;
#if LOG_TIMESTAMPS
    AudioStreamInternal_LogTimestamp(*message);
#endif
    framePosition = message->timestamp.position;
    processTimestamp(framePosition, message->timestamp.timestamp);
    return AAUDIO_OK;
}

aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) {
    aaudio_result_t result = AAUDIO_OK;
    ALOGD_IF(MYLOG_CONDITION, "processCommands() got event %d", message->event.event);
    switch (message->event.event) {
        case AAUDIO_SERVICE_EVENT_STARTED:
            ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
            setState(AAUDIO_STREAM_STATE_STARTED);
            break;
        case AAUDIO_SERVICE_EVENT_PAUSED:
            ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
            setState(AAUDIO_STREAM_STATE_PAUSED);
            break;
        case AAUDIO_SERVICE_EVENT_STOPPED:
            ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STOPPED");
            setState(AAUDIO_STREAM_STATE_STOPPED);
            break;
        case AAUDIO_SERVICE_EVENT_FLUSHED:
            ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
            setState(AAUDIO_STREAM_STATE_FLUSHED);
            onFlushFromServer();
            break;
        case AAUDIO_SERVICE_EVENT_CLOSED:
            ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
            setState(AAUDIO_STREAM_STATE_CLOSED);
            break;
        case AAUDIO_SERVICE_EVENT_DISCONNECTED:
            result = AAUDIO_ERROR_DISCONNECTED;
            setState(AAUDIO_STREAM_STATE_DISCONNECTED);
            ALOGW("WARNING - processCommands() AAUDIO_SERVICE_EVENT_DISCONNECTED");
            break;
        case AAUDIO_SERVICE_EVENT_VOLUME:
            mVolumeRamp.setTarget((float) message->event.dataDouble);
            ALOGD_IF(MYLOG_CONDITION, "processCommands() AAUDIO_SERVICE_EVENT_VOLUME %f",
                     message->event.dataDouble);
            break;
        default:
            ALOGW("WARNING - processCommands() Unrecognized event = %d",
                  (int) message->event.event);
            break;
    }
    return result;
}

// Process all the commands coming from the server.
aaudio_result_t AudioStreamInternal::processCommands() {
    aaudio_result_t result = AAUDIO_OK;

    while (result == AAUDIO_OK) {
        //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::processCommands() - looping, %d", result);
        AAudioServiceMessage message;
        if (mAudioEndpoint.readUpCommand(&message) != 1) {
            break; // no command this time, no problem
        }
        switch (message.what) {
        case AAudioServiceMessage::code::TIMESTAMP:
            result = onTimestampFromServer(&message);
            break;

        case AAudioServiceMessage::code::EVENT:
            result = onEventFromServer(&message);
            break;

        default:
            ALOGE("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
                  (int) message.what);
            result = AAUDIO_ERROR_UNEXPECTED_VALUE;
            break;
        }
    }
    return result;
}

// Write the data, block if needed and timeoutNanoseconds > 0
aaudio_result_t AudioStreamInternal::write(const void *buffer, int32_t numFrames,
                                           int64_t timeoutNanoseconds)
{
    const char * traceName = (mInService) ? "aaWrtS" : "aaWrtC";
    ATRACE_BEGIN(traceName);
    aaudio_result_t result = AAUDIO_OK;
    int32_t loopCount = 0;
    uint8_t* source = (uint8_t*)buffer;
    int64_t currentTimeNanos = AudioClock::getNanoseconds();
    int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
    int32_t framesLeft = numFrames;

    int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
    if (ATRACE_ENABLED()) {
        const char * traceName = (mInService) ? "aaFullS" : "aaFullC";
        ATRACE_INT(traceName, fullFrames);
    }

    // Write until all the data has been written or until a timeout occurs.
    while (framesLeft > 0) {
        // The call to writeNow() will not block. It will just write as much as it can.
        int64_t wakeTimeNanos = 0;
        aaudio_result_t framesWritten = writeNow(source, framesLeft,
                                                 currentTimeNanos, &wakeTimeNanos);
        if (framesWritten < 0) {
            ALOGE("AudioStreamInternal::write() loop: writeNow returned %d", framesWritten);
            result = framesWritten;
            break;
        }
        framesLeft -= (int32_t) framesWritten;
        source += framesWritten * getBytesPerFrame();

        // Should we block?
        if (timeoutNanoseconds == 0) {
            break; // don't block
        } else if (framesLeft > 0) {
            // clip the wake time to something reasonable
            if (wakeTimeNanos < currentTimeNanos) {
                wakeTimeNanos = currentTimeNanos;
            }
            if (wakeTimeNanos > deadlineNanos) {
                // If we time out, just return the framesWritten so far.
                ALOGE("AudioStreamInternal::write(): timed out after %lld nanos",
                      (long long) timeoutNanoseconds);
                break;
            }

            int64_t sleepForNanos = wakeTimeNanos - currentTimeNanos;
            AudioClock::sleepForNanos(sleepForNanos);
            currentTimeNanos = AudioClock::getNanoseconds();
        }
    }

    // return error or framesWritten
    (void) loopCount;
    ATRACE_END();
    return (result < 0) ? result : numFrames - framesLeft;
}

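// writeNow() below is the non-blocking half of write(): when the endpoint is free-running
// it first estimates how far the service has read using the clock model, treats a negative
// fullFramesAvailable() as an underrun, copies what fits via writeNowWithConversion(), and
// suggests a wake-up time based on when the next burst is expected to be consumed.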
// Write as much data as we can without blocking.
aaudio_result_t AudioStreamInternal::writeNow(const void *buffer, int32_t numFrames,
                                              int64_t currentNanoTime, int64_t *wakeTimePtr) {
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    if (mAudioEndpoint.isOutputFreeRunning()) {
        //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - update read counter");
        // Update data queue based on the timing model.
        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        mAudioEndpoint.setDownDataReadCounter(estimatedReadCounter);
    }
    // TODO else query from endpoint cuz set by actual reader, maybe

    // If the read index passed the write index then consider it an underrun.
    if (mAudioEndpoint.getFullFramesAvailable() < 0) {
        mXRunCount++;
    }

    // Write some data to the buffer.
    //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - writeNowWithConversion(%d)", numFrames);
    int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
    //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
    //         numFrames, framesWritten);

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesWritten >= 0) {
        // By default wake up a few milliseconds from now. // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - wakeTime based on %s",
        //         AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                if (framesWritten != 0) {
                    // Don't wait to write more data. Just prime the buffer.
                    wakeTime = currentNanoTime;
                }
                break;
            case AAUDIO_STREAM_STATE_STARTED:   // When do we expect the next read burst to occur?
                {
                    uint32_t burstSize = mFramesPerBurst;
                    if (burstSize < 32) {
                        burstSize = 32; // TODO review
                    }

                    uint64_t nextReadPosition = mAudioEndpoint.getDownDataReadCounter() + burstSize;
                    wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
                }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;

    }
//    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
//             (unsigned long long)currentNanoTime,
//             (unsigned long long)mAudioEndpoint.getDownDataReadCounter(),
//             (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
    return framesWritten;
}

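// writeNowWithConversion() below converts and volume-scales into the shared FIFO in one
// pass. The client format (FLOAT or I16) and the device format are combined with the
// volume ramp: matching formats go through AAudio_linearRamp(), while mismatched formats
// use AAudioConvert_floatToPcm16() / AAudioConvert_pcm16ToFloat(), with or without ramping.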
aaudio_result_t AudioStreamInternal::writeNowWithConversion(const void *buffer,
                                                            int32_t numFrames) {
    // ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNowWithConversion(%p, %d)",
    //          buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *source = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    mAudioEndpoint.getEmptyRoomAvailable(&wrappingBuffer);

    // Write data in one or two parts.
    int partIndex = 0;
    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
        int32_t framesToWrite = framesLeft;
        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable > 0) {
            if (framesToWrite > framesAvailable) {
                framesToWrite = framesAvailable;
            }
            int32_t numBytes = getBytesPerFrame() * framesToWrite;
            int32_t numSamples = framesToWrite * getSamplesPerFrame();
            // Data conversion.
            float levelFrom;
            float levelTo;
            bool ramping = mVolumeRamp.nextSegment(numSamples,
                                                   &levelFrom, &levelTo);
            // The formats are validated when the stream is opened so we do not have to
            // check for illegal combinations here.
            if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                    AAudio_linearRamp(
                            (const float *) source,
                            (float *) wrappingBuffer.data[partIndex],
                            framesToWrite,
                            getSamplesPerFrame(),
                            levelFrom,
                            levelTo);
                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                    if (ramping) {
                        AAudioConvert_floatToPcm16(
                                (const float *) source,
                                (int16_t *) wrappingBuffer.data[partIndex],
                                framesToWrite,
                                getSamplesPerFrame(),
                                levelFrom,
                                levelTo);
                    } else {
                        AAudioConvert_floatToPcm16(
                                (const float *) source,
                                (int16_t *) wrappingBuffer.data[partIndex],
                                numSamples,
                                levelTo);
                    }
                }
            } else if (getFormat() == AAUDIO_FORMAT_PCM_I16) {
                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                    if (ramping) {
                        AAudioConvert_pcm16ToFloat(
                                (const int16_t *) source,
                                (float *) wrappingBuffer.data[partIndex],
                                framesToWrite,
                                getSamplesPerFrame(),
                                levelFrom,
                                levelTo);
                    } else {
                        AAudioConvert_pcm16ToFloat(
                                (const int16_t *) source,
                                (float *) wrappingBuffer.data[partIndex],
                                numSamples,
                                levelTo);
                    }
                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                    AAudio_linearRamp(
                            (const int16_t *) source,
                            (int16_t *) wrappingBuffer.data[partIndex],
                            framesToWrite,
                            getSamplesPerFrame(),
                            levelFrom,
                            levelTo);
                }
            }
            source += numBytes;
            framesLeft -= framesToWrite;
        } else {
            break;
        }
        partIndex++;
    }
    int32_t framesWritten = numFrames - framesLeft;
    mAudioEndpoint.advanceWriteIndex(framesWritten);

    if (framesWritten > 0) {
        incrementFramesWritten(framesWritten);
    }
    // ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
    return framesWritten;
}

void AudioStreamInternal::processTimestamp(uint64_t position, int64_t time) {
    mClockModel.processTimestamp(position, time);
}

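// setBufferSize() below rounds the request up to a whole number of bursts before passing
// it to the endpoint. Worked example (illustrative values): with framesPerBurst = 192, a
// request of 1000 frames becomes (1000 + 191) / 192 = 6 bursts, i.e. 1152 frames.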
aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
    int32_t actualFrames = 0;
    // Round to the next highest burst size.
    if (getFramesPerBurst() > 0) {
        int32_t numBursts = (requestedFrames + getFramesPerBurst() - 1) / getFramesPerBurst();
        requestedFrames = numBursts * getFramesPerBurst();
    }

    aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(requestedFrames, &actualFrames);
    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::setBufferSize() %s req = %d => %d",
             getLocationName(), requestedFrames, actualFrames);
    if (result < 0) {
        return result;
    } else {
        return (aaudio_result_t) actualFrames;
    }
}

int32_t AudioStreamInternal::getBufferSize() const
{
    return mAudioEndpoint.getBufferSizeInFrames();
}

int32_t AudioStreamInternal::getBufferCapacity() const
{
    return mAudioEndpoint.getBufferCapacityInFrames();
}

int32_t AudioStreamInternal::getFramesPerBurst() const
{
    return mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
}

int64_t AudioStreamInternal::getFramesRead()
{
    int64_t framesRead =
            mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
            + mFramesOffsetFromService;
    // Prevent retrograde motion.
    if (framesRead < mLastFramesRead) {
        framesRead = mLastFramesRead;
    } else {
        mLastFramesRead = framesRead;
    }
    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
    return framesRead;
}

int64_t AudioStreamInternal::getFramesWritten()
{
    int64_t framesWritten = mAudioEndpoint.getDownDataWriteCounter()
            + mFramesOffsetFromService;
    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::getFramesWritten() returns %lld", (long long)framesWritten);
    return framesWritten;
}