/*
 * Copyright 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AAudio"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <cutils/properties.h>
#include <stdint.h>
#include <sys/types.h>
#include <utils/Errors.h>

#include "aaudio/AAudio.h"
#include "core/AudioGlobal.h"
#include <aaudio/AAudioTesting.h>
#include <math.h>
#include <system/audio-base.h>
#include <assert.h>

#include "utility/AAudioUtilities.h"

using namespace android;

// This is 3 dB (10^(3/20)), to match the maximum headroom in AudioTrack for float data.
// It is designed to allow occasional transient peaks.
#define MAX_HEADROOM (1.41253754f)
#define MIN_HEADROOM (0 - MAX_HEADROOM)

int32_t AAudioConvert_formatToSizeInBytes(aaudio_format_t format) {
    int32_t size = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
    switch (format) {
        case AAUDIO_FORMAT_PCM_I16:
            size = sizeof(int16_t);
            break;
        case AAUDIO_FORMAT_PCM_FLOAT:
            size = sizeof(float);
            break;
        default:
            break;
    }
    return size;
}
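
// For example, PCM_I16 samples are 2 bytes each, so a caller would typically derive the
// frame size from this plus the channel count (illustrative sketch; channelCount is a
// hypothetical variable):
//
//     int32_t bytesPerSample = AAudioConvert_formatToSizeInBytes(AAUDIO_FORMAT_PCM_I16); // 2
//     int32_t bytesPerFrame  = bytesPerSample * channelCount;  // 4 for a stereo stream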

// TODO expose and call clamp16_from_float function in primitives.h
static inline int16_t clamp16_from_float(float f) {
    static const float scale = 1 << 15;
    return (int16_t) roundf(fmaxf(fminf(f * scale, scale - 1.f), -scale));
}
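
// Worked example (illustrative): clamp16_from_float(1.0f) returns 32767 because
// 1.0f * 32768 is limited to (scale - 1.f) before rounding, while clamp16_from_float(-1.0f)
// returns -32768. In-range values scale linearly and are rounded to the nearest integer.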

// Clip to the valid range of a float sample to prevent excessive volume.
// By using fmin and fmax we also protect against NaN.
static float clipToMinMaxHeadroom(float input) {
    return fmin(MAX_HEADROOM, fmax(MIN_HEADROOM, input));
}

static int16_t clipAndClampFloatToPcm16(float sample, float scaler) {
    // Clip to the valid range of a float sample to prevent excessive volume.
    sample = clipToMinMaxHeadroom(sample);

    // Scale and convert to a short.
    float fval = sample * scaler;
    return clamp16_from_float(fval);
}

void AAudioConvert_floatToPcm16(const float *source,
                                int16_t *destination,
                                int32_t numSamples,
                                float amplitude) {
    const float scaler = amplitude;
    for (int i = 0; i < numSamples; i++) {
        float sample = *source++;
        *destination++ = clipAndClampFloatToPcm16(sample, scaler);
    }
}

void AAudioConvert_floatToPcm16(const float *source,
                                int16_t *destination,
                                int32_t numFrames,
                                int32_t samplesPerFrame,
                                float amplitude1,
                                float amplitude2) {
    float scaler = amplitude1;
    // Divide by numFrames so that we almost reach amplitude2.
    const float delta = (amplitude2 - amplitude1) / numFrames;
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
            float sample = *source++;
            *destination++ = clipAndClampFloatToPcm16(sample, scaler);
        }
        scaler += delta;
    }
}
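
// Worked example (illustrative): with amplitude1 = 0.0f, amplitude2 = 1.0f and numFrames = 4,
// delta is 0.25f, so successive frames are scaled by 0.0, 0.25, 0.5 and 0.75. amplitude2 itself
// is only applied on the first frame of the next buffer, which keeps back-to-back ramps
// continuous when the next call starts where this one ended.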

#define SHORT_SCALE 32768

void AAudioConvert_pcm16ToFloat(const int16_t *source,
                                float *destination,
                                int32_t numSamples,
                                float amplitude) {
    const float scaler = amplitude / SHORT_SCALE;
    for (int i = 0; i < numSamples; i++) {
        destination[i] = source[i] * scaler;
    }
}

// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
void AAudioConvert_pcm16ToFloat(const int16_t *source,
                                float *destination,
                                int32_t numFrames,
                                int32_t samplesPerFrame,
                                float amplitude1,
                                float amplitude2) {
    float scaler = amplitude1 / SHORT_SCALE;
    const float delta = (amplitude2 - amplitude1) / (SHORT_SCALE * (float) numFrames);
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
            *destination++ = *source++ * scaler;
        }
        scaler += delta;
    }
}

// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
void AAudio_linearRamp(const float *source,
                       float *destination,
                       int32_t numFrames,
                       int32_t samplesPerFrame,
                       float amplitude1,
                       float amplitude2) {
    float scaler = amplitude1;
    const float delta = (amplitude2 - amplitude1) / numFrames;
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
            float sample = *source++;
            // Clip to valid range of a float sample to prevent excessive volume.
            sample = clipToMinMaxHeadroom(sample);

            *destination++ = sample * scaler;
        }
        scaler += delta;
    }
}

// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
void AAudio_linearRamp(const int16_t *source,
                       int16_t *destination,
                       int32_t numFrames,
                       int32_t samplesPerFrame,
                       float amplitude1,
                       float amplitude2) {
    // Because we are converting from int16 to int16, we do not have to scale by 1/32768.
    float scaler = amplitude1;
    const float delta = (amplitude2 - amplitude1) / numFrames;
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
            // No need to clip because the int16_t range is inherently limited.
            float sample = *source++ * scaler;
            *destination++ = (int16_t) roundf(sample);
        }
        scaler += delta;
    }
}

// *************************************************************************************
// Convert mono to stereo while also converting the format.
void AAudioConvert_formatMonoToStereo(const float *source,
                                      int16_t *destination,
                                      int32_t numFrames,
                                      float amplitude) {
    const float scaler = amplitude;
    for (int i = 0; i < numFrames; i++) {
        float sample = *source++;
        int16_t sample16 = clipAndClampFloatToPcm16(sample, scaler);
        *destination++ = sample16;
        *destination++ = sample16;
    }
}

void AAudioConvert_formatMonoToStereo(const float *source,
                                      int16_t *destination,
                                      int32_t numFrames,
                                      float amplitude1,
                                      float amplitude2) {
    // Divide by numFrames so that we almost reach amplitude2.
    const float delta = (amplitude2 - amplitude1) / numFrames;
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        const float scaler = amplitude1 + (frameIndex * delta);
        const float sample = *source++;
        int16_t sample16 = clipAndClampFloatToPcm16(sample, scaler);
        *destination++ = sample16;
        *destination++ = sample16;
    }
}

void AAudioConvert_formatMonoToStereo(const int16_t *source,
                                      float *destination,
                                      int32_t numFrames,
                                      float amplitude) {
    const float scaler = amplitude / SHORT_SCALE;
    for (int i = 0; i < numFrames; i++) {
        float sample = source[i] * scaler;
        *destination++ = sample;
        *destination++ = sample;
    }
}

// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
void AAudioConvert_formatMonoToStereo(const int16_t *source,
                                      float *destination,
                                      int32_t numFrames,
                                      float amplitude1,
                                      float amplitude2) {
    const float scaler1 = amplitude1 / SHORT_SCALE;
    const float delta = (amplitude2 - amplitude1) / (SHORT_SCALE * (float) numFrames);
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        float scaler = scaler1 + (frameIndex * delta);
        float sample = source[frameIndex] * scaler;
        *destination++ = sample;
        *destination++ = sample;
    }
}

// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
void AAudio_linearRampMonoToStereo(const float *source,
                                   float *destination,
                                   int32_t numFrames,
                                   float amplitude1,
                                   float amplitude2) {
    const float delta = (amplitude2 - amplitude1) / numFrames;
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        float sample = *source++;

        // Clip to valid range of a float sample to prevent excessive volume.
        sample = clipToMinMaxHeadroom(sample);

        const float scaler = amplitude1 + (frameIndex * delta);
        float sampleScaled = sample * scaler;
        *destination++ = sampleScaled;
        *destination++ = sampleScaled;
    }
}

// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
void AAudio_linearRampMonoToStereo(const int16_t *source,
                                   int16_t *destination,
                                   int32_t numFrames,
                                   float amplitude1,
                                   float amplitude2) {
    // Because we are converting from int16 to int16, we do not have to scale by 1/32768.
    const float delta = (amplitude2 - amplitude1) / numFrames;
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        const float scaler = amplitude1 + (frameIndex * delta);
        // No need to clip because the int16_t range is inherently limited.
        const float sample = *source++ * scaler;
        int16_t sample16 = (int16_t) roundf(sample);
        *destination++ = sample16;
        *destination++ = sample16;
    }
}

// *************************************************************************************
void AAudioDataConverter::convert(
        const FormattedData &source,
        const FormattedData &destination,
        int32_t numFrames,
        float levelFrom,
        float levelTo) {

    if (source.channelCount == 1 && destination.channelCount == 2) {
        convertMonoToStereo(source,
                            destination,
                            numFrames,
                            levelFrom,
                            levelTo);
    } else {
        // We only support mono-to-stereo conversion. Otherwise the source and destination
        // channel counts must match.
        assert(source.channelCount == destination.channelCount);
        convertChannelsMatch(source,
                             destination,
                             numFrames,
                             levelFrom,
                             levelTo);
    }
}
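
// Usage sketch (illustrative): converting a mono float buffer into a stereo PCM_I16 buffer
// at a constant level. It assumes FormattedData can be constructed from
// (data, format, channelCount) as suggested by the fields read below; monoFloatInput,
// stereoPcm16Out and numFrames are hypothetical (see AAudioUtilities.h for the real declaration).
//
//     FormattedData src((void *) monoFloatInput, AAUDIO_FORMAT_PCM_FLOAT, 1);
//     FormattedData dst((void *) stereoPcm16Out, AAUDIO_FORMAT_PCM_I16,   2);
//     AAudioDataConverter::convert(src, dst, numFrames,
//                                  0.8f /*levelFrom*/, 0.8f /*levelTo*/);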

void AAudioDataConverter::convertMonoToStereo(
        const FormattedData &source,
        const FormattedData &destination,
        int32_t numFrames,
        float levelFrom,
        float levelTo) {

    // The formats are validated when the stream is opened so we do not have to
    // check for illegal combinations here.
    if (source.format == AAUDIO_FORMAT_PCM_FLOAT) {
        if (destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
            AAudio_linearRampMonoToStereo(
                    (const float *) source.data,
                    (float *) destination.data,
                    numFrames,
                    levelFrom,
                    levelTo);
        } else if (destination.format == AAUDIO_FORMAT_PCM_I16) {
            if (levelFrom != levelTo) {
                AAudioConvert_formatMonoToStereo(
                        (const float *) source.data,
                        (int16_t *) destination.data,
                        numFrames,
                        levelFrom,
                        levelTo);
            } else {
                AAudioConvert_formatMonoToStereo(
                        (const float *) source.data,
                        (int16_t *) destination.data,
                        numFrames,
                        levelTo);
            }
        }
    } else if (source.format == AAUDIO_FORMAT_PCM_I16) {
        if (destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
            if (levelFrom != levelTo) {
                AAudioConvert_formatMonoToStereo(
                        (const int16_t *) source.data,
                        (float *) destination.data,
                        numFrames,
                        levelFrom,
                        levelTo);
            } else {
                AAudioConvert_formatMonoToStereo(
                        (const int16_t *) source.data,
                        (float *) destination.data,
                        numFrames,
                        levelTo);
            }
        } else if (destination.format == AAUDIO_FORMAT_PCM_I16) {
            AAudio_linearRampMonoToStereo(
                    (const int16_t *) source.data,
                    (int16_t *) destination.data,
                    numFrames,
                    levelFrom,
                    levelTo);
        }
    }
}

void AAudioDataConverter::convertChannelsMatch(
        const FormattedData &source,
        const FormattedData &destination,
        int32_t numFrames,
        float levelFrom,
        float levelTo) {
    const int32_t numSamples = numFrames * source.channelCount;

    // The formats are validated when the stream is opened so we do not have to
    // check for illegal combinations here.
    if (source.format == AAUDIO_FORMAT_PCM_FLOAT) {
        if (destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
            AAudio_linearRamp(
                    (const float *) source.data,
                    (float *) destination.data,
                    numFrames,
                    source.channelCount,
                    levelFrom,
                    levelTo);
        } else if (destination.format == AAUDIO_FORMAT_PCM_I16) {
            if (levelFrom != levelTo) {
                AAudioConvert_floatToPcm16(
                        (const float *) source.data,
                        (int16_t *) destination.data,
                        numFrames,
                        source.channelCount,
                        levelFrom,
                        levelTo);
            } else {
                AAudioConvert_floatToPcm16(
                        (const float *) source.data,
                        (int16_t *) destination.data,
                        numSamples,
                        levelTo);
            }
        }
    } else if (source.format == AAUDIO_FORMAT_PCM_I16) {
        if (destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
            if (levelFrom != levelTo) {
                AAudioConvert_pcm16ToFloat(
                        (const int16_t *) source.data,
                        (float *) destination.data,
                        numFrames,
                        source.channelCount,
                        levelFrom,
                        levelTo);
            } else {
                AAudioConvert_pcm16ToFloat(
                        (const int16_t *) source.data,
                        (float *) destination.data,
                        numSamples,
                        levelTo);
            }
        } else if (destination.format == AAUDIO_FORMAT_PCM_I16) {
            AAudio_linearRamp(
                    (const int16_t *) source.data,
                    (int16_t *) destination.data,
                    numFrames,
                    source.channelCount,
                    levelFrom,
                    levelTo);
        }
    }
}

status_t AAudioConvert_aaudioToAndroidStatus(aaudio_result_t result) {
    // This covers the case for AAUDIO_OK and for positive results.
    if (result >= 0) {
        return result;
    }
    status_t status;
    switch (result) {
        case AAUDIO_ERROR_DISCONNECTED:
        case AAUDIO_ERROR_NO_SERVICE:
            status = DEAD_OBJECT;
            break;
        case AAUDIO_ERROR_INVALID_HANDLE:
            status = BAD_TYPE;
            break;
        case AAUDIO_ERROR_INVALID_STATE:
            status = INVALID_OPERATION;
            break;
        case AAUDIO_ERROR_INVALID_RATE:
        case AAUDIO_ERROR_INVALID_FORMAT:
        case AAUDIO_ERROR_ILLEGAL_ARGUMENT:
        case AAUDIO_ERROR_OUT_OF_RANGE:
            status = BAD_VALUE;
            break;
        case AAUDIO_ERROR_WOULD_BLOCK:
            status = WOULD_BLOCK;
            break;
        case AAUDIO_ERROR_NULL:
            status = UNEXPECTED_NULL;
            break;
        case AAUDIO_ERROR_UNAVAILABLE:
            status = NOT_ENOUGH_DATA;
            break;

        // TODO translate these result codes
        case AAUDIO_ERROR_INTERNAL:
        case AAUDIO_ERROR_UNIMPLEMENTED:
        case AAUDIO_ERROR_NO_FREE_HANDLES:
        case AAUDIO_ERROR_NO_MEMORY:
        case AAUDIO_ERROR_TIMEOUT:
        default:
            status = UNKNOWN_ERROR;
            break;
    }
    return status;
}
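
// Note that the mapping is many-to-one: for example, both AAUDIO_ERROR_INVALID_RATE and
// AAUDIO_ERROR_ILLEGAL_ARGUMENT map to BAD_VALUE, and BAD_VALUE maps back to
// AAUDIO_ERROR_ILLEGAL_ARGUMENT in AAudioConvert_androidToAAudioResult() below, so a
// round trip does not always return the original result code.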

aaudio_result_t AAudioConvert_androidToAAudioResult(status_t status) {
    // This covers the case for OK and for positive results.
    if (status >= 0) {
        return status;
    }
    aaudio_result_t result;
    switch (status) {
        case BAD_TYPE:
            result = AAUDIO_ERROR_INVALID_HANDLE;
            break;
        case DEAD_OBJECT:
            result = AAUDIO_ERROR_NO_SERVICE;
            break;
        case INVALID_OPERATION:
            result = AAUDIO_ERROR_INVALID_STATE;
            break;
        case UNEXPECTED_NULL:
            result = AAUDIO_ERROR_NULL;
            break;
        case BAD_VALUE:
            result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
            break;
        case WOULD_BLOCK:
            result = AAUDIO_ERROR_WOULD_BLOCK;
            break;
        case NOT_ENOUGH_DATA:
            result = AAUDIO_ERROR_UNAVAILABLE;
            break;
        default:
            result = AAUDIO_ERROR_INTERNAL;
            break;
    }
    return result;
}

audio_session_t AAudioConvert_aaudioToAndroidSessionId(aaudio_session_id_t sessionId) {
    // If not a regular sessionId then convert to a safe value of AUDIO_SESSION_ALLOCATE.
    return (sessionId == AAUDIO_SESSION_ID_ALLOCATE || sessionId == AAUDIO_SESSION_ID_NONE)
            ? AUDIO_SESSION_ALLOCATE
            : (audio_session_t) sessionId;
}

audio_format_t AAudioConvert_aaudioToAndroidDataFormat(aaudio_format_t aaudioFormat) {
    audio_format_t androidFormat;
    switch (aaudioFormat) {
        case AAUDIO_FORMAT_PCM_I16:
            androidFormat = AUDIO_FORMAT_PCM_16_BIT;
            break;
        case AAUDIO_FORMAT_PCM_FLOAT:
            androidFormat = AUDIO_FORMAT_PCM_FLOAT;
            break;
        default:
            androidFormat = AUDIO_FORMAT_DEFAULT;
            ALOGE("AAudioConvert_aaudioToAndroidDataFormat 0x%08X unrecognized", aaudioFormat);
            break;
    }
    return androidFormat;
}

aaudio_format_t AAudioConvert_androidToAAudioDataFormat(audio_format_t androidFormat) {
    aaudio_format_t aaudioFormat = AAUDIO_FORMAT_INVALID;
    switch (androidFormat) {
        case AUDIO_FORMAT_PCM_16_BIT:
            aaudioFormat = AAUDIO_FORMAT_PCM_I16;
            break;
        case AUDIO_FORMAT_PCM_FLOAT:
            aaudioFormat = AAUDIO_FORMAT_PCM_FLOAT;
            break;
        default:
            aaudioFormat = AAUDIO_FORMAT_INVALID;
            ALOGE("AAudioConvert_androidToAAudioDataFormat 0x%08X unrecognized", androidFormat);
            break;
    }
    return aaudioFormat;
}

// Make a message string from the condition.
#define STATIC_ASSERT(condition) static_assert(condition, #condition)
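
// For example, STATIC_ASSERT(AAUDIO_USAGE_GAME == AUDIO_USAGE_GAME) expands to
//     static_assert(AAUDIO_USAGE_GAME == AUDIO_USAGE_GAME,
//                   "AAUDIO_USAGE_GAME == AUDIO_USAGE_GAME");
// so a failing build prints the condition itself as the error message.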

audio_usage_t AAudioConvert_usageToInternal(aaudio_usage_t usage) {
    // The public aaudio_usage_t constants are supposed to have the same
    // values as the internal audio_usage_t values.
    STATIC_ASSERT(AAUDIO_USAGE_MEDIA == AUDIO_USAGE_MEDIA);
    STATIC_ASSERT(AAUDIO_USAGE_VOICE_COMMUNICATION == AUDIO_USAGE_VOICE_COMMUNICATION);
    STATIC_ASSERT(AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING
                  == AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING);
    STATIC_ASSERT(AAUDIO_USAGE_ALARM == AUDIO_USAGE_ALARM);
    STATIC_ASSERT(AAUDIO_USAGE_NOTIFICATION == AUDIO_USAGE_NOTIFICATION);
    STATIC_ASSERT(AAUDIO_USAGE_NOTIFICATION_RINGTONE
                  == AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE);
    STATIC_ASSERT(AAUDIO_USAGE_NOTIFICATION_EVENT == AUDIO_USAGE_NOTIFICATION_EVENT);
    STATIC_ASSERT(AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY == AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY);
    STATIC_ASSERT(AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE
                  == AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE);
    STATIC_ASSERT(AAUDIO_USAGE_ASSISTANCE_SONIFICATION == AUDIO_USAGE_ASSISTANCE_SONIFICATION);
    STATIC_ASSERT(AAUDIO_USAGE_GAME == AUDIO_USAGE_GAME);
    STATIC_ASSERT(AAUDIO_USAGE_ASSISTANT == AUDIO_USAGE_ASSISTANT);
    if (usage == AAUDIO_UNSPECIFIED) {
        usage = AAUDIO_USAGE_MEDIA;
    }
    return (audio_usage_t) usage; // same value
}

audio_content_type_t AAudioConvert_contentTypeToInternal(aaudio_content_type_t contentType) {
    // The public aaudio_content_type_t constants are supposed to have the same
    // values as the internal audio_content_type_t values.
    STATIC_ASSERT(AAUDIO_CONTENT_TYPE_MUSIC == AUDIO_CONTENT_TYPE_MUSIC);
    STATIC_ASSERT(AAUDIO_CONTENT_TYPE_SPEECH == AUDIO_CONTENT_TYPE_SPEECH);
    STATIC_ASSERT(AAUDIO_CONTENT_TYPE_SONIFICATION == AUDIO_CONTENT_TYPE_SONIFICATION);
    STATIC_ASSERT(AAUDIO_CONTENT_TYPE_MOVIE == AUDIO_CONTENT_TYPE_MOVIE);
    if (contentType == AAUDIO_UNSPECIFIED) {
        contentType = AAUDIO_CONTENT_TYPE_MUSIC;
    }
    return (audio_content_type_t) contentType; // same value
}

audio_source_t AAudioConvert_inputPresetToAudioSource(aaudio_input_preset_t preset) {
    // The public aaudio_input_preset_t constants are supposed to have the same
    // values as the internal audio_source_t values.
    STATIC_ASSERT(AAUDIO_UNSPECIFIED == AUDIO_SOURCE_DEFAULT);
    STATIC_ASSERT(AAUDIO_INPUT_PRESET_GENERIC == AUDIO_SOURCE_MIC);
    STATIC_ASSERT(AAUDIO_INPUT_PRESET_CAMCORDER == AUDIO_SOURCE_CAMCORDER);
    STATIC_ASSERT(AAUDIO_INPUT_PRESET_VOICE_RECOGNITION == AUDIO_SOURCE_VOICE_RECOGNITION);
    STATIC_ASSERT(AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION == AUDIO_SOURCE_VOICE_COMMUNICATION);
    STATIC_ASSERT(AAUDIO_INPUT_PRESET_UNPROCESSED == AUDIO_SOURCE_UNPROCESSED);
    if (preset == AAUDIO_UNSPECIFIED) {
        preset = AAUDIO_INPUT_PRESET_VOICE_RECOGNITION;
    }
    return (audio_source_t) preset; // same value
}

int32_t AAudioConvert_framesToBytes(int32_t numFrames,
                                    int32_t bytesPerFrame,
                                    int32_t *sizeInBytes) {
    *sizeInBytes = 0;

    if (numFrames < 0 || bytesPerFrame < 0) {
        ALOGE("negative size, numFrames = %d, frameSize = %d", numFrames, bytesPerFrame);
        return AAUDIO_ERROR_OUT_OF_RANGE;
    }

    // Prevent numeric overflow. The bytesPerFrame > 0 check also avoids dividing by zero.
    if (bytesPerFrame > 0 && numFrames > (INT32_MAX / bytesPerFrame)) {
        ALOGE("size overflow, numFrames = %d, frameSize = %d", numFrames, bytesPerFrame);
        return AAUDIO_ERROR_OUT_OF_RANGE;
    }

    *sizeInBytes = numFrames * bytesPerFrame;
    return AAUDIO_OK;
}
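
// Usage sketch (illustrative): sizing a buffer for one stereo float burst.
// burstFrames is a hypothetical frame count supplied by the caller.
//
//     int32_t sizeInBytes = 0;
//     aaudio_result_t result = AAudioConvert_framesToBytes(
//             burstFrames,                    // e.g. 192
//             2 * sizeof(float),              // bytesPerFrame for stereo float
//             &sizeInBytes);
//     if (result != AAUDIO_OK) { /* reject the oversized request */ }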

static int32_t AAudioProperty_getMMapProperty(const char *propName,
                                              int32_t defaultValue,
                                              const char *caller) {
    int32_t prop = property_get_int32(propName, defaultValue);
    switch (prop) {
        case AAUDIO_UNSPECIFIED:
        case AAUDIO_POLICY_NEVER:
        case AAUDIO_POLICY_ALWAYS:
        case AAUDIO_POLICY_AUTO:
            break;
        default:
            ALOGE("%s: invalid = %d", caller, prop);
            prop = defaultValue;
            break;
    }
    return prop;
}

int32_t AAudioProperty_getMMapPolicy() {
    return AAudioProperty_getMMapProperty(AAUDIO_PROP_MMAP_POLICY,
                                          AAUDIO_UNSPECIFIED, __func__);
}

int32_t AAudioProperty_getMMapExclusivePolicy() {
    return AAudioProperty_getMMapProperty(AAUDIO_PROP_MMAP_EXCLUSIVE_POLICY,
                                          AAUDIO_UNSPECIFIED, __func__);
}
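
// Usage sketch (illustrative): a caller deciding whether to attempt an MMAP stream based on
// the system property, using the aaudio_policy_t values declared in AAudioTesting.h.
//
//     aaudio_policy_t policy = (aaudio_policy_t) AAudioProperty_getMMapPolicy();
//     bool tryMMap = (policy == AAUDIO_POLICY_AUTO) || (policy == AAUDIO_POLICY_ALWAYS);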

int32_t AAudioProperty_getMixerBursts() {
    const int32_t defaultBursts = 2; // arbitrary, use 2 for double buffered
    const int32_t maxBursts = 1024; // arbitrary
    int32_t prop = property_get_int32(AAUDIO_PROP_MIXER_BURSTS, defaultBursts);
    if (prop < 1 || prop > maxBursts) {
        ALOGE("AAudioProperty_getMixerBursts: invalid = %d", prop);
        prop = defaultBursts;
    }
    return prop;
}

int32_t AAudioProperty_getWakeupDelayMicros() {
    const int32_t minMicros = 0; // arbitrary
    const int32_t defaultMicros = 200; // arbitrary, based on some observed jitter
    const int32_t maxMicros = 5000; // arbitrary, probably don't want more than 500
    int32_t prop = property_get_int32(AAUDIO_PROP_WAKEUP_DELAY_USEC, defaultMicros);
    if (prop < minMicros) {
        ALOGW("AAudioProperty_getWakeupDelayMicros: clipped %d to %d", prop, minMicros);
        prop = minMicros;
    } else if (prop > maxMicros) {
        ALOGW("AAudioProperty_getWakeupDelayMicros: clipped %d to %d", prop, maxMicros);
        prop = maxMicros;
    }
    return prop;
}

int32_t AAudioProperty_getMinimumSleepMicros() {
    const int32_t minMicros = 20; // arbitrary
    const int32_t defaultMicros = 200; // arbitrary
    const int32_t maxMicros = 2000; // arbitrary
    int32_t prop = property_get_int32(AAUDIO_PROP_MINIMUM_SLEEP_USEC, defaultMicros);
    if (prop < minMicros) {
        ALOGW("AAudioProperty_getMinimumSleepMicros: clipped %d to %d", prop, minMicros);
        prop = minMicros;
    } else if (prop > maxMicros) {
        ALOGW("AAudioProperty_getMinimumSleepMicros: clipped %d to %d", prop, maxMicros);
        prop = maxMicros;
    }
    return prop;
}

int32_t AAudioProperty_getHardwareBurstMinMicros() {
    const int32_t defaultMicros = 1000; // arbitrary
    const int32_t maxMicros = 1000 * 1000; // arbitrary
    int32_t prop = property_get_int32(AAUDIO_PROP_HW_BURST_MIN_USEC, defaultMicros);
    if (prop < 1 || prop > maxMicros) {
        ALOGE("AAudioProperty_getHardwareBurstMinMicros: invalid = %d, use %d",
              prop, defaultMicros);
        prop = defaultMicros;
    }
    return prop;
}

aaudio_result_t AAudio_isFlushAllowed(aaudio_stream_state_t state) {
    aaudio_result_t result = AAUDIO_OK;
    switch (state) {
        // Proceed with flushing.
        case AAUDIO_STREAM_STATE_OPEN:
        case AAUDIO_STREAM_STATE_PAUSED:
        case AAUDIO_STREAM_STATE_STOPPED:
        case AAUDIO_STREAM_STATE_FLUSHED:
            break;

        // Flushing is not allowed in these states.
        case AAUDIO_STREAM_STATE_STARTING:
        case AAUDIO_STREAM_STATE_STARTED:
        case AAUDIO_STREAM_STATE_STOPPING:
        case AAUDIO_STREAM_STATE_PAUSING:
        case AAUDIO_STREAM_STATE_FLUSHING:
        case AAUDIO_STREAM_STATE_CLOSING:
        case AAUDIO_STREAM_STATE_CLOSED:
        case AAUDIO_STREAM_STATE_DISCONNECTED:
        default:
            ALOGE("can only flush stream when OPEN, PAUSED, STOPPED or FLUSHED, state = %s",
                  aaudio::AudioGlobal_convertStreamStateToText(state));
            result = AAUDIO_ERROR_INVALID_STATE;
            break;
    }
    return result;
}
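
// Usage sketch (illustrative; getState() and requestFlush() are hypothetical callers):
//
//     if (AAudio_isFlushAllowed(getState()) == AAUDIO_OK) {
//         requestFlush();
//     }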