aaudio: use new flowgraph to simplify processing
Construct a flowgraph based on the source and destination
format and channelCount. This is groundwork for supporting 24-bit
PCM formats.
Also cleaned up handling of device related format.
This CL removes more code than it adds.
Bug: 65067568
Test: write_sine_callback.cpp -pl
Test: write_sine_callback.cpp -pl -x
Test: input_monitor -pl
Test: input_monitor -pl -x
Change-Id: Ia155bff0164912011d09b61b54f983ccf4490bd1
diff --git a/media/libaaudio/src/Android.bp b/media/libaaudio/src/Android.bp
index 4b5f30d..4090286 100644
--- a/media/libaaudio/src/Android.bp
+++ b/media/libaaudio/src/Android.bp
@@ -26,9 +26,9 @@
"utility/FixedBlockAdapter.cpp",
"utility/FixedBlockReader.cpp",
"utility/FixedBlockWriter.cpp",
- "utility/LinearRamp.cpp",
"fifo/FifoBuffer.cpp",
"fifo/FifoControllerBase.cpp",
+ "client/AAudioFlowGraph.cpp",
"client/AudioEndpoint.cpp",
"client/AudioStreamInternal.cpp",
"client/AudioStreamInternalCapture.cpp",
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
index 959db61..3d1bc9b 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
@@ -14,6 +14,10 @@
* limitations under the License.
*/
+#define LOG_TAG "AAudioStreamConfiguration"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
#include <stdint.h>
#include <sys/mman.h>
@@ -36,6 +40,7 @@
status_t AAudioStreamConfiguration::writeToParcel(Parcel* parcel) const {
status_t status;
+
status = parcel->writeInt32(getDeviceId());
if (status != NO_ERROR) goto error;
status = parcel->writeInt32(getSampleRate());
@@ -46,6 +51,7 @@
if (status != NO_ERROR) goto error;
status = parcel->writeInt32((int32_t) getFormat());
if (status != NO_ERROR) goto error;
+
status = parcel->writeInt32((int32_t) getDirection());
if (status != NO_ERROR) goto error;
status = parcel->writeInt32(getBufferCapacity());
@@ -60,7 +66,7 @@
if (status != NO_ERROR) goto error;
return NO_ERROR;
error:
- ALOGE("AAudioStreamConfiguration.writeToParcel(): write failed = %d", status);
+ ALOGE("%s(): write failed = %d", __func__, status);
return status;
}
@@ -80,7 +86,8 @@
setSharingMode((aaudio_sharing_mode_t) value);
status = parcel->readInt32(&value);
if (status != NO_ERROR) goto error;
- setFormat((aaudio_format_t) value);
+ setFormat((audio_format_t) value);
+
status = parcel->readInt32(&value);
if (status != NO_ERROR) goto error;
setDirection((aaudio_direction_t) value);
@@ -99,8 +106,9 @@
status = parcel->readInt32(&value);
if (status != NO_ERROR) goto error;
setSessionId(value);
+
return NO_ERROR;
error:
- ALOGE("AAudioStreamConfiguration.readFromParcel(): read failed = %d", status);
+ ALOGE("%s(): read failed = %d", __func__, status);
return status;
}
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
index 0b0cf77..67955e8 100644
--- a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
@@ -43,7 +43,7 @@
void SharedMemoryParcelable::setup(const unique_fd& fd, int32_t sizeInBytes) {
mFd.reset(dup(fd.get())); // store a duplicate fd
- ALOGV("setup(%d -> %d, %d) this = %p\n", fd.get(), mFd.get(), sizeInBytes, this);
+ ALOGV("setup(fd = %d -> %d, size = %d) this = %p\n", fd.get(), mFd.get(), sizeInBytes, this);
mSizeInBytes = sizeInBytes;
}
@@ -104,7 +104,8 @@
mResolvedAddress = (uint8_t *) mmap(0, mSizeInBytes, PROT_READ | PROT_WRITE,
MAP_SHARED, fd.get(), 0);
if (mResolvedAddress == MMAP_UNRESOLVED_ADDRESS) {
- ALOGE("mmap() failed for fd = %d, errno = %s", fd.get(), strerror(errno));
+ ALOGE("mmap() failed for fd = %d, nBytes = %d, errno = %s",
+ fd.get(), mSizeInBytes, strerror(errno));
return AAUDIO_ERROR_INTERNAL;
}
return AAUDIO_OK;
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.h b/media/libaaudio/src/binding/SharedMemoryParcelable.h
index 82c2240..4ec38c5 100644
--- a/media/libaaudio/src/binding/SharedMemoryParcelable.h
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.h
@@ -70,8 +70,8 @@
aaudio_result_t resolveSharedMemory(const android::base::unique_fd& fd);
android::base::unique_fd mFd;
- int32_t mSizeInBytes = 0;
- uint8_t *mResolvedAddress = MMAP_UNRESOLVED_ADDRESS;
+ int32_t mSizeInBytes = 0;
+ uint8_t *mResolvedAddress = MMAP_UNRESOLVED_ADDRESS;
private:
diff --git a/media/libaaudio/src/client/AAudioFlowGraph.cpp b/media/libaaudio/src/client/AAudioFlowGraph.cpp
new file mode 100644
index 0000000..3e43c6b
--- /dev/null
+++ b/media/libaaudio/src/client/AAudioFlowGraph.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioFlowGraph"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include "AAudioFlowGraph.h"
+
+#include <flowgraph/ClipToRange.h>
+#include <flowgraph/MonoToMultiConverter.h>
+#include <flowgraph/RampLinear.h>
+#include <flowgraph/SinkFloat.h>
+#include <flowgraph/SinkI16.h>
+#include <flowgraph/SinkI24.h>
+#include <flowgraph/SourceFloat.h>
+#include <flowgraph/SourceI16.h>
+#include <flowgraph/SourceI24.h>
+
+using namespace flowgraph;
+
+aaudio_result_t AAudioFlowGraph::configure(audio_format_t sourceFormat,
+ int32_t sourceChannelCount,
+ audio_format_t sinkFormat,
+ int32_t sinkChannelCount) {
+ AudioFloatOutputPort *lastOutput = nullptr;
+
+ ALOGD("%s() source format = 0x%08x, channels = %d, sink format = 0x%08x, channels = %d",
+ __func__, sourceFormat, sourceChannelCount, sinkFormat, sinkChannelCount);
+
+ switch (sourceFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ mSource = std::make_unique<SourceFloat>(sourceChannelCount);
+ break;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ mSource = std::make_unique<SourceI16>(sourceChannelCount);
+ break;
+ case AUDIO_FORMAT_PCM_24_BIT_PACKED:
+ mSource = std::make_unique<SourceI24>(sourceChannelCount);
+ break;
+ default: // TODO add I32
+ ALOGE("%s() Unsupported source format = %d", __func__, sourceFormat);
+ return AAUDIO_ERROR_UNIMPLEMENTED;
+ }
+ lastOutput = &mSource->output;
+
+ // Apply volume as a ramp to avoid pops.
+ mVolumeRamp = std::make_unique<RampLinear>(sourceChannelCount);
+ lastOutput->connect(&mVolumeRamp->input);
+ lastOutput = &mVolumeRamp->output;
+
+ // For a pure float graph, there is a chance that the data range may be very large.
+ // So we should clip to a reasonable value that allows a little headroom.
+ if (sourceFormat == AUDIO_FORMAT_PCM_FLOAT && sinkFormat == AUDIO_FORMAT_PCM_FLOAT) {
+ mClipper = std::make_unique<ClipToRange>(sourceChannelCount);
+ lastOutput->connect(&mClipper->input);
+ lastOutput = &mClipper->output;
+ }
+
+ // Expand the number of channels if required.
+ if (sourceChannelCount == 1 && sinkChannelCount > 1) {
+ mChannelConverter = std::make_unique<MonoToMultiConverter>(sinkChannelCount);
+ lastOutput->connect(&mChannelConverter->input);
+ lastOutput = &mChannelConverter->output;
+ } else if (sourceChannelCount != sinkChannelCount) {
+ ALOGE("%s() Channel reduction not supported.", __func__);
+ return AAUDIO_ERROR_UNIMPLEMENTED;
+ }
+
+ switch (sinkFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ mSink = std::make_unique<SinkFloat>(sinkChannelCount);
+ break;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ mSink = std::make_unique<SinkI16>(sinkChannelCount);
+ break;
+ case AUDIO_FORMAT_PCM_24_BIT_PACKED:
+ mSink = std::make_unique<SinkI24>(sinkChannelCount);
+ break;
+ default: // TODO add I32
+ ALOGE("%s() Unsupported sink format = %d", __func__, sinkFormat);
+ return AAUDIO_ERROR_UNIMPLEMENTED;
+ }
+ lastOutput->connect(&mSink->input);
+
+ return AAUDIO_OK;
+}
+
+void AAudioFlowGraph::process(const void *source, void *destination, int32_t numFrames) {
+ mSource->setData(source, numFrames);
+ mSink->read(destination, numFrames);
+}
+
+/**
+ * @param volume between 0.0 and 1.0
+ */
+void AAudioFlowGraph::setTargetVolume(float volume) {
+ mVolumeRamp->setTarget(volume);
+}
+
+void AAudioFlowGraph::setRampLengthInFrames(int32_t numFrames) {
+ mVolumeRamp->setLengthInFrames(numFrames);
+}
diff --git a/media/libaaudio/src/client/AAudioFlowGraph.h b/media/libaaudio/src/client/AAudioFlowGraph.h
new file mode 100644
index 0000000..a49f64e
--- /dev/null
+++ b/media/libaaudio/src/client/AAudioFlowGraph.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AAUDIO_FLOW_GRAPH_H
+#define ANDROID_AAUDIO_FLOW_GRAPH_H
+
+#include <memory>
+#include <stdint.h>
+#include <sys/types.h>
+#include <system/audio.h>
+
+#include <aaudio/AAudio.h>
+#include <flowgraph/ClipToRange.h>
+#include <flowgraph/MonoToMultiConverter.h>
+#include <flowgraph/RampLinear.h>
+
+class AAudioFlowGraph {
+public:
+ /** Connect several modules together to convert from source to sink.
+ * This should only be called once for each instance.
+ *
+ * @param sourceFormat
+ * @param sourceChannelCount
+ * @param sinkFormat
+ * @param sinkChannelCount
+ * @return
+ */
+ aaudio_result_t configure(audio_format_t sourceFormat,
+ int32_t sourceChannelCount,
+ audio_format_t sinkFormat,
+ int32_t sinkChannelCount);
+
+ void process(const void *source, void *destination, int32_t numFrames);
+
+ /**
+ * @param volume between 0.0 and 1.0
+ */
+ void setTargetVolume(float volume);
+
+ void setRampLengthInFrames(int32_t numFrames);
+
+private:
+ std::unique_ptr<flowgraph::AudioSource> mSource;
+ std::unique_ptr<flowgraph::RampLinear> mVolumeRamp;
+ std::unique_ptr<flowgraph::ClipToRange> mClipper;
+ std::unique_ptr<flowgraph::MonoToMultiConverter> mChannelConverter;
+ std::unique_ptr<flowgraph::AudioSink> mSink;
+};
+
+
+#endif //ANDROID_AAUDIO_FLOW_GRAPH_H
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 9204824..0a8021a 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -39,7 +39,6 @@
#include "core/AudioStreamBuilder.h"
#include "fifo/FifoBuffer.h"
#include "utility/AudioClock.h"
-#include "utility/LinearRamp.h"
#include "AudioStreamInternal.h"
@@ -92,11 +91,11 @@
}
// We have to do volume scaling. So we prefer FLOAT format.
- if (getFormat() == AAUDIO_FORMAT_UNSPECIFIED) {
- setFormat(AAUDIO_FORMAT_PCM_FLOAT);
+ if (getFormat() == AUDIO_FORMAT_DEFAULT) {
+ setFormat(AUDIO_FORMAT_PCM_FLOAT);
}
// Request FLOAT for the shared mixer.
- request.getConfiguration().setFormat(AAUDIO_FORMAT_PCM_FLOAT);
+ request.getConfiguration().setFormat(AUDIO_FORMAT_PCM_FLOAT);
// Build the request to send to the server.
request.setUserId(getuid());
@@ -126,7 +125,7 @@
// if that failed then try switching from mono to stereo if OUTPUT.
// Only do this in the client. Otherwise we end up with a mono mixer in the service
// that writes to a stereo MMAP stream.
- ALOGD("%s - openStream() returned %d, try switching from MONO to STEREO",
+ ALOGD("%s() - openStream() returned %d, try switching from MONO to STEREO",
__func__, mServiceStreamHandle);
request.getConfiguration().setSamplesPerFrame(2); // stereo
mServiceStreamHandle = mServiceInterface.openStream(request, configurationOutput);
@@ -212,9 +211,7 @@
mCallbackFrames = mFramesPerBurst;
}
- int32_t bytesPerFrame = getSamplesPerFrame()
- * AAudioConvert_formatToSizeInBytes(getFormat());
- int32_t callbackBufferSize = mCallbackFrames * bytesPerFrame;
+ const int32_t callbackBufferSize = mCallbackFrames * getBytesPerFrame();
mCallbackBuffer = new uint8_t[callbackBufferSize];
}
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 0425cd5..3bb9e1e 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -27,7 +27,6 @@
#include "client/AudioEndpoint.h"
#include "core/AudioStream.h"
#include "utility/AudioClock.h"
-#include "utility/LinearRamp.h"
using android::sp;
using android::IAAudioService;
@@ -193,6 +192,8 @@
int64_t mServiceLatencyNanos = 0;
+ // Sometimes the hardware is operating with a different channel count from the app.
+ // Then we require conversion in AAudio.
int32_t mDeviceChannelCount = 0;
};
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 0719fe1..4a0e6da 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -20,6 +20,7 @@
#include <utils/Log.h>
#include <algorithm>
+#include <audio_utils/primitives.h>
#include <aaudio/AAudio.h>
#include "client/AudioStreamInternalCapture.h"
@@ -165,35 +166,36 @@
// Read data in one or two parts.
for (int partIndex = 0; framesLeft > 0 && partIndex < WrappingBuffer::SIZE; partIndex++) {
int32_t framesToProcess = framesLeft;
- int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
+ const int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
if (framesAvailable <= 0) break;
if (framesToProcess > framesAvailable) {
framesToProcess = framesAvailable;
}
- int32_t numBytes = getBytesPerFrame() * framesToProcess;
- int32_t numSamples = framesToProcess * getSamplesPerFrame();
+ const int32_t numBytes = getBytesPerFrame() * framesToProcess;
+ const int32_t numSamples = framesToProcess * getSamplesPerFrame();
+ const audio_format_t sourceFormat = getDeviceFormat();
+ const audio_format_t destinationFormat = getFormat();
// TODO factor this out into a utility function
- if (getDeviceFormat() == getFormat()) {
+ if (sourceFormat == destinationFormat) {
memcpy(destination, wrappingBuffer.data[partIndex], numBytes);
- } else if (getDeviceFormat() == AAUDIO_FORMAT_PCM_I16
- && getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
- AAudioConvert_pcm16ToFloat(
- (const int16_t *) wrappingBuffer.data[partIndex],
+ } else if (sourceFormat == AUDIO_FORMAT_PCM_16_BIT
+ && destinationFormat == AUDIO_FORMAT_PCM_FLOAT) {
+ memcpy_to_float_from_i16(
(float *) destination,
- numSamples,
- 1.0f);
- } else if (getDeviceFormat() == AAUDIO_FORMAT_PCM_FLOAT
- && getFormat() == AAUDIO_FORMAT_PCM_I16) {
- AAudioConvert_floatToPcm16(
- (const float *) wrappingBuffer.data[partIndex],
+ (const int16_t *) wrappingBuffer.data[partIndex],
+ numSamples);
+ } else if (sourceFormat == AUDIO_FORMAT_PCM_FLOAT
+ && destinationFormat == AUDIO_FORMAT_PCM_16_BIT) {
+ memcpy_to_i16_from_float(
(int16_t *) destination,
- numSamples,
- 1.0f);
+ (const float *) wrappingBuffer.data[partIndex],
+ numSamples);
} else {
- ALOGE("Format conversion not supported!");
+ ALOGE("%s() - Format conversion not supported! audio_format_t source = %u, dest = %u",
+ __func__, sourceFormat, destinationFormat);
return AAUDIO_ERROR_INVALID_FORMAT;
}
destination += numBytes;
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 795ba2c..2ae37a5 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -43,9 +43,17 @@
aaudio_result_t AudioStreamInternalPlay::open(const AudioStreamBuilder &builder) {
aaudio_result_t result = AudioStreamInternal::open(builder);
if (result == AAUDIO_OK) {
+ result = mFlowGraph.configure(getFormat(),
+ getSamplesPerFrame(),
+ getDeviceFormat(),
+ getDeviceChannelCount());
+
+ if (result != AAUDIO_OK) {
+ close();
+ }
// Sample rate is constrained to common values by now and should not overflow.
int32_t numFrames = kRampMSec * getSampleRate() / AAUDIO_MILLIS_PER_SECOND;
- mVolumeRamp.setLengthInFrames(numFrames);
+ mFlowGraph.setRampLengthInFrames(numFrames);
}
return result;
}
@@ -216,22 +224,10 @@
}
int32_t numBytes = getBytesPerFrame() * framesToWrite;
- // Data conversion.
- float levelFrom;
- float levelTo;
- mVolumeRamp.nextSegment(framesToWrite, &levelFrom, &levelTo);
- AAudioDataConverter::FormattedData source(
- (void *)byteBuffer,
- getFormat(),
- getSamplesPerFrame());
- AAudioDataConverter::FormattedData destination(
- wrappingBuffer.data[partIndex],
- getDeviceFormat(),
- getDeviceChannelCount());
-
- AAudioDataConverter::convert(source, destination, framesToWrite,
- levelFrom, levelTo);
+ mFlowGraph.process((void *)byteBuffer,
+ wrappingBuffer.data[partIndex],
+ framesToWrite);
byteBuffer += numBytes;
framesLeft -= framesToWrite;
@@ -313,6 +309,6 @@
float combinedVolume = mStreamVolume * getDuckAndMuteVolume();
ALOGD("%s() mStreamVolume * duckAndMuteVolume = %f * %f = %f",
__func__, mStreamVolume, getDuckAndMuteVolume(), combinedVolume);
- mVolumeRamp.setTarget(combinedVolume);
+ mFlowGraph.setTargetVolume(combinedVolume);
return android::NO_ERROR;
}
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.h b/media/libaaudio/src/client/AudioStreamInternalPlay.h
index 977a909..cab2942 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.h
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.h
@@ -21,6 +21,7 @@
#include <aaudio/AAudio.h>
#include "binding/AAudioServiceInterface.h"
+#include "client/AAudioFlowGraph.h"
#include "client/AudioStreamInternal.h"
using android::sp;
@@ -93,7 +94,7 @@
int64_t mLastFramesRead = 0; // used to prevent retrograde motion
- LinearRamp mVolumeRamp;
+ AAudioFlowGraph mFlowGraph;
};
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index df0db79..8dc31d0 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -167,7 +167,9 @@
aaudio_format_t format)
{
AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
- streamBuilder->setFormat(format);
+ // Use audio_format_t everywhere internally.
+ const audio_format_t internalFormat = AAudioConvert_aaudioToAndroidDataFormat(format);
+ streamBuilder->setFormat(internalFormat);
}
AAUDIO_API void AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder* builder,
@@ -408,7 +410,9 @@
AAUDIO_API aaudio_format_t AAudioStream_getFormat(AAudioStream* stream)
{
AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
- return audioStream->getFormat();
+ // Use audio_format_t internally.
+ audio_format_t internalFormat = audioStream->getFormat();
+ return AAudioConvert_androidToAAudioDataFormat(internalFormat);
}
AAUDIO_API aaudio_result_t AAudioStream_setBufferSizeInFrames(AAudioStream* stream,
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.cpp b/media/libaaudio/src/core/AAudioStreamParameters.cpp
index d56701b..bd42697 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.cpp
+++ b/media/libaaudio/src/core/AAudioStreamParameters.cpp
@@ -48,6 +48,20 @@
mInputPreset = other.mInputPreset;
}
+static aaudio_result_t isFormatValid(audio_format_t format) {
+ switch (format) {
+ case AUDIO_FORMAT_DEFAULT:
+ case AUDIO_FORMAT_PCM_16_BIT:
+ case AUDIO_FORMAT_PCM_FLOAT:
+ break; // valid
+ default:
+ ALOGE("audioFormat not valid, audio_format_t = 0x%08x", format);
+ return AAUDIO_ERROR_INVALID_FORMAT;
+ // break;
+ }
+ return AAUDIO_OK;
+}
+
aaudio_result_t AAudioStreamParameters::validate() const {
if (mSamplesPerFrame != AAUDIO_UNSPECIFIED
&& (mSamplesPerFrame < SAMPLES_PER_FRAME_MIN || mSamplesPerFrame > SAMPLES_PER_FRAME_MAX)) {
@@ -79,16 +93,8 @@
// break;
}
- switch (mAudioFormat) {
- case AAUDIO_FORMAT_UNSPECIFIED:
- case AAUDIO_FORMAT_PCM_I16:
- case AAUDIO_FORMAT_PCM_FLOAT:
- break; // valid
- default:
- ALOGE("audioFormat not valid = %d", mAudioFormat);
- return AAUDIO_ERROR_INVALID_FORMAT;
- // break;
- }
+ aaudio_result_t result = isFormatValid (mAudioFormat);
+ if (result != AAUDIO_OK) return result;
if (mSampleRate != AAUDIO_UNSPECIFIED
&& (mSampleRate < SAMPLE_RATE_HZ_MIN || mSampleRate > SAMPLE_RATE_HZ_MAX)) {
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.h b/media/libaaudio/src/core/AAudioStreamParameters.h
index ce5dacd..6beb4b2 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.h
+++ b/media/libaaudio/src/core/AAudioStreamParameters.h
@@ -56,11 +56,11 @@
mSamplesPerFrame = samplesPerFrame;
}
- aaudio_format_t getFormat() const {
+ audio_format_t getFormat() const {
return mAudioFormat;
}
- void setFormat(aaudio_format_t audioFormat) {
+ void setFormat(audio_format_t audioFormat) {
mAudioFormat = audioFormat;
}
@@ -120,8 +120,11 @@
mSessionId = sessionId;
}
+ /**
+ * @return bytes per frame of getFormat()
+ */
int32_t calculateBytesPerFrame() const {
- return getSamplesPerFrame() * AAudioConvert_formatToSizeInBytes(getFormat());
+ return getSamplesPerFrame() * audio_bytes_per_sample(getFormat());
}
/**
@@ -139,7 +142,7 @@
int32_t mSampleRate = AAUDIO_UNSPECIFIED;
int32_t mDeviceId = AAUDIO_UNSPECIFIED;
aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
- aaudio_format_t mAudioFormat = AAUDIO_FORMAT_UNSPECIFIED;
+ audio_format_t mAudioFormat = AUDIO_FORMAT_DEFAULT;
aaudio_direction_t mDirection = AAUDIO_DIRECTION_OUTPUT;
aaudio_usage_t mUsage = AAUDIO_UNSPECIFIED;
aaudio_content_type_t mContentType = AAUDIO_UNSPECIFIED;
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 31b895c..60200b2 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -192,7 +192,7 @@
return mSampleRate;
}
- aaudio_format_t getFormat() const {
+ audio_format_t getFormat() const {
return mFormat;
}
@@ -249,21 +249,14 @@
* This is only valid after setFormat() has been called.
*/
int32_t getBytesPerSample() const {
- return AAudioConvert_formatToSizeInBytes(mFormat);
+ return audio_bytes_per_sample(mFormat);
}
/**
* This is only valid after setSamplesPerFrame() and setDeviceFormat() have been called.
*/
int32_t getBytesPerDeviceFrame() const {
- return mSamplesPerFrame * getBytesPerDeviceSample();
- }
-
- /**
- * This is only valid after setDeviceFormat() has been called.
- */
- int32_t getBytesPerDeviceSample() const {
- return AAudioConvert_formatToSizeInBytes(getDeviceFormat());
+ return getSamplesPerFrame() * audio_bytes_per_sample(getDeviceFormat());
}
virtual int64_t getFramesWritten() = 0;
@@ -478,18 +471,18 @@
/**
* This should not be called after the open() call.
*/
- void setFormat(aaudio_format_t format) {
+ void setFormat(audio_format_t format) {
mFormat = format;
}
/**
* This should not be called after the open() call.
*/
- void setDeviceFormat(aaudio_format_t format) {
+ void setDeviceFormat(audio_format_t format) {
mDeviceFormat = format;
}
- aaudio_format_t getDeviceFormat() const {
+ audio_format_t getDeviceFormat() const {
return mDeviceFormat;
}
@@ -565,7 +558,7 @@
int32_t mDeviceId = AAUDIO_UNSPECIFIED;
aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
bool mSharingModeMatchRequired = false; // must match sharing mode requested
- aaudio_format_t mFormat = AAUDIO_FORMAT_UNSPECIFIED;
+ audio_format_t mFormat = AUDIO_FORMAT_DEFAULT;
aaudio_stream_state_t mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
aaudio_performance_mode_t mPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
@@ -577,7 +570,7 @@
// Sometimes the hardware is operating with a different format from the app.
// Then we require conversion in AAudio.
- aaudio_format_t mDeviceFormat = AAUDIO_FORMAT_UNSPECIFIED;
+ audio_format_t mDeviceFormat = AUDIO_FORMAT_INVALID;
// callback ----------------------------------
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.h b/media/libaaudio/src/legacy/AudioStreamLegacy.h
index 494edbc..8e78554 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.h
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.h
@@ -77,7 +77,6 @@
virtual int64_t incrementClientFrameCounter(int32_t frames) = 0;
-
virtual int64_t getFramesWritten() override {
return mFramesWritten.get();
}
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index 505f2ee..dbf00a9 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -81,8 +81,8 @@
}
// Preserve behavior of API 26
- if (getFormat() == AAUDIO_FORMAT_UNSPECIFIED) {
- setFormat(AAUDIO_FORMAT_PCM_FLOAT);
+ if (getFormat() == AUDIO_FORMAT_DEFAULT) {
+ setFormat(AUDIO_FORMAT_PCM_FLOAT);
}
// Maybe change device format to get a FAST path.
@@ -99,12 +99,12 @@
// We just may not get a FAST track.
// But we wouldn't have anyway without this hack.
constexpr int32_t kMostLikelySampleRateForFast = 48000;
- if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT
+ if (getFormat() == AUDIO_FORMAT_PCM_FLOAT
&& perfMode == AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
&& (samplesPerFrame <= 2) // FAST only for mono and stereo
&& (getSampleRate() == kMostLikelySampleRateForFast
|| getSampleRate() == AAUDIO_UNSPECIFIED)) {
- setDeviceFormat(AAUDIO_FORMAT_PCM_I16);
+ setDeviceFormat(AUDIO_FORMAT_PCM_16_BIT);
} else {
setDeviceFormat(getFormat());
}
@@ -147,8 +147,7 @@
// ----------- open the AudioRecord ---------------------
// Might retry, but never more than once.
for (int i = 0; i < 2; i ++) {
- audio_format_t requestedInternalFormat =
- AAudioConvert_aaudioToAndroidDataFormat(getDeviceFormat());
+ const audio_format_t requestedInternalFormat = getDeviceFormat();
mAudioRecord = new AudioRecord(
mOpPackageName // const String16& opPackageName TODO does not compile
@@ -214,8 +213,8 @@
}
// Allocate format conversion buffer if needed.
- if (getDeviceFormat() == AAUDIO_FORMAT_PCM_I16
- && getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
+ if (getDeviceFormat() == AUDIO_FORMAT_PCM_16_BIT
+ && getFormat() == AUDIO_FORMAT_PCM_FLOAT) {
if (builder.getDataCallbackProc() != nullptr) {
// If we have a callback then we need to convert the data into an internal float
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 505cd77..1572f0d 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -88,9 +88,9 @@
int32_t notificationFrames = 0;
- audio_format_t format = (getFormat() == AAUDIO_FORMAT_UNSPECIFIED)
+ const audio_format_t format = (getFormat() == AUDIO_FORMAT_DEFAULT)
? AUDIO_FORMAT_PCM_FLOAT
- : AAudioConvert_aaudioToAndroidDataFormat(getFormat());
+ : getFormat();
// Setup the callback if there is one.
AudioTrack::callback_t callback = nullptr;
@@ -178,10 +178,8 @@
// Get the actual values from the AudioTrack.
setSamplesPerFrame(mAudioTrack->channelCount());
- aaudio_format_t aaudioFormat =
- AAudioConvert_androidToAAudioDataFormat(mAudioTrack->format());
- setFormat(aaudioFormat);
- setDeviceFormat(aaudioFormat);
+ setFormat(mAudioTrack->format());
+ setDeviceFormat(mAudioTrack->format());
int32_t actualSampleRate = mAudioTrack->getSampleRate();
ALOGW_IF(actualSampleRate != getSampleRate(),
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index 40ebb76..f5b3ad4 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -33,395 +33,6 @@
using namespace android;
-// This is 3 dB, (10^(3/20)), to match the maximum headroom in AudioTrack for float data.
-// It is designed to allow occasional transient peaks.
-#define MAX_HEADROOM (1.41253754f)
-#define MIN_HEADROOM (0 - MAX_HEADROOM)
-
-int32_t AAudioConvert_formatToSizeInBytes(aaudio_format_t format) {
- int32_t size = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
- switch (format) {
- case AAUDIO_FORMAT_PCM_I16:
- size = sizeof(int16_t);
- break;
- case AAUDIO_FORMAT_PCM_FLOAT:
- size = sizeof(float);
- break;
- default:
- break;
- }
- return size;
-}
-
-// TODO expose and call clamp16_from_float function in primitives.h
-static inline int16_t clamp16_from_float(float f) {
- static const float scale = 1 << 15;
- return (int16_t) roundf(fmaxf(fminf(f * scale, scale - 1.f), -scale));
-}
-
-// Clip to valid range of a float sample to prevent excessive volume.
-// By using fmin and fmax we also protect against NaN.
-static float clipToMinMaxHeadroom(float input) {
- return fmin(MAX_HEADROOM, fmax(MIN_HEADROOM, input));
-}
-
-static float clipAndClampFloatToPcm16(float sample, float scaler) {
- // Clip to valid range of a float sample to prevent excessive volume.
- sample = clipToMinMaxHeadroom(sample);
-
- // Scale and convert to a short.
- float fval = sample * scaler;
- return clamp16_from_float(fval);
-}
-
-void AAudioConvert_floatToPcm16(const float *source,
- int16_t *destination,
- int32_t numSamples,
- float amplitude) {
- const float scaler = amplitude;
- for (int i = 0; i < numSamples; i++) {
- float sample = *source++;
- *destination++ = clipAndClampFloatToPcm16(sample, scaler);
- }
-}
-
-void AAudioConvert_floatToPcm16(const float *source,
- int16_t *destination,
- int32_t numFrames,
- int32_t samplesPerFrame,
- float amplitude1,
- float amplitude2) {
- float scaler = amplitude1;
- // divide by numFrames so that we almost reach amplitude2
- float delta = (amplitude2 - amplitude1) / numFrames;
- for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
- for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
- float sample = *source++;
- *destination++ = clipAndClampFloatToPcm16(sample, scaler);
- }
- scaler += delta;
- }
-}
-
-#define SHORT_SCALE 32768
-
-void AAudioConvert_pcm16ToFloat(const int16_t *source,
- float *destination,
- int32_t numSamples,
- float amplitude) {
- const float scaler = amplitude / SHORT_SCALE;
- for (int i = 0; i < numSamples; i++) {
- destination[i] = source[i] * scaler;
- }
-}
-
-// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
-void AAudioConvert_pcm16ToFloat(const int16_t *source,
- float *destination,
- int32_t numFrames,
- int32_t samplesPerFrame,
- float amplitude1,
- float amplitude2) {
- float scaler = amplitude1 / SHORT_SCALE;
- const float delta = (amplitude2 - amplitude1) / (SHORT_SCALE * (float) numFrames);
- for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
- for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
- *destination++ = *source++ * scaler;
- }
- scaler += delta;
- }
-}
-
-
-// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
-void AAudio_linearRamp(const float *source,
- float *destination,
- int32_t numFrames,
- int32_t samplesPerFrame,
- float amplitude1,
- float amplitude2) {
- float scaler = amplitude1;
- const float delta = (amplitude2 - amplitude1) / numFrames;
- for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
- for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
- float sample = *source++;
- // Clip to valid range of a float sample to prevent excessive volume.
- sample = clipToMinMaxHeadroom(sample);
-
- *destination++ = sample * scaler;
- }
- scaler += delta;
- }
-}
-
-// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
-void AAudio_linearRamp(const int16_t *source,
- int16_t *destination,
- int32_t numFrames,
- int32_t samplesPerFrame,
- float amplitude1,
- float amplitude2) {
- // Because we are converting from int16 to 1nt16, we do not have to scale by 1/32768.
- float scaler = amplitude1;
- const float delta = (amplitude2 - amplitude1) / numFrames;
- for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
- for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
- // No need to clip because int16_t range is inherently limited.
- float sample = *source++ * scaler;
- *destination++ = (int16_t) roundf(sample);
- }
- scaler += delta;
- }
-}
-
-// *************************************************************************************
-// Convert Mono To Stereo at the same time as converting format.
-void AAudioConvert_formatMonoToStereo(const float *source,
- int16_t *destination,
- int32_t numFrames,
- float amplitude) {
- const float scaler = amplitude;
- for (int i = 0; i < numFrames; i++) {
- float sample = *source++;
- int16_t sample16 = clipAndClampFloatToPcm16(sample, scaler);
- *destination++ = sample16;
- *destination++ = sample16;
- }
-}
-
-void AAudioConvert_formatMonoToStereo(const float *source,
- int16_t *destination,
- int32_t numFrames,
- float amplitude1,
- float amplitude2) {
- // divide by numFrames so that we almost reach amplitude2
- const float delta = (amplitude2 - amplitude1) / numFrames;
- for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
- const float scaler = amplitude1 + (frameIndex * delta);
- const float sample = *source++;
- int16_t sample16 = clipAndClampFloatToPcm16(sample, scaler);
- *destination++ = sample16;
- *destination++ = sample16;
- }
-}
-
-void AAudioConvert_formatMonoToStereo(const int16_t *source,
- float *destination,
- int32_t numFrames,
- float amplitude) {
- const float scaler = amplitude / SHORT_SCALE;
- for (int i = 0; i < numFrames; i++) {
- float sample = source[i] * scaler;
- *destination++ = sample;
- *destination++ = sample;
- }
-}
-
-// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
-void AAudioConvert_formatMonoToStereo(const int16_t *source,
- float *destination,
- int32_t numFrames,
- float amplitude1,
- float amplitude2) {
- const float scaler1 = amplitude1 / SHORT_SCALE;
- const float delta = (amplitude2 - amplitude1) / (SHORT_SCALE * (float) numFrames);
- for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
- float scaler = scaler1 + (frameIndex * delta);
- float sample = source[frameIndex] * scaler;
- *destination++ = sample;
- *destination++ = sample;
- }
-}
-
-// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
-void AAudio_linearRampMonoToStereo(const float *source,
- float *destination,
- int32_t numFrames,
- float amplitude1,
- float amplitude2) {
- const float delta = (amplitude2 - amplitude1) / numFrames;
- for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
- float sample = *source++;
-
- // Clip to valid range of a float sample to prevent excessive volume.
- sample = clipToMinMaxHeadroom(sample);
-
- const float scaler = amplitude1 + (frameIndex * delta);
- float sampleScaled = sample * scaler;
- *destination++ = sampleScaled;
- *destination++ = sampleScaled;
- }
-}
-
-// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
-void AAudio_linearRampMonoToStereo(const int16_t *source,
- int16_t *destination,
- int32_t numFrames,
- float amplitude1,
- float amplitude2) {
- // Because we are converting from int16 to 1nt16, we do not have to scale by 1/32768.
- const float delta = (amplitude2 - amplitude1) / numFrames;
- for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
- const float scaler = amplitude1 + (frameIndex * delta);
- // No need to clip because int16_t range is inherently limited.
- const float sample = *source++ * scaler;
- int16_t sample16 = (int16_t) roundf(sample);
- *destination++ = sample16;
- *destination++ = sample16;
- }
-}
-
-// *************************************************************************************
-void AAudioDataConverter::convert(
- const FormattedData &source,
- const FormattedData &destination,
- int32_t numFrames,
- float levelFrom,
- float levelTo) {
-
- if (source.channelCount == 1 && destination.channelCount == 2) {
- convertMonoToStereo(source,
- destination,
- numFrames,
- levelFrom,
- levelTo);
- } else {
- // We only support mono to stereo conversion. Otherwise source and destination
- // must match.
- assert(source.channelCount == destination.channelCount);
- convertChannelsMatch(source,
- destination,
- numFrames,
- levelFrom,
- levelTo);
- }
-}
-
-void AAudioDataConverter::convertMonoToStereo(
- const FormattedData &source,
- const FormattedData &destination,
- int32_t numFrames,
- float levelFrom,
- float levelTo) {
-
- // The formats are validated when the stream is opened so we do not have to
- // check for illegal combinations here.
- if (source.format == AAUDIO_FORMAT_PCM_FLOAT) {
- if (destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
- AAudio_linearRampMonoToStereo(
- (const float *) source.data,
- (float *) destination.data,
- numFrames,
- levelFrom,
- levelTo);
- } else if (destination.format == AAUDIO_FORMAT_PCM_I16) {
- if (levelFrom != levelTo) {
- AAudioConvert_formatMonoToStereo(
- (const float *) source.data,
- (int16_t *) destination.data,
- numFrames,
- levelFrom,
- levelTo);
- } else {
- AAudioConvert_formatMonoToStereo(
- (const float *) source.data,
- (int16_t *) destination.data,
- numFrames,
- levelTo);
- }
- }
- } else if (source.format == AAUDIO_FORMAT_PCM_I16) {
- if (destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
- if (levelFrom != levelTo) {
- AAudioConvert_formatMonoToStereo(
- (const int16_t *) source.data,
- (float *) destination.data,
- numFrames,
- levelFrom,
- levelTo);
- } else {
- AAudioConvert_formatMonoToStereo(
- (const int16_t *) source.data,
- (float *) destination.data,
- numFrames,
- levelTo);
- }
- } else if (destination.format == AAUDIO_FORMAT_PCM_I16) {
- AAudio_linearRampMonoToStereo(
- (const int16_t *) source.data,
- (int16_t *) destination.data,
- numFrames,
- levelFrom,
- levelTo);
- }
- }
-}
-
-void AAudioDataConverter::convertChannelsMatch(
- const FormattedData &source,
- const FormattedData &destination,
- int32_t numFrames,
- float levelFrom,
- float levelTo) {
- const int32_t numSamples = numFrames * source.channelCount;
-
- // The formats are validated when the stream is opened so we do not have to
- // check for illegal combinations here.
- if (source.format == AAUDIO_FORMAT_PCM_FLOAT) {
- if (destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
- AAudio_linearRamp(
- (const float *) source.data,
- (float *) destination.data,
- numFrames,
- source.channelCount,
- levelFrom,
- levelTo);
- } else if (destination.format == AAUDIO_FORMAT_PCM_I16) {
- if (levelFrom != levelTo) {
- AAudioConvert_floatToPcm16(
- (const float *) source.data,
- (int16_t *) destination.data,
- numFrames,
- source.channelCount,
- levelFrom,
- levelTo);
- } else {
- AAudioConvert_floatToPcm16(
- (const float *) source.data,
- (int16_t *) destination.data,
- numSamples,
- levelTo);
- }
- }
- } else if (source.format == AAUDIO_FORMAT_PCM_I16) {
- if (destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
- if (levelFrom != levelTo) {
- AAudioConvert_pcm16ToFloat(
- (const int16_t *) source.data,
- (float *) destination.data,
- numFrames,
- source.channelCount,
- levelFrom,
- levelTo);
- } else {
- AAudioConvert_pcm16ToFloat(
- (const int16_t *) source.data,
- (float *) destination.data,
- numSamples,
- levelTo);
- }
- } else if (destination.format == AAUDIO_FORMAT_PCM_I16) {
- AAudio_linearRamp(
- (const int16_t *) source.data,
- (int16_t *) destination.data,
- numFrames,
- source.channelCount,
- levelFrom,
- levelTo);
- }
- }
-}
-
status_t AAudioConvert_aaudioToAndroidStatus(aaudio_result_t result) {
// This covers the case for AAUDIO_OK and for positive results.
if (result >= 0) {
@@ -513,6 +124,9 @@
audio_format_t AAudioConvert_aaudioToAndroidDataFormat(aaudio_format_t aaudioFormat) {
audio_format_t androidFormat;
switch (aaudioFormat) {
+ case AAUDIO_FORMAT_UNSPECIFIED:
+ androidFormat = AUDIO_FORMAT_DEFAULT;
+ break;
case AAUDIO_FORMAT_PCM_I16:
androidFormat = AUDIO_FORMAT_PCM_16_BIT;
break;
@@ -520,16 +134,19 @@
androidFormat = AUDIO_FORMAT_PCM_FLOAT;
break;
default:
- androidFormat = AUDIO_FORMAT_DEFAULT;
- ALOGE("AAudioConvert_aaudioToAndroidDataFormat 0x%08X unrecognized", aaudioFormat);
+ androidFormat = AUDIO_FORMAT_INVALID;
+ ALOGE("%s() 0x%08X unrecognized", __func__, aaudioFormat);
break;
}
return androidFormat;
}
aaudio_format_t AAudioConvert_androidToAAudioDataFormat(audio_format_t androidFormat) {
- aaudio_format_t aaudioFormat = AAUDIO_FORMAT_INVALID;
+ aaudio_format_t aaudioFormat;
switch (androidFormat) {
+ case AUDIO_FORMAT_DEFAULT:
+ aaudioFormat = AAUDIO_FORMAT_UNSPECIFIED;
+ break;
case AUDIO_FORMAT_PCM_16_BIT:
aaudioFormat = AAUDIO_FORMAT_PCM_I16;
break;
@@ -538,7 +155,7 @@
break;
default:
aaudioFormat = AAUDIO_FORMAT_INVALID;
- ALOGE("AAudioConvert_androidToAAudioDataFormat 0x%08X unrecognized", androidFormat);
+ ALOGE("%s() 0x%08X unrecognized", __func__, androidFormat);
break;
}
return aaudioFormat;
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index 4b975e8..dc2b198 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -45,156 +45,6 @@
audio_session_t AAudioConvert_aaudioToAndroidSessionId(aaudio_session_id_t sessionId);
/**
- * Convert an array of floats to an array of int16_t.
- *
- * @param source
- * @param destination
- * @param numSamples number of values in the array
- * @param amplitude level between 0.0 and 1.0
- */
-void AAudioConvert_floatToPcm16(const float *source,
- int16_t *destination,
- int32_t numSamples,
- float amplitude);
-
-/**
- * Convert floats to int16_t and scale by a linear ramp.
- *
- * The ramp stops just short of reaching amplitude2 so that the next
- * ramp can start at amplitude2 without causing a discontinuity.
- *
- * @param source
- * @param destination
- * @param numFrames
- * @param samplesPerFrame AKA number of channels
- * @param amplitude1 level at start of ramp, between 0.0 and 1.0
- * @param amplitude2 level past end of ramp, between 0.0 and 1.0
- */
-void AAudioConvert_floatToPcm16(const float *source,
- int16_t *destination,
- int32_t numFrames,
- int32_t samplesPerFrame,
- float amplitude1,
- float amplitude2);
-
-/**
- * Convert int16_t array to float array ranging from -1.0 to +1.0.
- * @param source
- * @param destination
- * @param numSamples
- */
-//void AAudioConvert_pcm16ToFloat(const int16_t *source, int32_t numSamples,
-// float *destination);
-
-/**
- *
- * Convert int16_t array to float array ranging from +/- amplitude.
- * @param source
- * @param destination
- * @param numSamples
- * @param amplitude
- */
-void AAudioConvert_pcm16ToFloat(const int16_t *source,
- float *destination,
- int32_t numSamples,
- float amplitude);
-
-/**
- * Convert floats to int16_t and scale by a linear ramp.
- *
- * The ramp stops just short of reaching amplitude2 so that the next
- * ramp can start at amplitude2 without causing a discontinuity.
- *
- * @param source
- * @param destination
- * @param numFrames
- * @param samplesPerFrame AKA number of channels
- * @param amplitude1 level at start of ramp, between 0.0 and 1.0
- * @param amplitude2 level at end of ramp, between 0.0 and 1.0
- */
-void AAudioConvert_pcm16ToFloat(const int16_t *source,
- float *destination,
- int32_t numFrames,
- int32_t samplesPerFrame,
- float amplitude1,
- float amplitude2);
-
-/**
- * Scale floats by a linear ramp.
- *
- * The ramp stops just short of reaching amplitude2 so that the next
- * ramp can start at amplitude2 without causing a discontinuity.
- *
- * @param source
- * @param destination
- * @param numFrames
- * @param samplesPerFrame
- * @param amplitude1
- * @param amplitude2
- */
-void AAudio_linearRamp(const float *source,
- float *destination,
- int32_t numFrames,
- int32_t samplesPerFrame,
- float amplitude1,
- float amplitude2);
-
-/**
- * Scale int16_t's by a linear ramp.
- *
- * The ramp stops just short of reaching amplitude2 so that the next
- * ramp can start at amplitude2 without causing a discontinuity.
- *
- * @param source
- * @param destination
- * @param numFrames
- * @param samplesPerFrame
- * @param amplitude1
- * @param amplitude2
- */
-void AAudio_linearRamp(const int16_t *source,
- int16_t *destination,
- int32_t numFrames,
- int32_t samplesPerFrame,
- float amplitude1,
- float amplitude2);
-
-class AAudioDataConverter {
-public:
-
- struct FormattedData {
-
- FormattedData(void *data, aaudio_format_t format, int32_t channelCount)
- : data(data)
- , format(format)
- , channelCount(channelCount) {}
-
- const void *data = nullptr;
- const aaudio_format_t format = AAUDIO_FORMAT_UNSPECIFIED;
- const int32_t channelCount = 1;
- };
-
- static void convert(const FormattedData &source,
- const FormattedData &destination,
- int32_t numFrames,
- float levelFrom,
- float levelTo);
-
-private:
- static void convertMonoToStereo(const FormattedData &source,
- const FormattedData &destination,
- int32_t numFrames,
- float levelFrom,
- float levelTo);
-
- static void convertChannelsMatch(const FormattedData &source,
- const FormattedData &destination,
- int32_t numFrames,
- float levelFrom,
- float levelTo);
-};
-
-/**
* Calculate the number of bytes and prevent numeric overflow.
* The *sizeInBytes will be set to zero if there is an error.
*
@@ -234,12 +84,6 @@
*/
audio_source_t AAudioConvert_inputPresetToAudioSource(aaudio_input_preset_t preset);
-/**
- * @return the size of a sample of the given format in bytes or AAUDIO_ERROR_ILLEGAL_ARGUMENT
- */
-int32_t AAudioConvert_formatToSizeInBytes(aaudio_format_t format);
-
-
// Note that this code may be replaced by Settings or by some other system configuration tool.
#define AAUDIO_PROP_MMAP_POLICY "aaudio.mmap_policy"
diff --git a/media/libaaudio/src/utility/LinearRamp.cpp b/media/libaaudio/src/utility/LinearRamp.cpp
deleted file mode 100644
index 1714bbf..0000000
--- a/media/libaaudio/src/utility/LinearRamp.cpp
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "LinearRamp.h"
-
-bool LinearRamp::isRamping() {
- float target = mTarget.load();
- if (target != mLevelTo) {
- // Update target. Continue from previous level.
- mLevelTo = target;
- mRemaining = mLengthInFrames;
- return true;
- } else {
- return mRemaining > 0;
- }
-}
-
-bool LinearRamp::nextSegment(int32_t frames, float *levelFrom, float *levelTo) {
- bool ramping = isRamping();
- *levelFrom = mLevelFrom;
- if (ramping) {
- float level;
- if (frames >= mRemaining) {
- level = mLevelTo;
- mRemaining = 0;
- } else {
- // Interpolate to a point along the full ramp.
- level = mLevelFrom + (frames * (mLevelTo - mLevelFrom) / mRemaining);
- mRemaining -= frames;
- }
- mLevelFrom = level; // for next ramp
- *levelTo = level;
- } else {
- *levelTo = mLevelTo;
- }
- return ramping;
-}
\ No newline at end of file
diff --git a/media/libaaudio/src/utility/LinearRamp.h b/media/libaaudio/src/utility/LinearRamp.h
deleted file mode 100644
index 2b1b8e0..0000000
--- a/media/libaaudio/src/utility/LinearRamp.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef AAUDIO_LINEAR_RAMP_H
-#define AAUDIO_LINEAR_RAMP_H
-
-#include <atomic>
-#include <stdint.h>
-
-/**
- * Generate segments along a linear ramp.
- * The ramp target can be updated from another thread.
- * When the target is updated, a new ramp is started from the current position.
- *
- * The first ramp starts at 0.0.
- *
- */
-class LinearRamp {
-public:
- LinearRamp() {
- mTarget.store(1.0f);
- }
-
- void setLengthInFrames(int32_t frames) {
- mLengthInFrames = frames;
- }
-
- int32_t getLengthInFrames() {
- return mLengthInFrames;
- }
-
- /**
- * This may be called by another thread.
- * @param target
- */
- void setTarget(float target) {
- mTarget.store(target);
- }
-
- float getTarget() {
- return mTarget.load();
- }
-
- /**
- * Force the nextSegment to start from this level.
- *
- * WARNING: this can cause a discontinuity if called while the ramp is being used.
- * Only call this when setting the initial ramp.
- *
- * @param level
- */
- void forceCurrent(float level) {
- mLevelFrom = level;
- mLevelTo = level; // forces a ramp if it does not match target
- }
-
- float getCurrent() {
- return mLevelFrom;
- }
-
- /**
- * Get levels for next ramp segment.
- *
- * @param frames number of frames in the segment
- * @param levelFrom pointer to starting amplitude
- * @param levelTo pointer to ending amplitude
- * @return true if ramp is still moving towards the target
- */
- bool nextSegment(int32_t frames, float *levelFrom, float *levelTo);
-
-private:
-
- bool isRamping();
-
- std::atomic<float> mTarget;
-
- int32_t mLengthInFrames = 48000 / 100; // 10 msec at 48000 Hz
- int32_t mRemaining = 0;
- float mLevelFrom = 0.0f;
- float mLevelTo = 0.0f;
-};
-
-
-#endif //AAUDIO_LINEAR_RAMP_H
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 99aa4d6..ef272b0 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -34,13 +34,6 @@
}
cc_test {
- name: "test_linear_ramp",
- defaults: ["libaaudio_tests_defaults"],
- srcs: ["test_linear_ramp.cpp"],
- shared_libs: ["libaaudio"],
-}
-
-cc_test {
name: "test_open_params",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_open_params.cpp"],
diff --git a/media/libaaudio/tests/test_linear_ramp.cpp b/media/libaaudio/tests/test_linear_ramp.cpp
deleted file mode 100644
index 93226ba..0000000
--- a/media/libaaudio/tests/test_linear_ramp.cpp
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <iostream>
-#include <math.h>
-
-#include <gtest/gtest.h>
-
-#include "utility/AAudioUtilities.h"
-#include "utility/LinearRamp.h"
-
-TEST(test_linear_ramp, linear_ramp_segments) {
- LinearRamp ramp;
- const float source[4] = {1.0f, 1.0f, 1.0f, 1.0f };
- float destination[4] = {1.0f, 1.0f, 1.0f, 1.0f };
-
- float levelFrom = -1.0f;
- float levelTo = -1.0f;
- ramp.setLengthInFrames(8);
- ramp.setTarget(8.0f);
-
- EXPECT_EQ(8, ramp.getLengthInFrames());
-
- bool ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- EXPECT_EQ(1, ramping);
- EXPECT_EQ(0.0f, levelFrom);
- EXPECT_EQ(4.0f, levelTo);
-
- AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- EXPECT_EQ(0.0f, destination[0]);
- EXPECT_EQ(1.0f, destination[1]);
- EXPECT_EQ(2.0f, destination[2]);
- EXPECT_EQ(3.0f, destination[3]);
-
- ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- EXPECT_EQ(1, ramping);
- EXPECT_EQ(4.0f, levelFrom);
- EXPECT_EQ(8.0f, levelTo);
-
- AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- EXPECT_EQ(4.0f, destination[0]);
- EXPECT_EQ(5.0f, destination[1]);
- EXPECT_EQ(6.0f, destination[2]);
- EXPECT_EQ(7.0f, destination[3]);
-
- ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- EXPECT_EQ(0, ramping);
- EXPECT_EQ(8.0f, levelFrom);
- EXPECT_EQ(8.0f, levelTo);
-
- AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- EXPECT_EQ(8.0f, destination[0]);
- EXPECT_EQ(8.0f, destination[1]);
- EXPECT_EQ(8.0f, destination[2]);
- EXPECT_EQ(8.0f, destination[3]);
-
-};
-
-
-TEST(test_linear_ramp, linear_ramp_forced) {
- LinearRamp ramp;
- const float source[4] = {1.0f, 1.0f, 1.0f, 1.0f };
- float destination[4] = {1.0f, 1.0f, 1.0f, 1.0f };
-
- float levelFrom = -1.0f;
- float levelTo = -1.0f;
- ramp.setLengthInFrames(4);
- ramp.setTarget(8.0f);
- ramp.forceCurrent(4.0f);
- EXPECT_EQ(4.0f, ramp.getCurrent());
-
- bool ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- EXPECT_EQ(1, ramping);
- EXPECT_EQ(4.0f, levelFrom);
- EXPECT_EQ(8.0f, levelTo);
-
- AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- EXPECT_EQ(4.0f, destination[0]);
- EXPECT_EQ(5.0f, destination[1]);
- EXPECT_EQ(6.0f, destination[2]);
- EXPECT_EQ(7.0f, destination[3]);
-
- ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- EXPECT_EQ(0, ramping);
- EXPECT_EQ(8.0f, levelFrom);
- EXPECT_EQ(8.0f, levelTo);
-
- AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- EXPECT_EQ(8.0f, destination[0]);
- EXPECT_EQ(8.0f, destination[1]);
- EXPECT_EQ(8.0f, destination[2]);
- EXPECT_EQ(8.0f, destination[3]);
-
-};
-
-constexpr int16_t kMaxI16 = INT16_MAX;
-constexpr int16_t kMinI16 = INT16_MIN;
-constexpr int16_t kHalfI16 = 16384;
-constexpr int16_t kTenthI16 = 3277;
-
-//void AAudioConvert_floatToPcm16(const float *source,
-// int16_t *destination,
-// int32_t numSamples,
-// float amplitude);
-TEST(test_linear_ramp, float_to_i16) {
- const float source[] = {12345.6f, 1.0f, 0.5f, 0.1f, 0.0f, -0.1f, -0.5f, -1.0f, -12345.6f};
- constexpr size_t count = sizeof(source) / sizeof(source[0]);
- int16_t destination[count];
- const int16_t expected[count] = {kMaxI16, kMaxI16, kHalfI16, kTenthI16, 0,
- -kTenthI16, -kHalfI16, kMinI16, kMinI16};
-
- AAudioConvert_floatToPcm16(source, destination, count, 1.0f);
- for (size_t i = 0; i < count; i++) {
- EXPECT_EQ(expected[i], destination[i]);
- }
-
-}
-
-//void AAudioConvert_pcm16ToFloat(const int16_t *source,
-// float *destination,
-// int32_t numSamples,
-// float amplitude);
-TEST(test_linear_ramp, i16_to_float) {
- const int16_t source[] = {kMaxI16, kHalfI16, kTenthI16, 0,
- -kTenthI16, -kHalfI16, kMinI16};
- constexpr size_t count = sizeof(source) / sizeof(source[0]);
- float destination[count];
- const float expected[count] = {(32767.0f / 32768.0f), 0.5f, 0.1f, 0.0f, -0.1f, -0.5f, -1.0f};
-
- AAudioConvert_pcm16ToFloat(source, destination, count, 1.0f);
- for (size_t i = 0; i < count; i++) {
- EXPECT_NEAR(expected[i], destination[i], 0.0001f);
- }
-
-}
-
-//void AAudio_linearRamp(const int16_t *source,
-// int16_t *destination,
-// int32_t numFrames,
-// int32_t samplesPerFrame,
-// float amplitude1,
-// float amplitude2);
-TEST(test_linear_ramp, ramp_i16_to_i16) {
- const int16_t source[] = {1, 1, 1, 1, 1, 1, 1, 1};
- constexpr size_t count = sizeof(source) / sizeof(source[0]);
- int16_t destination[count];
- // Ramp will sweep from -1 to almost +1
- const int16_t expected[count] = {
- -1, // from -1.00
- -1, // from -0.75
- -1, // from -0.55, round away from zero
- 0, // from -0.25, round up to zero
- 0, // from 0.00
- 0, // from 0.25, round down to zero
- 1, // from 0.50, round away from zero
- 1 // from 0.75
- };
-
- // sweep across zero to test symmetry
- constexpr float amplitude1 = -1.0;
- constexpr float amplitude2 = 1.0;
- AAudio_linearRamp(source, destination, count, 1, amplitude1, amplitude2);
- for (size_t i = 0; i < count; i++) {
- EXPECT_EQ(expected[i], destination[i]);
- }
-
-}