AudioFlinger: Split off audio processing library

Test: native AudioResampler test, general playback test
Bug: 31015569
Change-Id: Ifb248f4402a583438d756c014dcd7a4577aef713
diff --git a/media/libaudioprocessing/tests/Android.mk b/media/libaudioprocessing/tests/Android.mk
new file mode 100644
index 0000000..23e1c3a
--- /dev/null
+++ b/media/libaudioprocessing/tests/Android.mk
@@ -0,0 +1,87 @@
+# Build the unit tests for libaudioprocessing
+
+LOCAL_PATH := $(call my-dir)
+
+#
+# resampler unit test
+#
+include $(CLEAR_VARS)
+
+LOCAL_SHARED_LIBRARIES := \
+    libaudioutils \
+    libaudioprocessing \
+    libcutils \
+    liblog \
+    libutils \
+
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+
+LOCAL_SRC_FILES := \
+    resampler_tests.cpp
+
+LOCAL_MODULE := resampler_tests
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_CFLAGS := -Werror -Wall
+
+include $(BUILD_NATIVE_TEST)
+
+#
+# audio mixer test tool
+#
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+    test-mixer.cpp \
+
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+
+LOCAL_STATIC_LIBRARIES := \
+    libsndfile \
+
+LOCAL_SHARED_LIBRARIES := \
+    libaudioprocessing \
+    libaudioutils \
+    libcutils \
+    liblog \
+    libutils \
+
+LOCAL_MODULE := test-mixer
+
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_CFLAGS := -Werror -Wall
+
+include $(BUILD_EXECUTABLE)
+
+#
+# build audio resampler test tool
+#
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+    test-resampler.cpp \
+
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+
+LOCAL_STATIC_LIBRARIES := \
+    libsndfile \
+
+LOCAL_SHARED_LIBRARIES := \
+    libaudioprocessing \
+    libaudioutils \
+    libcutils \
+    liblog \
+    libutils \
+
+LOCAL_MODULE := test-resampler
+
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_CFLAGS := -Werror -Wall
+
+include $(BUILD_EXECUTABLE)
diff --git a/media/libaudioprocessing/tests/README b/media/libaudioprocessing/tests/README
new file mode 100644
index 0000000..ed7e2ed
--- /dev/null
+++ b/media/libaudioprocessing/tests/README
@@ -0,0 +1,13 @@
+To build the libsonic dependency:
+pushd $ANDROID_BUILD_TOP/external/sonic
+mm
+popd
+
+To build the audio processing library:
+pushd ..
+(Optionally uncomment USE_NEON=false in Android.mk to disable NEON optimizations.)
+mm
+popd
+
+Then build the tests here:
+mm
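+
+For example, to push and run the resampler unit test on a connected device
+(paths as used by build_and_run_all_unit_tests.sh; adjust for your target):
+adb root && adb wait-for-device remount
+adb push $OUT/data/nativetest/resampler_tests/resampler_tests /data/nativetest/resampler_tests/resampler_tests
+adb shell /data/nativetest/resampler_tests/resampler_tests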
diff --git a/media/libaudioprocessing/tests/build_and_run_all_unit_tests.sh b/media/libaudioprocessing/tests/build_and_run_all_unit_tests.sh
new file mode 100755
index 0000000..704d095
--- /dev/null
+++ b/media/libaudioprocessing/tests/build_and_run_all_unit_tests.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+if [ -z "$ANDROID_BUILD_TOP" ]; then
+    echo "Android build environment not set"
+    exit -1
+fi
+
+# ensure we have mm
+. $ANDROID_BUILD_TOP/build/envsetup.sh
+
+pushd $ANDROID_BUILD_TOP/frameworks/av/media/libaudioprocessing
+pwd
+mm
+
+echo "waiting for device"
+adb root && adb wait-for-device remount
+adb push $OUT/system/lib/libaudioprocessing.so /system/lib
+adb push $OUT/system/lib64/libaudioprocessing.so /system/lib64
+adb push $OUT/data/nativetest/resampler_tests/resampler_tests /data/nativetest/resampler_tests/resampler_tests
+adb push $OUT/data/nativetest64/resampler_tests/resampler_tests /data/nativetest64/resampler_tests/resampler_tests
+
+sh $ANDROID_BUILD_TOP/frameworks/av/media/libaudioprocessing/tests/run_all_unit_tests.sh
+
+popd
diff --git a/media/libaudioprocessing/tests/mixer_to_wav_tests.sh b/media/libaudioprocessing/tests/mixer_to_wav_tests.sh
new file mode 100755
index 0000000..72b02fc
--- /dev/null
+++ b/media/libaudioprocessing/tests/mixer_to_wav_tests.sh
@@ -0,0 +1,145 @@
+#!/bin/bash
+#
+# This script uses test-mixer to generate WAV files
+# for evaluation of the AudioMixer component.
+#
+# Sine and chirp signals are used for input because they
+# show up as clear lines, either horizontal or diagonal,
+# on a spectrogram. This means easy verification of multiple
+# track mixing.
+#
+# After execution, look for created subdirectories like
+# mixer_i_i
+# mixer_i_f
+# mixer_f_f
+#
+# We recommend using a program such as Audacity to evaluate
+# the output WAV files, e.g.
+#
+# cd testdir
+# audacity *.wav
+#
+# Using Audacity:
+#
+# Under "Waveform" view mode you can zoom into the
+# start of the WAV file to verify proper ramping.
+#
+# Select "Spectrogram" to verify the lines
+# (sine = horizontal, chirp = diagonal), which should
+# be clear except around the start, where the volume
+# ramping causes spectral distortion.
+
+if [ -z "$ANDROID_BUILD_TOP" ]; then
+    echo "Android build environment not set"
+    exit -1
+fi
+
+# ensure we have mm
+. $ANDROID_BUILD_TOP/build/envsetup.sh
+
+pushd $ANDROID_BUILD_TOP/frameworks/av/media/libaudioprocessing
+
+# build
+pwd
+mm
+
+# send to device
+echo "waiting for device"
+adb root && adb wait-for-device remount
+adb push $OUT/system/lib/libaudioprocessing.so /system/lib
+adb push $OUT/system/lib64/libaudioprocessing.so /system/lib64
+adb push $OUT/system/bin/test-mixer /system/bin
+
+# createwav creates a series of WAV files testing various
+# mixer settings
+# $1 = flags
+# $2 = directory
+function createwav() {
+# create directory if it doesn't exist
+    if [ ! -d $2 ]; then
+        mkdir $2
+    fi
+
+# Test:
+# process__genericResampling with mixed integer and float track input
+# track__Resample / track__genericResample
+    adb shell test-mixer $1 -s 48000 \
+        -o /sdcard/tm48000grif.wav \
+        sine:2,4000,7520 chirp:2,9200 sine:1,3000,18000 \
+        sine:f,6,6000,19000  chirp:i,4,30000
+    adb pull /sdcard/tm48000grif.wav $2
+
+# Test:
+# process__genericResampling
+# track__Resample / track__genericResample
+    adb shell test-mixer $1 -s 48000 \
+        -o /sdcard/tm48000gr.wav \
+        sine:2,4000,7520 chirp:2,9200 sine:1,3000,18000 \
+        sine:6,6000,19000
+    adb pull /sdcard/tm48000gr.wav $2
+
+# Test:
+# process__genericResample
+# track__Resample / track__genericResample
+# track__NoResample / track__16BitsStereo / track__16BitsMono
+# Aux buffer
+    adb shell test-mixer $1 -c 5 -s 9307 \
+        -a /sdcard/aux9307gra.wav -o /sdcard/tm9307gra.wav \
+        sine:4,1000,3000 sine:1,2000,9307 chirp:3,9307
+    adb pull /sdcard/tm9307gra.wav $2
+    adb pull /sdcard/aux9307gra.wav $2
+
+# Test:
+# process__genericNoResampling
+# track__NoResample / track__16BitsStereo / track__16BitsMono
+    adb shell test-mixer $1 -s 32000 \
+        -o /sdcard/tm32000gnr.wav \
+        sine:2,1000,32000 chirp:2,32000  sine:1,3000,32000
+    adb pull /sdcard/tm32000gnr.wav $2
+
+# Test:
+# process__genericNoResampling
+# track__NoResample / track__16BitsStereo / track__16BitsMono
+# Aux buffer
+    adb shell test-mixer $1 -s 32000 \
+        -a /sdcard/aux32000gnra.wav -o /sdcard/tm32000gnra.wav \
+        sine:2,1000,32000 chirp:2,32000  sine:1,3000,32000
+    adb pull /sdcard/tm32000gnra.wav $2
+    adb pull /sdcard/aux32000gnra.wav $2
+
+# Test:
+# process__NoResampleOneTrack / process__OneTrack16BitsStereoNoResampling
+# Downmixer
+    adb shell test-mixer $1 -s 32000 \
+        -o /sdcard/tm32000nrot.wav \
+        sine:6,1000,32000
+    adb pull /sdcard/tm32000nrot.wav $2
+
+# Test:
+# process__NoResampleOneTrack / OneTrack16BitsStereoNoResampling
+# Aux buffer
+    adb shell test-mixer $1 -s 44100 \
+        -a /sdcard/aux44100nrota.wav -o /sdcard/tm44100nrota.wav \
+        sine:2,2000,44100
+    adb pull /sdcard/tm44100nrota.wav $2
+    adb pull /sdcard/aux44100nrota.wav $2
+}
+
+#
+# Call createwav to generate WAV files in various combinations
+#
+# i_i = integer input track, integer mixer output
+# f_f = float input track,   float mixer output
+# i_f = integer input track, float mixer output
+#
+# If the mixer output is float, then the output WAV file is pcm float.
+#
+# TODO: create an "snr" tool, analogous to "diff", to automatically
+# compare the files in these directories.
+#
+
+createwav "" "tests/mixer_i_i"
+createwav "-f -m" "tests/mixer_f_f"
+createwav "-m" "tests/mixer_i_f"
+
+popd
diff --git a/media/libaudioprocessing/tests/resampler_tests.cpp b/media/libaudioprocessing/tests/resampler_tests.cpp
new file mode 100644
index 0000000..8d5e016
--- /dev/null
+++ b/media/libaudioprocessing/tests/resampler_tests.cpp
@@ -0,0 +1,486 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "audioflinger_resampler_tests"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <time.h>
+#include <unistd.h>
+
+#include <iostream>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include <android/log.h>
+#include <gtest/gtest.h>
+#include <media/AudioBufferProvider.h>
+
+#include <media/AudioResampler.h>
+#include "test_utils.h"
+
+template <typename T>
+static void printData(T *data, size_t size) {
+    const size_t stride = 8;
+    for (size_t i = 0; i < size; ) {
+        for (size_t j = 0; j < stride && i < size; ++j) {
+            std::cout << data[i++] << ' ';  // extra space before newline
+        }
+        std::cout << '\n'; // or endl
+    }
+}
+
+void resample(int channels, void *output,
+        size_t outputFrames, const std::vector<size_t> &outputIncr,
+        android::AudioBufferProvider *provider, android::AudioResampler *resampler)
+{
+    for (size_t i = 0, j = 0; i < outputFrames; ) {
+        size_t thisFrames = outputIncr[j++];
+        if (j >= outputIncr.size()) {
+            j = 0;
+        }
+        if (thisFrames == 0 || thisFrames > outputFrames - i) {
+            thisFrames = outputFrames - i;
+        }
+        size_t framesResampled = resampler->resample(
+                (int32_t*) output + channels*i, thisFrames, provider);
+        // we should have enough buffer space, so there is no short count.
+        ASSERT_EQ(thisFrames, framesResampled);
+        i += thisFrames;
+    }
+}
+
+void buffercmp(const void *reference, const void *test,
+        size_t outputFrameSize, size_t outputFrames)
+{
+    for (size_t i = 0; i < outputFrames; ++i) {
+        int check = memcmp((const char*)reference + i * outputFrameSize,
+                (const char*)test + i * outputFrameSize, outputFrameSize);
+        if (check) {
+            ALOGE("Failure at frame %zu", i);
+            ASSERT_EQ(check, 0); /* fails */
+        }
+    }
+}
+
+void testBufferIncrement(size_t channels, bool useFloat,
+        unsigned inputFreq, unsigned outputFreq,
+        enum android::AudioResampler::src_quality quality)
+{
+    const audio_format_t format = useFloat ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
+    // create the provider
+    std::vector<int> inputIncr;
+    SignalProvider provider;
+    if (useFloat) {
+        provider.setChirp<float>(channels,
+                0., outputFreq/2., outputFreq, outputFreq/2000.);
+    } else {
+        provider.setChirp<int16_t>(channels,
+                0., outputFreq/2., outputFreq, outputFreq/2000.);
+    }
+    provider.setIncr(inputIncr);
+
+    // calculate the output size
+    size_t outputFrames = ((int64_t) provider.getNumFrames() * outputFreq) / inputFreq;
+    size_t outputFrameSize = (channels == 1 ? 2 : channels) * (useFloat ? sizeof(float) : sizeof(int32_t));
+    size_t outputSize = outputFrameSize * outputFrames;
+    outputSize &= ~7;
+
+    // create the resampler
+    android::AudioResampler* resampler;
+
+    resampler = android::AudioResampler::create(format, channels, outputFreq, quality);
+    resampler->setSampleRate(inputFreq);
+    resampler->setVolume(android::AudioResampler::UNITY_GAIN_FLOAT,
+            android::AudioResampler::UNITY_GAIN_FLOAT);
+
+    // set up the reference run
+    std::vector<size_t> refIncr;
+    refIncr.push_back(outputFrames);
+    void* reference = calloc(outputFrames, outputFrameSize);
+    resample(channels, reference, outputFrames, refIncr, &provider, resampler);
+
+    provider.reset();
+
+#if 0
+    /* this test will fail - API interface issue: reset() does not clear internal buffers */
+    resampler->reset();
+#else
+    delete resampler;
+    resampler = android::AudioResampler::create(format, channels, outputFreq, quality);
+    resampler->setSampleRate(inputFreq);
+    resampler->setVolume(android::AudioResampler::UNITY_GAIN_FLOAT,
+            android::AudioResampler::UNITY_GAIN_FLOAT);
+#endif
+
+    // set up the test run
+    std::vector<size_t> outIncr;
+    outIncr.push_back(1);
+    outIncr.push_back(2);
+    outIncr.push_back(3);
+    void* test = calloc(outputFrames, outputFrameSize);
+    inputIncr.push_back(1);
+    inputIncr.push_back(3);
+    provider.setIncr(inputIncr);
+    resample(channels, test, outputFrames, outIncr, &provider, resampler);
+
+    // check
+    buffercmp(reference, test, outputFrameSize, outputFrames);
+
+    free(reference);
+    free(test);
+    delete resampler;
+}
+
+template <typename T>
+inline double sqr(T v)
+{
+    double dv = static_cast<double>(v);
+    return dv * dv;
+}
+
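+// Returns the mean of the squared samples in [start, end), stepping by stride
+// (i.e. the mean-square energy of one interleaved channel).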
+template <typename T>
+double signalEnergy(T *start, T *end, unsigned stride)
+{
+    double accum = 0;
+
+    for (T *p = start; p < end; p += stride) {
+        accum += sqr(*p);
+    }
+    unsigned count = (end - start + stride - 1) / stride;
+    return accum / count;
+}
+
+// TI = resampler input type, int16_t or float
+// TO = resampler output type, int32_t or float
+template <typename TI, typename TO>
+void testStopbandDownconversion(size_t channels,
+        unsigned inputFreq, unsigned outputFreq,
+        unsigned passband, unsigned stopband,
+        enum android::AudioResampler::src_quality quality)
+{
+    // create the provider
+    std::vector<int> inputIncr;
+    SignalProvider provider;
+    provider.setChirp<TI>(channels,
+            0., inputFreq/2., inputFreq, inputFreq/2000.);
+    provider.setIncr(inputIncr);
+
+    // calculate the output size
+    size_t outputFrames = ((int64_t) provider.getNumFrames() * outputFreq) / inputFreq;
+    size_t outputFrameSize = (channels == 1 ? 2 : channels) * sizeof(TO);
+    size_t outputSize = outputFrameSize * outputFrames;
+    outputSize &= ~7;
+
+    // create the resampler
+    android::AudioResampler* resampler;
+
+    resampler = android::AudioResampler::create(
+            std::is_same<TI, int16_t>::value ? AUDIO_FORMAT_PCM_16_BIT : AUDIO_FORMAT_PCM_FLOAT,
+            channels, outputFreq, quality);
+    resampler->setSampleRate(inputFreq);
+    resampler->setVolume(android::AudioResampler::UNITY_GAIN_FLOAT,
+            android::AudioResampler::UNITY_GAIN_FLOAT);
+
+    // set up the reference run
+    std::vector<size_t> refIncr;
+    refIncr.push_back(outputFrames);
+    void* reference = calloc(outputFrames, outputFrameSize);
+    resample(channels, reference, outputFrames, refIncr, &provider, resampler);
+
+    TO *out = reinterpret_cast<TO *>(reference);
+
+    // check signal energy in passband
+    const unsigned passbandFrame = passband * outputFreq / 1000.;
+    const unsigned stopbandFrame = stopband * outputFreq / 1000.;
+
+    // check each channel separately
+    if (channels == 1) channels = 2; // workaround (mono duplicates output channel)
+
+    for (size_t i = 0; i < channels; ++i) {
+        double passbandEnergy = signalEnergy(out, out + passbandFrame * channels, channels);
+        double stopbandEnergy = signalEnergy(out + stopbandFrame * channels,
+                out + outputFrames * channels, channels);
+        double dbAtten = -10. * log10(stopbandEnergy / passbandEnergy);
+        ASSERT_GT(dbAtten, 60.);
+
+#if 0
+        // internal verification
+        printf("if:%d  of:%d  pbf:%d  sbf:%d  sbe: %f  pbe: %f  db: %.2f\n",
+                provider.getNumFrames(), outputFrames,
+                passbandFrame, stopbandFrame, stopbandEnergy, passbandEnergy, dbAtten);
+        for (size_t i = 0; i < 10; ++i) {
+            std::cout << out[i+passbandFrame*channels] << std::endl;
+        }
+        for (size_t i = 0; i < 10; ++i) {
+            std::cout << out[i+stopbandFrame*channels] << std::endl;
+        }
+#endif
+    }
+
+    free(reference);
+    delete resampler;
+}
+
+/* Buffer increment test
+ *
+ * We compare a reference output, where we consume and process the entire
+ * buffer at a time, and a test output, where we provide small chunks of input
+ * data and process small chunks of output (which may not be equivalent in size).
+ *
+ * Two subtests - fixed phase (3:2 down) and interpolated phase (147:320 up)
+ */
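+//
+// These tests can also be run individually on the device with a gtest filter, e.g.
+// (binary path as pushed by the helper scripts; the filter value is illustrative):
+//   adb shell /data/nativetest/resampler_tests/resampler_tests \
+//       --gtest_filter='audioflinger_resampler.bufferincrement_*'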
+TEST(audioflinger_resampler, bufferincrement_fixedphase) {
+    // all of these work
+    static const enum android::AudioResampler::src_quality kQualityArray[] = {
+            android::AudioResampler::LOW_QUALITY,
+            android::AudioResampler::MED_QUALITY,
+            android::AudioResampler::HIGH_QUALITY,
+            android::AudioResampler::VERY_HIGH_QUALITY,
+            android::AudioResampler::DYN_LOW_QUALITY,
+            android::AudioResampler::DYN_MED_QUALITY,
+            android::AudioResampler::DYN_HIGH_QUALITY,
+    };
+
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testBufferIncrement(2, false, 48000, 32000, kQualityArray[i]);
+    }
+}
+
+TEST(audioflinger_resampler, bufferincrement_interpolatedphase) {
+    // all of these work except low quality
+    static const enum android::AudioResampler::src_quality kQualityArray[] = {
+//           android::AudioResampler::LOW_QUALITY,
+            android::AudioResampler::MED_QUALITY,
+            android::AudioResampler::HIGH_QUALITY,
+            android::AudioResampler::VERY_HIGH_QUALITY,
+            android::AudioResampler::DYN_LOW_QUALITY,
+            android::AudioResampler::DYN_MED_QUALITY,
+            android::AudioResampler::DYN_HIGH_QUALITY,
+    };
+
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testBufferIncrement(2, false, 22050, 48000, kQualityArray[i]);
+    }
+}
+
+TEST(audioflinger_resampler, bufferincrement_fixedphase_multi) {
+    // only dynamic quality
+    static const enum android::AudioResampler::src_quality kQualityArray[] = {
+            android::AudioResampler::DYN_LOW_QUALITY,
+            android::AudioResampler::DYN_MED_QUALITY,
+            android::AudioResampler::DYN_HIGH_QUALITY,
+    };
+
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testBufferIncrement(4, false, 48000, 32000, kQualityArray[i]);
+    }
+}
+
+TEST(audioflinger_resampler, bufferincrement_interpolatedphase_multi_float) {
+    // only dynamic quality
+    static const enum android::AudioResampler::src_quality kQualityArray[] = {
+            android::AudioResampler::DYN_LOW_QUALITY,
+            android::AudioResampler::DYN_MED_QUALITY,
+            android::AudioResampler::DYN_HIGH_QUALITY,
+    };
+
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testBufferIncrement(8, true, 22050, 48000, kQualityArray[i]);
+    }
+}
+
+/* Simple aliasing test
+ *
+ * This checks stopband response of the chirp signal to make sure frequencies
+ * are properly suppressed.  It uses downsampling because the stopband can be
+ * clearly isolated by input frequencies exceeding the output sample rate (nyquist).
+ */
+TEST(audioflinger_resampler, stopbandresponse_integer) {
+    // not all of these may work (old resamplers fail on downsampling)
+    static const enum android::AudioResampler::src_quality kQualityArray[] = {
+            //android::AudioResampler::LOW_QUALITY,
+            //android::AudioResampler::MED_QUALITY,
+            //android::AudioResampler::HIGH_QUALITY,
+            //android::AudioResampler::VERY_HIGH_QUALITY,
+            android::AudioResampler::DYN_LOW_QUALITY,
+            android::AudioResampler::DYN_MED_QUALITY,
+            android::AudioResampler::DYN_HIGH_QUALITY,
+    };
+
+    // in this test we assume a maximum transition band between 12kHz and 20kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<int16_t, int32_t>(
+                2, 48000, 32000, 12000, 20000, kQualityArray[i]);
+    }
+
+    // in this test we assume a maximum transition band between 7kHz and 15kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    // (the weird ratio triggers interpolative resampling)
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<int16_t, int32_t>(
+                2, 48000, 22101, 7000, 15000, kQualityArray[i]);
+    }
+}
+
+TEST(audioflinger_resampler, stopbandresponse_integer_mono) {
+    // not all of these may work (old resamplers fail on downsampling)
+    static const enum android::AudioResampler::src_quality kQualityArray[] = {
+            //android::AudioResampler::LOW_QUALITY,
+            //android::AudioResampler::MED_QUALITY,
+            //android::AudioResampler::HIGH_QUALITY,
+            //android::AudioResampler::VERY_HIGH_QUALITY,
+            android::AudioResampler::DYN_LOW_QUALITY,
+            android::AudioResampler::DYN_MED_QUALITY,
+            android::AudioResampler::DYN_HIGH_QUALITY,
+    };
+
+    // in this test we assume a maximum transition band between 12kHz and 20kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<int16_t, int32_t>(
+                1, 48000, 32000, 12000, 20000, kQualityArray[i]);
+    }
+
+    // in this test we assume a maximum transition band between 7kHz and 15kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    // (the weird ratio triggers interpolative resampling)
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<int16_t, int32_t>(
+                1, 48000, 22101, 7000, 15000, kQualityArray[i]);
+    }
+}
+
+TEST(audioflinger_resampler, stopbandresponse_integer_multichannel) {
+    // not all of these may work (old resamplers fail on downsampling)
+    static const enum android::AudioResampler::src_quality kQualityArray[] = {
+            //android::AudioResampler::LOW_QUALITY,
+            //android::AudioResampler::MED_QUALITY,
+            //android::AudioResampler::HIGH_QUALITY,
+            //android::AudioResampler::VERY_HIGH_QUALITY,
+            android::AudioResampler::DYN_LOW_QUALITY,
+            android::AudioResampler::DYN_MED_QUALITY,
+            android::AudioResampler::DYN_HIGH_QUALITY,
+    };
+
+    // in this test we assume a maximum transition band between 12kHz and 20kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<int16_t, int32_t>(
+                8, 48000, 32000, 12000, 20000, kQualityArray[i]);
+    }
+
+    // in this test we assume a maximum transition band between 7kHz and 15kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    // (the weird ratio triggers interpolative resampling)
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<int16_t, int32_t>(
+                8, 48000, 22101, 7000, 15000, kQualityArray[i]);
+    }
+}
+
+TEST(audioflinger_resampler, stopbandresponse_float) {
+    // not all of these may work (old resamplers fail on downsampling)
+    static const enum android::AudioResampler::src_quality kQualityArray[] = {
+            //android::AudioResampler::LOW_QUALITY,
+            //android::AudioResampler::MED_QUALITY,
+            //android::AudioResampler::HIGH_QUALITY,
+            //android::AudioResampler::VERY_HIGH_QUALITY,
+            android::AudioResampler::DYN_LOW_QUALITY,
+            android::AudioResampler::DYN_MED_QUALITY,
+            android::AudioResampler::DYN_HIGH_QUALITY,
+    };
+
+    // in this test we assume a maximum transition band between 12kHz and 20kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<float, float>(
+                2, 48000, 32000, 12000, 20000, kQualityArray[i]);
+    }
+
+    // in this test we assume a maximum transition band between 7kHz and 15kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    // (the weird ratio triggers interpolative resampling)
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<float, float>(
+                2, 48000, 22101, 7000, 15000, kQualityArray[i]);
+    }
+}
+
+TEST(audioflinger_resampler, stopbandresponse_float_mono) {
+    // not all of these may work (old resamplers fail on downsampling)
+    static const enum android::AudioResampler::src_quality kQualityArray[] = {
+            //android::AudioResampler::LOW_QUALITY,
+            //android::AudioResampler::MED_QUALITY,
+            //android::AudioResampler::HIGH_QUALITY,
+            //android::AudioResampler::VERY_HIGH_QUALITY,
+            android::AudioResampler::DYN_LOW_QUALITY,
+            android::AudioResampler::DYN_MED_QUALITY,
+            android::AudioResampler::DYN_HIGH_QUALITY,
+    };
+
+    // in this test we assume a maximum transition band between 12kHz and 20kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<float, float>(
+                1, 48000, 32000, 12000, 20000, kQualityArray[i]);
+    }
+
+    // in this test we assume a maximum transition band between 7kHz and 15kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    // (the weird ratio triggers interpolative resampling)
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<float, float>(
+                1, 48000, 22101, 7000, 15000, kQualityArray[i]);
+    }
+}
+
+TEST(audioflinger_resampler, stopbandresponse_float_multichannel) {
+    // not all of these may work (old resamplers fail on downsampling)
+    static const enum android::AudioResampler::src_quality kQualityArray[] = {
+            //android::AudioResampler::LOW_QUALITY,
+            //android::AudioResampler::MED_QUALITY,
+            //android::AudioResampler::HIGH_QUALITY,
+            //android::AudioResampler::VERY_HIGH_QUALITY,
+            android::AudioResampler::DYN_LOW_QUALITY,
+            android::AudioResampler::DYN_MED_QUALITY,
+            android::AudioResampler::DYN_HIGH_QUALITY,
+    };
+
+    // in this test we assume a maximum transition band between 12kHz and 20kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<float, float>(
+                8, 48000, 32000, 12000, 20000, kQualityArray[i]);
+    }
+
+    // in this test we assume a maximum transition band between 7kHz and 15kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    // (the weird ratio triggers interpolative resampling)
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<float, float>(
+                8, 48000, 22101, 7000, 15000, kQualityArray[i]);
+    }
+}
+
diff --git a/media/libaudioprocessing/tests/run_all_unit_tests.sh b/media/libaudioprocessing/tests/run_all_unit_tests.sh
new file mode 100755
index 0000000..15a94c2
--- /dev/null
+++ b/media/libaudioprocessing/tests/run_all_unit_tests.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+if [ -z "$ANDROID_BUILD_TOP" ]; then
+    echo "Android build environment not set"
+    exit -1
+fi
+
+echo "waiting for device"
+adb root && adb wait-for-device remount
+
+adb shell /data/nativetest/resampler_tests/resampler_tests
+adb shell /data/nativetest64/resampler_tests/resampler_tests
diff --git a/media/libaudioprocessing/tests/test-mixer.cpp b/media/libaudioprocessing/tests/test-mixer.cpp
new file mode 100644
index 0000000..75dbf91
--- /dev/null
+++ b/media/libaudioprocessing/tests/test-mixer.cpp
@@ -0,0 +1,328 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+#include <math.h>
+#include <vector>
+#include <audio_utils/primitives.h>
+#include <audio_utils/sndfile.h>
+#include <media/AudioBufferProvider.h>
+#include <media/AudioMixer.h>
+#include "test_utils.h"
+
+/* Testing is typically through creation of an output WAV file from several
+ * source inputs, to be later analyzed by an audio program such as Audacity.
+ *
+ * Sine or chirp functions are typically more useful as input to the mixer
+ * as they show up as straight lines on a spectrogram if successfully mixed.
+ *
+ * A sample shell script is provided: mixer_to_wav_tests.sh
+ */
+
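+// For example, a hypothetical invocation mixing a stereo sine and a stereo chirp
+// into a float WAV file (the output path is illustrative):
+//   test-mixer -f -m -s 48000 -o /sdcard/tm48000.wav sine:2,1000,48000 chirp:2,48000
+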
+using namespace android;
+
+static void usage(const char* name) {
+    fprintf(stderr, "Usage: %s [-f] [-m] [-c channels]"
+                    " [-s sample-rate] [-o <output-file>] [-a <aux-buffer-file>] [-P csv]"
+                    " (<input-file> | <command>)+\n", name);
+    fprintf(stderr, "    -f    enable floating point input track by default\n");
+    fprintf(stderr, "    -m    enable floating point mixer output\n");
+    fprintf(stderr, "    -c    number of mixer output channels\n");
+    fprintf(stderr, "    -s    mixer sample-rate\n");
+    fprintf(stderr, "    -o    <output-file> WAV file, pcm16 (or float if -m specified)\n");
+    fprintf(stderr, "    -a    <aux-buffer-file>\n");
+    fprintf(stderr, "    -P    # frames provided per call to resample() in CSV format\n");
+    fprintf(stderr, "    <input-file> is a WAV file\n");
+    fprintf(stderr, "    <command> can be 'sine:[(i|f),]<channels>,<frequency>,<samplerate>'\n");
+    fprintf(stderr, "                     'chirp:[(i|f),]<channels>,<samplerate>'\n");
+}
+
+static int writeFile(const char *filename, const void *buffer,
+        uint32_t sampleRate, uint32_t channels, size_t frames, bool isBufferFloat) {
+    if (filename == NULL) {
+        return 0; // ok to pass in NULL filename
+    }
+    // write output to file.
+    SF_INFO info;
+    info.frames = 0;
+    info.samplerate = sampleRate;
+    info.channels = channels;
+    info.format = SF_FORMAT_WAV | (isBufferFloat ? SF_FORMAT_FLOAT : SF_FORMAT_PCM_16);
+    printf("saving file:%s  channels:%u  samplerate:%u  frames:%zu\n",
+            filename, info.channels, info.samplerate, frames);
+    SNDFILE *sf = sf_open(filename, SFM_WRITE, &info);
+    if (sf == NULL) {
+        perror(filename);
+        return EXIT_FAILURE;
+    }
+    if (isBufferFloat) {
+        (void) sf_writef_float(sf, (float*)buffer, frames);
+    } else {
+        (void) sf_writef_short(sf, (short*)buffer, frames);
+    }
+    sf_close(sf);
+    return EXIT_SUCCESS;
+}
+
+const char *parseFormat(const char *s, bool *useFloat) {
+    if (!strncmp(s, "f,", 2)) {
+        *useFloat = true;
+        return s + 2;
+    }
+    if (!strncmp(s, "i,", 2)) {
+        *useFloat = false;
+        return s + 2;
+    }
+    return s;
+}
+
+int main(int argc, char* argv[]) {
+    const char* const progname = argv[0];
+    bool useInputFloat = false;
+    bool useMixerFloat = false;
+    bool useRamp = true;
+    uint32_t outputSampleRate = 48000;
+    uint32_t outputChannels = 2; // stereo for now
+    std::vector<int> Pvalues;
+    const char* outputFilename = NULL;
+    const char* auxFilename = NULL;
+    std::vector<int32_t> names;
+    std::vector<SignalProvider> providers;
+    std::vector<audio_format_t> formats;
+
+    for (int ch; (ch = getopt(argc, argv, "fmc:s:o:a:P:")) != -1;) {
+        switch (ch) {
+        case 'f':
+            useInputFloat = true;
+            break;
+        case 'm':
+            useMixerFloat = true;
+            break;
+        case 'c':
+            outputChannels = atoi(optarg);
+            break;
+        case 's':
+            outputSampleRate = atoi(optarg);
+            break;
+        case 'o':
+            outputFilename = optarg;
+            break;
+        case 'a':
+            auxFilename = optarg;
+            break;
+        case 'P':
+            if (parseCSV(optarg, Pvalues) < 0) {
+                fprintf(stderr, "incorrect syntax for -P option\n");
+                return EXIT_FAILURE;
+            }
+            break;
+        case '?':
+        default:
+            usage(progname);
+            return EXIT_FAILURE;
+        }
+    }
+    argc -= optind;
+    argv += optind;
+
+    if (argc == 0) {
+        usage(progname);
+        return EXIT_FAILURE;
+    }
+    if ((unsigned)argc > AudioMixer::MAX_NUM_TRACKS) {
+        fprintf(stderr, "too many tracks: %d > %u", argc, AudioMixer::MAX_NUM_TRACKS);
+        fprintf(stderr, "too many tracks: %d > %u\n", argc, AudioMixer::MAX_NUM_TRACKS);
+    }
+
+    size_t outputFrames = 0;
+
+    // create providers for each track
+    names.resize(argc);
+    providers.resize(argc);
+    formats.resize(argc);
+    for (int i = 0; i < argc; ++i) {
+        static const char chirp[] = "chirp:";
+        static const char sine[] = "sine:";
+        static const double kSeconds = 1;
+        bool useFloat = useInputFloat;
+
+        if (!strncmp(argv[i], chirp, strlen(chirp))) {
+            std::vector<int> v;
+            const char *s = parseFormat(argv[i] + strlen(chirp), &useFloat);
+
+            parseCSV(s, v);
+            if (v.size() == 2) {
+                printf("creating chirp(%d %d)\n", v[0], v[1]);
+                if (useFloat) {
+                    providers[i].setChirp<float>(v[0], 0, v[1]/2, v[1], kSeconds);
+                    formats[i] = AUDIO_FORMAT_PCM_FLOAT;
+                } else {
+                    providers[i].setChirp<int16_t>(v[0], 0, v[1]/2, v[1], kSeconds);
+                    formats[i] = AUDIO_FORMAT_PCM_16_BIT;
+                }
+                providers[i].setIncr(Pvalues);
+            } else {
+                fprintf(stderr, "malformed input '%s'\n", argv[i]);
+            }
+        } else if (!strncmp(argv[i], sine, strlen(sine))) {
+            std::vector<int> v;
+            const char *s = parseFormat(argv[i] + strlen(sine), &useFloat);
+
+            parseCSV(s, v);
+            if (v.size() == 3) {
+                printf("creating sine(%d %d %d)\n", v[0], v[1], v[2]);
+                if (useFloat) {
+                    providers[i].setSine<float>(v[0], v[1], v[2], kSeconds);
+                    formats[i] = AUDIO_FORMAT_PCM_FLOAT;
+                } else {
+                    providers[i].setSine<int16_t>(v[0], v[1], v[2], kSeconds);
+                    formats[i] = AUDIO_FORMAT_PCM_16_BIT;
+                }
+                providers[i].setIncr(Pvalues);
+            } else {
+                fprintf(stderr, "malformed input '%s'\n", argv[i]);
+            }
+        } else {
+            printf("creating filename(%s)\n", argv[i]);
+            if (useInputFloat) {
+                providers[i].setFile<float>(argv[i]);
+                formats[i] = AUDIO_FORMAT_PCM_FLOAT;
+            } else {
+                providers[i].setFile<short>(argv[i]);
+                formats[i] = AUDIO_FORMAT_PCM_16_BIT;
+            }
+            providers[i].setIncr(Pvalues);
+        }
+        // calculate the number of output frames
+        size_t nframes = (int64_t) providers[i].getNumFrames() * outputSampleRate
+                / providers[i].getSampleRate();
+        if (i == 0 || outputFrames > nframes) { // choose minimum for outputFrames
+            outputFrames = nframes;
+        }
+    }
+
+    // create the output buffer.
+    const size_t outputFrameSize = outputChannels
+            * (useMixerFloat ? sizeof(float) : sizeof(int16_t));
+    const size_t outputSize = outputFrames * outputFrameSize;
+    const audio_channel_mask_t outputChannelMask =
+            audio_channel_out_mask_from_count(outputChannels);
+    void *outputAddr = NULL;
+    (void) posix_memalign(&outputAddr, 32, outputSize);
+    memset(outputAddr, 0, outputSize);
+
+    // create the aux buffer, if needed.
+    const size_t auxFrameSize = sizeof(int32_t); // Q4.27 always
+    const size_t auxSize = outputFrames * auxFrameSize;
+    void *auxAddr = NULL;
+    if (auxFilename) {
+        (void) posix_memalign(&auxAddr, 32, auxSize);
+        memset(auxAddr, 0, auxSize);
+    }
+
+    // create the mixer.
+    const size_t mixerFrameCount = 320; // typical numbers may range from 240 or 960
+    AudioMixer *mixer = new AudioMixer(mixerFrameCount, outputSampleRate);
+    audio_format_t mixerFormat = useMixerFloat
+            ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
+    float f = AudioMixer::UNITY_GAIN_FLOAT / providers.size(); // normalize volume by # tracks
+    static float f0; // zero
+
+    // set up the tracks.
+    for (size_t i = 0; i < providers.size(); ++i) {
+        //printf("track %d out of %d\n", i, providers.size());
+        uint32_t channelMask = audio_channel_out_mask_from_count(providers[i].getNumChannels());
+        int32_t name = mixer->getTrackName(channelMask,
+                formats[i], AUDIO_SESSION_OUTPUT_MIX);
+        ALOG_ASSERT(name >= 0);
+        names[i] = name;
+        mixer->setBufferProvider(name, &providers[i]);
+        mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
+                (void *)outputAddr);
+        mixer->setParameter(
+                name,
+                AudioMixer::TRACK,
+                AudioMixer::MIXER_FORMAT,
+                (void *)(uintptr_t)mixerFormat);
+        mixer->setParameter(
+                name,
+                AudioMixer::TRACK,
+                AudioMixer::FORMAT,
+                (void *)(uintptr_t)formats[i]);
+        mixer->setParameter(
+                name,
+                AudioMixer::TRACK,
+                AudioMixer::MIXER_CHANNEL_MASK,
+                (void *)(uintptr_t)outputChannelMask);
+        mixer->setParameter(
+                name,
+                AudioMixer::TRACK,
+                AudioMixer::CHANNEL_MASK,
+                (void *)(uintptr_t)channelMask);
+        mixer->setParameter(
+                name,
+                AudioMixer::RESAMPLE,
+                AudioMixer::SAMPLE_RATE,
+                (void *)(uintptr_t)providers[i].getSampleRate());
+        if (useRamp) {
+            mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f0);
+            mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f0);
+            mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME0, &f);
+            mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME1, &f);
+        } else {
+            mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f);
+            mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f);
+        }
+        if (auxFilename) {
+            mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::AUX_BUFFER,
+                    (void *) auxAddr);
+            mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::AUXLEVEL, &f0);
+            mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::AUXLEVEL, &f);
+        }
+        mixer->enable(name);
+    }
+
+    // pump the mixer to process data.
+    size_t i;
+    for (i = 0; i < outputFrames - mixerFrameCount; i += mixerFrameCount) {
+        for (size_t j = 0; j < names.size(); ++j) {
+            mixer->setParameter(names[j], AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
+                    (char *) outputAddr + i * outputFrameSize);
+            if (auxFilename) {
+                mixer->setParameter(names[j], AudioMixer::TRACK, AudioMixer::AUX_BUFFER,
+                        (char *) auxAddr + i * auxFrameSize);
+            }
+        }
+        mixer->process();
+    }
+    outputFrames = i; // reset output frames to the data actually produced.
+
+    // write to files
+    writeFile(outputFilename, outputAddr,
+            outputSampleRate, outputChannels, outputFrames, useMixerFloat);
+    if (auxFilename) {
+        // Aux buffer is always in q4_27 format for now.
+        // memcpy_to_i16_from_q4_27(), but with stereo frame count (not sample count)
+        ditherAndClamp((int32_t*)auxAddr, (int32_t*)auxAddr, outputFrames >> 1);
+        writeFile(auxFilename, auxAddr, outputSampleRate, 1, outputFrames, false);
+    }
+
+    delete mixer;
+    free(outputAddr);
+    free(auxAddr);
+    return EXIT_SUCCESS;
+}
diff --git a/media/libaudioprocessing/tests/test-resampler.cpp b/media/libaudioprocessing/tests/test-resampler.cpp
new file mode 100644
index 0000000..fbc9326
--- /dev/null
+++ b/media/libaudioprocessing/tests/test-resampler.cpp
@@ -0,0 +1,515 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <time.h>
+#include <math.h>
+#include <audio_utils/primitives.h>
+#include <audio_utils/sndfile.h>
+#include <utils/Vector.h>
+#include <media/AudioBufferProvider.h>
+#include <media/AudioResampler.h>
+
+using namespace android;
+
+static bool gVerbose = false;
+
+static int usage(const char* name) {
+    fprintf(stderr,"Usage: %s [-p] [-f] [-F] [-v] [-c channels]"
+                   " [-q {dq|lq|mq|hq|vhq|dlq|dmq|dhq}]"
+                   " [-i input-sample-rate] [-o output-sample-rate]"
+                   " [-O csv] [-P csv] [<input-file>]"
+                   " <output-file>\n", name);
+    fprintf(stderr,"    -p    enable profiling\n");
+    fprintf(stderr,"    -f    enable filter profiling\n");
+    fprintf(stderr,"    -F    enable floating point (-q {dlq|dmq|dhq} only)\n");
+    fprintf(stderr,"    -v    verbose : log buffer provider calls\n");
+    fprintf(stderr,"    -c    # channels (1-2 for lq|mq|hq; 1-8 for dlq|dmq|dhq)\n");
+    fprintf(stderr,"    -q    resampler quality\n");
+    fprintf(stderr,"              dq  : default quality\n");
+    fprintf(stderr,"              lq  : low quality\n");
+    fprintf(stderr,"              mq  : medium quality\n");
+    fprintf(stderr,"              hq  : high quality\n");
+    fprintf(stderr,"              vhq : very high quality\n");
+    fprintf(stderr,"              dlq : dynamic low quality\n");
+    fprintf(stderr,"              dmq : dynamic medium quality\n");
+    fprintf(stderr,"              dhq : dynamic high quality\n");
+    fprintf(stderr,"    -i    input file sample rate (ignored if input file is specified)\n");
+    fprintf(stderr,"    -o    output file sample rate\n");
+    fprintf(stderr,"    -O    # frames output per call to resample() in CSV format\n");
+    fprintf(stderr,"    -P    # frames provided per call to resample() in CSV format\n");
+    return -1;
+}
+
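+// For example, a hypothetical run that resamples a generated 44100 Hz chirp to 48000 Hz
+// with the dynamic high-quality resampler (the output path is illustrative):
+//   test-resampler -q dhq -i 44100 -o 48000 /sdcard/out.wav
+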
+// Convert a list of integers in CSV format to a Vector of those values.
+// Returns the number of elements in the list, or -1 on error.
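+// For example, parseCSV("8,16,32", values) should leave {8, 16, 32} in values and return 3.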
+int parseCSV(const char *string, Vector<int>& values)
+{
+    // pass 1: count the number of values and do syntax check
+    size_t numValues = 0;
+    bool hadDigit = false;
+    for (const char *p = string; ; ) {
+        switch (*p++) {
+        case '0': case '1': case '2': case '3': case '4':
+        case '5': case '6': case '7': case '8': case '9':
+            hadDigit = true;
+            break;
+        case '\0':
+            if (hadDigit) {
+                // pass 2: allocate and initialize vector of values
+                values.resize(++numValues);
+                values.editItemAt(0) = atoi(p = string);
+                for (size_t i = 1; i < numValues; ) {
+                    if (*p++ == ',') {
+                        values.editItemAt(i++) = atoi(p);
+                    }
+                }
+                return numValues;
+            }
+            // fall through
+        case ',':
+            if (hadDigit) {
+                hadDigit = false;
+                numValues++;
+                break;
+            }
+            // fall through
+        default:
+            return -1;
+        }
+    }
+}
+
+int main(int argc, char* argv[]) {
+    const char* const progname = argv[0];
+    bool profileResample = false;
+    bool profileFilter = false;
+    bool useFloat = false;
+    int channels = 1;
+    int input_freq = 0;
+    int output_freq = 0;
+    AudioResampler::src_quality quality = AudioResampler::DEFAULT_QUALITY;
+    Vector<int> Ovalues;
+    Vector<int> Pvalues;
+
+    int ch;
+    while ((ch = getopt(argc, argv, "pfFvc:q:i:o:O:P:")) != -1) {
+        switch (ch) {
+        case 'p':
+            profileResample = true;
+            break;
+        case 'f':
+            profileFilter = true;
+            break;
+        case 'F':
+            useFloat = true;
+            break;
+        case 'v':
+            gVerbose = true;
+            break;
+        case 'c':
+            channels = atoi(optarg);
+            break;
+        case 'q':
+            if (!strcmp(optarg, "dq"))
+                quality = AudioResampler::DEFAULT_QUALITY;
+            else if (!strcmp(optarg, "lq"))
+                quality = AudioResampler::LOW_QUALITY;
+            else if (!strcmp(optarg, "mq"))
+                quality = AudioResampler::MED_QUALITY;
+            else if (!strcmp(optarg, "hq"))
+                quality = AudioResampler::HIGH_QUALITY;
+            else if (!strcmp(optarg, "vhq"))
+                quality = AudioResampler::VERY_HIGH_QUALITY;
+            else if (!strcmp(optarg, "dlq"))
+                quality = AudioResampler::DYN_LOW_QUALITY;
+            else if (!strcmp(optarg, "dmq"))
+                quality = AudioResampler::DYN_MED_QUALITY;
+            else if (!strcmp(optarg, "dhq"))
+                quality = AudioResampler::DYN_HIGH_QUALITY;
+            else {
+                usage(progname);
+                return -1;
+            }
+            break;
+        case 'i':
+            input_freq = atoi(optarg);
+            break;
+        case 'o':
+            output_freq = atoi(optarg);
+            break;
+        case 'O':
+            if (parseCSV(optarg, Ovalues) < 0) {
+                fprintf(stderr, "incorrect syntax for -O option\n");
+                return -1;
+            }
+            break;
+        case 'P':
+            if (parseCSV(optarg, Pvalues) < 0) {
+                fprintf(stderr, "incorrect syntax for -P option\n");
+                return -1;
+            }
+            break;
+        case '?':
+        default:
+            usage(progname);
+            return -1;
+        }
+    }
+
+    if (channels < 1
+            || channels > (quality < AudioResampler::DYN_LOW_QUALITY ? 2 : 8)) {
+        fprintf(stderr, "invalid number of audio channels %d\n", channels);
+        return -1;
+    }
+    if (useFloat && quality < AudioResampler::DYN_LOW_QUALITY) {
+        fprintf(stderr, "float processing is only possible for dynamic resamplers\n");
+        return -1;
+    }
+
+    argc -= optind;
+    argv += optind;
+
+    const char* file_in = NULL;
+    const char* file_out = NULL;
+    if (argc == 1) {
+        file_out = argv[0];
+    } else if (argc == 2) {
+        file_in = argv[0];
+        file_out = argv[1];
+    } else {
+        usage(progname);
+        return -1;
+    }
+
+    // ----------------------------------------------------------
+
+    size_t input_size;
+    void* input_vaddr;
+    if (argc == 2) {
+        SF_INFO info;
+        info.format = 0;
+        SNDFILE *sf = sf_open(file_in, SFM_READ, &info);
+        if (sf == NULL) {
+            perror(file_in);
+            return EXIT_FAILURE;
+        }
+        input_size = info.frames * info.channels * sizeof(short);
+        input_vaddr = malloc(input_size);
+        (void) sf_readf_short(sf, (short *) input_vaddr, info.frames);
+        sf_close(sf);
+        channels = info.channels;
+        input_freq = info.samplerate;
+    } else {
+        // data for testing is exactly (input sampling rate / 1000) / 2 seconds,
+        // so 44.1 kHz input yields 22.05 seconds
+        double k = 1000; // Hz / s
+        double time = (input_freq / 2) / k;
+        size_t input_frames = size_t(input_freq * time);
+        input_size = channels * sizeof(int16_t) * input_frames;
+        input_vaddr = malloc(input_size);
+        int16_t* in = (int16_t*)input_vaddr;
+        for (size_t i=0 ; i<input_frames ; i++) {
+            double t = double(i) / input_freq;
+            double y = sin(M_PI * k * t * t);
+            int16_t yi = floor(y * 32767.0 + 0.5);
+            for (int j = 0; j < channels; j++) {
+                in[i*channels + j] = yi / (1 + j);
+            }
+        }
+    }
+    size_t input_framesize = channels * sizeof(int16_t);
+    size_t input_frames = input_size / input_framesize;
+
+    // For float processing, convert input int16_t to float array
+    if (useFloat) {
+        void *new_vaddr;
+
+        input_framesize = channels * sizeof(float);
+        input_size = input_frames * input_framesize;
+        new_vaddr = malloc(input_size);
+        memcpy_to_float_from_i16(reinterpret_cast<float*>(new_vaddr),
+                reinterpret_cast<int16_t*>(input_vaddr), input_frames * channels);
+        free(input_vaddr);
+        input_vaddr = new_vaddr;
+    }
+
+    // ----------------------------------------------------------
+
+    class Provider: public AudioBufferProvider {
+        const void*     mAddr;      // base address
+        const size_t    mNumFrames; // total frames
+        const size_t    mFrameSize; // size of each frame in bytes
+        size_t          mNextFrame; // index of next frame to provide
+        size_t          mUnrel;     // number of frames not yet released
+        const Vector<int> mPvalues; // number of frames provided per call
+        size_t          mNextPidx;  // index of next entry in mPvalues to use
+    public:
+        Provider(const void* addr, size_t frames, size_t frameSize, const Vector<int>& Pvalues)
+          : mAddr(addr),
+            mNumFrames(frames),
+            mFrameSize(frameSize),
+            mNextFrame(0), mUnrel(0), mPvalues(Pvalues), mNextPidx(0) {
+        }
+        virtual status_t getNextBuffer(Buffer* buffer) {
+            size_t requestedFrames = buffer->frameCount;
+            if (requestedFrames > mNumFrames - mNextFrame) {
+                buffer->frameCount = mNumFrames - mNextFrame;
+            }
+            if (!mPvalues.isEmpty()) {
+                size_t provided = mPvalues[mNextPidx++];
+                printf("mPvalue[%zu]=%zu not %zu\n", mNextPidx-1, provided, buffer->frameCount);
+                if (provided < buffer->frameCount) {
+                    buffer->frameCount = provided;
+                }
+                if (mNextPidx >= mPvalues.size()) {
+                    mNextPidx = 0;
+                }
+            }
+            if (gVerbose) {
+                printf("getNextBuffer() requested %zu frames out of %zu frames available,"
+                        " and returned %zu frames\n",
+                        requestedFrames, (size_t) (mNumFrames - mNextFrame), buffer->frameCount);
+            }
+            mUnrel = buffer->frameCount;
+            if (buffer->frameCount > 0) {
+                buffer->raw = (char *)mAddr + mFrameSize * mNextFrame;
+                return NO_ERROR;
+            } else {
+                buffer->raw = NULL;
+                return NOT_ENOUGH_DATA;
+            }
+        }
+        virtual void releaseBuffer(Buffer* buffer) {
+            if (buffer->frameCount > mUnrel) {
+                fprintf(stderr, "ERROR releaseBuffer() released %zu frames but only %zu available "
+                        "to release\n", buffer->frameCount, mUnrel);
+                mNextFrame += mUnrel;
+                mUnrel = 0;
+            } else {
+                if (gVerbose) {
+                    printf("releaseBuffer() released %zu frames out of %zu frames available "
+                            "to release\n", buffer->frameCount, mUnrel);
+                }
+                mNextFrame += buffer->frameCount;
+                mUnrel -= buffer->frameCount;
+            }
+            buffer->frameCount = 0;
+            buffer->raw = NULL;
+        }
+        void reset() {
+            mNextFrame = 0;
+        }
+    } provider(input_vaddr, input_frames, input_framesize, Pvalues);
+
+    if (gVerbose) {
+        printf("%zu input frames\n", input_frames);
+    }
+
+    audio_format_t format = useFloat ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
+    int output_channels = channels > 2 ? channels : 2; // output is at least stereo samples
+    size_t output_framesize = output_channels * (useFloat ? sizeof(float) : sizeof(int32_t));
+    size_t output_frames = ((int64_t) input_frames * output_freq) / input_freq;
+    size_t output_size = output_frames * output_framesize;
+
+    if (profileFilter) {
+        // Check how fast sample rate changes are that require filter changes.
+        // The delta sample rate changes must indicate a downsampling ratio,
+        // and must be larger than 10% changes.
+        //
+        // On fast devices, filters should be generated between 0.1ms - 1ms.
+        // (single threaded).
+        AudioResampler* resampler = AudioResampler::create(format, channels,
+                8000, quality);
+        int looplimit = 100;
+        timespec start, end;
+        clock_gettime(CLOCK_MONOTONIC, &start);
+        for (int i = 0; i < looplimit; ++i) {
+            resampler->setSampleRate(9000);
+            resampler->setSampleRate(12000);
+            resampler->setSampleRate(20000);
+            resampler->setSampleRate(30000);
+        }
+        clock_gettime(CLOCK_MONOTONIC, &end);
+        int64_t start_ns = start.tv_sec * 1000000000LL + start.tv_nsec;
+        int64_t end_ns = end.tv_sec * 1000000000LL + end.tv_nsec;
+        int64_t time = end_ns - start_ns;
+        printf("%.2f sample rate changes with filter calculation/sec\n",
+                looplimit * 4 / (time / 1e9));
+
+        // Check how fast sample rate changes are without filter changes.
+        // This should be very fast, probably 0.1us - 1us per sample rate
+        // change.
+        resampler->setSampleRate(1000);
+        looplimit = 1000;
+        clock_gettime(CLOCK_MONOTONIC, &start);
+        for (int i = 0; i < looplimit; ++i) {
+            resampler->setSampleRate(1000+i);
+        }
+        clock_gettime(CLOCK_MONOTONIC, &end);
+        start_ns = start.tv_sec * 1000000000LL + start.tv_nsec;
+        end_ns = end.tv_sec * 1000000000LL + end.tv_nsec;
+        time = end_ns - start_ns;
+        printf("%.2f sample rate changes without filter calculation/sec\n",
+                looplimit / (time / 1e9));
+        resampler->reset();
+        delete resampler;
+    }
+
+    void* output_vaddr = malloc(output_size);
+    AudioResampler* resampler = AudioResampler::create(format, channels,
+            output_freq, quality);
+
+    resampler->setSampleRate(input_freq);
+    resampler->setVolume(AudioResampler::UNITY_GAIN_FLOAT, AudioResampler::UNITY_GAIN_FLOAT);
+
+    if (profileResample) {
+        /*
+         * For profiling on mobile devices, experimentation shows it is
+         * better to run a few trials with a shorter loop limit and take
+         * the minimum time.
+         *
+         * Long tests can cause CPU temperature to build up and thermal throttling
+         * to reduce CPU frequency.
+         *
+         * For frequency checks (index=0, or 1, etc.):
+         * "cat /sys/devices/system/cpu/cpu${index}/cpufreq/scaling_*_freq"
+         *
+         * For temperature checks (index=0, or 1, etc.):
+         * "cat /sys/class/thermal/thermal_zone${index}/temp"
+         *
+         * Another way to avoid thermal throttling is to fix the CPU frequency
+         * at a lower level which prevents excessive temperatures.
+         */
+        const int trials = 4;
+        const int looplimit = 4;
+        timespec start, end;
+        int64_t time = 0;
+
+        for (int n = 0; n < trials; ++n) {
+            clock_gettime(CLOCK_MONOTONIC, &start);
+            for (int i = 0; i < looplimit; ++i) {
+                resampler->resample((int*) output_vaddr, output_frames, &provider);
+                provider.reset(); // during benchmarking, reset only the provider
+            }
+            clock_gettime(CLOCK_MONOTONIC, &end);
+            int64_t start_ns = start.tv_sec * 1000000000LL + start.tv_nsec;
+            int64_t end_ns = end.tv_sec * 1000000000LL + end.tv_nsec;
+            int64_t diff_ns = end_ns - start_ns;
+            if (n == 0 || diff_ns < time) {
+                time = diff_ns;   // save the best out of our trials.
+            }
+        }
+        // Mfrms/s is "Millions of output frames per second".
+        printf("quality: %d  channels: %d  msec: %" PRId64 "  Mfrms/s: %.2lf\n",
+                quality, channels, time/1000000, output_frames * looplimit / (time / 1e9) / 1e6);
+        resampler->reset();
+
+        // TODO: fix legacy bug where reset() does not clear internal buffers;
+        // as a workaround, delete and recreate the resampler here.
+        delete resampler;
+        resampler = AudioResampler::create(format, channels,
+                    output_freq, quality);
+        resampler->setSampleRate(input_freq);
+        resampler->setVolume(AudioResampler::UNITY_GAIN_FLOAT, AudioResampler::UNITY_GAIN_FLOAT);
+    }
+
+    memset(output_vaddr, 0, output_size);
+    if (gVerbose) {
+        printf("resample() %zu output frames\n", output_frames);
+    }
+    if (Ovalues.isEmpty()) {
+        Ovalues.push(output_frames);
+    }
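+    // Resample in chunks whose sizes cycle through Ovalues; a zero or oversized
+    // entry means "whatever frames remain".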
+    for (size_t i = 0, j = 0; i < output_frames; ) {
+        size_t thisFrames = Ovalues[j++];
+        if (j >= Ovalues.size()) {
+            j = 0;
+        }
+        if (thisFrames == 0 || thisFrames > output_frames - i) {
+            thisFrames = output_frames - i;
+        }
+        resampler->resample((int*) output_vaddr + output_channels*i, thisFrames, &provider);
+        i += thisFrames;
+    }
+    if (gVerbose) {
+        printf("resample() complete\n");
+    }
+    resampler->reset();
+    if (gVerbose) {
+        printf("reset() complete\n");
+    }
+    delete resampler;
+    resampler = NULL;
+
+    // For float processing, convert output format from float to Q4.27,
+    // which is then converted to int16_t for final storage.
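+    // (Q4.27 is a signed 32-bit fixed-point format with 4 integer bits and
+    // 27 fractional bits, leaving headroom above full scale.)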
+    if (useFloat) {
+        memcpy_to_q4_27_from_float(reinterpret_cast<int32_t*>(output_vaddr),
+                reinterpret_cast<float*>(output_vaddr), output_frames * output_channels);
+    }
+
+    // Mono output takes only the left channel of the stereo output pair;
+    // stereo and multichannel outputs preserve all channels.
+    int32_t* out = (int32_t*) output_vaddr;
+    int16_t* convert = (int16_t*) malloc(output_frames * channels * sizeof(int16_t));
+
+    const int volumeShift = 12; // shift requirement for Q4.27 to Q.15
+    // round to half towards zero and saturate at int16 (non-dithered)
+    const int roundVal = (1<<(volumeShift-1)) - 1; // requires volumeShift > 0
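+    // Example: a full-scale +1.0 sample is 1 << 27 in Q4.27;
+    // ((1 << 27) + roundVal) >> 12 == 32768, which saturates to 32767 below.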
+
+    for (size_t i = 0; i < output_frames; i++) {
+        for (int j = 0; j < channels; j++) {
+            int32_t s = out[i * output_channels + j] + roundVal; // add offset here
+            if (s < 0) {
+                s = (s + 1) >> volumeShift; // round to 0
+                if (s < -32768) {
+                    s = -32768;
+                }
+            } else {
+                s = s >> volumeShift;
+                if (s > 32767) {
+                    s = 32767;
+                }
+            }
+            convert[i * channels + j] = int16_t(s);
+        }
+    }
+
+    // write output to disk
+    SF_INFO info;
+    info.frames = 0;
+    info.samplerate = output_freq;
+    info.channels = channels;
+    info.format = SF_FORMAT_WAV | SF_FORMAT_PCM_16;
+    SNDFILE *sf = sf_open(file_out, SFM_WRITE, &info);
+    if (sf == NULL) {
+        perror(file_out);
+        return EXIT_FAILURE;
+    }
+    (void) sf_writef_short(sf, convert, output_frames);
+    sf_close(sf);
+
+    return EXIT_SUCCESS;
+}
diff --git a/media/libaudioprocessing/tests/test_utils.h b/media/libaudioprocessing/tests/test_utils.h
new file mode 100644
index 0000000..b61a929
--- /dev/null
+++ b/media/libaudioprocessing/tests/test_utils.h
@@ -0,0 +1,313 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_TEST_UTILS_H
+#define ANDROID_AUDIO_TEST_UTILS_H
+
+#ifndef LOG_TAG
+#define LOG_TAG "test_utils"
+#endif
+
+#include <math.h>
+#include <stdlib.h>
+
+#include <vector>
+
+#include <log/log.h>
+
+#include <audio_utils/sndfile.h>
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+#endif
+
+template<typename T, typename U>
+struct is_same
+{
+    static const bool value = false;
+};
+
+template<typename T>
+struct is_same<T, T>  // partial specialization
+{
+    static const bool value = true;
+};
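+
+// is_same<T, U>::value is equivalent to std::is_same<T, U>::value.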
+
+template<typename T>
+static inline T convertValue(double val)
+{
+    if (is_same<T, int16_t>::value) {
+        return floor(val * 32767.0 + 0.5);
+    } else if (is_same<T, int32_t>::value) {
+        return floor(val * (1UL<<31) + 0.5);
+    }
+    return val; // assume float or double
+}
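+
+// For example, convertValue<int16_t>(1.0) == 32767 and
+// convertValue<int16_t>(-1.0) == -32767 (full scale, symmetric).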
+
+// Convert a list of integers in CSV format to a vector of those values.
+// Returns the number of elements in the list, or -1 on error.
+static inline int parseCSV(const char *string, std::vector<int>& values)
+{
+    // pass 1: count the number of values and do syntax check
+    size_t numValues = 0;
+    bool hadDigit = false;
+    for (const char *p = string; ; ) {
+        switch (*p++) {
+        case '0': case '1': case '2': case '3': case '4':
+        case '5': case '6': case '7': case '8': case '9':
+            hadDigit = true;
+            break;
+        case '\0':
+            if (hadDigit) {
+                // pass 2: allocate and initialize vector of values
+                values.resize(++numValues);
+                values[0] = atoi(p = string);
+                for (size_t i = 1; i < numValues; ) {
+                    if (*p++ == ',') {
+                        values[i++] = atoi(p);
+                    }
+                }
+                return numValues;
+            }
+            // fall through
+        case ',':
+            if (hadDigit) {
+                hadDigit = false;
+                numValues++;
+                break;
+            }
+            // fall through
+        default:
+            return -1;
+        }
+    }
+}
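+
+// For example, parseCSV("100,200,300", v) sets v to {100, 200, 300} and returns 3;
+// malformed input such as "100,,300" or an empty string returns -1.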
+
+/* Creates a type-independent audio buffer provider from
+ * a buffer base address, size, frame size, and input increment array.
+ *
+ * No allocation or deallocation of the provided buffer is done.
+ */
+class TestProvider : public android::AudioBufferProvider {
+public:
+    TestProvider(void* addr, size_t frames, size_t frameSize,
+            const std::vector<int>& inputIncr)
+    : mAddr(addr),
+      mNumFrames(frames),
+      mFrameSize(frameSize),
+      mNextFrame(0), mUnrel(0), mInputIncr(inputIncr), mNextIdx(0)
+    {
+    }
+
+    TestProvider()
+    : mAddr(NULL), mNumFrames(0), mFrameSize(0),
+      mNextFrame(0), mUnrel(0), mNextIdx(0)
+    {
+    }
+
+    void setIncr(const std::vector<int>& inputIncr) {
+        mInputIncr = inputIncr;
+        mNextIdx = 0;
+    }
+
+    virtual android::status_t getNextBuffer(Buffer* buffer)
+    {
+        size_t requestedFrames = buffer->frameCount;
+        if (requestedFrames > mNumFrames - mNextFrame) {
+            buffer->frameCount = mNumFrames - mNextFrame;
+        }
+        if (!mInputIncr.empty()) {
+            size_t provided = mInputIncr[mNextIdx++];
+            ALOGV("getNextBuffer() mInputIncr[%zu]=%zu not %zu",
+                    mNextIdx-1, provided, buffer->frameCount);
+            if (provided < buffer->frameCount) {
+                buffer->frameCount = provided;
+            }
+            if (mNextIdx >= mInputIncr.size()) {
+                mNextIdx = 0;
+            }
+        }
+        ALOGV("getNextBuffer() requested %zu frames out of %zu frames available"
+                " and returned %zu frames",
+                requestedFrames, mNumFrames - mNextFrame, buffer->frameCount);
+        mUnrel = buffer->frameCount;
+        if (buffer->frameCount > 0) {
+            buffer->raw = (char *)mAddr + mFrameSize * mNextFrame;
+            return android::NO_ERROR;
+        } else {
+            buffer->raw = NULL;
+            return android::NOT_ENOUGH_DATA;
+        }
+    }
+
+    virtual void releaseBuffer(Buffer* buffer)
+    {
+        if (buffer->frameCount > mUnrel) {
+            ALOGE("releaseBuffer() released %zu frames but only %zu available "
+                    "to release", buffer->frameCount, mUnrel);
+            mNextFrame += mUnrel;
+            mUnrel = 0;
+        } else {
+            ALOGV("releaseBuffer() released %zu frames out of %zu frames available "
+                    "to release", buffer->frameCount, mUnrel);
+            mNextFrame += buffer->frameCount;
+            mUnrel -= buffer->frameCount;
+        }
+        buffer->frameCount = 0;
+        buffer->raw = NULL;
+    }
+
+    void reset()
+    {
+        mNextFrame = 0;
+    }
+
+    size_t getNumFrames()
+    {
+        return mNumFrames;
+    }
+
+protected:
+    void* mAddr;   // base address
+    size_t mNumFrames;   // total frames
+    int mFrameSize;      // frame size (# channels * bytes per sample)
+    size_t mNextFrame;   // index of next frame to provide
+    size_t mUnrel;       // number of frames not yet released
+    std::vector<int> mInputIncr; // number of frames provided per call
+    size_t mNextIdx;     // index of next entry in mInputIncr to use
+};
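+
+// Example usage (illustrative; names are hypothetical and the caller owns the buffer):
+//   int16_t data[1024 * 2];                       // 1024 stereo int16_t frames
+//   std::vector<int> incr;
+//   parseCSV("100,200", incr);                    // provide 100, then 200 frames per call
+//   TestProvider provider(data, 1024, 2 * sizeof(int16_t), incr);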
+
+/* Creates a buffer filled with a sine wave.
+ */
+template<typename T>
+static void createSine(void *vbuffer, size_t frames,
+        size_t channels, double sampleRate, double freq)
+{
+    double tscale = 1. / sampleRate;
+    T* buffer = reinterpret_cast<T*>(vbuffer);
+    for (size_t i = 0; i < frames; ++i) {
+        double t = i * tscale;
+        double y = sin(2. * M_PI * freq * t);
+        T yt = convertValue<T>(y);
+
+        for (size_t j = 0; j < channels; ++j) {
+            buffer[i*channels + j] = yt / T(j + 1);
+        }
+    }
+}
+
+/* Creates a buffer filled with a chirp signal (a sine wave sweep).
+ *
+ * When creating the chirp, note that minfreq and maxfreq are true sinusoidal
+ * frequencies (in Hz), not fractions of the sampling rate.
+ *
+ * http://en.wikipedia.org/wiki/Chirp
+ */
+template<typename T>
+static void createChirp(void *vbuffer, size_t frames,
+        size_t channels, double sampleRate,  double minfreq, double maxfreq)
+{
+    double tscale = 1. / sampleRate;
+    T *buffer = reinterpret_cast<T*>(vbuffer);
+    // note the chirp constant k has a divide-by-two.
+    double k = (maxfreq - minfreq) / (2. * tscale * frames);
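+    // The phase below is 2*pi*(k*t + minfreq)*t, so the instantaneous frequency
+    // is 2*k*t + minfreq, which sweeps from minfreq at t = 0 to maxfreq at
+    // t = frames * tscale (the end of the buffer); hence the divide-by-two in k.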
+    for (size_t i = 0; i < frames; ++i) {
+        double t = i * tscale;
+        double y = sin(2. * M_PI * (k * t + minfreq) * t);
+        T yt = convertValue<T>(y);
+
+        for (size_t j = 0; j < channels; ++j) {
+            buffer[i*channels + j] = yt / T(j + 1);
+        }
+    }
+}
+
+/* This derived class creates a buffer provider of datatype T,
+ * consisting of an input signal, e.g. from createChirp().
+ * The number of frames can be obtained from the base class
+ * TestProvider::getNumFrames().
+ */
+
+class SignalProvider : public TestProvider {
+public:
+    SignalProvider()
+    : mSampleRate(0),
+      mChannels(0)
+    {
+    }
+
+    virtual ~SignalProvider()
+    {
+        free(mAddr);
+        mAddr = NULL;
+    }
+
+    template <typename T>
+    void setChirp(size_t channels, double minfreq, double maxfreq, double sampleRate, double time)
+    {
+        createBufferByFrames<T>(channels, sampleRate, sampleRate*time);
+        createChirp<T>(mAddr, mNumFrames, mChannels, mSampleRate, minfreq, maxfreq);
+    }
+
+    template <typename T>
+    void setSine(size_t channels,
+            double freq, double sampleRate, double time)
+    {
+        createBufferByFrames<T>(channels, sampleRate, sampleRate*time);
+        createSine<T>(mAddr, mNumFrames,  mChannels, mSampleRate, freq);
+    }
+
+    template <typename T>
+    void setFile(const char *file_in)
+    {
+        SF_INFO info;
+        info.format = 0;
+        SNDFILE *sf = sf_open(file_in, SFM_READ, &info);
+        if (sf == NULL) {
+            perror(file_in);
+            return;
+        }
+        createBufferByFrames<T>(info.channels, info.samplerate, info.frames);
+        if (is_same<T, float>::value) {
+            (void) sf_readf_float(sf, (float *) mAddr, mNumFrames);
+        } else if (is_same<T, short>::value) {
+            (void) sf_readf_short(sf, (short *) mAddr, mNumFrames);
+        }
+        sf_close(sf);
+    }
+
+    template <typename T>
+    void createBufferByFrames(size_t channels, uint32_t sampleRate, size_t frames)
+    {
+        mNumFrames = frames;
+        mChannels = channels;
+        mFrameSize = mChannels * sizeof(T);
+        free(mAddr);
+        mAddr = malloc(mFrameSize * mNumFrames);
+        mSampleRate = sampleRate;
+    }
+
+    uint32_t getSampleRate() const {
+        return mSampleRate;
+    }
+
+    uint32_t getNumChannels() const {
+        return mChannels;
+    }
+
+protected:
+    uint32_t mSampleRate;
+    uint32_t mChannels;
+};
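+
+// Example usage (illustrative):
+//   SignalProvider provider;
+//   provider.setChirp<int16_t>(2 /* channels */, 100., 12000., 44100., 1. /* seconds */);
+//   // provider now serves one second (44100 frames) of a 100 Hz to 12 kHz stereo sweep.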
+
+#endif // ANDROID_AUDIO_TEST_UTILS_H