aaudio: convert mono output to stereo

MMAP hardware streams are often stereo, but apps often
want to output a mono stream. Since converting mono to
stereo is cheap, we can open a stereo hardware stream
and convert the app's mono data to stereo before handing
it to the hardware.

Add getDeviceChannelCount().
Factor the data conversion code into a new AAudioDataConverter class.
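
The conversion itself just duplicates each mono sample into both
stereo channels. A minimal sketch of the idea (illustration only,
with a hypothetical helper name; the real code also applies a
volume ramp and optional format conversion):

    static void monoToStereo(const float *in, float *out, int32_t numFrames) {
        for (int32_t i = 0; i < numFrames; i++) {
            const float sample = in[i];   // read one mono frame
            out[2 * i]     = sample;      // left channel
            out[2 * i + 1] = sample;      // right channel
        }
    }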

Test:  adb shell write_sine_callback -pl -s10 -c1 -m3
Test:  adb shell write_sine_callback -pl -s10 -c1 -m3 -x
Change-Id: I444a38c6f5cd32d1d6113f16aacec68285a1bc82
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index b611160..6b25302 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -104,7 +104,7 @@
     request.setUserId(getuid());
     request.setProcessId(getpid());
     request.setSharingModeMatchRequired(isSharingModeMatchRequired());
-    request.setInService(mInService);
+    request.setInService(isInService());
 
     request.getConfiguration().setDeviceId(getDeviceId());
     request.getConfiguration().setSampleRate(getSampleRate());
@@ -118,11 +118,24 @@
 
     request.getConfiguration().setBufferCapacity(builder.getBufferCapacity());
 
+    mDeviceChannelCount = getSamplesPerFrame(); // Assume it will be the same. Update if not.
+
     mServiceStreamHandle = mServiceInterface.openStream(request, configurationOutput);
+    if (mServiceStreamHandle < 0
+            && request.getConfiguration().getSamplesPerFrame() == 1 // mono?
+            && getDirection() == AAUDIO_DIRECTION_OUTPUT
+            && !isInService()) {
+        // If that failed, try switching from mono to stereo for OUTPUT streams.
+        // Only do this in the client. Otherwise we end up with a mono mixer in the service
+        // that writes to a stereo MMAP stream.
+        ALOGD("%s - openStream() returned %d, try switching from MONO to STEREO",
+              __func__, mServiceStreamHandle);
+        request.getConfiguration().setSamplesPerFrame(2); // stereo
+        mServiceStreamHandle = mServiceInterface.openStream(request, configurationOutput);
+    }
     if (mServiceStreamHandle < 0) {
-        result = mServiceStreamHandle;
-        ALOGE("%s - openStream() returned %d", __func__, result);
-        return result;
+        ALOGE("%s - openStream() returned %d", __func__, mServiceStreamHandle);
+        return mServiceStreamHandle;
     }
 
     result = configurationOutput.validate();
@@ -130,8 +143,12 @@
         goto error;
     }
     // Save results of the open.
+    if (getSamplesPerFrame() == AAUDIO_UNSPECIFIED) {
+        setSamplesPerFrame(configurationOutput.getSamplesPerFrame());
+    }
+    mDeviceChannelCount = configurationOutput.getSamplesPerFrame();
+
     setSampleRate(configurationOutput.getSampleRate());
-    setSamplesPerFrame(configurationOutput.getSamplesPerFrame());
     setDeviceId(configurationOutput.getDeviceId());
     setSessionId(configurationOutput.getSessionId());
     setSharingMode(configurationOutput.getSharingMode());
@@ -160,7 +177,6 @@
         goto error;
     }
 
-
     // Validate result from server.
     framesPerBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
     if (framesPerBurst < MIN_FRAMES_PER_BURST || framesPerBurst > MAX_FRAMES_PER_BURST) {
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 0f54f8c..0e0724b 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -138,7 +138,14 @@
     // Calculate timeout for an operation involving framesPerOperation.
     int64_t calculateReasonableTimeout(int32_t framesPerOperation);
 
-    aaudio_format_t          mDeviceFormat = AAUDIO_FORMAT_UNSPECIFIED;
+    aaudio_format_t getDeviceFormat() const { return mDeviceFormat; }
+
+    int32_t getDeviceChannelCount() const { return mDeviceChannelCount; }
+
+    /**
+     * @return true if running in the audio service, false if running in an app process
+     */
+    bool isInService() const { return mInService; }
 
     IsochronousClockModel    mClockModel;      // timing model for chasing the HAL
 
@@ -187,6 +194,11 @@
     EndpointDescriptor       mEndpointDescriptor; // buffer description with resolved addresses
 
     int64_t                  mServiceLatencyNanos = 0;
+
+    // The hardware may be operating with a different format or channel count than the app.
+    // When they differ, AAudio must convert the data.
+    aaudio_format_t          mDeviceFormat = AAUDIO_FORMAT_UNSPECIFIED;
+    int32_t                  mDeviceChannelCount = 0;
 };
 
 } /* namespace aaudio */
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 62f0fc8..0719fe1 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -176,16 +176,16 @@
         int32_t numSamples = framesToProcess * getSamplesPerFrame();
 
         // TODO factor this out into a utility function
-        if (mDeviceFormat == getFormat()) {
+        if (getDeviceFormat() == getFormat()) {
             memcpy(destination, wrappingBuffer.data[partIndex], numBytes);
-        } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16
+        } else if (getDeviceFormat() == AAUDIO_FORMAT_PCM_I16
                    && getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
             AAudioConvert_pcm16ToFloat(
                     (const int16_t *) wrappingBuffer.data[partIndex],
                     (float *) destination,
                     numSamples,
                     1.0f);
-        } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT
+        } else if (getDeviceFormat() == AAUDIO_FORMAT_PCM_FLOAT
                    && getFormat() == AAUDIO_FORMAT_PCM_I16) {
             AAudioConvert_floatToPcm16(
                     (const float *) wrappingBuffer.data[partIndex],
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 5660c1b..7695dfa 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -194,7 +194,7 @@
     // ALOGD("AudioStreamInternal::writeNowWithConversion(%p, %d)",
     //              buffer, numFrames);
     WrappingBuffer wrappingBuffer;
-    uint8_t *source = (uint8_t *) buffer;
+    uint8_t *byteBuffer = (uint8_t *) buffer;
     int32_t framesLeft = numFrames;
 
     mAudioEndpoint.getEmptyFramesAvailable(&wrappingBuffer);
@@ -208,69 +208,26 @@
             if (framesToWrite > framesAvailable) {
                 framesToWrite = framesAvailable;
             }
+
             int32_t numBytes = getBytesPerFrame() * framesToWrite;
-            int32_t numSamples = framesToWrite * getSamplesPerFrame();
             // Data conversion.
             float levelFrom;
             float levelTo;
-            bool ramping = mVolumeRamp.nextSegment(framesToWrite, &levelFrom, &levelTo);
-            // The formats are validated when the stream is opened so we do not have to
-            // check for illegal combinations here.
-            // TODO factor this out into a utility function
-            if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
-                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
-                    AAudio_linearRamp(
-                            (const float *) source,
-                            (float *) wrappingBuffer.data[partIndex],
-                            framesToWrite,
-                            getSamplesPerFrame(),
-                            levelFrom,
-                            levelTo);
-                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
-                    if (ramping) {
-                        AAudioConvert_floatToPcm16(
-                                (const float *) source,
-                                (int16_t *) wrappingBuffer.data[partIndex],
-                                framesToWrite,
-                                getSamplesPerFrame(),
-                                levelFrom,
-                                levelTo);
-                    } else {
-                        AAudioConvert_floatToPcm16(
-                                (const float *) source,
-                                (int16_t *) wrappingBuffer.data[partIndex],
-                                numSamples,
-                                levelTo);
-                    }
-                }
-            } else if (getFormat() == AAUDIO_FORMAT_PCM_I16) {
-                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
-                    if (ramping) {
-                        AAudioConvert_pcm16ToFloat(
-                                (const int16_t *) source,
-                                (float *) wrappingBuffer.data[partIndex],
-                                framesToWrite,
-                                getSamplesPerFrame(),
-                                levelFrom,
-                                levelTo);
-                    } else {
-                        AAudioConvert_pcm16ToFloat(
-                                (const int16_t *) source,
-                                (float *) wrappingBuffer.data[partIndex],
-                                numSamples,
-                                levelTo);
-                    }
-                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
-                    AAudio_linearRamp(
-                            (const int16_t *) source,
-                            (int16_t *) wrappingBuffer.data[partIndex],
-                            framesToWrite,
-                            getSamplesPerFrame(),
-                            levelFrom,
-                            levelTo);
-                }
-            }
-            source += numBytes;
+            mVolumeRamp.nextSegment(framesToWrite, &levelFrom, &levelTo);
+
+            AAudioDataConverter::FormattedData source(
+                    (void *)byteBuffer,
+                    getFormat(),
+                    getSamplesPerFrame());
+            AAudioDataConverter::FormattedData destination(
+                    wrappingBuffer.data[partIndex],
+                    getDeviceFormat(),
+                    getDeviceChannelCount());
+
+            AAudioDataConverter::convert(source, destination, framesToWrite,
+                                         levelFrom, levelTo);
+
+            byteBuffer += numBytes;
             framesLeft -= framesToWrite;
         } else {
             break;
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index 854c691..40b31b9 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -27,6 +27,7 @@
 #include <aaudio/AAudioTesting.h>
 #include <math.h>
 #include <system/audio-base.h>
+#include <assert.h>
 
 #include "utility/AAudioUtilities.h"
 
@@ -72,7 +73,7 @@
                                 int16_t *destination,
                                 int32_t numSamples,
                                 float amplitude) {
-    float scaler = amplitude;
+    const float scaler = amplitude;
     for (int i = 0; i < numSamples; i++) {
         float sample = *source++;
         *destination++ = clipAndClampFloatToPcm16(sample, scaler);
@@ -103,7 +104,7 @@
                                 float *destination,
                                 int32_t numSamples,
                                 float amplitude) {
-    float scaler = amplitude / SHORT_SCALE;
+    const float scaler = amplitude / SHORT_SCALE;
     for (int i = 0; i < numSamples; i++) {
         destination[i] = source[i] * scaler;
     }
@@ -117,7 +118,7 @@
                                 float amplitude1,
                                 float amplitude2) {
     float scaler = amplitude1 / SHORT_SCALE;
-    float delta = (amplitude2 - amplitude1) / (SHORT_SCALE * (float) numFrames);
+    const float delta = (amplitude2 - amplitude1) / (SHORT_SCALE * (float) numFrames);
     for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
         for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
             *destination++ = *source++ * scaler;
@@ -134,7 +135,7 @@
                        float amplitude1,
                        float amplitude2) {
     float scaler = amplitude1;
-    float delta = (amplitude2 - amplitude1) / numFrames;
+    const float delta = (amplitude2 - amplitude1) / numFrames;
     for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
         for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
             float sample = *source++;
@@ -158,7 +159,7 @@
                        float amplitude2) {
     // Because we are converting from int16 to int16, we do not have to scale by 1/32768.
     float scaler = amplitude1;
-    float delta = (amplitude2 - amplitude1) / numFrames;
+    const float delta = (amplitude2 - amplitude1) / numFrames;
     for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
         for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
             // No need to clip because int16_t range is inherently limited.
@@ -169,6 +170,255 @@
     }
 }
 
+// *************************************************************************************
+// Convert mono to stereo while converting the sample format.
+void AAudioConvert_formatMonoToStereo(const float *source,
+                                      int16_t *destination,
+                                      int32_t numFrames,
+                                      float amplitude) {
+    const float scaler = amplitude;
+    for (int i = 0; i < numFrames; i++) {
+        float sample = *source++;
+        int16_t sample16 = clipAndClampFloatToPcm16(sample, scaler);
+        *destination++ = sample16;
+        *destination++ = sample16;
+    }
+}
+
+void AAudioConvert_formatMonoToStereo(const float *source,
+                                      int16_t *destination,
+                                      int32_t numFrames,
+                                      float amplitude1,
+                                      float amplitude2) {
+    // Divide by numFrames so that we almost reach amplitude2.
+    const float delta = (amplitude2 - amplitude1) / numFrames;
+    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
+        const float scaler = amplitude1 + (frameIndex * delta);
+        const float sample = *source++;
+        int16_t sample16 = clipAndClampFloatToPcm16(sample, scaler);
+        *destination++ = sample16;
+        *destination++ = sample16;
+    }
+}
+
+void AAudioConvert_formatMonoToStereo(const int16_t *source,
+                                      float *destination,
+                                      int32_t numFrames,
+                                      float amplitude) {
+    const float scaler = amplitude / SHORT_SCALE;
+    for (int i = 0; i < numFrames; i++) {
+        float sample = source[i] * scaler;
+        *destination++ = sample;
+        *destination++ = sample;
+    }
+}
+
+// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0.
+void AAudioConvert_formatMonoToStereo(const int16_t *source,
+                                      float *destination,
+                                      int32_t numFrames,
+                                      float amplitude1,
+                                      float amplitude2) {
+    const float scaler1 = amplitude1 / SHORT_SCALE;
+    const float delta = (amplitude2 - amplitude1) / (SHORT_SCALE * (float) numFrames);
+    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
+        float scaler = scaler1 + (frameIndex * delta);
+        float sample = source[frameIndex] * scaler;
+        *destination++ = sample;
+        *destination++ = sample;
+    }
+}
+
+// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0.
+void AAudio_linearRampMonoToStereo(const float *source,
+                                   float *destination,
+                                   int32_t numFrames,
+                                   float amplitude1,
+                                   float amplitude2) {
+    const float delta = (amplitude2 - amplitude1) / numFrames;
+    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
+        float sample = *source++;
+
+        // Clip to valid range of a float sample to prevent excessive volume.
+        if (sample > MAX_HEADROOM) sample = MAX_HEADROOM;
+        else if (sample < MIN_HEADROOM) sample = MIN_HEADROOM;
+
+        const float scaler = amplitude1 + (frameIndex * delta);
+        float sampleScaled = sample * scaler;
+        *destination++ = sampleScaled;
+        *destination++ = sampleScaled;
+    }
+}
+
+// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0.
+void AAudio_linearRampMonoToStereo(const int16_t *source,
+                                   int16_t *destination,
+                                   int32_t numFrames,
+                                   float amplitude1,
+                                   float amplitude2) {
+    // Because we are converting from int16 to int16, we do not have to scale by 1/32768.
+    const float delta = (amplitude2 - amplitude1) / numFrames;
+    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
+        const float scaler = amplitude1 + (frameIndex * delta);
+        // No need to clip because int16_t range is inherently limited.
+        const float sample = *source++ * scaler;
+        int16_t sample16 = (int16_t) roundf(sample);
+        *destination++ = sample16;
+        *destination++ = sample16;
+    }
+}
+
+// *************************************************************************************
+void AAudioDataConverter::convert(
+        const FormattedData &source,
+        const FormattedData &destination,
+        int32_t numFrames,
+        float levelFrom,
+        float levelTo) {
+
+    if (source.channelCount == 1 && destination.channelCount == 2) {
+        convertMonoToStereo(source,
+                            destination,
+                            numFrames,
+                            levelFrom,
+                            levelTo);
+    } else {
+        // We only support mono to stereo conversion. Otherwise source and destination
+        // must match.
+        assert(source.channelCount == destination.channelCount);
+        convertChannelsMatch(source,
+                             destination,
+                             numFrames,
+                             levelFrom,
+                             levelTo);
+    }
+}
+
+void AAudioDataConverter::convertMonoToStereo(
+        const FormattedData &source,
+        const FormattedData &destination,
+        int32_t numFrames,
+        float levelFrom,
+        float levelTo) {
+
+    // The formats are validated when the stream is opened so we do not have to
+    // check for illegal combinations here.
+    if (source.format == AAUDIO_FORMAT_PCM_FLOAT) {
+        if (destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
+            AAudio_linearRampMonoToStereo(
+                    (const float *) source.data,
+                    (float *) destination.data,
+                    numFrames,
+                    levelFrom,
+                    levelTo);
+        } else if (destination.format == AAUDIO_FORMAT_PCM_I16) {
+            if (levelFrom != levelTo) {
+                AAudioConvert_formatMonoToStereo(
+                        (const float *) source.data,
+                        (int16_t *) destination.data,
+                        numFrames,
+                        levelFrom,
+                        levelTo);
+            } else {
+                AAudioConvert_formatMonoToStereo(
+                        (const float *) source.data,
+                        (int16_t *) destination.data,
+                        numFrames,
+                        levelTo);
+            }
+        }
+    } else if (source.format == AAUDIO_FORMAT_PCM_I16) {
+        if (destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
+            if (levelFrom != levelTo) {
+                AAudioConvert_formatMonoToStereo(
+                        (const int16_t *) source.data,
+                        (float *) destination.data,
+                        numFrames,
+                        levelFrom,
+                        levelTo);
+            } else {
+                AAudioConvert_formatMonoToStereo(
+                        (const int16_t *) source.data,
+                        (float *) destination.data,
+                        numFrames,
+                        levelTo);
+            }
+        } else if (destination.format == AAUDIO_FORMAT_PCM_I16) {
+            AAudio_linearRampMonoToStereo(
+                    (const int16_t *) source.data,
+                    (int16_t *) destination.data,
+                    numFrames,
+                    levelFrom,
+                    levelTo);
+        }
+    }
+}
+
+void AAudioDataConverter::convertChannelsMatch(
+        const FormattedData &source,
+        const FormattedData &destination,
+        int32_t numFrames,
+        float levelFrom,
+        float levelTo) {
+    const int32_t numSamples = numFrames * source.channelCount;
+
+    // The formats are validated when the stream is opened so we do not have to
+    // check for illegal combinations here.
+    if (source.format == AAUDIO_FORMAT_PCM_FLOAT) {
+        if (destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
+            AAudio_linearRamp(
+                    (const float *) source.data,
+                    (float *) destination.data,
+                    numFrames,
+                    source.channelCount,
+                    levelFrom,
+                    levelTo);
+        } else if (destination.format == AAUDIO_FORMAT_PCM_I16) {
+            if (levelFrom != levelTo) {
+                AAudioConvert_floatToPcm16(
+                        (const float *) source.data,
+                        (int16_t *) destination.data,
+                        numFrames,
+                        source.channelCount,
+                        levelFrom,
+                        levelTo);
+            } else {
+                AAudioConvert_floatToPcm16(
+                        (const float *) source.data,
+                        (int16_t *) destination.data,
+                        numSamples,
+                        levelTo);
+            }
+        }
+    } else if (source.format == AAUDIO_FORMAT_PCM_I16) {
+        if (destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
+            if (levelFrom != levelTo) {
+                AAudioConvert_pcm16ToFloat(
+                        (const int16_t *) source.data,
+                        (float *) destination.data,
+                        numFrames,
+                        source.channelCount,
+                        levelFrom,
+                        levelTo);
+            } else {
+                AAudioConvert_pcm16ToFloat(
+                        (const int16_t *) source.data,
+                        (float *) destination.data,
+                        numSamples,
+                        levelTo);
+            }
+        } else if (destination.format == AAUDIO_FORMAT_PCM_I16) {
+            AAudio_linearRamp(
+                    (const int16_t *) source.data,
+                    (int16_t *) destination.data,
+                    numFrames,
+                    source.channelCount,
+                    levelFrom,
+                    levelTo);
+        }
+    }
+}
+
 status_t AAudioConvert_aaudioToAndroidStatus(aaudio_result_t result) {
     // This covers the case for AAUDIO_OK and for positive results.
     if (result >= 0) {
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index dc6a671..cea88fb 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -159,6 +159,41 @@
                        float amplitude1,
                        float amplitude2);
 
+class AAudioDataConverter {
+public:
+
+    struct FormattedData {
+
+        FormattedData(void *data, aaudio_format_t format, int32_t channelCount)
+            : data(data)
+            , format(format)
+            , channelCount(channelCount) {}
+
+        const void            *data = nullptr;
+        const aaudio_format_t  format = AAUDIO_FORMAT_UNSPECIFIED;
+        const int32_t          channelCount = 1;
+    };
+
+    static void convert(const FormattedData &source,
+                        const FormattedData &destination,
+                        int32_t numFrames,
+                        float levelFrom,
+                        float levelTo);
+
+private:
+    static void convertMonoToStereo(const FormattedData &source,
+                                    const FormattedData &destination,
+                                    int32_t numFrames,
+                                    float levelFrom,
+                                    float levelTo);
+
+    static void convertChannelsMatch(const FormattedData &source,
+                                     const FormattedData &destination,
+                                     int32_t numFrames,
+                                     float levelFrom,
+                                     float levelTo);
+};
+
 /**
  * Calculate the number of bytes and prevent numeric overflow.
  * @param numFrames frame count
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index ac3202b..c708fee 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -32,7 +32,6 @@
 #include "AAudioService.h"
 #include "AAudioServiceStreamMMAP.h"
 #include "AAudioServiceStreamShared.h"
-#include "AAudioServiceStreamMMAP.h"
 #include "binding/IAAudioService.h"
 #include "ServiceUtilities.h"