aaudio: convert mono output to stereo

MMAP hardware streams are often stereo,
but apps often want to output a mono stream.
Since converting mono to stereo is cheap,
we can open a stereo hardware stream
and convert the app's mono data to stereo for the HW.
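
The conversion itself just duplicates each mono sample into both
output channels. A minimal sketch of the idea (illustrative only,
not the converter code used in this change):

    #include <cstdint>

    // Illustrative only: duplicate each mono frame into left and right.
    static void monoToStereo(const float *mono, float *stereo,
                             int32_t numFrames) {
        for (int32_t i = 0; i < numFrames; i++) {
            stereo[2 * i]     = mono[i]; // left channel
            stereo[2 * i + 1] = mono[i]; // right channel
        }
    }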

Add getDeviceChannelCount().

Test:  adb shell write_sine_callback -pl -s10 -c1 -m3
Test:  adb shell write_sine_callback -pl -s10 -c1 -m3 -x
Change-Id: I444a38c6f5cd32d1d6113f16aacec68285a1bc82
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index b611160..6b25302 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -104,7 +104,7 @@
     request.setUserId(getuid());
     request.setProcessId(getpid());
     request.setSharingModeMatchRequired(isSharingModeMatchRequired());
-    request.setInService(mInService);
+    request.setInService(isInService());
 
     request.getConfiguration().setDeviceId(getDeviceId());
     request.getConfiguration().setSampleRate(getSampleRate());
@@ -118,11 +118,24 @@
 
     request.getConfiguration().setBufferCapacity(builder.getBufferCapacity());
 
+    mDeviceChannelCount = getSamplesPerFrame(); // Assume it will be the same. Update if not.
+
     mServiceStreamHandle = mServiceInterface.openStream(request, configurationOutput);
+    if (mServiceStreamHandle < 0
+            && request.getConfiguration().getSamplesPerFrame() == 1 // mono?
+            && getDirection() == AAUDIO_DIRECTION_OUTPUT
+            && !isInService()) {
+        // If the open failed, try switching from mono to stereo for OUTPUT streams.
+        // Only do this in the client. Otherwise we end up with a mono mixer in the service
+        // that writes to a stereo MMAP stream.
+        ALOGD("%s - openStream() returned %d, try switching from MONO to STEREO",
+              __func__, mServiceStreamHandle);
+        request.getConfiguration().setSamplesPerFrame(2); // stereo
+        mServiceStreamHandle = mServiceInterface.openStream(request, configurationOutput);
+    }
     if (mServiceStreamHandle < 0) {
-        result = mServiceStreamHandle;
-        ALOGE("%s - openStream() returned %d", __func__, result);
-        return result;
+        ALOGE("%s - openStream() returned %d", __func__, mServiceStreamHandle);
+        return mServiceStreamHandle;
     }
 
     result = configurationOutput.validate();
@@ -130,8 +143,12 @@
         goto error;
     }
     // Save results of the open.
+    if (getSamplesPerFrame() == AAUDIO_UNSPECIFIED) {
+        setSamplesPerFrame(configurationOutput.getSamplesPerFrame());
+    }
+    mDeviceChannelCount = configurationOutput.getSamplesPerFrame();
+
     setSampleRate(configurationOutput.getSampleRate());
-    setSamplesPerFrame(configurationOutput.getSamplesPerFrame());
     setDeviceId(configurationOutput.getDeviceId());
     setSessionId(configurationOutput.getSessionId());
     setSharingMode(configurationOutput.getSharingMode());
@@ -160,7 +177,6 @@
         goto error;
     }
 
-
     // Validate result from server.
     framesPerBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
     if (framesPerBurst < MIN_FRAMES_PER_BURST || framesPerBurst > MAX_FRAMES_PER_BURST) {
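
Note on the fallback above: when a mono request falls back to a stereo
device stream, the app-visible channel count stays mono; only the device
side becomes stereo. A hypothetical check, using the accessors added in
this change (the values are illustrative):

    // App asked for 1 channel; the service opened the HW stream as stereo.
    // The stream keeps both views and converts between them on every write.
    assert(getSamplesPerFrame() == 1);     // app-side channel count
    assert(getDeviceChannelCount() == 2);  // HW-side channel count
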
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 0f54f8c..0e0724b 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -138,7 +138,14 @@
     // Calculate timeout for an operation involving framesPerOperation.
     int64_t calculateReasonableTimeout(int32_t framesPerOperation);
 
-    aaudio_format_t          mDeviceFormat = AAUDIO_FORMAT_UNSPECIFIED;
+    aaudio_format_t getDeviceFormat() const { return mDeviceFormat; }
+
+    int32_t getDeviceChannelCount() const { return mDeviceChannelCount; }
+
+    /**
+     * @return true if this stream is running in the audio service, false if in the app process
+     */
+    bool isInService() const { return mInService; }
 
     IsochronousClockModel    mClockModel;      // timing model for chasing the HAL
 
@@ -187,6 +194,11 @@
     EndpointDescriptor       mEndpointDescriptor; // buffer description with resolved addresses
 
     int64_t                  mServiceLatencyNanos = 0;
+
+    // Sometimes the hardware runs with a different format or channel count than the app.
+    // In that case AAudio must convert the data.
+    aaudio_format_t          mDeviceFormat = AAUDIO_FORMAT_UNSPECIFIED;
+    int32_t                  mDeviceChannelCount = 0;
 };
 
 } /* namespace aaudio */
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 62f0fc8..0719fe1 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -176,16 +176,16 @@
         int32_t numSamples = framesToProcess * getSamplesPerFrame();
 
         // TODO factor this out into a utility function
-        if (mDeviceFormat == getFormat()) {
+        if (getDeviceFormat() == getFormat()) {
             memcpy(destination, wrappingBuffer.data[partIndex], numBytes);
-        } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16
+        } else if (getDeviceFormat() == AAUDIO_FORMAT_PCM_I16
                    && getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
             AAudioConvert_pcm16ToFloat(
                     (const int16_t *) wrappingBuffer.data[partIndex],
                     (float *) destination,
                     numSamples,
                     1.0f);
-        } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT
+        } else if (getDeviceFormat() == AAUDIO_FORMAT_PCM_FLOAT
                    && getFormat() == AAUDIO_FORMAT_PCM_I16) {
             AAudioConvert_floatToPcm16(
                     (const float *) wrappingBuffer.data[partIndex],
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 5660c1b..7695dfa 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -194,7 +194,7 @@
     // ALOGD("AudioStreamInternal::writeNowWithConversion(%p, %d)",
     //              buffer, numFrames);
     WrappingBuffer wrappingBuffer;
-    uint8_t *source = (uint8_t *) buffer;
+    uint8_t *byteBuffer = (uint8_t *) buffer;
     int32_t framesLeft = numFrames;
 
     mAudioEndpoint.getEmptyFramesAvailable(&wrappingBuffer);
@@ -208,69 +208,26 @@
             if (framesToWrite > framesAvailable) {
                 framesToWrite = framesAvailable;
             }
+
             int32_t numBytes = getBytesPerFrame() * framesToWrite;
-            int32_t numSamples = framesToWrite * getSamplesPerFrame();
             // Data conversion.
             float levelFrom;
             float levelTo;
-            bool ramping = mVolumeRamp.nextSegment(framesToWrite, &levelFrom, &levelTo);
-            // The formats are validated when the stream is opened so we do not have to
-            // check for illegal combinations here.
-            // TODO factor this out into a utility function
-            if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
-                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
-                    AAudio_linearRamp(
-                            (const float *) source,
-                            (float *) wrappingBuffer.data[partIndex],
-                            framesToWrite,
-                            getSamplesPerFrame(),
-                            levelFrom,
-                            levelTo);
-                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
-                    if (ramping) {
-                        AAudioConvert_floatToPcm16(
-                                (const float *) source,
-                                (int16_t *) wrappingBuffer.data[partIndex],
-                                framesToWrite,
-                                getSamplesPerFrame(),
-                                levelFrom,
-                                levelTo);
-                    } else {
-                        AAudioConvert_floatToPcm16(
-                                (const float *) source,
-                                (int16_t *) wrappingBuffer.data[partIndex],
-                                numSamples,
-                                levelTo);
-                    }
-                }
-            } else if (getFormat() == AAUDIO_FORMAT_PCM_I16) {
-                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
-                    if (ramping) {
-                        AAudioConvert_pcm16ToFloat(
-                                (const int16_t *) source,
-                                (float *) wrappingBuffer.data[partIndex],
-                                framesToWrite,
-                                getSamplesPerFrame(),
-                                levelFrom,
-                                levelTo);
-                    } else {
-                        AAudioConvert_pcm16ToFloat(
-                                (const int16_t *) source,
-                                (float *) wrappingBuffer.data[partIndex],
-                                numSamples,
-                                levelTo);
-                    }
-                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
-                    AAudio_linearRamp(
-                            (const int16_t *) source,
-                            (int16_t *) wrappingBuffer.data[partIndex],
-                            framesToWrite,
-                            getSamplesPerFrame(),
-                            levelFrom,
-                            levelTo);
-                }
-            }
-            source += numBytes;
+            mVolumeRamp.nextSegment(framesToWrite, &levelFrom, &levelTo);
+
+            AAudioDataConverter::FormattedData source(
+                    (void *)byteBuffer,
+                    getFormat(),
+                    getSamplesPerFrame());
+            AAudioDataConverter::FormattedData destination(
+                    wrappingBuffer.data[partIndex],
+                    getDeviceFormat(),
+                    getDeviceChannelCount());
+
+            AAudioDataConverter::convert(source, destination, framesToWrite,
+                                         levelFrom, levelTo);
+
+            byteBuffer += numBytes;
             framesLeft -= framesToWrite;
         } else {
             break;
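
For reference, the AAudioDataConverter used above is introduced elsewhere
in this change and is not shown in this diff. The shape below is inferred
from the call sites, so treat it as a sketch rather than the exact
declaration:

    // Sketch inferred from the call sites above, not the exact declaration.
    class AAudioDataConverter {
    public:
        struct FormattedData {
            FormattedData(void *data, aaudio_format_t format, int32_t channelCount)
                : data(data), format(format), channelCount(channelCount) {}
            void            *data;
            aaudio_format_t  format;
            int32_t          channelCount;
        };

        // Convert numFrames between the source and destination format and
        // channel count, applying a linear gain ramp from levelFrom to levelTo.
        static void convert(const FormattedData &source,
                            const FormattedData &destination,
                            int32_t numFrames,
                            float levelFrom,
                            float levelTo);
    };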