Merge "Move up AudioFlinger::createTrack and openRecord parameter checks"
diff --git a/cmds/screenrecord/Android.mk b/cmds/screenrecord/Android.mk
index d77fdb6..6747e60 100644
--- a/cmds/screenrecord/Android.mk
+++ b/cmds/screenrecord/Android.mk
@@ -19,6 +19,7 @@
 LOCAL_SRC_FILES := \
 	screenrecord.cpp \
 	EglWindow.cpp \
+	FrameOutput.cpp \
 	TextRenderer.cpp \
 	Overlay.cpp \
 	Program.cpp
diff --git a/cmds/screenrecord/EglWindow.cpp b/cmds/screenrecord/EglWindow.cpp
index aa0517f..c16f2ad 100644
--- a/cmds/screenrecord/EglWindow.cpp
+++ b/cmds/screenrecord/EglWindow.cpp
@@ -35,11 +35,16 @@
 
 
 status_t EglWindow::createWindow(const sp<IGraphicBufferProducer>& surface) {
-    status_t err = eglSetupContext();
+    if (mEglSurface != EGL_NO_SURFACE) {
+        ALOGE("surface already created");
+        return UNKNOWN_ERROR;
+    }
+    status_t err = eglSetupContext(false);
     if (err != NO_ERROR) {
         return err;
     }
 
+    // Cache the current dimensions.  We're not expecting these to change.
     surface->query(NATIVE_WINDOW_WIDTH, &mWidth);
     surface->query(NATIVE_WINDOW_HEIGHT, &mHeight);
 
@@ -56,6 +61,34 @@
     return NO_ERROR;
 }
 
+status_t EglWindow::createPbuffer(int width, int height) {
+    if (mEglSurface != EGL_NO_SURFACE) {
+        ALOGE("surface already created");
+        return UNKNOWN_ERROR;
+    }
+    status_t err = eglSetupContext(true);
+    if (err != NO_ERROR) {
+        return err;
+    }
+
+    mWidth = width;
+    mHeight = height;
+
+    EGLint pbufferAttribs[] = {
+            EGL_WIDTH, width,
+            EGL_HEIGHT, height,
+            EGL_NONE
+    };
+    mEglSurface = eglCreatePbufferSurface(mEglDisplay, mEglConfig, pbufferAttribs);
+    if (mEglSurface == EGL_NO_SURFACE) {
+        ALOGE("eglCreatePbufferSurface error: %#x", eglGetError());
+        eglRelease();
+        return UNKNOWN_ERROR;
+    }
+
+    return NO_ERROR;
+}
+
 status_t EglWindow::makeCurrent() const {
     if (!eglMakeCurrent(mEglDisplay, mEglSurface, mEglSurface, mEglContext)) {
         ALOGE("eglMakeCurrent failed: %#x", eglGetError());
@@ -64,7 +97,7 @@
     return NO_ERROR;
 }
 
-status_t EglWindow::eglSetupContext() {
+status_t EglWindow::eglSetupContext(bool forPbuffer) {
     EGLBoolean result;
 
     mEglDisplay = eglGetDisplay(EGL_DEFAULT_DISPLAY);
@@ -82,17 +115,28 @@
     ALOGV("Initialized EGL v%d.%d", majorVersion, minorVersion);
 
     EGLint numConfigs = 0;
-    EGLint configAttribs[] = {
-        EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
-        EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
-        EGL_RECORDABLE_ANDROID, 1,
-        EGL_RED_SIZE, 8,
-        EGL_GREEN_SIZE, 8,
-        EGL_BLUE_SIZE, 8,
-        EGL_NONE
+    EGLint windowConfigAttribs[] = {
+            EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
+            EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
+            EGL_RECORDABLE_ANDROID, 1,
+            EGL_RED_SIZE, 8,
+            EGL_GREEN_SIZE, 8,
+            EGL_BLUE_SIZE, 8,
+            // no alpha
+            EGL_NONE
     };
-    result = eglChooseConfig(mEglDisplay, configAttribs, &mEglConfig, 1,
-            &numConfigs);
+    EGLint pbufferConfigAttribs[] = {
+            EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
+            EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
+            EGL_RED_SIZE, 8,
+            EGL_GREEN_SIZE, 8,
+            EGL_BLUE_SIZE, 8,
+            EGL_ALPHA_SIZE, 8,
+            EGL_NONE
+    };
+    result = eglChooseConfig(mEglDisplay,
+            forPbuffer ? pbufferConfigAttribs : windowConfigAttribs,
+            &mEglConfig, 1, &numConfigs);
     if (result != EGL_TRUE) {
         ALOGE("eglChooseConfig error: %#x", eglGetError());
         return UNKNOWN_ERROR;
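
Annotation (not part of the patch): the new pbuffer path is meant to be used like the window path — create the surface, then makeCurrent() on the thread that will render and read pixels back, which is exactly what FrameOutput::createInputSurface() does later in this change. A minimal sketch; width/height here are caller-supplied values, not anything defined by the patch:

    EglWindow window;
    if (window.createPbuffer(width, height) == NO_ERROR) {
        window.makeCurrent();                // bind context + pbuffer on this thread
        glViewport(0, 0, width, height);     // render, then glReadPixels() as needed
    }
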
diff --git a/cmds/screenrecord/EglWindow.h b/cmds/screenrecord/EglWindow.h
index 02a2efc..69d0c31 100644
--- a/cmds/screenrecord/EglWindow.h
+++ b/cmds/screenrecord/EglWindow.h
@@ -44,6 +44,9 @@
     // Creates an EGL window for the supplied surface.
     status_t createWindow(const sp<IGraphicBufferProducer>& surface);
 
+    // Creates an EGL pbuffer surface.
+    status_t createPbuffer(int width, int height);
+
     // Return width and height values (obtained from IGBP).
     int getWidth() const { return mWidth; }
     int getHeight() const { return mHeight; }
@@ -65,7 +68,7 @@
     EglWindow& operator=(const EglWindow&);
 
     // Init display, create config and context.
-    status_t eglSetupContext();
+    status_t eglSetupContext(bool forPbuffer);
     void eglRelease();
 
     // Basic EGL goodies.
diff --git a/cmds/screenrecord/FrameOutput.cpp b/cmds/screenrecord/FrameOutput.cpp
new file mode 100644
index 0000000..b5cf2f9
--- /dev/null
+++ b/cmds/screenrecord/FrameOutput.cpp
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "ScreenRecord"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+
+#include "FrameOutput.h"
+
+using namespace android;
+
+static const bool kShowTiming = false;      // set to "true" for debugging
+static const int kGlBytesPerPixel = 4;      // GL_RGBA
+static const int kOutBytesPerPixel = 3;     // RGB only
+
+inline void FrameOutput::setValueLE(uint8_t* buf, uint32_t value) {
+    // Since we're running on an Android device, we're (almost) guaranteed
+    // to be little-endian, and (almost) guaranteed that unaligned 32-bit
+    // writes will work without any performance penalty... but do it
+    // byte-by-byte anyway.
+    buf[0] = (uint8_t) value;
+    buf[1] = (uint8_t) (value >> 8);
+    buf[2] = (uint8_t) (value >> 16);
+    buf[3] = (uint8_t) (value >> 24);
+}
+
+status_t FrameOutput::createInputSurface(int width, int height,
+        sp<IGraphicBufferProducer>* pBufferProducer) {
+    status_t err;
+
+    err = mEglWindow.createPbuffer(width, height);
+    if (err != NO_ERROR) {
+        return err;
+    }
+    mEglWindow.makeCurrent();
+
+    glViewport(0, 0, width, height);
+    glDisable(GL_DEPTH_TEST);
+    glDisable(GL_CULL_FACE);
+
+    // Shader for rendering the external texture.
+    err = mExtTexProgram.setup(Program::PROGRAM_EXTERNAL_TEXTURE);
+    if (err != NO_ERROR) {
+        return err;
+    }
+
+    // Input side (buffers from virtual display).
+    glGenTextures(1, &mExtTextureName);
+    if (mExtTextureName == 0) {
+        ALOGE("glGenTextures failed: %#x", glGetError());
+        return UNKNOWN_ERROR;
+    }
+
+    mBufferQueue = new BufferQueue(/*new GraphicBufferAlloc()*/);
+    mGlConsumer = new GLConsumer(mBufferQueue, mExtTextureName,
+                GL_TEXTURE_EXTERNAL_OES);
+    mGlConsumer->setName(String8("virtual display"));
+    mGlConsumer->setDefaultBufferSize(width, height);
+    mGlConsumer->setDefaultMaxBufferCount(5);
+    mGlConsumer->setConsumerUsageBits(GRALLOC_USAGE_HW_TEXTURE);
+
+    mGlConsumer->setFrameAvailableListener(this);
+
+    mPixelBuf = new uint8_t[width * height * kGlBytesPerPixel];
+
+    *pBufferProducer = mBufferQueue;
+
+    ALOGD("FrameOutput::createInputSurface OK");
+    return NO_ERROR;
+}
+
+status_t FrameOutput::copyFrame(FILE* fp, long timeoutUsec) {
+    Mutex::Autolock _l(mMutex);
+    ALOGV("copyFrame %ld\n", timeoutUsec);
+
+    if (!mFrameAvailable) {
+        nsecs_t timeoutNsec = (nsecs_t)timeoutUsec * 1000;
+        int cc = mEventCond.waitRelative(mMutex, timeoutNsec);
+        if (cc == -ETIMEDOUT) {
+            ALOGV("cond wait timed out");
+            return ETIMEDOUT;
+        } else if (cc != 0) {
+            ALOGW("cond wait returned error %d", cc);
+            return cc;
+        }
+    }
+    if (!mFrameAvailable) {
+        // This happens when Ctrl-C is hit.  Apparently POSIX says that the
+        // pthread wait call doesn't return EINTR, treating this instead as
+        // an instance of a "spurious wakeup".  We didn't get a frame, so
+        // we just treat it as a timeout.
+        return ETIMEDOUT;
+    }
+
+    // A frame is available.  Clear the flag for the next round.
+    mFrameAvailable = false;
+
+    float texMatrix[16];
+    mGlConsumer->updateTexImage();
+    mGlConsumer->getTransformMatrix(texMatrix);
+
+    // The data is in an external texture, so we need to render it to the
+    // pbuffer to get access to RGB pixel data.  We also want to flip it
+    // upside-down for easy conversion to a bitmap.
+    int width = mEglWindow.getWidth();
+    int height = mEglWindow.getHeight();
+    status_t err = mExtTexProgram.blit(mExtTextureName, texMatrix, 0, 0,
+            width, height, true);
+    if (err != NO_ERROR) {
+        return err;
+    }
+
+    // GLES only guarantees that glReadPixels() will work with GL_RGBA, so we
+    // need to get 4 bytes/pixel and reduce it.  Depending on the size of the
+    // screen and the device capabilities, this can take a while.
+    int64_t startWhenNsec, pixWhenNsec, endWhenNsec;
+    if (kShowTiming) {
+        startWhenNsec = systemTime(CLOCK_MONOTONIC);
+    }
+    GLenum glErr;
+    glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, mPixelBuf);
+    if ((glErr = glGetError()) != GL_NO_ERROR) {
+        ALOGE("glReadPixels failed: %#x", glErr);
+        return UNKNOWN_ERROR;
+    }
+    if (kShowTiming) {
+        pixWhenNsec = systemTime(CLOCK_MONOTONIC);
+    }
+    reduceRgbaToRgb(mPixelBuf, width * height);
+    if (kShowTiming) {
+        endWhenNsec = systemTime(CLOCK_MONOTONIC);
+        ALOGD("got pixels (get=%.3f ms, reduce=%.3fms)",
+                (pixWhenNsec - startWhenNsec) / 1000000.0,
+                (endWhenNsec - pixWhenNsec) / 1000000.0);
+    }
+
+    // Fill out the header.
+    size_t headerLen = sizeof(uint32_t) * 5;
+    size_t rgbDataLen = width * height * kOutBytesPerPixel;
+    size_t packetLen = headerLen - sizeof(uint32_t) + rgbDataLen;
+    uint8_t header[headerLen];
+    setValueLE(&header[0], packetLen);
+    setValueLE(&header[4], width);
+    setValueLE(&header[8], height);
+    setValueLE(&header[12], width * kOutBytesPerPixel);
+    setValueLE(&header[16], HAL_PIXEL_FORMAT_RGB_888);
+
+    // Currently using buffered I/O rather than writev().  Not expecting it
+    // to make much of a difference, but it might be worth testing for larger
+    // frame sizes.
+    if (kShowTiming) {
+        startWhenNsec = systemTime(CLOCK_MONOTONIC);
+    }
+    fwrite(header, 1, headerLen, fp);
+    fwrite(mPixelBuf, 1, rgbDataLen, fp);
+    fflush(fp);
+    if (kShowTiming) {
+        endWhenNsec = systemTime(CLOCK_MONOTONIC);
+        ALOGD("wrote pixels (%.3f ms)",
+                (endWhenNsec - startWhenNsec) / 1000000.0);
+    }
+
+    if (ferror(fp)) {
+        // errno may not be useful; log it anyway
+        ALOGE("write failed (errno=%d)", errno);
+        return UNKNOWN_ERROR;
+    }
+
+    return NO_ERROR;
+}
+
+void FrameOutput::reduceRgbaToRgb(uint8_t* buf, unsigned int pixelCount) {
+    // Convert RGBA to RGB.
+    //
+    // Unaligned 32-bit accesses are allowed on ARM, so we could do this
+    // with 32-bit copies advancing at different rates (taking care at the
+    // end to not go one byte over).
+    const uint8_t* readPtr = buf;
+    for (unsigned int i = 0; i < pixelCount; i++) {
+        *buf++ = *readPtr++;
+        *buf++ = *readPtr++;
+        *buf++ = *readPtr++;
+        readPtr++;
+    }
+}
+
+// Callback; executes on arbitrary thread.
+void FrameOutput::onFrameAvailable() {
+    Mutex::Autolock _l(mMutex);
+    mFrameAvailable = true;
+    mEventCond.signal();
+}
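
Annotation (not part of the patch): copyFrame() above emits one packet per frame — a 20-byte header of five little-endian uint32 values (payload length excluding the length word itself, width, height, stride = width * 3, HAL_PIXEL_FORMAT_RGB_888), followed by width * height * 3 bytes of RGB. A hedged sketch of a reader for that stream; the helper names are hypothetical:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Mirror of setValueLE(): read a 32-bit little-endian value.
    static uint32_t getValueLE(const uint8_t* buf) {
        return buf[0] | (buf[1] << 8) | (buf[2] << 16) | ((uint32_t) buf[3] << 24);
    }

    // Reads one frame packet; returns false on EOF or short read.
    bool readFramePacket(FILE* fp, std::vector<uint8_t>* rgb,
            uint32_t* width, uint32_t* height) {
        uint8_t header[20];                                 // 5 x uint32, little-endian
        if (fread(header, 1, sizeof(header), fp) != sizeof(header)) return false;
        uint32_t packetLen = getValueLE(&header[0]);        // bytes following this field
        *width  = getValueLE(&header[4]);
        *height = getValueLE(&header[8]);
        uint32_t stride = getValueLE(&header[12]);          // width * 3 for RGB_888
        uint32_t format = getValueLE(&header[16]);          // HAL_PIXEL_FORMAT_RGB_888
        (void) stride; (void) format;
        uint32_t rgbLen = packetLen - (sizeof(header) - sizeof(uint32_t));
        rgb->resize(rgbLen);
        return fread(rgb->data(), 1, rgbLen, fp) == rgbLen;
    }
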
diff --git a/cmds/screenrecord/FrameOutput.h b/cmds/screenrecord/FrameOutput.h
new file mode 100644
index 0000000..b8e9e68
--- /dev/null
+++ b/cmds/screenrecord/FrameOutput.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SCREENRECORD_FRAMEOUTPUT_H
+#define SCREENRECORD_FRAMEOUTPUT_H
+
+#include "Program.h"
+#include "EglWindow.h"
+
+#include <gui/BufferQueue.h>
+#include <gui/GLConsumer.h>
+
+namespace android {
+
+/*
+ * Support for "frames" output format.
+ */
+class FrameOutput : public GLConsumer::FrameAvailableListener {
+public:
+    FrameOutput() : mFrameAvailable(false),
+        mExtTextureName(0),
+        mPixelBuf(NULL)
+        {}
+    virtual ~FrameOutput() {
+        delete[] mPixelBuf;
+    }
+
+    // Create an "input surface", similar in purpose to a MediaCodec input
+    // surface, that the virtual display can send buffers to.  Also configures
+    // EGL with a pbuffer surface on the current thread.
+    status_t createInputSurface(int width, int height,
+            sp<IGraphicBufferProducer>* pBufferProducer);
+
+    // Copy one frame from the input to the output.  If no frame is available,
+    // this will wait up to the specified number of microseconds.
+    //
+    // Returns ETIMEDOUT if the timeout expired before we found a frame.
+    status_t copyFrame(FILE* fp, long timeoutUsec);
+
+    // Prepare to copy frames.  Makes the EGL context used by this object current.
+    void prepareToCopy() {
+        mEglWindow.makeCurrent();
+    }
+
+private:
+    FrameOutput(const FrameOutput&);
+    FrameOutput& operator=(const FrameOutput&);
+
+    // (overrides GLConsumer::FrameAvailableListener method)
+    virtual void onFrameAvailable();
+
+    // Reduces RGBA to RGB, in place.
+    static void reduceRgbaToRgb(uint8_t* buf, unsigned int pixelCount);
+
+    // Put a 32-bit value into a buffer, in little-endian byte order.
+    static void setValueLE(uint8_t* buf, uint32_t value);
+
+    // Used to wait for the FrameAvailableListener callback.
+    Mutex mMutex;
+    Condition mEventCond;
+
+    // Set by the FrameAvailableListener callback.
+    bool mFrameAvailable;
+
+    // Our queue.  The producer side is passed to the virtual display, the
+    // consumer side feeds into our GLConsumer.
+    sp<BufferQueue> mBufferQueue;
+
+    // This receives frames from the virtual display and makes them available
+    // as an external texture.
+    sp<GLConsumer> mGlConsumer;
+
+    // EGL display / context / surface.
+    EglWindow mEglWindow;
+
+    // GL rendering support.
+    Program mExtTexProgram;
+
+    // External texture, updated by GLConsumer.
+    GLuint mExtTextureName;
+
+    // Pixel data buffer.
+    uint8_t* mPixelBuf;
+};
+
+}; // namespace android
+
+#endif /*SCREENRECORD_FRAMEOUTPUT_H*/
diff --git a/cmds/screenrecord/Program.cpp b/cmds/screenrecord/Program.cpp
index a198204..73cae6e 100644
--- a/cmds/screenrecord/Program.cpp
+++ b/cmds/screenrecord/Program.cpp
@@ -201,7 +201,7 @@
 
 
 status_t Program::blit(GLuint texName, const float* texMatrix,
-        int32_t x, int32_t y, int32_t w, int32_t h) const {
+        int32_t x, int32_t y, int32_t w, int32_t h, bool invert) const {
     ALOGV("Program::blit %d xy=%d,%d wh=%d,%d", texName, x, y, w, h);
 
     const float pos[] = {
@@ -218,7 +218,7 @@
     };
     status_t err;
 
-    err = beforeDraw(texName, texMatrix, pos, uv);
+    err = beforeDraw(texName, texMatrix, pos, uv, invert);
     if (err == NO_ERROR) {
         glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
         err = afterDraw();
@@ -232,7 +232,7 @@
 
     status_t err;
 
-    err = beforeDraw(texName, texMatrix, vertices, texes);
+    err = beforeDraw(texName, texMatrix, vertices, texes, false);
     if (err == NO_ERROR) {
         glDrawArrays(GL_TRIANGLES, 0, count);
         err = afterDraw();
@@ -241,7 +241,7 @@
 }
 
 status_t Program::beforeDraw(GLuint texName, const float* texMatrix,
-        const float* vertices, const float* texes) const {
+        const float* vertices, const float* texes, bool invert) const {
     // Create an orthographic projection matrix based on viewport size.
     GLint vp[4];
     glGetIntegerv(GL_VIEWPORT, vp);
@@ -251,6 +251,10 @@
         0.0f,               0.0f,               1.0f,   0.0f,
         -1.0f,              1.0f,               0.0f,   1.0f,
     };
+    if (invert) {
+        screenToNdc[5] = -screenToNdc[5];
+        screenToNdc[13] = -screenToNdc[13];
+    }
 
     glUseProgram(mProgram);
 
diff --git a/cmds/screenrecord/Program.h b/cmds/screenrecord/Program.h
index e47bc0d..558be8d 100644
--- a/cmds/screenrecord/Program.h
+++ b/cmds/screenrecord/Program.h
@@ -51,9 +51,11 @@
     // Release the program and associated resources.
     void release();
 
-    // Blit the specified texture to { x, y, x+w, y+h }.
+    // Blit the specified texture to { x, y, x+w, y+h }.  Inverts the
+    // content if "invert" is set.
     status_t blit(GLuint texName, const float* texMatrix,
-            int32_t x, int32_t y, int32_t w, int32_t h) const;
+            int32_t x, int32_t y, int32_t w, int32_t h,
+            bool invert = false) const;
 
     // Draw a number of triangles.
     status_t drawTriangles(GLuint texName, const float* texMatrix,
@@ -67,7 +69,7 @@
 
     // Common code for draw functions.
     status_t beforeDraw(GLuint texName, const float* texMatrix,
-            const float* vertices, const float* texes) const;
+            const float* vertices, const float* texes, bool invert) const;
     status_t afterDraw() const;
 
     // GLES 2 shader utilities.
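
Annotation (not part of the patch): in Program::beforeDraw(), "invert" only negates the Y column and Y translation of the column-major screen-to-NDC matrix (elements [5] and [13]). Assuming the usual orthographic setup in the unchanged lines above that hunk, the vertical mapping becomes:

    // normal:    y_ndc = 1 - 2*y / viewportHeight   (screen origin at the top)
    // inverted:  y_ndc = 2*y / viewportHeight - 1   (vertically flipped)

FrameOutput passes invert=true so that glReadPixels(), which returns rows bottom-to-top, yields a top-down image.
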
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index b6f150c..a17fc51 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -50,6 +50,7 @@
 
 #include "screenrecord.h"
 #include "Overlay.h"
+#include "FrameOutput.h"
 
 using namespace android;
 
@@ -58,11 +59,14 @@
 static const uint32_t kMaxTimeLimitSec = 180;       // 3 minutes
 static const uint32_t kFallbackWidth = 1280;        // 720p
 static const uint32_t kFallbackHeight = 720;
+static const char* kMimeTypeAvc = "video/avc";
 
 // Command-line parameters.
 static bool gVerbose = false;           // chatty on stdout
 static bool gRotate = false;            // rotate 90 degrees
-static bool gRawOutput = false;         // generate raw H.264 byte stream output
+static enum {
+    FORMAT_MP4, FORMAT_H264, FORMAT_FRAMES
+} gOutputFormat = FORMAT_MP4;           // data format for output
 static bool gSizeSpecified = false;     // was size explicitly requested?
 static bool gWantInfoScreen = false;    // do we want initial info screen?
 static bool gWantFrameTime = false;     // do we want times on each frame?
@@ -142,14 +146,14 @@
     status_t err;
 
     if (gVerbose) {
-        printf("Configuring recorder for %dx%d video at %.2fMbps\n",
-                gVideoWidth, gVideoHeight, gBitRate / 1000000.0);
+        printf("Configuring recorder for %dx%d %s at %.2fMbps\n",
+                gVideoWidth, gVideoHeight, kMimeTypeAvc, gBitRate / 1000000.0);
     }
 
     sp<AMessage> format = new AMessage;
     format->setInt32("width", gVideoWidth);
     format->setInt32("height", gVideoHeight);
-    format->setString("mime", "video/avc");
+    format->setString("mime", kMimeTypeAvc);
     format->setInt32("color-format", OMX_COLOR_FormatAndroidOpaque);
     format->setInt32("bitrate", gBitRate);
     format->setFloat("frame-rate", displayFps);
@@ -159,16 +163,18 @@
     looper->setName("screenrecord_looper");
     looper->start();
     ALOGV("Creating codec");
-    sp<MediaCodec> codec = MediaCodec::CreateByType(looper, "video/avc", true);
+    sp<MediaCodec> codec = MediaCodec::CreateByType(looper, kMimeTypeAvc, true);
     if (codec == NULL) {
-        fprintf(stderr, "ERROR: unable to create video/avc codec instance\n");
+        fprintf(stderr, "ERROR: unable to create %s codec instance\n",
+                kMimeTypeAvc);
         return UNKNOWN_ERROR;
     }
 
     err = codec->configure(format, NULL, NULL,
             MediaCodec::CONFIGURE_FLAG_ENCODE);
     if (err != NO_ERROR) {
-        fprintf(stderr, "ERROR: unable to configure codec (err=%d)\n", err);
+        fprintf(stderr, "ERROR: unable to configure %s codec at %dx%d (err=%d)\n",
+                kMimeTypeAvc, gVideoWidth, gVideoHeight, err);
         codec->release();
         return err;
     }
@@ -513,7 +519,7 @@
 }
 
 /*
- * Main "do work" method.
+ * Main "do work" entry point.
  *
  * Configures codec, muxer, and virtual display, then starts moving bits
  * around.
@@ -555,30 +561,40 @@
 
     // Configure and start the encoder.
     sp<MediaCodec> encoder;
+    sp<FrameOutput> frameOutput;
     sp<IGraphicBufferProducer> encoderInputSurface;
-    err = prepareEncoder(mainDpyInfo.fps, &encoder, &encoderInputSurface);
+    if (gOutputFormat != FORMAT_FRAMES) {
+        err = prepareEncoder(mainDpyInfo.fps, &encoder, &encoderInputSurface);
 
-    if (err != NO_ERROR && !gSizeSpecified) {
-        // fallback is defined for landscape; swap if we're in portrait
-        bool needSwap = gVideoWidth < gVideoHeight;
-        uint32_t newWidth = needSwap ? kFallbackHeight : kFallbackWidth;
-        uint32_t newHeight = needSwap ? kFallbackWidth : kFallbackHeight;
-        if (gVideoWidth != newWidth && gVideoHeight != newHeight) {
-            ALOGV("Retrying with 720p");
-            fprintf(stderr, "WARNING: failed at %dx%d, retrying at %dx%d\n",
-                    gVideoWidth, gVideoHeight, newWidth, newHeight);
-            gVideoWidth = newWidth;
-            gVideoHeight = newHeight;
-            err = prepareEncoder(mainDpyInfo.fps, &encoder,
-                    &encoderInputSurface);
+        if (err != NO_ERROR && !gSizeSpecified) {
+            // fallback is defined for landscape; swap if we're in portrait
+            bool needSwap = gVideoWidth < gVideoHeight;
+            uint32_t newWidth = needSwap ? kFallbackHeight : kFallbackWidth;
+            uint32_t newHeight = needSwap ? kFallbackWidth : kFallbackHeight;
+            if (gVideoWidth != newWidth && gVideoHeight != newHeight) {
+                ALOGV("Retrying with 720p");
+                fprintf(stderr, "WARNING: failed at %dx%d, retrying at %dx%d\n",
+                        gVideoWidth, gVideoHeight, newWidth, newHeight);
+                gVideoWidth = newWidth;
+                gVideoHeight = newHeight;
+                err = prepareEncoder(mainDpyInfo.fps, &encoder,
+                        &encoderInputSurface);
+            }
+        }
+        if (err != NO_ERROR) return err;
+
+        // From here on, we must explicitly release() the encoder before it goes
+        // out of scope, or we will get an assertion failure from stagefright
+        // later on in a different thread.
+    } else {
+        // We're not using an encoder at all.  The "encoder input surface" we hand to
+        // SurfaceFlinger will just feed directly to us.
+        frameOutput = new FrameOutput();
+        err = frameOutput->createInputSurface(gVideoWidth, gVideoHeight, &encoderInputSurface);
+        if (err != NO_ERROR) {
+            return err;
         }
     }
-    if (err != NO_ERROR) return err;
-
-    // From here on, we must explicitly release() the encoder before it goes
-    // out of scope, or we will get an assertion failure from stagefright
-    // later on in a different thread.
-
 
     // Draw the "info" page by rendering a frame with GLES and sending
     // it directly to the encoder.
@@ -595,7 +611,7 @@
         overlay = new Overlay();
         err = overlay->start(encoderInputSurface, &bufferProducer);
         if (err != NO_ERROR) {
-            encoder->release();
+            if (encoder != NULL) encoder->release();
             return err;
         }
         if (gVerbose) {
@@ -610,46 +626,83 @@
     sp<IBinder> dpy;
     err = prepareVirtualDisplay(mainDpyInfo, bufferProducer, &dpy);
     if (err != NO_ERROR) {
-        encoder->release();
+        if (encoder != NULL) encoder->release();
         return err;
     }
 
     sp<MediaMuxer> muxer = NULL;
     FILE* rawFp = NULL;
-    if (gRawOutput) {
-        rawFp = prepareRawOutput(fileName);
-        if (rawFp == NULL) {
-            encoder->release();
-            return -1;
+    switch (gOutputFormat) {
+        case FORMAT_MP4: {
+            // Configure muxer.  We have to wait for the CSD blob from the encoder
+            // before we can start it.
+            muxer = new MediaMuxer(fileName, MediaMuxer::OUTPUT_FORMAT_MPEG_4);
+            if (gRotate) {
+                muxer->setOrientationHint(90);  // TODO: does this do anything?
+            }
+            break;
+        }
+        case FORMAT_H264:
+        case FORMAT_FRAMES: {
+            rawFp = prepareRawOutput(fileName);
+            if (rawFp == NULL) {
+                if (encoder != NULL) encoder->release();
+                return -1;
+            }
+            break;
+        }
+        default:
+            fprintf(stderr, "ERROR: unknown format %d\n", gOutputFormat);
+            abort();
+    }
+
+    if (gOutputFormat == FORMAT_FRAMES) {
+        // TODO: if we want to make this a proper feature, we should output
+        //       an outer header with version info.  Right now we never change
+        //       the frame size or format, so we could conceivably just send
+        //       the current frame header once and then follow it with an
+        //       unbroken stream of data.
+
+        // Make the EGL context current again.  This gets unhooked if we're
+        // using "--bugreport" mode.
+        // TODO: figure out if we can eliminate this
+        frameOutput->prepareToCopy();
+
+        while (!gStopRequested) {
+            // Poll for frames, the same way we do for MediaCodec.  We do
+            // all of the work on the main thread.
+            //
+            // Ideally we'd sleep indefinitely and wake when the
+            // stop was requested, but this will do for now.  (It almost
+            // works because wait() wakes when a signal hits, but we
+            // need to handle the edge cases.)
+            err = frameOutput->copyFrame(rawFp, 250000);
+            if (err == ETIMEDOUT) {
+                err = NO_ERROR;
+            } else if (err != NO_ERROR) {
+                ALOGE("Got error %d from copyFrame()", err);
+                break;
+            }
         }
     } else {
-        // Configure muxer.  We have to wait for the CSD blob from the encoder
-        // before we can start it.
-        muxer = new MediaMuxer(fileName, MediaMuxer::OUTPUT_FORMAT_MPEG_4);
-        if (gRotate) {
-            muxer->setOrientationHint(90);  // TODO: does this do anything?
+        // Main encoder loop.
+        err = runEncoder(encoder, muxer, rawFp, mainDpy, dpy,
+                mainDpyInfo.orientation);
+        if (err != NO_ERROR) {
+            fprintf(stderr, "Encoder failed (err=%d)\n", err);
+            // fall through to cleanup
         }
-    }
 
-    // Main encoder loop.
-    err = runEncoder(encoder, muxer, rawFp, mainDpy, dpy,
-            mainDpyInfo.orientation);
-    if (err != NO_ERROR) {
-        fprintf(stderr, "Encoder failed (err=%d)\n", err);
-        // fall through to cleanup
-    }
-
-    if (gVerbose) {
-        printf("Stopping encoder and muxer\n");
+        if (gVerbose) {
+            printf("Stopping encoder and muxer\n");
+        }
     }
 
     // Shut everything down, starting with the producer side.
     encoderInputSurface = NULL;
     SurfaceComposerClient::destroyDisplay(dpy);
-    if (overlay != NULL) {
-        overlay->stop();
-    }
-    encoder->stop();
+    if (overlay != NULL) overlay->stop();
+    if (encoder != NULL) encoder->stop();
     if (muxer != NULL) {
         // If we don't stop muxer explicitly, i.e. let the destructor run,
         // it may hang (b/11050628).
@@ -657,7 +710,7 @@
     } else if (rawFp != stdout) {
         fclose(rawFp);
     }
-    encoder->release();
+    if (encoder != NULL) encoder->release();
 
     return err;
 }
@@ -819,11 +872,12 @@
         { "size",               required_argument,  NULL, 's' },
         { "bit-rate",           required_argument,  NULL, 'b' },
         { "time-limit",         required_argument,  NULL, 't' },
+        { "bugreport",          no_argument,        NULL, 'u' },
+        // "unofficial" options
         { "show-device-info",   no_argument,        NULL, 'i' },
         { "show-frame-time",    no_argument,        NULL, 'f' },
-        { "bugreport",          no_argument,        NULL, 'u' },
         { "rotate",             no_argument,        NULL, 'r' },
-        { "raw",                no_argument,        NULL, 'w' },
+        { "output-format",      required_argument,  NULL, 'o' },
         { NULL,                 0,                  NULL, 0 }
     };
 
@@ -875,23 +929,31 @@
                 return 2;
             }
             break;
+        case 'u':
+            gWantInfoScreen = true;
+            gWantFrameTime = true;
+            break;
         case 'i':
             gWantInfoScreen = true;
             break;
         case 'f':
             gWantFrameTime = true;
             break;
-        case 'u':
-            gWantInfoScreen = true;
-            gWantFrameTime = true;
-            break;
         case 'r':
             // experimental feature
             gRotate = true;
             break;
-        case 'w':
-            // experimental feature
-            gRawOutput = true;
+        case 'o':
+            if (strcmp(optarg, "mp4") == 0) {
+                gOutputFormat = FORMAT_MP4;
+            } else if (strcmp(optarg, "h264") == 0) {
+                gOutputFormat = FORMAT_H264;
+            } else if (strcmp(optarg, "frames") == 0) {
+                gOutputFormat = FORMAT_FRAMES;
+            } else {
+                fprintf(stderr, "Unknown format '%s'\n", optarg);
+                return 2;
+            }
             break;
         default:
             if (ic != '?') {
@@ -907,7 +969,7 @@
     }
 
     const char* fileName = argv[optind];
-    if (!gRawOutput) {
+    if (gOutputFormat == FORMAT_MP4) {
         // MediaMuxer tries to create the file in the constructor, but we don't
         // learn about the failure until muxer.start(), which returns a generic
         // error code without logging anything.  We attempt to create the file
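
Annotation (not part of the patch): with the option table above, the experimental --raw flag is replaced by --output-format, which takes mp4 (the default), h264, or frames. Hedged usage sketch — assuming prepareRawOutput() still maps a file name of "-" to stdout, as the rawFp != stdout check earlier suggests:

    adb shell screenrecord --output-format=h264 - > screen.h264
    adb shell screenrecord --output-format=frames /data/local/tmp/frames.rgb
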
diff --git a/libvideoeditor/lvpp/Android.mk b/libvideoeditor/lvpp/Android.mk
index 2286827..860d351 100755
--- a/libvideoeditor/lvpp/Android.mk
+++ b/libvideoeditor/lvpp/Android.mk
@@ -71,7 +71,6 @@
     $(TOP)/frameworks/av/media/libstagefright \
     $(TOP)/frameworks/av/media/libstagefright/include \
     $(TOP)/frameworks/av/media/libstagefright/rtsp \
-    $(call include-path-for, corecg graphics) \
     $(TOP)/frameworks/av/libvideoeditor/osal/inc \
     $(TOP)/frameworks/av/libvideoeditor/vss/common/inc \
     $(TOP)/frameworks/av/libvideoeditor/vss/mcs/inc \
diff --git a/libvideoeditor/vss/stagefrightshells/src/Android.mk b/libvideoeditor/vss/stagefrightshells/src/Android.mk
index e30b85d..9188942 100755
--- a/libvideoeditor/vss/stagefrightshells/src/Android.mk
+++ b/libvideoeditor/vss/stagefrightshells/src/Android.mk
@@ -33,7 +33,6 @@
     $(TOP)/frameworks/av/media/libstagefright \
     $(TOP)/frameworks/av/media/libstagefright/include \
     $(TOP)/frameworks/av/media/libstagefright/rtsp \
-    $(call include-path-for, corecg graphics) \
     $(TOP)/frameworks/av/libvideoeditor/lvpp \
     $(TOP)/frameworks/av/libvideoeditor/osal/inc \
     $(TOP)/frameworks/av/libvideoeditor/vss/inc \
diff --git a/media/libeffects/visualizer/Android.mk b/media/libeffects/visualizer/Android.mk
index dd2d306..c92c543 100644
--- a/media/libeffects/visualizer/Android.mk
+++ b/media/libeffects/visualizer/Android.mk
@@ -17,7 +17,6 @@
 LOCAL_MODULE:= libvisualizer
 
 LOCAL_C_INCLUDES := \
-	$(call include-path-for, graphics corecg) \
 	$(call include-path-for, audio-effects)
 
 
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index e0acae6..f3770e4 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -72,7 +72,6 @@
 LOCAL_MODULE:= libmedia
 
 LOCAL_C_INCLUDES := \
-    $(call include-path-for, graphics corecg) \
     $(TOP)/frameworks/native/include/media/openmax \
     external/icu4c/common \
     external/icu4c/i18n \
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 8f21632..4189a5e 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -45,7 +45,6 @@
     libstagefright_rtsp         \
 
 LOCAL_C_INCLUDES :=                                                 \
-    $(call include-path-for, graphics corecg)                       \
     $(TOP)/frameworks/av/media/libstagefright/include               \
     $(TOP)/frameworks/av/media/libstagefright/rtsp                  \
     $(TOP)/frameworks/av/media/libstagefright/wifi-display          \
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 67d83b1..f00b82a 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -193,6 +193,7 @@
         t->mainBuffer = NULL;
         t->auxBuffer = NULL;
         t->downmixerBufferProvider = NULL;
+        t->mSinkFormat = AUDIO_FORMAT_PCM_16_BIT;
 
         status_t status = initTrackDownmix(&mState.tracks[n], n, channelMask);
         if (status == OK) {
@@ -440,6 +441,13 @@
         //         for a specific track? or per mixer?
         /* case DOWNMIX_TYPE:
             break          */
+        case SINK_FORMAT: {
+            audio_format_t format = static_cast<audio_format_t>(valueInt);
+            if (track.mSinkFormat != format) {
+                track.mSinkFormat = format;
+                ALOGV("setParameter(TRACK, SINK_FORMAT, %#x)", format);
+            }
+            } break;
         default:
             LOG_FATAL("bad param");
         }
@@ -1043,7 +1051,7 @@
 void AudioMixer::process__nop(state_t* state, int64_t pts)
 {
     uint32_t e0 = state->enabledTracks;
-    size_t bufSize = state->frameCount * sizeof(int16_t) * MAX_NUM_CHANNELS;
+    size_t sampleCount = state->frameCount * MAX_NUM_CHANNELS;
     while (e0) {
         // process by group of tracks with same output buffer to
         // avoid multiple memset() on same buffer
@@ -1062,7 +1070,8 @@
             }
             e0 &= ~(e1);
 
-            memset(t1.mainBuffer, 0, bufSize);
+            memset(t1.mainBuffer, 0, sampleCount
+                    * audio_bytes_per_sample(t1.mSinkFormat));
         }
 
         while (e1) {
@@ -1170,8 +1179,18 @@
                     }
                 }
             }
-            ditherAndClamp(out, outTemp, BLOCKSIZE);
-            out += BLOCKSIZE;
+            switch (t1.mSinkFormat) {
+            case AUDIO_FORMAT_PCM_FLOAT:
+                memcpy_to_float_from_q19_12(reinterpret_cast<float *>(out), outTemp, BLOCKSIZE * 2);
+                out += BLOCKSIZE * 2; // output is 2 floats/frame.
+                break;
+            case AUDIO_FORMAT_PCM_16_BIT:
+                ditherAndClamp(out, outTemp, BLOCKSIZE);
+                out += BLOCKSIZE; // output is 1 int32_t (2 int16_t samples)/frame
+                break;
+            default:
+                LOG_ALWAYS_FATAL("bad sink format: %d", t1.mSinkFormat);
+            }
             numFrames += BLOCKSIZE;
         } while (numFrames < state->frameCount);
     }
@@ -1253,7 +1272,16 @@
                 }
             }
         }
-        ditherAndClamp(out, outTemp, numFrames);
+        switch (t1.mSinkFormat) {
+        case AUDIO_FORMAT_PCM_FLOAT:
+            memcpy_to_float_from_q19_12(reinterpret_cast<float*>(out), outTemp, numFrames*2);
+            break;
+        case AUDIO_FORMAT_PCM_16_BIT:
+            ditherAndClamp(out, outTemp, numFrames);
+            break;
+        default:
+            LOG_ALWAYS_FATAL("bad sink format: %d", t1.mSinkFormat);
+        }
     }
 }
 
@@ -1294,27 +1322,45 @@
         }
         size_t outFrames = b.frameCount;
 
-        if (CC_UNLIKELY(uint32_t(vl) > UNITY_GAIN || uint32_t(vr) > UNITY_GAIN)) {
-            // volume is boosted, so we might need to clamp even though
-            // we process only one track.
+        switch (t.mSinkFormat) {
+        case AUDIO_FORMAT_PCM_FLOAT: {
+            float *fout = reinterpret_cast<float*>(out);
+            static float scale = 1. / (32768. * 4096.); // 2^-27, exactly representable (inverse is a power of two)
             do {
                 uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
                 in += 2;
-                int32_t l = mulRL(1, rl, vrl) >> 12;
-                int32_t r = mulRL(0, rl, vrl) >> 12;
-                // clamping...
-                l = clamp16(l);
-                r = clamp16(r);
-                *out++ = (r<<16) | (l & 0xFFFF);
+                int32_t l = mulRL(1, rl, vrl);
+                int32_t r = mulRL(0, rl, vrl);
+                *fout++ = static_cast<float>(l) * scale;
+                *fout++ = static_cast<float>(r) * scale;
             } while (--outFrames);
-        } else {
-            do {
-                uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
-                in += 2;
-                int32_t l = mulRL(1, rl, vrl) >> 12;
-                int32_t r = mulRL(0, rl, vrl) >> 12;
-                *out++ = (r<<16) | (l & 0xFFFF);
-            } while (--outFrames);
+            } break;
+        case AUDIO_FORMAT_PCM_16_BIT:
+            if (CC_UNLIKELY(uint32_t(vl) > UNITY_GAIN || uint32_t(vr) > UNITY_GAIN)) {
+                // volume is boosted, so we might need to clamp even though
+                // we process only one track.
+                do {
+                    uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+                    in += 2;
+                    int32_t l = mulRL(1, rl, vrl) >> 12;
+                    int32_t r = mulRL(0, rl, vrl) >> 12;
+                    // clamping...
+                    l = clamp16(l);
+                    r = clamp16(r);
+                    *out++ = (r<<16) | (l & 0xFFFF);
+                } while (--outFrames);
+            } else {
+                do {
+                    uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+                    in += 2;
+                    int32_t l = mulRL(1, rl, vrl) >> 12;
+                    int32_t r = mulRL(0, rl, vrl) >> 12;
+                    *out++ = (r<<16) | (l & 0xFFFF);
+                } while (--outFrames);
+            }
+            break;
+        default:
+            LOG_ALWAYS_FATAL("bad sink format: %d", t.mSinkFormat);
         }
         numFrames -= b.frameCount;
         t.bufferProvider->releaseBuffer(&b);
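
Annotation (not part of the patch): in the float sink paths above, the mixed samples are Q19.12 fixed point — a 16-bit PCM sample scaled by a U4.12 volume — so nominal full scale is 32768 * 4096 = 2^27. A hedged per-frame sketch of the scaling that memcpy_to_float_from_q19_12() presumably applies across the whole buffer; the function name below is hypothetical:

    // Convert one Q19.12 stereo frame (as produced by mulRL() with U4.12 volume)
    // to interleaved float, nominal full scale 1.0.
    static inline void q19_12FrameToFloat(const int32_t* in, float* out) {
        const float scale = 1.0f / (32768.0f * 4096.0f);   // 2^-27
        out[0] = in[0] * scale;                            // left
        out[1] = in[1] * scale;                            // right
    }
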
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index d286986..3355db4 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -77,6 +77,7 @@
         MAIN_BUFFER     = 0x4002,
         AUX_BUFFER      = 0x4003,
         DOWNMIX_TYPE    = 0X4004,
+        SINK_FORMAT     = 0x4005, // AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
         // for target RESAMPLE
         SAMPLE_RATE     = 0x4100, // Configure sample rate conversion on this track name;
                                   // parameter 'value' is the new sample rate in Hz.
@@ -193,7 +194,9 @@
 
         int32_t     sessionId;
 
-        int32_t     padding[2];
+        audio_format_t mSinkFormat; // at this time: AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
+
+        int32_t     padding[1];
 
         // 16-byte boundary
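
Annotation (not part of the patch): SINK_FORMAT is a per-track parameter under the TRACK target, so a mixer client selects the output sample format per track name. A hedged sketch of a call site (the real ones live in AudioFlinger's mixer threads; the uintptr_t cast mirrors how other int-valued parameters are passed):

    // 'name' is a mixer track name obtained from AudioMixer::getTrackName().
    mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::SINK_FORMAT,
            (void *)(uintptr_t) AUDIO_FORMAT_PCM_FLOAT);
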