Merge "Fix calculations for an obscure combo of MPEG audio options." into jb-mr1-dev
diff --git a/include/media/IRemoteDisplay.h b/include/media/IRemoteDisplay.h
index f39286e..a61704e 100644
--- a/include/media/IRemoteDisplay.h
+++ b/include/media/IRemoteDisplay.h
@@ -39,10 +39,8 @@
 public:
     DECLARE_META_INTERFACE(RemoteDisplay);
 
-    // Disconnects the remote display.
-    // The remote display should respond back to the IRemoteDisplayClient with an
-    // onDisplayDisconnected() event when the disconnection is complete.
-    virtual status_t disconnect() = 0;
+    // Disconnects the remote display and stops listening for new connections.
+    virtual status_t dispose() = 0;
 };
 
 
diff --git a/include/media/IRemoteDisplayClient.h b/include/media/IRemoteDisplayClient.h
index 38a0c9a..553ad36 100644
--- a/include/media/IRemoteDisplayClient.h
+++ b/include/media/IRemoteDisplayClient.h
@@ -40,9 +40,9 @@
 
     enum {
         // Error: An unknown / generic error occurred.
-        kErrorUnknown = 0,
+        kDisplayErrorUnknown = 1,
         // Error: The connection was dropped unexpectedly.
-        kErrorConnectionDropped = 1,
+        kDisplayErrorConnectionDropped = 2,
     };
 
     // Indicates that the remote display has been connected successfully.
@@ -52,7 +52,8 @@
             uint32_t width, uint32_t height, uint32_t flags) = 0; // one-way
 
     // Indicates that the remote display has been disconnected normally.
-    // This method should only be called once the client has called 'disconnect()'.
+    // This method should only be called once the client has called 'dispose()'
+    // on the IRemoteDisplay.
     // It is currently an error for the display to disconnect for any other reason.
     virtual void onDisplayDisconnected() = 0; // one-way
 
diff --git a/include/media/stagefright/Utils.h b/include/media/stagefright/Utils.h
index d87902e..8213af9 100644
--- a/include/media/stagefright/Utils.h
+++ b/include/media/stagefright/Utils.h
@@ -42,6 +42,8 @@
 struct AMessage;
 status_t convertMetaDataToMessage(
         const sp<MetaData> &meta, sp<AMessage> *format);
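+// Inverse of convertMetaDataToMessage(): rebuilds a MetaData from a
+// message-based format.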
+void convertMessageToMetaData(
+        const sp<AMessage> &format, sp<MetaData> &meta);
 
 }  // namespace android
 
diff --git a/media/libmedia/IRemoteDisplay.cpp b/media/libmedia/IRemoteDisplay.cpp
index 5d6ab34..da25a15 100644
--- a/media/libmedia/IRemoteDisplay.cpp
+++ b/media/libmedia/IRemoteDisplay.cpp
@@ -22,7 +22,7 @@
 namespace android {
 
 enum {
-    DISCONNECT = IBinder::FIRST_CALL_TRANSACTION,
+    DISPOSE = IBinder::FIRST_CALL_TRANSACTION,
 };
 
 class BpRemoteDisplay: public BpInterface<IRemoteDisplay>
@@ -33,11 +33,11 @@
     {
     }
 
-    status_t disconnect()
+    status_t dispose()
     {
         Parcel data, reply;
         data.writeInterfaceToken(IRemoteDisplay::getInterfaceDescriptor());
-        remote()->transact(DISCONNECT, data, &reply);
+        remote()->transact(DISPOSE, data, &reply);
         return reply.readInt32();
     }
 };
@@ -50,9 +50,9 @@
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
     switch (code) {
-        case DISCONNECT: {
+        case DISPOSE: {
             CHECK_INTERFACE(IRemoteDisplay, data, reply);
-            reply->writeInt32(disconnect());
+            reply->writeInt32(dispose());
             return NO_ERROR;
         }
         default:
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.cpp b/media/libmediaplayerservice/MediaPlayerFactory.cpp
index f821cc3..3f69c11 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.cpp
+++ b/media/libmediaplayerservice/MediaPlayerFactory.cpp
@@ -246,6 +246,7 @@
                                                  ".midi",
                                                  ".smf",
                                                  ".xmf",
+                                                 ".mxmf",
                                                  ".imy",
                                                  ".rtttl",
                                                  ".rtx",
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 9005500..423d6ce 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -283,7 +283,7 @@
 
 sp<IRemoteDisplay> MediaPlayerService::listenForRemoteDisplay(
         const sp<IRemoteDisplayClient>& client, const String8& iface) {
-    return new RemoteDisplay(client, iface.string());;
+    return new RemoteDisplay(client, iface.string());
 }
 
 status_t MediaPlayerService::enableRemoteDisplay(const char *iface) {
@@ -299,7 +299,7 @@
     }
 
     if (mRemoteDisplay != NULL) {
-        mRemoteDisplay->disconnect();
+        mRemoteDisplay->dispose();
         mRemoteDisplay.clear();
     }
 
diff --git a/media/libmediaplayerservice/RemoteDisplay.cpp b/media/libmediaplayerservice/RemoteDisplay.cpp
index 1cc605e..5542bb5 100644
--- a/media/libmediaplayerservice/RemoteDisplay.cpp
+++ b/media/libmediaplayerservice/RemoteDisplay.cpp
@@ -39,7 +39,7 @@
 RemoteDisplay::~RemoteDisplay() {
 }
 
-status_t RemoteDisplay::disconnect() {
+status_t RemoteDisplay::dispose() {
     mSource->stop();
 
     mLooper->stop();
diff --git a/media/libmediaplayerservice/RemoteDisplay.h b/media/libmediaplayerservice/RemoteDisplay.h
index 63c5286..0d87250 100644
--- a/media/libmediaplayerservice/RemoteDisplay.h
+++ b/media/libmediaplayerservice/RemoteDisplay.h
@@ -33,7 +33,7 @@
 struct RemoteDisplay : public BnRemoteDisplay {
     RemoteDisplay(const sp<IRemoteDisplayClient> &client, const char *iface);
 
-    virtual status_t disconnect();
+    virtual status_t dispose();
 
 protected:
     virtual ~RemoteDisplay();
diff --git a/media/libmediaplayerservice/nuplayer/mp4/MP4Source.cpp b/media/libmediaplayerservice/nuplayer/mp4/MP4Source.cpp
index c80d13f..ffb3a65 100644
--- a/media/libmediaplayerservice/nuplayer/mp4/MP4Source.cpp
+++ b/media/libmediaplayerservice/nuplayer/mp4/MP4Source.cpp
@@ -93,6 +93,10 @@
         return total;
     }
 
+    bool isSeekable() {
+        return false;
+    }
+
 private:
     sp<NuPlayer::NuPlayerStreamListener> mListener;
     off64_t mPosition;
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 1522e75..f40982e 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -19,6 +19,7 @@
         ESDS.cpp                          \
         FileSource.cpp                    \
         FLACExtractor.cpp                 \
+        FragmentedMP4Extractor.cpp        \
         HTTPBase.cpp                      \
         JPEGSource.cpp                    \
         MP3Extractor.cpp                  \
diff --git a/media/libstagefright/DRMExtractor.cpp b/media/libstagefright/DRMExtractor.cpp
index 524c3aa..63cb430 100644
--- a/media/libstagefright/DRMExtractor.cpp
+++ b/media/libstagefright/DRMExtractor.cpp
@@ -15,11 +15,6 @@
  */
 
 #include "include/DRMExtractor.h"
-#include "include/AMRExtractor.h"
-#include "include/MP3Extractor.h"
-#include "include/MPEG4Extractor.h"
-#include "include/WAVExtractor.h"
-#include "include/OggExtractor.h"
 
 #include <arpa/inet.h>
 #include <utils/String8.h>
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index 1de808e..9d0eea2 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -20,17 +20,18 @@
 #include "include/chromium_http_stub.h"
 #endif
 
-#include "include/MP3Extractor.h"
-#include "include/MPEG4Extractor.h"
-#include "include/WAVExtractor.h"
-#include "include/OggExtractor.h"
-#include "include/MPEG2PSExtractor.h"
-#include "include/MPEG2TSExtractor.h"
-#include "include/NuCachedSource2.h"
-#include "include/HTTPBase.h"
+#include "include/AACExtractor.h"
 #include "include/DRMExtractor.h"
 #include "include/FLACExtractor.h"
-#include "include/AACExtractor.h"
+#include "include/FragmentedMP4Extractor.h"
+#include "include/HTTPBase.h"
+#include "include/MP3Extractor.h"
+#include "include/MPEG2PSExtractor.h"
+#include "include/MPEG2TSExtractor.h"
+#include "include/MPEG4Extractor.h"
+#include "include/NuCachedSource2.h"
+#include "include/OggExtractor.h"
+#include "include/WAVExtractor.h"
 #include "include/WVMExtractor.h"
 
 #include "matroska/MatroskaExtractor.h"
@@ -110,6 +111,7 @@
 // static
 void DataSource::RegisterDefaultSniffers() {
     RegisterSniffer(SniffMPEG4);
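+    // SniffFragmentedMP4 reports confidence 0.5, slightly above SniffMPEG4,
+    // so for fragmented files its meta (including the "fragmented" flag
+    // consumed by MediaExtractor::Create) takes precedence.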
+    RegisterSniffer(SniffFragmentedMP4);
     RegisterSniffer(SniffMatroska);
     RegisterSniffer(SniffOgg);
     RegisterSniffer(SniffWAV);
diff --git a/media/libstagefright/FragmentedMP4Extractor.cpp b/media/libstagefright/FragmentedMP4Extractor.cpp
new file mode 100644
index 0000000..82712ef
--- /dev/null
+++ b/media/libstagefright/FragmentedMP4Extractor.cpp
@@ -0,0 +1,460 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "FragmentedMP4Extractor"
+#include <utils/Log.h>
+
+#include "include/FragmentedMP4Extractor.h"
+#include "include/SampleTable.h"
+#include "include/ESDS.h"
+
+#include <arpa/inet.h>
+
+#include <ctype.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <cutils/properties.h> // for property_get
+
+#include <media/stagefright/foundation/ABitReader.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
+#include <utils/String8.h>
+
+namespace android {
+
+class FragmentedMPEG4Source : public MediaSource {
+public:
+    // Caller retains ownership of the Parser
+    FragmentedMPEG4Source(bool audio,
+                const sp<MetaData> &format,
+                const sp<FragmentedMP4Parser> &parser,
+                const sp<FragmentedMP4Extractor> &extractor);
+
+    virtual status_t start(MetaData *params = NULL);
+    virtual status_t stop();
+
+    virtual sp<MetaData> getFormat();
+
+    virtual status_t read(
+            MediaBuffer **buffer, const ReadOptions *options = NULL);
+
+protected:
+    virtual ~FragmentedMPEG4Source();
+
+private:
+    Mutex mLock;
+
+    sp<MetaData> mFormat;
+    sp<FragmentedMP4Parser> mParser;
+    sp<FragmentedMP4Extractor> mExtractor;
+    bool mIsAudioTrack;
+    uint32_t mCurrentSampleIndex;
+
+    bool mIsAVC;
+    size_t mNALLengthSize;
+
+    bool mStarted;
+
+    MediaBufferGroup *mGroup;
+
+    bool mWantsNALFragments;
+
+    uint8_t *mSrcBuffer;
+
+    FragmentedMPEG4Source(const FragmentedMPEG4Source &);
+    FragmentedMPEG4Source &operator=(const FragmentedMPEG4Source &);
+};
+
+
+FragmentedMP4Extractor::FragmentedMP4Extractor(const sp<DataSource> &source)
+    : mLooper(new ALooper),
+      mParser(new FragmentedMP4Parser()),
+      mDataSource(source),
+      mInitCheck(NO_INIT),
+      mFileMetaData(new MetaData) {
+    ALOGV("FragmentedMP4Extractor");
+    mLooper->registerHandler(mParser);
+    mLooper->start(false /* runOnCallingThread */);
+    mParser->start(mDataSource);
+
+    bool hasVideo = mParser->getFormat(false /* audio */, true /* synchronous */) != NULL;
+    bool hasAudio = mParser->getFormat(true /* audio */, true /* synchronous */) != NULL;
+
+    if (hasVideo) {
+        mFileMetaData->setCString(
+                kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_MPEG4);
+    } else if (hasAudio) {
+        mFileMetaData->setCString(kKeyMIMEType, "audio/mp4");
+    } else {
+        ALOGE("no audio and no video, no idea what file type this is");
+    }
+    // tracks are numbered such that video track is first, audio track is second
+    if (hasAudio && hasVideo) {
+        mTrackCount = 2;
+        mAudioTrackIndex = 1;
+    } else if (hasAudio) {
+        mTrackCount = 1;
+        mAudioTrackIndex = 0;
+    } else if (hasVideo) {
+        mTrackCount = 1;
+        mAudioTrackIndex = -1;
+    } else {
+        mTrackCount = 0;
+        mAudioTrackIndex = -1;
+    }
+    ALOGV("number of tracks: %d", countTracks());
+}
+
+FragmentedMP4Extractor::~FragmentedMP4Extractor() {
+    ALOGV("~FragmentedMP4Extractor");
+    mLooper->stop();
+}
+
+uint32_t FragmentedMP4Extractor::flags() const {
+    return CAN_PAUSE |
+            (mParser->isSeekable() ? (CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK) : 0);
+}
+
+sp<MetaData> FragmentedMP4Extractor::getMetaData() {
+    return mFileMetaData;
+}
+
+size_t FragmentedMP4Extractor::countTracks() {
+    return mTrackCount;
+}
+
+
+sp<MetaData> FragmentedMP4Extractor::getTrackMetaData(
+        size_t index, uint32_t flags) {
+    if (index >= countTracks()) {
+        return NULL;
+    }
+
+    sp<AMessage> msg = mParser->getFormat(index == mAudioTrackIndex, true /* synchronous */);
+
+    if (msg == NULL) {
+        ALOGV("got null format for track %d", index);
+        return NULL;
+    }
+
+    sp<MetaData> meta = new MetaData();
+    convertMessageToMetaData(msg, meta);
+    return meta;
+}
+
+static void MakeFourCCString(uint32_t x, char *s) {
+    s[0] = x >> 24;
+    s[1] = (x >> 16) & 0xff;
+    s[2] = (x >> 8) & 0xff;
+    s[3] = x & 0xff;
+    s[4] = '\0';
+}
+
+sp<MediaSource> FragmentedMP4Extractor::getTrack(size_t index) {
+    if (index >= countTracks()) {
+        return NULL;
+    }
+    return new FragmentedMPEG4Source(index == mAudioTrackIndex, getTrackMetaData(index, 0), mParser, this);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+
+FragmentedMPEG4Source::FragmentedMPEG4Source(
+        bool audio,
+        const sp<MetaData> &format,
+        const sp<FragmentedMP4Parser> &parser,
+        const sp<FragmentedMP4Extractor> &extractor)
+    : mFormat(format),
+      mParser(parser),
+      mExtractor(extractor),
+      mIsAudioTrack(audio),
+      mStarted(false),
+      mGroup(NULL),
+      mWantsNALFragments(false),
+      mSrcBuffer(NULL) {
+}
+
+FragmentedMPEG4Source::~FragmentedMPEG4Source() {
+    if (mStarted) {
+        stop();
+    }
+}
+
+status_t FragmentedMPEG4Source::start(MetaData *params) {
+    Mutex::Autolock autoLock(mLock);
+
+    CHECK(!mStarted);
+
+    int32_t val;
+    if (params && params->findInt32(kKeyWantsNALFragments, &val)
+        && val != 0) {
+        mWantsNALFragments = true;
+    } else {
+        mWantsNALFragments = false;
+    }
+    ALOGV("caller wants NAL fragments: %s", mWantsNALFragments ? "yes" : "no");
+
+    mGroup = new MediaBufferGroup;
+
+    int32_t max_size = 65536;
+    // XXX CHECK(mFormat->findInt32(kKeyMaxInputSize, &max_size));
+
+    mGroup->add_buffer(new MediaBuffer(max_size));
+
+    mSrcBuffer = new uint8_t[max_size];
+
+    mStarted = true;
+
+    return OK;
+}
+
+status_t FragmentedMPEG4Source::stop() {
+    Mutex::Autolock autoLock(mLock);
+
+    CHECK(mStarted);
+
+    delete[] mSrcBuffer;
+    mSrcBuffer = NULL;
+
+    delete mGroup;
+    mGroup = NULL;
+
+    mStarted = false;
+    mCurrentSampleIndex = 0;
+
+    return OK;
+}
+
+sp<MetaData> FragmentedMPEG4Source::getFormat() {
+    Mutex::Autolock autoLock(mLock);
+
+    return mFormat;
+}
+
+
+status_t FragmentedMPEG4Source::read(
+        MediaBuffer **out, const ReadOptions *options) {
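+    // Synchronous read: forward any seek to the parser, then block until it
+    // delivers the next access unit for this track. Assumes access units fit
+    // in the 64 KiB buffers allocated in start().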
+    int64_t seekTimeUs;
+    ReadOptions::SeekMode mode;
+    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
+        mParser->seekTo(mIsAudioTrack, seekTimeUs);
+    }
+    MediaBuffer *buffer = NULL;
+    mGroup->acquire_buffer(&buffer);
+    sp<ABuffer> parseBuffer;
+
+    status_t ret = mParser->dequeueAccessUnit(mIsAudioTrack, &parseBuffer, true /* synchronous */);
+    if (ret != OK) {
+        buffer->release();
+        ALOGV("returning %d", ret);
+        return ret;
+    }
+    sp<AMessage> meta = parseBuffer->meta();
+    int64_t timeUs;
+    CHECK(meta->findInt64("timeUs", &timeUs));
+    buffer->meta_data()->setInt64(kKeyTime, timeUs);
+    buffer->set_range(0, parseBuffer->size());
+    memcpy(buffer->data(), parseBuffer->data(), parseBuffer->size());
+    *out = buffer;
+    return OK;
+}
+
+
+static bool isCompatibleBrand(uint32_t fourcc) {
+    static const uint32_t kCompatibleBrands[] = {
+        FOURCC('i', 's', 'o', 'm'),
+        FOURCC('i', 's', 'o', '2'),
+        FOURCC('a', 'v', 'c', '1'),
+        FOURCC('3', 'g', 'p', '4'),
+        FOURCC('m', 'p', '4', '1'),
+        FOURCC('m', 'p', '4', '2'),
+
+        // Won't promise that the following file types can be played.
+        // Just give these file types a chance.
+        FOURCC('q', 't', ' ', ' '),  // Apple's QuickTime
+        FOURCC('M', 'S', 'N', 'V'),  // Sony's PSP
+
+        FOURCC('3', 'g', '2', 'a'),  // 3GPP2
+        FOURCC('3', 'g', '2', 'b'),
+    };
+
+    for (size_t i = 0;
+         i < sizeof(kCompatibleBrands) / sizeof(kCompatibleBrands[0]);
+         ++i) {
+        if (kCompatibleBrands[i] == fourcc) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+// Attempt to actually parse the 'ftyp' atom and determine if a suitable
+// compatible brand is present.
+// Also try to identify where this file's metadata ends
+// (end of the 'moov' atom) and report it to the caller as part of
+// the metadata.
+static bool Sniff(
+        const sp<DataSource> &source, String8 *mimeType, float *confidence,
+        sp<AMessage> *meta) {
+    // We scan up to 128k bytes to identify this file as an MP4.
+    static const off64_t kMaxScanOffset = 128ll * 1024ll;
+
+    off64_t offset = 0ll;
+    bool foundGoodFileType = false;
+    bool isFragmented = false;
+    off64_t moovAtomEndOffset = -1ll;
+    bool done = false;
+
+    while (!done && offset < kMaxScanOffset) {
+        uint32_t hdr[2];
+        if (source->readAt(offset, hdr, 8) < 8) {
+            return false;
+        }
+
+        uint64_t chunkSize = ntohl(hdr[0]);
+        uint32_t chunkType = ntohl(hdr[1]);
+        off64_t chunkDataOffset = offset + 8;
+
+        if (chunkSize == 1) {
+            if (source->readAt(offset + 8, &chunkSize, 8) < 8) {
+                return false;
+            }
+
+            chunkSize = ntoh64(chunkSize);
+            chunkDataOffset += 8;
+
+            if (chunkSize < 16) {
+                // The smallest valid chunk is 16 bytes long in this case.
+                return false;
+            }
+        } else if (chunkSize < 8) {
+            // The smallest valid chunk is 8 bytes long.
+            return false;
+        }
+
+        off64_t chunkDataSize = offset + chunkSize - chunkDataOffset;
+
+        char chunkstring[5];
+        MakeFourCCString(chunkType, chunkstring);
+        ALOGV("saw chunk type %s, size %lld @ %lld", chunkstring, chunkSize, offset);
+        switch (chunkType) {
+            case FOURCC('f', 't', 'y', 'p'):
+            {
+                if (chunkDataSize < 8) {
+                    return false;
+                }
+
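+                // The ftyp payload is major_brand, minor_version, then
+                // (chunkDataSize - 8) / 4 compatible brands; scan all of
+                // them, treating the major brand (i == 0) like any other.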
+                uint32_t numCompatibleBrands = (chunkDataSize - 8) / 4;
+                for (size_t i = 0; i < numCompatibleBrands + 2; ++i) {
+                    if (i == 1) {
+                        // Skip this index, it refers to the minorVersion,
+                        // not a brand.
+                        continue;
+                    }
+
+                    uint32_t brand;
+                    if (source->readAt(
+                                chunkDataOffset + 4 * i, &brand, 4) < 4) {
+                        return false;
+                    }
+
+                    brand = ntohl(brand);
+                    char brandstring[5];
+                    MakeFourCCString(brand, brandstring);
+                    ALOGV("Brand: %s", brandstring);
+
+                    if (isCompatibleBrand(brand)) {
+                        foundGoodFileType = true;
+                        break;
+                    }
+                }
+
+                if (!foundGoodFileType) {
+                    return false;
+                }
+
+                break;
+            }
+
+            case FOURCC('m', 'o', 'o', 'v'):
+            {
+                moovAtomEndOffset = offset + chunkSize;
+                break;
+            }
+
+            case FOURCC('m', 'o', 'o', 'f'):
+            {
+                // this is kind of broken, since we might not actually find a
+                // moof box in the first 128k.
+                isFragmented = true;
+                done = true;
+                break;
+            }
+
+            default:
+                break;
+        }
+
+        offset += chunkSize;
+    }
+
+    if (!foundGoodFileType || !isFragmented) {
+        return false;
+    }
+
+    *mimeType = MEDIA_MIMETYPE_CONTAINER_MPEG4;
+    *confidence = 0.5f; // slightly more than MPEG4Extractor
+
+    if (moovAtomEndOffset >= 0) {
+        *meta = new AMessage;
+        (*meta)->setInt64("meta-data-size", moovAtomEndOffset);
+        (*meta)->setInt32("fragmented", 1); // tell MediaExtractor what to instantiate
+
+        ALOGV("found metadata size: %lld", moovAtomEndOffset);
+    }
+
+    return true;
+}
+
+// used by DataSource::RegisterDefaultSniffers
+bool SniffFragmentedMP4(
+        const sp<DataSource> &source, String8 *mimeType, float *confidence,
+        sp<AMessage> *meta) {
+    ALOGV("SniffFragmentedMP4");
+    char prop[PROPERTY_VALUE_MAX];
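+    // Off by default; enable with
+    //   adb shell setprop media.stagefright.use-fragmp4 true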
+    if (property_get("media.stagefright.use-fragmp4", prop, NULL)
+            && (!strcmp(prop, "1") || !strcasecmp(prop, "true"))) {
+        return Sniff(source, mimeType, confidence, meta);
+    }
+
+    return false;
+}
+
+}  // namespace android
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index a572541..7d49ef0 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+//#define LOG_NDEBUG 0
 #define LOG_TAG "MPEG4Extractor"
 #include <utils/Log.h>
 
@@ -408,7 +409,7 @@
 }
 
 // Reads an encoded integer 7 bits at a time until it encounters the high bit clear.
-int32_t readSize(off64_t offset,
+static int32_t readSize(off64_t offset,
         const sp<DataSource> DataSource, uint8_t *numOfBytes) {
     uint32_t size = 0;
     uint8_t data;
diff --git a/media/libstagefright/MediaDefs.cpp b/media/libstagefright/MediaDefs.cpp
index 2740d6b..e7b5903 100644
--- a/media/libstagefright/MediaDefs.cpp
+++ b/media/libstagefright/MediaDefs.cpp
@@ -42,7 +42,7 @@
 const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS = "audio/aac-adts";
 
 const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4";
-const char *MEDIA_MIMETYPE_CONTAINER_WAV = "audio/wav";
+const char *MEDIA_MIMETYPE_CONTAINER_WAV = "audio/x-wav";
 const char *MEDIA_MIMETYPE_CONTAINER_OGG = "application/ogg";
 const char *MEDIA_MIMETYPE_CONTAINER_MATROSKA = "video/x-matroska";
 const char *MEDIA_MIMETYPE_CONTAINER_MPEG2TS = "video/mp2ts";
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index 9ab6611..b18c916 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -21,6 +21,7 @@
 #include "include/AMRExtractor.h"
 #include "include/MP3Extractor.h"
 #include "include/MPEG4Extractor.h"
+#include "include/FragmentedMP4Extractor.h"
 #include "include/WAVExtractor.h"
 #include "include/OggExtractor.h"
 #include "include/MPEG2PSExtractor.h"
@@ -93,7 +94,12 @@
     MediaExtractor *ret = NULL;
     if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG4)
             || !strcasecmp(mime, "audio/mp4")) {
-        ret = new MPEG4Extractor(source);
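+        // "fragmented" is set from the sniffer's meta; see Sniff() in
+        // FragmentedMP4Extractor.cpp.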
+        int fragmented = 0;
+        if (meta != NULL && meta->findInt32("fragmented", &fragmented) && fragmented) {
+            ret = new FragmentedMP4Extractor(source);
+        } else {
+            ret = new MPEG4Extractor(source);
+        }
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG)) {
         ret = new MP3Extractor(source, meta);
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)
diff --git a/media/libstagefright/MetaData.cpp b/media/libstagefright/MetaData.cpp
index 755594a..a01ec97 100644
--- a/media/libstagefright/MetaData.cpp
+++ b/media/libstagefright/MetaData.cpp
@@ -22,6 +22,8 @@
 #include <string.h>
 
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/MetaData.h>
 
 namespace android {
@@ -318,6 +320,12 @@
 
         default:
             out = String8::format("(unknown type %d, size %d)", mType, mSize);
+            if (mSize <= 48) { // dump it if it fits in three lines of hex data
+                AString foo;
+                hexdump(data, mSize, 0, &foo);
+                out.append("\n");
+                out.append(foo.c_str());
+            }
             break;
     }
     return out;
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 2a16f66..74e9222 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -241,5 +241,196 @@
     return OK;
 }
 
+static size_t reassembleAVCC(const sp<ABuffer> &csd0, const sp<ABuffer> &csd1, char *avcc) {
+
+    avcc[0] = 1;        // configurationVersion
+    avcc[1] = 0x64;     // AVCProfileIndication (hardcoded: High profile)
+    avcc[2] = 0;        // profile_compatibility (constraint flags)
+    avcc[3] = 0xd;      // AVCLevelIndication (hardcoded: level 1.3)
+    avcc[4] = 0xff;     // 6 reserved bits + 2-bit lengthSizeMinusOne (4-byte NAL lengths)
+
+    size_t i = 0;
+    int numparams = 0;
+    int lastparamoffset = 0;
+    int avccidx = 6;
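+    // csd-0 arrives as parameter sets separated by four-byte 0x00000001
+    // start codes; the loop below strips the start codes and re-emits each
+    // set as a 16-bit length followed by its payload, as avcC requires.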
+    do {
+        if (i >= csd0->size() - 4 ||
+                memcmp(csd0->data() + i, "\x00\x00\x00\x01", 4) == 0) {
+            if (i >= csd0->size() - 4) {
+                // there can't be another param here, so use all the rest
+                i = csd0->size();
+            }
+            ALOGV("block at %d, last was %d", i, lastparamoffset);
+            if (lastparamoffset > 0) {
+                int size = i - lastparamoffset;
+                avcc[avccidx++] = size >> 8;
+                avcc[avccidx++] = size & 0xff;
+                memcpy(avcc+avccidx, csd0->data() + lastparamoffset, size);
+                avccidx += size;
+                numparams++;
+            }
+            i += 4;
+            lastparamoffset = i;
+        } else {
+            i++;
+        }
+    } while(i < csd0->size());
+    ALOGV("csd0 contains %d params", numparams);
+
+    avcc[5] = 0xe0 | numparams;
+    //and now csd-1
+    i = 0;
+    numparams = 0;
+    lastparamoffset = 0;
+    int numpicparamsoffset = avccidx;
+    avccidx++;
+    do {
+        if (i >= csd1->size() - 4 ||
+                memcmp(csd1->data() + i, "\x00\x00\x00\x01", 4) == 0) {
+            if (i >= csd1->size() - 4) {
+                // there can't be another param here, so use all the rest
+                i = csd1->size();
+            }
+            ALOGV("block at %d, last was %d", i, lastparamoffset);
+            if (lastparamoffset > 0) {
+                int size = i - lastparamoffset;
+                avcc[avccidx++] = size >> 8;
+                avcc[avccidx++] = size & 0xff;
+                memcpy(avcc+avccidx, csd1->data() + lastparamoffset, size);
+                avccidx += size;
+                numparams++;
+            }
+            i += 4;
+            lastparamoffset = i;
+        } else {
+            i++;
+        }
+    } while(i < csd1->size());
+    avcc[numpicparamsoffset] = numparams;
+    return avccidx;
+}
+
+static void reassembleESDS(const sp<ABuffer> &csd0, char *esds) {
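+    // An esds blob is a nest of tagged descriptors (ESDescriptor,
+    // DecoderConfigDescriptor, DecoderSpecificInfo); each size field below is
+    // encoded as four bytes of 7 bits, high bit set on all but the last byte.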
+    int csd0size = csd0->size();
+    esds[0] = 3; // kTag_ESDescriptor;
+    int esdescriptorsize = 26 + csd0size;
+    CHECK(esdescriptorsize < 268435456); // 7 bits per byte, so max is 2^28-1
+    esds[1] = 0x80 | (esdescriptorsize >> 21);
+    esds[2] = 0x80 | ((esdescriptorsize >> 14) & 0x7f);
+    esds[3] = 0x80 | ((esdescriptorsize >> 7) & 0x7f);
+    esds[4] = (esdescriptorsize & 0x7f);
+    esds[5] = esds[6] = 0; // es id
+    esds[7] = 0; // flags
+    esds[8] = 4; // kTag_DecoderConfigDescriptor
+    int configdescriptorsize = 18 + csd0size;
+    esds[9] = 0x80 | (configdescriptorsize >> 21);
+    esds[10] = 0x80 | ((configdescriptorsize >> 14) & 0x7f);
+    esds[11] = 0x80 | ((configdescriptorsize >> 7) & 0x7f);
+    esds[12] = (configdescriptorsize & 0x7f);
+    esds[13] = 0x40; // objectTypeIndication (MPEG-4 audio)
+    // Bytes 14-25 hold streamType, bufferSizeDB, maxBitrate and avgBitrate.
+    // ESDS.cpp ignores them; the values below were taken from a real file.
+    esds[14] = 0x15; // streamType = 5 (audio), upStream = 0, reserved = 1
+    esds[15] = 0x00; // bufferSizeDB (3 bytes)
+    esds[16] = 0x18;
+    esds[17] = 0x00;
+    esds[18] = 0x00; // maxBitrate (4 bytes)
+    esds[19] = 0x00;
+    esds[20] = 0xfa;
+    esds[21] = 0x00;
+    esds[22] = 0x00; // avgBitrate (4 bytes)
+    esds[23] = 0x00;
+    esds[24] = 0xfa;
+    esds[25] = 0x00;
+    esds[26] = 5; // kTag_DecoderSpecificInfo;
+    esds[27] = 0x80 | (csd0size >> 21);
+    esds[28] = 0x80 | ((csd0size >> 14) & 0x7f);
+    esds[29] = 0x80 | ((csd0size >> 7) & 0x7f);
+    esds[30] = (csd0size & 0x7f);
+    memcpy((void*)&esds[31], csd0->data(), csd0size);
+    // data following this is ignored, so don't bother appending it
+}
+
+void convertMessageToMetaData(const sp<AMessage> &msg, sp<MetaData> &meta) {
+    AString mime;
+    if (msg->findString("mime", &mime)) {
+        meta->setCString(kKeyMIMEType, mime.c_str());
+    } else {
+        ALOGW("did not find mime type");
+    }
+
+    int64_t durationUs;
+    if (msg->findInt64("durationUs", &durationUs)) {
+        meta->setInt64(kKeyDuration, durationUs);
+    }
+
+    if (mime.startsWith("video/")) {
+        int32_t width;
+        int32_t height;
+        if (msg->findInt32("width", &width) && msg->findInt32("height", &height)) {
+            meta->setInt32(kKeyWidth, width);
+            meta->setInt32(kKeyHeight, height);
+        } else {
+            ALOGW("did not find width and/or height");
+        }
+    } else if (mime.startsWith("audio/")) {
+        int32_t numChannels;
+        if (msg->findInt32("channel-count", &numChannels)) {
+            meta->setInt32(kKeyChannelCount, numChannels);
+        }
+        int32_t sampleRate;
+        if (msg->findInt32("sample-rate", &sampleRate)) {
+            meta->setInt32(kKeySampleRate, sampleRate);
+        }
+        int32_t channelMask;
+        if (msg->findInt32("channel-mask", &channelMask)) {
+            meta->setInt32(kKeyChannelMask, channelMask);
+        }
+        int32_t delay = 0;
+        if (msg->findInt32("encoder-delay", &delay)) {
+            meta->setInt32(kKeyEncoderDelay, delay);
+        }
+        int32_t padding = 0;
+        if (msg->findInt32("encoder-padding", &padding)) {
+            meta->setInt32(kKeyEncoderPadding, padding);
+        }
+
+        int32_t isADTS;
+        if (msg->findInt32("is-adts", &isADTS)) {
+            meta->setInt32(kKeyIsADTS, isADTS);
+        }
+    }
+
+    int32_t maxInputSize;
+    if (msg->findInt32("max-input-size", &maxInputSize)) {
+        meta->setInt32(kKeyMaxInputSize, maxInputSize);
+    }
+
+    // reassemble the csd data into its original form
+    sp<ABuffer> csd0;
+    if (msg->findBuffer("csd-0", &csd0)) {
+        if (mime.startsWith("video/")) { // do we need to be stricter than this?
+            sp<ABuffer> csd1;
+            if (msg->findBuffer("csd-1", &csd1)) {
+                char avcc[1024]; // XXX no bounds check; oversized parameter sets would overflow this
+                size_t outsize = reassembleAVCC(csd0, csd1, avcc);
+                meta->setData(kKeyAVCC, kKeyAVCC, avcc, outsize);
+            }
+        } else if (mime.startsWith("audio/")) {
+            int csd0size = csd0->size();
+            char esds[csd0size + 31];
+            reassembleESDS(csd0, esds);
+            meta->setData(kKeyESDS, kKeyESDS, esds, sizeof(esds));
+        }
+    }
+
+    // XXX TODO add whatever other keys there are
+
+#if 0
+    ALOGI("converted %s to:", msg->debugString(0).c_str());
+    meta->dumpToLog();
+#endif
+}
+
+
 }  // namespace android
 
diff --git a/media/libstagefright/WAVExtractor.cpp b/media/libstagefright/WAVExtractor.cpp
index 851321d..a38400b 100644
--- a/media/libstagefright/WAVExtractor.cpp
+++ b/media/libstagefright/WAVExtractor.cpp
@@ -106,7 +106,7 @@
         return meta;
     }
 
-    meta->setCString(kKeyMIMEType, "audio/x-wav");
+    meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_WAV);
 
     return meta;
 }
@@ -509,4 +509,3 @@
 }
 
 }  // namespace android
-
diff --git a/media/libstagefright/include/FragmentedMP4Extractor.h b/media/libstagefright/include/FragmentedMP4Extractor.h
new file mode 100644
index 0000000..763cd3a
--- /dev/null
+++ b/media/libstagefright/include/FragmentedMP4Extractor.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FRAGMENTED_MP4_EXTRACTOR_H_
+
+#define FRAGMENTED_MP4_EXTRACTOR_H_
+
+#include "include/FragmentedMP4Parser.h"
+
+#include <media/stagefright/MediaExtractor.h>
+#include <utils/Vector.h>
+#include <utils/String8.h>
+
+namespace android {
+
+struct AMessage;
+class DataSource;
+class SampleTable;
+class String8;
+
+class FragmentedMP4Extractor : public MediaExtractor {
+public:
+    // Extractor assumes ownership of "source".
+    FragmentedMP4Extractor(const sp<DataSource> &source);
+
+    virtual size_t countTracks();
+    virtual sp<MediaSource> getTrack(size_t index);
+    virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
+    virtual sp<MetaData> getMetaData();
+    virtual uint32_t flags() const;
+
+protected:
+    virtual ~FragmentedMP4Extractor();
+
+private:
+    sp<ALooper> mLooper;
+    sp<FragmentedMP4Parser> mParser;
+    sp<DataSource> mDataSource;
+    status_t mInitCheck;
+    size_t mAudioTrackIndex;
+    size_t mTrackCount;
+
+    sp<MetaData> mFileMetaData;
+
+    Vector<uint32_t> mPath;
+
+    FragmentedMP4Extractor(const FragmentedMP4Extractor &);
+    FragmentedMP4Extractor &operator=(const FragmentedMP4Extractor &);
+};
+
+bool SniffFragmentedMP4(
+        const sp<DataSource> &source, String8 *mimeType, float *confidence,
+        sp<AMessage> *);
+
+}  // namespace android
+
+#endif  // FRAGMENTED_MP4_EXTRACTOR_H_
diff --git a/media/libstagefright/include/FragmentedMP4Parser.h b/media/libstagefright/include/FragmentedMP4Parser.h
index bd8fe32..0edafb9 100644
--- a/media/libstagefright/include/FragmentedMP4Parser.h
+++ b/media/libstagefright/include/FragmentedMP4Parser.h
@@ -19,6 +19,7 @@
 #define PARSER_H_
 
 #include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/DataSource.h>
 #include <utils/Vector.h>
 
 namespace android {
@@ -30,6 +31,7 @@
         Source() {}
 
         virtual ssize_t readAt(off64_t offset, void *data, size_t size) = 0;
+        virtual bool isSeekable() = 0;
 
         protected:
         virtual ~Source() {}
@@ -42,9 +44,12 @@
 
     void start(const char *filename);
     void start(const sp<Source> &source);
+    void start(sp<DataSource> &source);
 
-    sp<AMessage> getFormat(bool audio);
-    status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit);
+    sp<AMessage> getFormat(bool audio, bool synchronous = false);
+    status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit, bool synchronous = false);
+    status_t seekTo(bool audio, int64_t timeUs);
+    bool isSeekable() const;
 
     virtual void onMessageReceived(const sp<AMessage> &msg);
 
@@ -58,6 +63,7 @@
         kWhatReadMore,
         kWhatGetFormat,
         kWhatDequeueAccessUnit,
+        kWhatSeekTo,
     };
 
     struct TrackFragment;
@@ -97,6 +103,11 @@
         off64_t mOffset;
     };
 
+    struct SidxEntry {
+        size_t mSize;
+        uint32_t mDurationUs;
+    };
+
     struct TrackInfo {
         enum Flags {
             kTrackEnabled     = 0x01,
@@ -107,6 +118,7 @@
         uint32_t mTrackID;
         uint32_t mFlags;
         uint32_t mDuration;  // This is the duration in terms of movie timescale!
+        uint64_t mSidxDuration; // usec, from sidx box, which can use a different timescale
 
         uint32_t mMediaTimeScale;
 
@@ -121,6 +133,7 @@
 
         uint32_t mDecodingTime;
 
+        Vector<SidxEntry> mSidx;
         sp<StaticTrackFragment> mStaticFragment;
         List<sp<TrackFragment> > mFragments;
     };
@@ -151,6 +164,8 @@
     sp<Source> mSource;
     off_t mBufferPos;
     bool mSuspended;
+    bool mDoneWithMoov;
+    off_t mFirstMoofOffset; // used as the starting point for offsets calculated from the sidx box
     sp<ABuffer> mBuffer;
     Vector<Container> mStack;
     KeyedVector<uint32_t, TrackInfo> mTracks;  // TrackInfo by trackID
@@ -164,6 +179,7 @@
 
     status_t onProceed();
     status_t onDequeueAccessUnit(size_t trackIndex, sp<ABuffer> *accessUnit);
+    status_t onSeekTo(bool wantAudio, int64_t position);
 
     void enter(off64_t offset, uint32_t type, uint64_t size);
 
@@ -222,6 +238,9 @@
     status_t parseMediaData(
             uint32_t type, size_t offset, uint64_t size);
 
+    status_t parseSegmentIndex(
+            uint32_t type, size_t offset, uint64_t size);
+
     TrackInfo *editTrack(uint32_t trackID, bool createIfNecessary = false);
 
     ssize_t findTrack(bool wantAudio) const;
diff --git a/media/libstagefright/mp4/FragmentedMP4Parser.cpp b/media/libstagefright/mp4/FragmentedMP4Parser.cpp
index e130a80..7fe4e63 100644
--- a/media/libstagefright/mp4/FragmentedMP4Parser.cpp
+++ b/media/libstagefright/mp4/FragmentedMP4Parser.cpp
@@ -18,8 +18,8 @@
 #define LOG_TAG "FragmentedMP4Parser"
 #include <utils/Log.h>
 
-#include "include/FragmentedMP4Parser.h"
 #include "include/ESDS.h"
+#include "include/FragmentedMP4Parser.h"
 #include "TrackFragment.h"
 
 
@@ -31,6 +31,7 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/Utils.h>
 
+
 namespace android {
 
 static const char *Fourcc2String(uint32_t fourcc) {
@@ -121,6 +122,8 @@
     },
 
     { FOURCC('m', 'f', 'r', 'a'), 0, NULL },
+
+    { FOURCC('s', 'i', 'd', 'x'), 0, &FragmentedMP4Parser::parseSegmentIndex },
 };
 
 struct FileSource : public FragmentedMP4Parser::Source {
@@ -134,15 +137,92 @@
         return fread(data, 1, size, mFile);
     }
 
+    virtual bool isSeekable() {
+        return true;
+    }
+
     private:
     FILE *mFile;
 
     DISALLOW_EVIL_CONSTRUCTORS(FileSource);
 };
 
+struct ReadTracker : public RefBase {
+    ReadTracker(off64_t size) {
+        allocSize = 1 + size / 8192; // 1 bit per kilobyte
+        bitmap = (char*) calloc(1, allocSize);
+    }
+    virtual ~ReadTracker() {
+        dumpToLog();
+        free(bitmap);
+    }
+    void mark(off64_t offset, size_t size) {
+        int firstbit = offset / 1024;
+        int lastbit = (offset + size - 1) / 1024;
+        for (int i = firstbit; i <= lastbit; i++) {
+            bitmap[i/8] |= (0x80 >> (i & 7));
+        }
+    }
+
+ private:
+    void dumpToLog() {
+        // 96 chars per line; each char is one bit of the bitmap, i.e. one kilobyte
+        int numlines = allocSize / 12;
+        char buf[97];
+        char *cur = bitmap;
+        for (int i = 0; i < numlines; i++) {
+            for (int j = 0; j < 12; j++) {
+                for (int k = 0; k < 8; k++) {
+                    buf[(j * 8) + k] = (*cur & (0x80 >> k)) ? 'X' : '.';
+                }
+                cur++;
+            }
+            buf[96] = '\0';
+            ALOGI("%5dk: %s", i * 96, buf);
+        }
+    }
+
+    size_t allocSize;
+    char *bitmap;
+};
+
+struct DataSourceSource : public FragmentedMP4Parser::Source {
+    DataSourceSource(sp<DataSource> &source)
+        : mDataSource(source) {
+            CHECK(mDataSource != NULL);
+#if 0
+            off64_t size;
+            if (source->getSize(&size) == OK) {
+                mReadTracker = new ReadTracker(size);
+            } else {
+                ALOGE("couldn't get data source size");
+            }
+#endif
+        }
+
+    virtual ssize_t readAt(off64_t offset, void *data, size_t size) {
+        if (mReadTracker != NULL) {
+            mReadTracker->mark(offset, size);
+        }
+        return mDataSource->readAt(offset, data, size);
+    }
+
+    virtual bool isSeekable() {
+        return true;
+    }
+
+    private:
+    sp<DataSource> mDataSource;
+    sp<ReadTracker> mReadTracker;
+
+    DISALLOW_EVIL_CONSTRUCTORS(DataSourceSource);
+};
+
 FragmentedMP4Parser::FragmentedMP4Parser()
     : mBufferPos(0),
       mSuspended(false),
+      mDoneWithMoov(false),
+      mFirstMoofOffset(0),
       mFinalResult(OK) {
 }
 
@@ -153,54 +233,142 @@
     sp<AMessage> msg = new AMessage(kWhatStart, id());
     msg->setObject("source", new FileSource(filename));
     msg->post();
+    ALOGV("Parser::start(%s)", filename);
 }
 
 void FragmentedMP4Parser::start(const sp<Source> &source) {
     sp<AMessage> msg = new AMessage(kWhatStart, id());
     msg->setObject("source", source);
     msg->post();
+    ALOGV("Parser::start(Source)");
 }
 
-sp<AMessage> FragmentedMP4Parser::getFormat(bool audio) {
-    sp<AMessage> msg = new AMessage(kWhatGetFormat, id());
-    msg->setInt32("audio", audio);
+void FragmentedMP4Parser::start(sp<DataSource> &source) {
+    sp<AMessage> msg = new AMessage(kWhatStart, id());
+    msg->setObject("source", new DataSourceSource(source));
+    msg->post();
+    ALOGV("Parser::start(DataSource)");
+}
+
+sp<AMessage> FragmentedMP4Parser::getFormat(bool audio, bool synchronous) {
+
+    while (true) {
+        bool moovDone = mDoneWithMoov;
+        sp<AMessage> msg = new AMessage(kWhatGetFormat, id());
+        msg->setInt32("audio", audio);
+
+        sp<AMessage> response;
+        status_t err = msg->postAndAwaitResponse(&response);
+
+        if (err != OK) {
+            ALOGV("getFormat post failed: %d", err);
+            return NULL;
+        }
+
+        if (response->findInt32("err", &err) && err != OK) {
+            if (synchronous && err == -EWOULDBLOCK && !moovDone) {
+                resumeIfNecessary();
+                ALOGV("@getFormat parser not ready yet, retrying");
+                usleep(10000);
+                continue;
+            }
+            ALOGV("getFormat failed: %d", err);
+            return NULL;
+        }
+
+        sp<AMessage> format;
+        CHECK(response->findMessage("format", &format));
+
+        ALOGV("returning format %s", format->debugString().c_str());
+        return format;
+    }
+}
+
+status_t FragmentedMP4Parser::seekTo(bool wantAudio, int64_t timeUs) {
+    sp<AMessage> msg = new AMessage(kWhatSeekTo, id());
+    msg->setInt32("audio", wantAudio);
+    msg->setInt64("position", timeUs);
 
     sp<AMessage> response;
     status_t err = msg->postAndAwaitResponse(&response);
-
-    if (err != OK) {
-        return NULL;
-    }
-
-    if (response->findInt32("err", &err) && err != OK) {
-        return NULL;
-    }
-
-    sp<AMessage> format;
-    CHECK(response->findMessage("format", &format));
-
-    ALOGV("returning format %s", format->debugString().c_str());
-    return format;
+    return err;
 }
 
-status_t FragmentedMP4Parser::dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit) {
-    sp<AMessage> msg = new AMessage(kWhatDequeueAccessUnit, id());
-    msg->setInt32("audio", audio);
-
-    sp<AMessage> response;
-    status_t err = msg->postAndAwaitResponse(&response);
-
-    if (err != OK) {
-        return err;
+bool FragmentedMP4Parser::isSeekable() const {
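+    // Poll until the first moof box has been seen (or parsing failed); any
+    // sidx boxes, which determine seekability, precede it in the file.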
+    while (mFirstMoofOffset == 0 && mFinalResult == OK) {
+        usleep(10000);
     }
-
-    if (response->findInt32("err", &err) && err != OK) {
-        return err;
+    bool seekable = mSource->isSeekable();
+    for (size_t i = 0; seekable && i < mTracks.size(); i++) {
+        const TrackInfo *info = &mTracks.valueAt(i);
+        seekable &= !info->mSidx.empty();
     }
+    return seekable;
+}
 
-    CHECK(response->findBuffer("accessUnit", accessUnit));
+status_t FragmentedMP4Parser::onSeekTo(bool wantAudio, int64_t position) {
+    status_t err = -EINVAL;
+    ssize_t trackIndex = findTrack(wantAudio);
+    if (trackIndex < 0) {
+        err = trackIndex;
+    } else {
+        TrackInfo *info = &mTracks.editValueAt(trackIndex);
 
-    return OK;
+        int numSidxEntries = info->mSidx.size();
+        int64_t totalTime = 0;
+        off_t totalOffset = mFirstMoofOffset;
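+        // Walk the sidx entries, accumulating durations until the entry
+        // containing the target time is found; byte offsets accumulate in
+        // parallel, starting at the first moof.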
+        for (int i = 0; i < numSidxEntries; i++) {
+            const SidxEntry *se = &info->mSidx[i];
+            totalTime += se->mDurationUs;
+            if (totalTime > position) {
+                mBuffer->setRange(0,0);
+                mBufferPos = totalOffset;
+                if (mFinalResult == ERROR_END_OF_STREAM) {
+                    mFinalResult = OK;
+                    mSuspended = true; // force resume
+                    resumeIfNecessary();
+                }
+                info->mFragments.clear();
+                info->mDecodingTime = position * info->mMediaTimeScale / 1000000ll;
+                return OK;
+            }
+            totalOffset += se->mSize;
+        }
+    }
+    ALOGV("seekTo out of range");
+    return err;
+}
+
+status_t FragmentedMP4Parser::dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit,
+                                                bool synchronous) {
+
+    while (true) {
+        sp<AMessage> msg = new AMessage(kWhatDequeueAccessUnit, id());
+        msg->setInt32("audio", audio);
+
+        sp<AMessage> response;
+        status_t err = msg->postAndAwaitResponse(&response);
+
+        if (err != OK) {
+            ALOGV("dequeue fail 1: %d", err);
+            return err;
+        }
+
+        if (response->findInt32("err", &err) && err != OK) {
+            if (synchronous && err == -EWOULDBLOCK) {
+                resumeIfNecessary();
+                ALOGV("Parser not ready yet, retrying");
+                usleep(10000);
+                continue;
+            }
+            ALOGV("dequeue fail 2: %d, %d", err, synchronous);
+            return err;
+        }
+
+        CHECK(response->findBuffer("accessUnit", accessUnit));
+
+        return OK;
+    }
 }
 
 ssize_t FragmentedMP4Parser::findTrack(bool wantAudio) const {
@@ -272,7 +440,7 @@
             size_t maxBytesToRead = mBuffer->capacity() - mBuffer->size();
 
             if (maxBytesToRead < needed) {
-                ALOGI("resizing buffer.");
+                ALOGV("resizing buffer.");
 
                 sp<ABuffer> newBuffer =
                     new ABuffer((mBuffer->size() + needed + 1023) & ~1023);
@@ -290,7 +458,7 @@
                     mBuffer->data() + mBuffer->size(), needed);
 
             if (n < (ssize_t)needed) {
-                ALOGI("%s", "Reached EOF");
+                ALOGV("Reached EOF when reading %d @ %d + %d", needed, mBufferPos, mBuffer->size());
                 if (n < 0) {
                     mFinalResult = n;
                 } else if (n == 0) {
@@ -321,8 +489,16 @@
             } else {
                 TrackInfo *info = &mTracks.editValueAt(trackIndex);
 
+                sp<AMessage> format = info->mSampleDescs.itemAt(0).mFormat;
+                if (info->mSidxDuration) {
+                    format->setInt64("durationUs", info->mSidxDuration);
+                } else {
+                    // this is probably going to be zero. Oh well...
+                    format->setInt64("durationUs",
+                                     1000000ll * info->mDuration / info->mMediaTimeScale);
+                }
                 response->setMessage(
-                        "format", info->mSampleDescs.itemAt(0).mFormat);
+                        "format", format);
 
                 err = OK;
             }
@@ -366,6 +542,30 @@
             break;
         }
 
+        case kWhatSeekTo:
+        {
+            ALOGV("kWhatSeekTo");
+            int32_t wantAudio;
+            CHECK(msg->findInt32("audio", &wantAudio));
+            int64_t position;
+            CHECK(msg->findInt64("position", &position));
+
+            status_t err = -EWOULDBLOCK;
+            sp<AMessage> response = new AMessage;
+
+            ssize_t trackIndex = findTrack(wantAudio);
+
+            if (trackIndex < 0) {
+                err = trackIndex;
+            } else {
+                err = onSeekTo(wantAudio, position);
+            }
+            response->setInt32("err", err);
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+            response->postReply(replyID);
+            break;
+        }
         default:
             TRESPASS();
     }
@@ -429,6 +629,12 @@
     if ((i < kNumDispatchers && kDispatchTable[i].mHandler == 0)
             || isSampleEntryBox || ptype == FOURCC('i', 'l', 's', 't')) {
         // This is a container box.
+        if (type == FOURCC('m', 'o', 'o', 'f')) {
+            if (mFirstMoofOffset == 0) {
+                ALOGV("first moof @ %08x", mBufferPos + offset);
+                mFirstMoofOffset = mBufferPos + offset - 8; // point at the size
+            }
+        }
         if (type == FOURCC('m', 'e', 't', 'a')) {
             if ((err = need(offset + 4)) < OK) {
                 return err;
@@ -589,7 +795,7 @@
         return;
     }
 
-    ALOGI("resuming.");
+    ALOGV("resuming.");
 
     mSuspended = false;
     (new AMessage(kWhatProceed, id()))->post();
@@ -647,7 +853,7 @@
 
         int cmp = CompareSampleLocation(sampleInfo, mdatInfo);
 
-        if (cmp < 0) {
+        if (cmp < 0 && !mSource->isSeekable()) {
             return -EPIPE;
         } else if (cmp == 0) {
             if (i > 0) {
@@ -669,6 +875,8 @@
         size_t numDroppable = 0;
         bool done = false;
 
+        // XXX FIXME: if one of the tracks is not advanced (e.g. if you play an
+        // audio+video file with sf2), then mMediaData is never pruned and will
+        // keep growing
         for (size_t i = 0; !done && i < mMediaData.size(); ++i) {
             const MediaDataInfo &mdatInfo = mMediaData.itemAt(i);
 
@@ -896,6 +1104,8 @@
 
                     static_cast<DynamicTrackFragment *>(
                             fragment.get())->signalCompletion();
+                } else if (container->mType == FOURCC('m', 'o', 'o', 'v')) {
+                    mDoneWithMoov = true;
                 }
 
                 container = NULL;
@@ -953,6 +1163,10 @@
     TrackInfo *info = editTrack(trackID, true /* createIfNecessary */);
     info->mFlags = flags;
     info->mDuration = duration;
+    if (info->mDuration == 0xffffffff) {
+        // ffmpeg sets this to -1, which is incorrect.
+        info->mDuration = 0;
+    }
 
     info->mStaticFragment = new StaticTrackFragment;
 
@@ -1363,13 +1577,100 @@
     info->mOffset = mBufferPos + offset;
 
     if (mMediaData.size() > 10) {
-        ALOGI("suspending for now.");
+        ALOGV("suspending for now.");
         mSuspended = true;
     }
 
     return OK;
 }
 
+status_t FragmentedMP4Parser::parseSegmentIndex(
+        uint32_t type, size_t offset, uint64_t size) {
+    ALOGV("sidx box type %d, offset %d, size %d", type, int(offset), int(size));
+//    AString sidxstr;
+//    hexdump(mBuffer->data() + offset, size, 0 /* indent */, &sidxstr);
+//    ALOGV("raw sidx:");
+//    ALOGV("%s", sidxstr.c_str());
+    if (offset + 12 > size) {
+        return -EINVAL;
+    }
+
+    uint32_t flags = readU32(offset);
+
+    uint32_t version = flags >> 24;
+    flags &= 0xffffff;
+
+    ALOGV("sidx version %d", version);
+
+    uint32_t referenceId = readU32(offset + 4);
+    uint32_t timeScale = readU32(offset + 8);
+    ALOGV("sidx refid/timescale: %d/%d", referenceId, timeScale);
+
+    uint64_t earliestPresentationTime;
+    uint64_t firstOffset;
+
+    offset += 12;
+
+    if (version == 0) {
+        if (offset + 8 > size) {
+            return -EINVAL;
+        }
+        earliestPresentationTime = readU32(offset);
+        firstOffset = readU32(offset + 4);
+        offset += 8;
+    } else {
+        if (offset + 16 > size) {
+            return -EINVAL;
+        }
+        earliestPresentationTime = readU64(offset);
+        firstOffset = readU64(offset + 8);
+        offset += 16;
+    }
+    ALOGV("sidx pres/off: %Ld/%Ld", earliestPresentationTime, firstOffset);
+
+    if (offset + 4 > size) {
+        return -EINVAL;
+    }
+    if (readU16(offset) != 0) { // reserved
+        return -EINVAL;
+    }
+    int32_t referenceCount = readU16(offset + 2);
+    offset += 4;
+    ALOGV("refcount: %d", referenceCount);
+
+    if (offset + referenceCount * 12 > size) {
+        return -EINVAL;
+    }
+
+    TrackInfo *info = editTrack(mCurrentTrackID);
+    uint64_t total_duration = 0;
+    for (int i = 0; i < referenceCount; i++) {
+        uint32_t d1 = readU32(offset);
+        uint32_t d2 = readU32(offset + 4);
+        uint32_t d3 = readU32(offset + 8);
+
+        if (d1 & 0x80000000) {
+            ALOGW("sub-sidx boxes not supported yet");
+        }
+        bool sap = d3 & 0x80000000;
+        uint32_t saptype = (d3 >> 28) & 7;
+        if (!sap || saptype > 2) {
+            ALOGW("not a stream access point, or unsupported type");
+        }
+        total_duration += d2;
+        offset += 12;
+        ALOGV(" item %d, %08x %08x %08x", i, d1, d2, d3);
+        SidxEntry se;
+        se.mSize = d1 & 0x7fffffff;
+        se.mDurationUs = 1000000LL * d2 / timeScale;
+        info->mSidx.add(se);
+    }
+
+    info->mSidxDuration = total_duration * 1000000 / timeScale;
+    ALOGV("duration: %lld", info->mSidxDuration);
+    return OK;
+}
+
 status_t FragmentedMP4Parser::parseTrackExtends(
         uint32_t type, size_t offset, uint64_t size) {
     if (offset + 24 > size) {
@@ -1407,6 +1708,7 @@
     info.mTrackID = trackID;
     info.mFlags = 0;
     info.mDuration = 0xffffffff;
+    info.mSidxDuration = 0;
     info.mMediaTimeScale = 0;
     info.mMediaHandlerType = 0;
     info.mDefaultSampleDescriptionIndex = 0;
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 1370c62..e27a065 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -14,8 +14,10 @@
     camera2/CameraMetadata.cpp \
     camera2/Parameters.cpp \
     camera2/FrameProcessor.cpp \
-    camera2/CaptureProcessor.cpp \
-    camera2/CallbackProcessor.cpp
+    camera2/JpegProcessor.cpp \
+    camera2/CallbackProcessor.cpp \
+    camera2/ZslProcessor.cpp \
+    camera2/CaptureSequencer.cpp \
 
 LOCAL_SHARED_LIBRARIES:= \
     libui \
diff --git a/services/camera/libcameraservice/Camera2Client.cpp b/services/camera/libcameraservice/Camera2Client.cpp
index acd290d..5400604 100644
--- a/services/camera/libcameraservice/Camera2Client.cpp
+++ b/services/camera/libcameraservice/Camera2Client.cpp
@@ -59,12 +59,21 @@
         mRecordingHeapCount(kDefaultRecordingHeapCount)
 {
     ATRACE_CALL();
-    ALOGV("%s: Created client for camera %d", __FUNCTION__, cameraId);
+    ALOGI("Camera %d: Opened", cameraId);
 
     mDevice = new Camera2Device(cameraId);
 
     SharedParameters::Lock l(mParameters);
     l.mParameters.state = Parameters::DISCONNECTED;
+
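+    // Zero-shutter-lag capture is opt-in via the camera.zsl_mode system
+    // property.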
+    char value[PROPERTY_VALUE_MAX];
+    property_get("camera.zsl_mode", value, "0");
+    if (!strcmp(value,"1")) {
+        ALOGI("Camera %d: Enabling ZSL mode", cameraId);
+        l.mParameters.zslMode = true;
+    } else {
+        l.mParameters.zslMode = false;
+    }
 }
 
 status_t Camera2Client::checkPid(const char* checkLocation) const {
@@ -100,20 +109,32 @@
         return NO_INIT;
     }
 
-    mFrameProcessor = new FrameProcessor(this);
-    String8 frameThreadName = String8::format("Camera2Client[%d]::FrameProcessor",
-            mCameraId);
-    mFrameProcessor->run(frameThreadName.string());
+    String8 threadName;
 
-    mCaptureProcessor = new CaptureProcessor(this);
-    String8 captureThreadName =
-            String8::format("Camera2Client[%d]::CaptureProcessor", mCameraId);
-    mCaptureProcessor->run(captureThreadName.string());
+    mFrameProcessor = new FrameProcessor(this);
+    threadName = String8::format("Camera2Client[%d]::FrameProcessor",
+            mCameraId);
+    mFrameProcessor->run(threadName.string());
+
+    mCaptureSequencer = new CaptureSequencer(this);
+    threadName = String8::format("Camera2Client[%d]::CaptureSequencer",
+            mCameraId);
+    mCaptureSequencer->run(threadName.string());
+
+    mJpegProcessor = new JpegProcessor(this, mCaptureSequencer);
+    threadName = String8::format("Camera2Client[%d]::JpegProcessor",
+            mCameraId);
+    mJpegProcessor->run(threadName.string());
+
+    mZslProcessor = new ZslProcessor(this, mCaptureSequencer);
+    threadName = String8::format("Camera2Client[%d]::ZslProcessor",
+            mCameraId);
+    mZslProcessor->run(threadName.string());
 
     mCallbackProcessor = new CallbackProcessor(this);
-    String8 callbackThreadName =
-            String8::format("Camera2Client[%d]::CallbackProcessor", mCameraId);
-    mCallbackProcessor->run(callbackThreadName.string());
+    threadName = String8::format("Camera2Client[%d]::CallbackProcessor",
+            mCameraId);
+    mCallbackProcessor->run(threadName.string());
 
     if (gLogLevel >= 1) {
         ALOGD("%s: Default parameters converted from camera %d:", __FUNCTION__,
@@ -126,7 +147,7 @@
 
 Camera2Client::~Camera2Client() {
     ATRACE_CALL();
-    ALOGV("%s: Camera %d: Shutting down client.", __FUNCTION__, mCameraId);
+    ALOGV("Camera %d: Shutting down", mCameraId);
 
     mDestructionStarted = true;
 
@@ -135,7 +156,7 @@
     disconnect();
 
     mFrameProcessor->requestExit();
-    ALOGV("%s: Camera %d: Shutdown complete", __FUNCTION__, mCameraId);
+    ALOGI("Camera %d: Closed", mCameraId);
 }
 
 status_t Camera2Client::dump(int fd, const Vector<String16>& args) {
@@ -299,10 +320,12 @@
             p.videoStabilization ? "enabled" : "disabled");
 
     result.append("  Current streams:\n");
-    result.appendFormat("    Preview stream ID: %d\n", mPreviewStreamId);
+    result.appendFormat("    Preview stream ID: %d\n",
+            getPreviewStreamId());
     result.appendFormat("    Capture stream ID: %d\n",
-            mCaptureProcessor->getStreamId());
-    result.appendFormat("    Recording stream ID: %d\n", mRecordingStreamId);
+            getCaptureStreamId());
+    result.appendFormat("    Recording stream ID: %d\n",
+            getRecordingStreamId());
 
     result.append("  Current requests:\n");
     if (mPreviewRequest.entryCount() != 0) {
@@ -314,15 +337,6 @@
         write(fd, result.string(), result.size());
     }
 
-    if (mCaptureRequest.entryCount() != 0) {
-        result = "    Capture request:\n";
-        write(fd, result.string(), result.size());
-        mCaptureRequest.dump(fd, 2, 6);
-    } else {
-        result = "    Capture request: undefined\n";
-        write(fd, result.string(), result.size());
-    }
-
     if (mRecordingRequest.entryCount() != 0) {
         result = "    Recording request:\n";
         write(fd, result.string(), result.size());
@@ -332,6 +346,8 @@
         write(fd, result.string(), result.size());
     }
 
+    mCaptureSequencer->dump(fd, args);
+
     mFrameProcessor->dump(fd, args);
 
     result = "  Device dump:\n";
@@ -366,7 +382,7 @@
         mPreviewStreamId = NO_STREAM;
     }
 
-    mCaptureProcessor->deleteStream();
+    mJpegProcessor->deleteStream();
 
     if (mRecordingStreamId != NO_STREAM) {
         mDevice->deleteStream(mRecordingStreamId);
@@ -623,6 +639,14 @@
             return res;
         }
     }
+    if (params.zslMode) {
+        res = mZslProcessor->updateStream(params);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to update ZSL stream: %s (%d)",
+                    __FUNCTION__, mCameraId, strerror(-res), res);
+            return res;
+        }
+    }
 
     if (mPreviewRequest.entryCount() == 0) {
         res = updatePreviewRequest(params);
@@ -633,18 +657,20 @@
         }
     }
 
+    Vector<uint8_t> outputStreams;
+    outputStreams.push(getPreviewStreamId());
+
     if (callbacksEnabled) {
-        uint8_t outputStreams[2] =
-                { mPreviewStreamId, mCallbackProcessor->getStreamId() };
-        res = mPreviewRequest.update(
-                ANDROID_REQUEST_OUTPUT_STREAMS,
-                outputStreams, 2);
-    } else {
-        uint8_t outputStreams[1] = { mPreviewStreamId };
-        res = mPreviewRequest.update(
-                ANDROID_REQUEST_OUTPUT_STREAMS,
-                outputStreams, 1);
+        outputStreams.push(getCallbackStreamId());
     }
+    if (params.zslMode) {
+        outputStreams.push(getZslStreamId());
+    }
+
+    res = mPreviewRequest.update(
+        ANDROID_REQUEST_OUTPUT_STREAMS,
+        outputStreams);
+
     if (res != OK) {
         ALOGE("%s: Camera %d: Unable to set up preview request: %s (%d)",
                 __FUNCTION__, mCameraId, strerror(-res), res);
@@ -817,14 +843,19 @@
     }
 
     if (callbacksEnabled) {
-        uint8_t outputStreams[3] =
-                { mPreviewStreamId, mRecordingStreamId,
-                  mCallbackProcessor->getStreamId() };
+        uint8_t outputStreams[3] = {
+            getPreviewStreamId(),
+            getRecordingStreamId(),
+            getCallbackStreamId()
+        };
         res = mRecordingRequest.update(
                 ANDROID_REQUEST_OUTPUT_STREAMS,
                 outputStreams, 3);
     } else {
-        uint8_t outputStreams[2] = { mPreviewStreamId, mRecordingStreamId };
+        uint8_t outputStreams[2] = {
+            getPreviewStreamId(),
+            getRecordingStreamId()
+        };
         res = mRecordingRequest.update(
                 ANDROID_REQUEST_OUTPUT_STREAMS,
                 outputStreams, 2);
@@ -1020,8 +1051,18 @@
                     __FUNCTION__, mCameraId);
             return INVALID_OPERATION;
         case Parameters::PREVIEW:
-        case Parameters::RECORD:
             // Good to go for takePicture
+            res = commandStopFaceDetectionL(l.mParameters);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to stop face detection for still capture",
+                        __FUNCTION__, mCameraId);
+                return res;
+            }
+            l.mParameters.state = Parameters::STILL_CAPTURE;
+            break;
+        case Parameters::RECORD:
+            // Good to go for video snapshot
+            l.mParameters.state = Parameters::VIDEO_SNAPSHOT;
             break;
         case Parameters::STILL_CAPTURE:
         case Parameters::VIDEO_SNAPSHOT:
@@ -1032,130 +1073,20 @@
 
     ALOGV("%s: Camera %d: Starting picture capture", __FUNCTION__, mCameraId);
 
-    res = mCaptureProcessor->updateStream(l.mParameters);
+    res = mJpegProcessor->updateStream(l.mParameters);
     if (res != OK) {
         ALOGE("%s: Camera %d: Can't set up still image stream: %s (%d)",
                 __FUNCTION__, mCameraId, strerror(-res), res);
         return res;
     }
 
-    if (mCaptureRequest.entryCount() == 0) {
-        res = updateCaptureRequest(l.mParameters);
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Can't create still image capture request: "
-                    "%s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
-            return res;
-        }
-    }
-
-    bool callbacksEnabled = l.mParameters.previewCallbackFlags &
-            CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK;
-    bool recordingEnabled = (l.mParameters.state == Parameters::RECORD);
-
-    int captureStreamId = mCaptureProcessor->getStreamId();
-
-    int streamSwitch = (callbacksEnabled ? 0x2 : 0x0) +
-            (recordingEnabled ? 0x1 : 0x0);
-    switch ( streamSwitch ) {
-        case 0: { // No recording, callbacks
-            uint8_t streamIds[2] = {
-                mPreviewStreamId,
-                captureStreamId
-            };
-            res = mCaptureRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS,
-                    streamIds, 2);
-            break;
-        }
-        case 1: { // Recording
-            uint8_t streamIds[3] = {
-                mPreviewStreamId,
-                mRecordingStreamId,
-                captureStreamId
-            };
-            res = mCaptureRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS,
-                    streamIds, 3);
-            break;
-        }
-        case 2: { // Callbacks
-            uint8_t streamIds[3] = {
-                mPreviewStreamId,
-                mCallbackProcessor->getStreamId(),
-                captureStreamId
-            };
-            res = mCaptureRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS,
-                    streamIds, 3);
-            break;
-        }
-        case 3: { // Both
-            uint8_t streamIds[4] = {
-                mPreviewStreamId,
-                mCallbackProcessor->getStreamId(),
-                mRecordingStreamId,
-                captureStreamId
-            };
-            res = mCaptureRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS,
-                    streamIds, 4);
-            break;
-        }
-    };
+    res = mCaptureSequencer->startCapture();
     if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to set up still image capture request: "
-                "%s (%d)",
+        ALOGE("%s: Camera %d: Unable to start capture: %s (%d)",
                 __FUNCTION__, mCameraId, strerror(-res), res);
-        return res;
-    }
-    res = mCaptureRequest.sort();
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to sort capture request: %s (%d)",
-                __FUNCTION__, mCameraId, strerror(-res), res);
-        return res;
     }
 
-    CameraMetadata captureCopy = mCaptureRequest;
-    if (captureCopy.entryCount() == 0) {
-        ALOGE("%s: Camera %d: Unable to copy capture request for HAL device",
-                __FUNCTION__, mCameraId);
-        return NO_MEMORY;
-    }
-
-    if (l.mParameters.state == Parameters::PREVIEW) {
-        res = mDevice->clearStreamingRequest();
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Unable to stop preview for still capture: "
-                    "%s (%d)",
-                    __FUNCTION__, mCameraId, strerror(-res), res);
-            return res;
-        }
-    }
-    // TODO: Capture should be atomic with setStreamingRequest here
-    res = mDevice->capture(captureCopy);
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to submit still image capture request: "
-                "%s (%d)",
-                __FUNCTION__, mCameraId, strerror(-res), res);
-        return res;
-    }
-
-    switch (l.mParameters.state) {
-        case Parameters::PREVIEW:
-            l.mParameters.state = Parameters::STILL_CAPTURE;
-            res = commandStopFaceDetectionL(l.mParameters);
-            if (res != OK) {
-                ALOGE("%s: Camera %d: Unable to stop face detection for still capture",
-                        __FUNCTION__, mCameraId);
-                return res;
-            }
-            break;
-        case Parameters::RECORD:
-            l.mParameters.state = Parameters::VIDEO_SNAPSHOT;
-            break;
-        default:
-            ALOGE("%s: Camera %d: Unknown state for still capture!",
-                    __FUNCTION__, mCameraId);
-            return INVALID_OPERATION;
-    }
-
-    return OK;
+    return res;
 }
 
 status_t Camera2Client::setParameters(const String8& params) {
@@ -1501,6 +1432,7 @@
 void Camera2Client::notifyAutoExposure(uint8_t newState, int triggerId) {
     ALOGV("%s: Autoexposure state now %d, last trigger %d",
             __FUNCTION__, newState, triggerId);
+    mCaptureSequencer->notifyAutoExposure(newState, triggerId);
 }
 
 void Camera2Client::notifyAutoWhitebalance(uint8_t newState, int triggerId) {
@@ -1508,7 +1440,7 @@
             __FUNCTION__, newState, triggerId);
 }
 
-int Camera2Client::getCameraId() {
+int Camera2Client::getCameraId() const {
     return mCameraId;
 }
 
@@ -1520,6 +1452,35 @@
     return mParameters;
 }
 
+int Camera2Client::getPreviewStreamId() const {
+    return mPreviewStreamId;
+}
+
+int Camera2Client::getCaptureStreamId() const {
+    return mJpegProcessor->getStreamId();
+}
+
+int Camera2Client::getCallbackStreamId() const {
+    return mCallbackProcessor->getStreamId();
+}
+
+int Camera2Client::getRecordingStreamId() const {
+    return mRecordingStreamId;
+}
+
+int Camera2Client::getZslStreamId() const {
+    return mZslProcessor->getStreamId();
+}
+
+status_t Camera2Client::registerFrameListener(int32_t id,
+        wp<camera2::FrameProcessor::FilteredListener> listener) {
+    return mFrameProcessor->registerListener(id, listener);
+}
+
+status_t Camera2Client::removeFrameListener(int32_t id) {
+    return mFrameProcessor->removeListener(id);
+}
+
 Camera2Client::SharedCameraClient::Lock::Lock(SharedCameraClient &client):
         mCameraClient(client.mCameraClient),
         mSharedClient(client) {
@@ -1546,6 +1507,10 @@
     mCameraClient.clear();
 }
 
+const int32_t Camera2Client::kPreviewRequestId;
+const int32_t Camera2Client::kRecordRequestId;
+const int32_t Camera2Client::kFirstCaptureRequestId;
+
 void Camera2Client::onRecordingFrameAvailable() {
     ATRACE_CALL();
     status_t res;
@@ -1656,13 +1621,6 @@
                 __FUNCTION__, mCameraId, strerror(-res), res);
         return res;
     }
-    res = updateCaptureRequest(params);
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to update capture request: %s (%d)",
-                __FUNCTION__, mCameraId, strerror(-res), res);
-        return res;
-    }
-
     res = updateRecordingRequest(params);
     if (res != OK) {
         ALOGE("%s: Camera %d: Unable to update recording request: %s (%d)",
@@ -1761,7 +1719,7 @@
         }
     }
 
-    res = updateRequestCommon(&mPreviewRequest, params);
+    res = params.updateRequest(&mPreviewRequest);
     if (res != OK) {
         ALOGE("%s: Camera %d: Unable to update common entries of preview "
                 "request: %s (%d)", __FUNCTION__, mCameraId,
@@ -1769,65 +1727,8 @@
         return res;
     }
 
-    return OK;
-}
-
-status_t Camera2Client::updateCaptureRequest(const Parameters &params) {
-    ATRACE_CALL();
-    status_t res;
-    if (mCaptureRequest.entryCount() == 0) {
-        res = mDevice->createDefaultRequest(CAMERA2_TEMPLATE_STILL_CAPTURE,
-                &mCaptureRequest);
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Unable to create default still image request:"
-                    " %s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
-            return res;
-        }
-    }
-
-    res = updateRequestCommon(&mCaptureRequest, params);
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to update common entries of capture "
-                "request: %s (%d)", __FUNCTION__, mCameraId,
-                strerror(-res), res);
-        return res;
-    }
-
-    res = mCaptureRequest.update(ANDROID_JPEG_THUMBNAIL_SIZE,
-            params.jpegThumbSize, 2);
-    if (res != OK) return res;
-    res = mCaptureRequest.update(ANDROID_JPEG_THUMBNAIL_QUALITY,
-            &params.jpegThumbQuality, 1);
-    if (res != OK) return res;
-    res = mCaptureRequest.update(ANDROID_JPEG_QUALITY,
-            &params.jpegQuality, 1);
-    if (res != OK) return res;
-    res = mCaptureRequest.update(
-            ANDROID_JPEG_ORIENTATION,
-            &params.jpegRotation, 1);
-    if (res != OK) return res;
-
-    if (params.gpsEnabled) {
-        res = mCaptureRequest.update(
-                ANDROID_JPEG_GPS_COORDINATES,
-                params.gpsCoordinates, 3);
-        if (res != OK) return res;
-        res = mCaptureRequest.update(
-                ANDROID_JPEG_GPS_TIMESTAMP,
-                &params.gpsTimestamp, 1);
-        if (res != OK) return res;
-        res = mCaptureRequest.update(
-                ANDROID_JPEG_GPS_PROCESSING_METHOD,
-                params.gpsProcessingMethod);
-        if (res != OK) return res;
-    } else {
-        res = mCaptureRequest.erase(ANDROID_JPEG_GPS_COORDINATES);
-        if (res != OK) return res;
-        res = mCaptureRequest.erase(ANDROID_JPEG_GPS_TIMESTAMP);
-        if (res != OK) return res;
-        res = mCaptureRequest.erase(ANDROID_JPEG_GPS_PROCESSING_METHOD);
-        if (res != OK) return res;
-    }
+    res = mPreviewRequest.update(ANDROID_REQUEST_ID,
+            &kPreviewRequestId, 1);
+    if (res != OK) return res;
 
     return OK;
 }
@@ -1845,7 +1746,7 @@
         }
     }
 
-    res = updateRequestCommon(&mRecordingRequest, params);
+    res = params.updateRequest(&mRecordingRequest);
     if (res != OK) {
         ALOGE("%s: Camera %d: Unable to update common entries of recording "
                 "request: %s (%d)", __FUNCTION__, mCameraId,
@@ -1913,197 +1814,6 @@
     return OK;
 }
 
-status_t Camera2Client::updateRequestCommon(CameraMetadata *request,
-        const Parameters &params) const {
-    ATRACE_CALL();
-    status_t res;
-    res = request->update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
-            params.previewFpsRange, 2);
-    if (res != OK) return res;
-
-    uint8_t wbMode = params.autoWhiteBalanceLock ?
-            (uint8_t)ANDROID_CONTROL_AWB_LOCKED : params.wbMode;
-    res = request->update(ANDROID_CONTROL_AWB_MODE,
-            &wbMode, 1);
-    if (res != OK) return res;
-    res = request->update(ANDROID_CONTROL_EFFECT_MODE,
-            &params.effectMode, 1);
-    if (res != OK) return res;
-    res = request->update(ANDROID_CONTROL_AE_ANTIBANDING_MODE,
-            &params.antibandingMode, 1);
-    if (res != OK) return res;
-
-    uint8_t controlMode =
-            (params.sceneMode == ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED) ?
-            ANDROID_CONTROL_AUTO : ANDROID_CONTROL_USE_SCENE_MODE;
-    res = request->update(ANDROID_CONTROL_MODE,
-            &controlMode, 1);
-    if (res != OK) return res;
-    if (controlMode == ANDROID_CONTROL_USE_SCENE_MODE) {
-        res = request->update(ANDROID_CONTROL_SCENE_MODE,
-                &params.sceneMode, 1);
-        if (res != OK) return res;
-    }
-
-    uint8_t flashMode = ANDROID_FLASH_OFF;
-    uint8_t aeMode;
-    switch (params.flashMode) {
-        case Parameters::FLASH_MODE_OFF:
-            aeMode = ANDROID_CONTROL_AE_ON; break;
-        case Parameters::FLASH_MODE_AUTO:
-            aeMode = ANDROID_CONTROL_AE_ON_AUTO_FLASH; break;
-        case Parameters::FLASH_MODE_ON:
-            aeMode = ANDROID_CONTROL_AE_ON_ALWAYS_FLASH; break;
-        case Parameters::FLASH_MODE_TORCH:
-            aeMode = ANDROID_CONTROL_AE_ON;
-            flashMode = ANDROID_FLASH_TORCH;
-            break;
-        case Parameters::FLASH_MODE_RED_EYE:
-            aeMode = ANDROID_CONTROL_AE_ON_AUTO_FLASH_REDEYE; break;
-        default:
-            ALOGE("%s: Camera %d: Unknown flash mode %d", __FUNCTION__,
-                    mCameraId, params.flashMode);
-            return BAD_VALUE;
-    }
-    if (params.autoExposureLock) aeMode = ANDROID_CONTROL_AE_LOCKED;
-
-    res = request->update(ANDROID_FLASH_MODE,
-            &flashMode, 1);
-    if (res != OK) return res;
-    res = request->update(ANDROID_CONTROL_AE_MODE,
-            &aeMode, 1);
-    if (res != OK) return res;
-
-    float focusDistance = 0; // infinity focus in diopters
-    uint8_t focusMode;
-    switch (params.focusMode) {
-        case Parameters::FOCUS_MODE_AUTO:
-        case Parameters::FOCUS_MODE_MACRO:
-        case Parameters::FOCUS_MODE_CONTINUOUS_VIDEO:
-        case Parameters::FOCUS_MODE_CONTINUOUS_PICTURE:
-        case Parameters::FOCUS_MODE_EDOF:
-            focusMode = params.focusMode;
-            break;
-        case Parameters::FOCUS_MODE_INFINITY:
-        case Parameters::FOCUS_MODE_FIXED:
-            focusMode = ANDROID_CONTROL_AF_OFF;
-            break;
-        default:
-            ALOGE("%s: Camera %d: Unknown focus mode %d", __FUNCTION__,
-                    mCameraId, params.focusMode);
-            return BAD_VALUE;
-    }
-    res = request->update(ANDROID_LENS_FOCUS_DISTANCE,
-            &focusDistance, 1);
-    if (res != OK) return res;
-    res = request->update(ANDROID_CONTROL_AF_MODE,
-            &focusMode, 1);
-    if (res != OK) return res;
-
-    size_t focusingAreasSize = params.focusingAreas.size() * 5;
-    int32_t *focusingAreas = new int32_t[focusingAreasSize];
-    for (size_t i = 0; i < focusingAreasSize; i += 5) {
-        if (params.focusingAreas[i].weight != 0) {
-            focusingAreas[i + 0] =
-                    params.normalizedXToArray(params.focusingAreas[i].left);
-            focusingAreas[i + 1] =
-                    params.normalizedYToArray(params.focusingAreas[i].top);
-            focusingAreas[i + 2] =
-                    params.normalizedXToArray(params.focusingAreas[i].right);
-            focusingAreas[i + 3] =
-                    params.normalizedYToArray(params.focusingAreas[i].bottom);
-        } else {
-            focusingAreas[i + 0] = 0;
-            focusingAreas[i + 1] = 0;
-            focusingAreas[i + 2] = 0;
-            focusingAreas[i + 3] = 0;
-        }
-        focusingAreas[i + 4] = params.focusingAreas[i].weight;
-    }
-    res = request->update(ANDROID_CONTROL_AF_REGIONS,
-            focusingAreas,focusingAreasSize);
-    if (res != OK) return res;
-    delete[] focusingAreas;
-
-    res = request->update(ANDROID_CONTROL_AE_EXP_COMPENSATION,
-            &params.exposureCompensation, 1);
-    if (res != OK) return res;
-
-    size_t meteringAreasSize = params.meteringAreas.size() * 5;
-    int32_t *meteringAreas = new int32_t[meteringAreasSize];
-    for (size_t i = 0; i < meteringAreasSize; i += 5) {
-        if (params.meteringAreas[i].weight != 0) {
-            meteringAreas[i + 0] =
-                params.normalizedXToArray(params.meteringAreas[i].left);
-            meteringAreas[i + 1] =
-                params.normalizedYToArray(params.meteringAreas[i].top);
-            meteringAreas[i + 2] =
-                params.normalizedXToArray(params.meteringAreas[i].right);
-            meteringAreas[i + 3] =
-                params.normalizedYToArray(params.meteringAreas[i].bottom);
-        } else {
-            meteringAreas[i + 0] = 0;
-            meteringAreas[i + 1] = 0;
-            meteringAreas[i + 2] = 0;
-            meteringAreas[i + 3] = 0;
-        }
-        meteringAreas[i + 4] = params.meteringAreas[i].weight;
-    }
-    res = request->update(ANDROID_CONTROL_AE_REGIONS,
-            meteringAreas, meteringAreasSize);
-    if (res != OK) return res;
-
-    res = request->update(ANDROID_CONTROL_AWB_REGIONS,
-            meteringAreas, meteringAreasSize);
-    if (res != OK) return res;
-    delete[] meteringAreas;
-
-    // Need to convert zoom index into a crop rectangle. The rectangle is
-    // chosen to maximize its area on the sensor
-
-    camera_metadata_ro_entry_t maxDigitalZoom =
-            mParameters.staticInfo(ANDROID_SCALER_AVAILABLE_MAX_ZOOM);
-    float zoomIncrement = (maxDigitalZoom.data.f[0] - 1) /
-            (params.NUM_ZOOM_STEPS-1);
-    float zoomRatio = 1 + zoomIncrement * params.zoom;
-
-    float zoomLeft, zoomTop, zoomWidth, zoomHeight;
-    if (params.previewWidth >= params.previewHeight) {
-        zoomWidth =  params.fastInfo.arrayWidth / zoomRatio;
-        zoomHeight = zoomWidth *
-                params.previewHeight / params.previewWidth;
-    } else {
-        zoomHeight = params.fastInfo.arrayHeight / zoomRatio;
-        zoomWidth = zoomHeight *
-                params.previewWidth / params.previewHeight;
-    }
-    zoomLeft = (params.fastInfo.arrayWidth - zoomWidth) / 2;
-    zoomTop = (params.fastInfo.arrayHeight - zoomHeight) / 2;
-
-    int32_t cropRegion[3] = { zoomLeft, zoomTop, zoomWidth };
-    res = request->update(ANDROID_SCALER_CROP_REGION,
-            cropRegion, 3);
-    if (res != OK) return res;
-
-    // TODO: Decide how to map recordingHint, or whether just to ignore it
-
-    uint8_t vstabMode = params.videoStabilization ?
-            ANDROID_CONTROL_VIDEO_STABILIZATION_ON :
-            ANDROID_CONTROL_VIDEO_STABILIZATION_OFF;
-    res = request->update(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE,
-            &vstabMode, 1);
-    if (res != OK) return res;
-
-    uint8_t faceDetectMode = params.enableFaceDetect ?
-            params.fastInfo.bestFaceDetectMode :
-            (uint8_t)ANDROID_STATS_FACE_DETECTION_OFF;
-    res = request->update(ANDROID_STATS_FACE_DETECT_MODE,
-            &faceDetectMode, 1);
-    if (res != OK) return res;
-
-    return OK;
-}
-
 size_t Camera2Client::calculateBufferSize(int width, int height,
         int format, int stride) {
     switch (format) {
diff --git a/services/camera/libcameraservice/Camera2Client.h b/services/camera/libcameraservice/Camera2Client.h
index b2fd636..df5dbf4 100644
--- a/services/camera/libcameraservice/Camera2Client.h
+++ b/services/camera/libcameraservice/Camera2Client.h
@@ -21,7 +21,9 @@
 #include "CameraService.h"
 #include "camera2/Parameters.h"
 #include "camera2/FrameProcessor.h"
-#include "camera2/CaptureProcessor.h"
+#include "camera2/JpegProcessor.h"
+#include "camera2/ZslProcessor.h"
+#include "camera2/CaptureSequencer.h"
 #include "camera2/CallbackProcessor.h"
 #include <binder/MemoryBase.h>
 #include <binder/MemoryHeapBase.h>
@@ -95,10 +97,20 @@
      * Interface used by independent components of Camera2Client.
      */
 
-    int getCameraId();
+    int getCameraId() const;
     const sp<Camera2Device>& getCameraDevice();
     camera2::SharedParameters& getParameters();
 
+    int getPreviewStreamId() const;
+    int getCaptureStreamId() const;
+    int getCallbackStreamId() const;
+    int getRecordingStreamId() const;
+    int getZslStreamId() const;
+
+    status_t registerFrameListener(int32_t id,
+            wp<camera2::FrameProcessor::FilteredListener> listener);
+    status_t removeFrameListener(int32_t id);
+
     // Simple class to ensure that access to ICameraClient is serialized by
     // requiring mCameraClientLock to be locked before access to mCameraClient
     // is possible.
@@ -123,6 +135,10 @@
     static size_t calculateBufferSize(int width, int height,
             int format, int stride);
 
+    static const int32_t kPreviewRequestId = 1000;
+    static const int32_t kRecordRequestId  = 2000;
+    static const int32_t kFirstCaptureRequestId = 3000;
+
 private:
     /** ICamera interface-related private members */
 
@@ -183,9 +199,9 @@
 
     /* Still image capture related members */
 
-    sp<camera2::CaptureProcessor> mCaptureProcessor;
-    CameraMetadata mCaptureRequest;
-    status_t updateCaptureRequest(const Parameters &params);
+    sp<camera2::CaptureSequencer> mCaptureSequencer;
+    sp<camera2::JpegProcessor> mJpegProcessor;
+    sp<camera2::ZslProcessor> mZslProcessor;
 
     /* Recording related members */
 
@@ -228,18 +244,6 @@
 
     // Verify that caller is the owner of the camera
     status_t checkPid(const char *checkLocation) const;
-
-    // Update parameters all requests use, based on mParameters
-    status_t updateRequestCommon(CameraMetadata *request, const Parameters &params) const;
-
-    // Map from sensor active array pixel coordinates to normalized camera
-    // parameter coordinates. The former are (0,0)-(array width - 1, array height
-    // - 1), the latter from (-1000,-1000)-(1000,1000)
-    int normalizedXToArray(int x) const;
-    int normalizedYToArray(int y) const;
-    int arrayXToNormalized(int width) const;
-    int arrayYToNormalized(int height) const;
-
 };
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/Camera2Device.cpp b/services/camera/libcameraservice/Camera2Device.cpp
index f62c0a0..a171c46 100644
--- a/services/camera/libcameraservice/Camera2Device.cpp
+++ b/services/camera/libcameraservice/Camera2Device.cpp
@@ -206,6 +206,42 @@
     return OK;
 }
 
+status_t Camera2Device::createReprocessStreamFromStream(int outputId, int *id) {
+    status_t res;
+    ALOGV("%s: E", __FUNCTION__);
+
+    bool found = false;
+    StreamList::iterator streamI;
+    for (streamI = mStreams.begin();
+         streamI != mStreams.end(); streamI++) {
+        if ((*streamI)->getId() == outputId) {
+            found = true;
+            break;
+        }
+    }
+    if (!found) {
+        ALOGE("%s: Camera %d: Output stream %d doesn't exist; can't create "
+                "reprocess stream from it!", __FUNCTION__, mId, outputId);
+        return BAD_VALUE;
+    }
+
+    sp<ReprocessStreamAdapter> stream = new ReprocessStreamAdapter(mDevice);
+
+    res = stream->connectToDevice((*streamI));
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to create reprocessing stream from "\
+                "stream %d: %s (%d)", __FUNCTION__, mId, outputId,
+                strerror(-res), res);
+        return res;
+    }
+
+    *id = stream->getId();
+
+    mReprocessStreams.push_back(stream);
+    return OK;
+}
+
+
 status_t Camera2Device::getStreamInfo(int id,
         uint32_t *width, uint32_t *height, uint32_t *format) {
     ALOGV("%s: E", __FUNCTION__);
@@ -277,6 +313,33 @@
     return OK;
 }
 
+status_t Camera2Device::deleteReprocessStream(int id) {
+    ALOGV("%s: E", __FUNCTION__);
+    bool found = false;
+    for (ReprocessStreamList::iterator streamI = mReprocessStreams.begin();
+         streamI != mReprocessStreams.end(); streamI++) {
+        if ((*streamI)->getId() == id) {
+            status_t res = (*streamI)->release();
+            if (res != OK) {
+                ALOGE("%s: Unable to release reprocess stream %d from "
+                        "HAL device: %s (%d)", __FUNCTION__, id,
+                        strerror(-res), res);
+                return res;
+            }
+            mReprocessStreams.erase(streamI);
+            found = true;
+            break;
+        }
+    }
+    if (!found) {
+        ALOGE("%s: Camera %d: Unable to find stream %d to delete",
+                __FUNCTION__, mId, id);
+        return BAD_VALUE;
+    }
+    return OK;
+}
+
+
 status_t Camera2Device::createDefaultRequest(int templateId,
         CameraMetadata *request) {
     status_t err;
@@ -405,6 +468,32 @@
     return res;
 }
 
+status_t Camera2Device::pushReprocessBuffer(int reprocessStreamId,
+        buffer_handle_t *buffer, wp<BufferReleasedListener> listener) {
+    ALOGV("%s: E", __FUNCTION__);
+    bool found = false;
+    status_t res = OK;
+    for (ReprocessStreamList::iterator streamI = mReprocessStreams.begin();
+         streamI != mReprocessStreams.end(); streamI++) {
+        if ((*streamI)->getId() == reprocessStreamId) {
+            res = (*streamI)->pushIntoStream(buffer, listener);
+            if (res != OK) {
+                ALOGE("%s: Unable to push buffer to reprocess stream %d: %s (%d)",
+                        __FUNCTION__, reprocessStreamId, strerror(-res), res);
+                return res;
+            }
+            found = true;
+            break;
+        }
+    }
+    if (!found) {
+        ALOGE("%s: Camera %d: Unable to find reprocess stream %d",
+                __FUNCTION__, mId, reprocessStreamId);
+        res = BAD_VALUE;
+    }
+    return res;
+}
+
 /**
  * Camera2Device::NotificationListener
  */
@@ -903,7 +992,7 @@
         }
 
         buffers[bufferIdx] = anwBuffers[bufferIdx]->handle;
-        ALOGV("%s: Buffer %p allocated", __FUNCTION__, (void*)(buffers[bufferIdx]));
+        ALOGV("%s: Buffer %p allocated", __FUNCTION__, (void*)buffers[bufferIdx]);
     }
 
     ALOGV("%s: Registering %d buffers with camera HAL", __FUNCTION__, mTotalBuffers);
@@ -1030,7 +1119,7 @@
             const_cast<StreamAdapter*>(static_cast<const StreamAdapter*>(w));
     stream->mFrameCount++;
     ALOGVV("Stream %d enqueue: Frame %d (%p) captured at %lld ns",
-            stream->mId, mFrameCount, (void*)(*buffer), timestamp);
+            stream->mId, stream->mFrameCount, (void*)(*buffer), timestamp);
     int state = stream->mState;
     if (state != ACTIVE) {
         ALOGE("%s: Called when in bad state: %d", __FUNCTION__, state);
@@ -1094,5 +1183,198 @@
     return native_window_set_crop(a, &crop);
 }
 
+/**
+ * Camera2Device::ReprocessStreamAdapter
+ */
+
+#ifndef container_of
+#define container_of(ptr, type, member) \
+    (type *)((char*)(ptr) - offsetof(type, member))
+#endif
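+// container_of() recovers a pointer to the enclosing object from a pointer
+// to one of its members, via the usual offsetof idiom.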
+
+Camera2Device::ReprocessStreamAdapter::ReprocessStreamAdapter(camera2_device_t *d):
+        mState(RELEASED),
+        mDevice(d),
+        mId(-1),
+        mWidth(0), mHeight(0), mFormat(0),
+        mActiveBuffers(0),
+        mFrameCount(0)
+{
+    camera2_stream_in_ops::acquire_buffer = acquire_buffer;
+    camera2_stream_in_ops::release_buffer = release_buffer;
+}
+
+Camera2Device::ReprocessStreamAdapter::~ReprocessStreamAdapter() {
+    if (mState != RELEASED) {
+        release();
+    }
+}
+
+status_t Camera2Device::ReprocessStreamAdapter::connectToDevice(
+        const sp<StreamAdapter> &outputStream) {
+    status_t res;
+    ALOGV("%s: E", __FUNCTION__);
+
+    if (mState != RELEASED) return INVALID_OPERATION;
+    if (outputStream == NULL) {
+        ALOGE("%s: Null base stream passed to reprocess stream adapter",
+                __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    mBaseStream = outputStream;
+    mWidth = outputStream->getWidth();
+    mHeight = outputStream->getHeight();
+    mFormat = outputStream->getFormat();
+
+    ALOGV("%s: New reprocess stream parameters %d x %d, format 0x%x",
+            __FUNCTION__, mWidth, mHeight, mFormat);
+
+    // Allocate device-side stream interface
+
+    uint32_t id;
+    res = mDevice->ops->allocate_reprocess_stream_from_stream(mDevice,
+            outputStream->getId(), getStreamOps(),
+            &id);
+    if (res != OK) {
+        ALOGE("%s: Device reprocess stream allocation failed: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    ALOGV("%s: Allocated reprocess stream id %d based on stream %d",
+            __FUNCTION__, id, outputStream->getId());
+
+    mId = id;
+
+    mState = ACTIVE;
+
+    return OK;
+}
+
+status_t Camera2Device::ReprocessStreamAdapter::release() {
+    status_t res;
+    ALOGV("%s: Releasing stream %d", __FUNCTION__, mId);
+    if (mState >= ACTIVE) {
+        res = mDevice->ops->release_reprocess_stream(mDevice, mId);
+        if (res != OK) {
+            ALOGE("%s: Unable to release stream %d",
+                    __FUNCTION__, mId);
+            return res;
+        }
+    }
+
+    List<QueueEntry>::iterator s;
+    for (s = mQueue.begin(); s != mQueue.end(); s++) {
+        sp<BufferReleasedListener> listener = s->releaseListener.promote();
+        if (listener != 0) listener->onBufferReleased(s->handle);
+    }
+    for (s = mInFlightQueue.begin(); s != mInFlightQueue.end(); s++) {
+        sp<BufferReleasedListener> listener = s->releaseListener.promote();
+        if (listener != 0) listener->onBufferReleased(s->handle);
+    }
+    mQueue.clear();
+    mInFlightQueue.clear();
+
+    mState = RELEASED;
+    return OK;
+}
+
+status_t Camera2Device::ReprocessStreamAdapter::pushIntoStream(
+    buffer_handle_t *handle, const wp<BufferReleasedListener> &releaseListener) {
+    // TODO: Some error checking here would be nice
+    ALOGV("%s: Pushing buffer %p to stream", __FUNCTION__, (void*)(*handle));
+
+    QueueEntry entry;
+    entry.handle = handle;
+    entry.releaseListener = releaseListener;
+    mQueue.push_back(entry);
+    return OK;
+}
+
+status_t Camera2Device::ReprocessStreamAdapter::dump(int fd,
+        const Vector<String16>& args) {
+    String8 result =
+            String8::format("      Reprocess stream %d: %d x %d, fmt 0x%x\n",
+                    mId, mWidth, mHeight, mFormat);
+    result.appendFormat("        acquired buffers: %d\n",
+            mActiveBuffers);
+    result.appendFormat("        frame count: %d\n",
+            mFrameCount);
+    write(fd, result.string(), result.size());
+    return OK;
+}
+
+const camera2_stream_in_ops *Camera2Device::ReprocessStreamAdapter::getStreamOps() {
+    return static_cast<camera2_stream_in_ops *>(this);
+}
+
+int Camera2Device::ReprocessStreamAdapter::acquire_buffer(
+        const camera2_stream_in_ops_t *w,
+        buffer_handle_t** buffer) {
+    ReprocessStreamAdapter* stream =
+            const_cast<ReprocessStreamAdapter*>(
+                static_cast<const ReprocessStreamAdapter*>(w));
+    if (stream->mState != ACTIVE) {
+        ALOGE("%s: Called when in bad state: %d", __FUNCTION__, stream->mState);
+        return INVALID_OPERATION;
+    }
+
+    if (stream->mQueue.empty()) {
+        *buffer = NULL;
+        return OK;
+    }
+
+    QueueEntry &entry = *(stream->mQueue.begin());
+
+    *buffer = entry.handle;
+
+    stream->mInFlightQueue.push_back(entry);
+    stream->mQueue.erase(stream->mQueue.begin());
+
+    stream->mActiveBuffers++;
+
+    ALOGV("Stream %d acquire: Buffer %p acquired", stream->mId,
+            (void*)(**buffer));
+    return OK;
+}
+
+int Camera2Device::ReprocessStreamAdapter::release_buffer(
+        const camera2_stream_in_ops_t* w,
+        buffer_handle_t* buffer) {
+    ReprocessStreamAdapter *stream =
+            const_cast<ReprocessStreamAdapter*>(
+                static_cast<const ReprocessStreamAdapter*>(w));
+    stream->mFrameCount++;
+    ALOGV("Reprocess stream %d release: Frame %d (%p)",
+            stream->mId, stream->mFrameCount, (void*)*buffer);
+    int state = stream->mState;
+    if (state != ACTIVE) {
+        ALOGE("%s: Called when in bad state: %d", __FUNCTION__, state);
+        return INVALID_OPERATION;
+    }
+    stream->mActiveBuffers--;
+
+    List<QueueEntry>::iterator s;
+    for (s = stream->mInFlightQueue.begin(); s != stream->mInFlightQueue.end(); s++) {
+        if (s->handle == buffer) break;
+    }
+    if (s == stream->mInFlightQueue.end()) {
+        ALOGE("%s: Can't find buffer %p in in-flight list!", __FUNCTION__,
+                buffer);
+        return INVALID_OPERATION;
+    }
+
+    sp<BufferReleasedListener> listener = s->releaseListener.promote();
+    if (listener != 0) {
+        listener->onBufferReleased(s->handle);
+    } else {
+        ALOGE("%s: Can't free buffer - missing listener", __FUNCTION__);
+    }
+    stream->mInFlightQueue.erase(s);
+
+    return OK;
+}
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/Camera2Device.h b/services/camera/libcameraservice/Camera2Device.h
index 64f4608..a327d8d 100644
--- a/services/camera/libcameraservice/Camera2Device.h
+++ b/services/camera/libcameraservice/Camera2Device.h
@@ -80,6 +80,12 @@
             int *id);
 
     /**
+     * Create an input reprocess stream that uses buffers from an existing
+     * output stream.
+     */
+    status_t createReprocessStreamFromStream(int outputId, int *id);
+
+    /**
      * Get information about a given stream.
      */
     status_t getStreamInfo(int id,
@@ -97,6 +103,12 @@
     status_t deleteStream(int id);
 
     /**
+     * Delete reprocess stream. Must not be called if there are requests in
+     * flight which reference that stream.
+     */
+    status_t deleteReprocessStream(int id);
+
+    /**
      * Create a metadata buffer with fields that the HAL device believes are
      * best for the given use case
      */
@@ -163,6 +175,21 @@
      */
     status_t triggerPrecaptureMetering(uint32_t id);
 
+    /**
+     * Abstract interface for clients that want to listen to reprocess buffer
+     * release events
+     */
+    struct BufferReleasedListener: public virtual RefBase {
+        virtual void onBufferReleased(buffer_handle_t *handle) = 0;
+    };
+
+    /**
+     * Push a buffer to be reprocessed into a reprocessing stream, and
+     * provide a listener to call once the buffer is returned by the HAL
+     */
+    status_t pushReprocessBuffer(int reprocessStreamId,
+            buffer_handle_t *buffer, wp<BufferReleasedListener> listener);
+
   private:
 
     const int mId;
@@ -343,6 +370,86 @@
     typedef List<sp<StreamAdapter> > StreamList;
     StreamList mStreams;
 
+    /**
+     * Adapter from an existing output stream to the camera2 device's input
+     * (reprocess) stream ops. Also takes care of allocating/deallocating the
+     * reprocess stream in the device interface.
+     */
+    class ReprocessStreamAdapter: public camera2_stream_in_ops, public virtual RefBase {
+      public:
+        ReprocessStreamAdapter(camera2_device_t *d);
+
+        ~ReprocessStreamAdapter();
+
+        /**
+         * Create a HAL device reprocess stream based on an existing output stream.
+         */
+        status_t connectToDevice(const sp<StreamAdapter> &outputStream);
+
+        status_t release();
+
+        /**
+         * Push a buffer into the stream for reprocessing. Takes ownership
+         * until it notifies that the buffer has been released.
+         */
+        status_t pushIntoStream(buffer_handle_t *handle,
+                const wp<BufferReleasedListener> &releaseListener);
+
+        /**
+         * Get stream parameters.
+         * Only valid after a successful connectToDevice call.
+         */
+        int      getId() const     { return mId; }
+        uint32_t getWidth() const  { return mWidth; }
+        uint32_t getHeight() const { return mHeight; }
+        uint32_t getFormat() const { return mFormat; }
+
+        // Dump stream information
+        status_t dump(int fd, const Vector<String16>& args);
+
+      private:
+        enum {
+            ERROR = -1,
+            RELEASED = 0,
+            ACTIVE
+        } mState;
+
+        sp<ANativeWindow> mConsumerInterface;
+        wp<StreamAdapter> mBaseStream;
+
+        struct QueueEntry {
+            buffer_handle_t *handle;
+            wp<BufferReleasedListener> releaseListener;
+        };
+
+        List<QueueEntry> mQueue;
+
+        List<QueueEntry> mInFlightQueue;
+
+        camera2_device_t *mDevice;
+
+        uint32_t mId;
+        uint32_t mWidth;
+        uint32_t mHeight;
+        uint32_t mFormat;
+
+        /** Debugging information */
+        uint32_t mActiveBuffers;
+        uint32_t mFrameCount;
+        int64_t  mLastTimestamp;
+
+        const camera2_stream_in_ops *getStreamOps();
+
+        static int acquire_buffer(const camera2_stream_in_ops_t *w,
+                buffer_handle_t** buffer);
+
+        static int release_buffer(const camera2_stream_in_ops_t* w,
+                buffer_handle_t* buffer);
+
+    }; // class ReprocessStreamAdapter
+
+    typedef List<sp<ReprocessStreamAdapter> > ReprocessStreamList;
+    ReprocessStreamList mReprocessStreams;
+
     // Receives HAL notifications and routes them to the NotificationListener
     static void notificationCallback(int32_t msg_type,
             int32_t ext1,
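
Note on the reprocess path added to Camera2Device above: pushReprocessBuffer() hands a buffer to the HAL's input stream, and the HAL returns it asynchronously through BufferReleasedListener. A minimal sketch of a caller; OwnerComponent is a hypothetical name, and the buffer is assumed to come from the output stream the reprocess stream was created from:

    // Sketch only: push one buffer through a reprocess stream and reclaim it.
    class OwnerComponent : public Camera2Device::BufferReleasedListener {
      public:
        explicit OwnerComponent(const sp<Camera2Device> &device):
                mDevice(device) {}

        status_t reprocess(int reprocessStreamId, buffer_handle_t *handle) {
            // The device keeps only a weak reference to the listener, so the
            // caller must stay alive until onBufferReleased() fires.
            return mDevice->pushReprocessBuffer(reprocessStreamId, handle, this);
        }

        virtual void onBufferReleased(buffer_handle_t *handle) {
            // The HAL is done with the buffer; safe to requeue or free here.
            ALOGV("Reprocess buffer %p released", (void*)(*handle));
        }

      private:
        sp<Camera2Device> mDevice;
    };

In this change, ZslProcessor is the intended implementer of this role: CaptureSequencer::manageZslStart() calls its pushToReprocess() to feed a queued buffer back into the device.
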
diff --git a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
index 854b890..bccb18e 100644
--- a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
@@ -136,7 +136,7 @@
     return mCallbackStreamId;
 }
 
-void CallbackProcessor::dump(int fd, const Vector<String16>& args) {
+void CallbackProcessor::dump(int fd, const Vector<String16>& args) const {
 }
 
 bool CallbackProcessor::threadLoop() {
diff --git a/services/camera/libcameraservice/camera2/CallbackProcessor.h b/services/camera/libcameraservice/camera2/CallbackProcessor.h
index 36c51a3..c2a1372 100644
--- a/services/camera/libcameraservice/camera2/CallbackProcessor.h
+++ b/services/camera/libcameraservice/camera2/CallbackProcessor.h
@@ -48,7 +48,7 @@
     status_t deleteStream();
     int getStreamId() const;
 
-    void dump(int fd, const Vector<String16>& args);
+    void dump(int fd, const Vector<String16>& args) const;
   private:
     static const nsecs_t kWaitDuration = 10000000; // 10 ms
     wp<Camera2Client> mClient;
diff --git a/services/camera/libcameraservice/camera2/CameraMetadata.cpp b/services/camera/libcameraservice/camera2/CameraMetadata.cpp
index 95377b2..8399e20 100644
--- a/services/camera/libcameraservice/camera2/CameraMetadata.cpp
+++ b/services/camera/libcameraservice/camera2/CameraMetadata.cpp
@@ -84,6 +84,10 @@
             get_camera_metadata_entry_count(mBuffer);
 }
 
+bool CameraMetadata::isEmpty() const {
+    return entryCount() == 0;
+}
+
 status_t CameraMetadata::sort() {
     return sort_camera_metadata(mBuffer);
 }
diff --git a/services/camera/libcameraservice/camera2/CameraMetadata.h b/services/camera/libcameraservice/camera2/CameraMetadata.h
index 340414e..aee6cd7 100644
--- a/services/camera/libcameraservice/camera2/CameraMetadata.h
+++ b/services/camera/libcameraservice/camera2/CameraMetadata.h
@@ -87,6 +87,11 @@
     size_t entryCount() const;
 
     /**
+     * Is the buffer empty (no entries)
+     */
+    bool isEmpty() const;
+
+    /**
      * Sort metadata buffer for faster find
      */
     status_t sort();
diff --git a/services/camera/libcameraservice/camera2/CaptureSequencer.cpp b/services/camera/libcameraservice/camera2/CaptureSequencer.cpp
new file mode 100644
index 0000000..532d2aa
--- /dev/null
+++ b/services/camera/libcameraservice/camera2/CaptureSequencer.cpp
@@ -0,0 +1,506 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera2Client::CaptureSequencer"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <utils/Vector.h>
+
+#include "CaptureSequencer.h"
+#include "../Camera2Device.h"
+#include "../Camera2Client.h"
+#include "Parameters.h"
+
+namespace android {
+namespace camera2 {
+
+/** Public members */
+
+CaptureSequencer::CaptureSequencer(wp<Camera2Client> client):
+        Thread(false),
+        mStartCapture(false),
+        mBusy(false),
+        mNewAEState(false),
+        mNewFrameReceived(false),
+        mNewCaptureReceived(false),
+        mClient(client),
+        mCaptureState(IDLE),
+        mTriggerId(0),
+        mTimeoutCount(0),
+        mCaptureId(Camera2Client::kFirstCaptureRequestId) {
+}
+
+CaptureSequencer::~CaptureSequencer() {
+    ALOGV("%s: Exit", __FUNCTION__);
+}
+
+void CaptureSequencer::setZslProcessor(wp<ZslProcessor> processor) {
+    Mutex::Autolock l(mInputMutex);
+    mZslProcessor = processor;
+}
+
+status_t CaptureSequencer::startCapture() {
+    ATRACE_CALL();
+    Mutex::Autolock l(mInputMutex);
+    if (mBusy) {
+        ALOGE("%s: Already busy capturing!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+    if (!mStartCapture) {
+        mStartCapture = true;
+        mStartCaptureSignal.signal();
+    }
+    return OK;
+}
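+
+// The entry points below share one handshake: the producer sets a "new
+// data" flag and signals its condition variable under mInputMutex; the
+// matching manage* state handler waits on that condition with a timeout
+// and clears the flag once the data is consumed.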
+
+void CaptureSequencer::notifyAutoExposure(uint8_t newState, int triggerId) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mInputMutex);
+    mAEState = newState;
+    mAETriggerId = triggerId;
+    if (!mNewAEState) {
+        mNewAEState = true;
+        mNewNotifySignal.signal();
+    }
+}
+
+void CaptureSequencer::onFrameAvailable(int32_t frameId,
+        CameraMetadata &frame) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mInputMutex);
+    mNewFrameId = frameId;
+    mNewFrame.acquire(frame);
+    if (!mNewFrameReceived) {
+        mNewFrameReceived = true;
+        mNewFrameSignal.signal();
+    }
+}
+
+void CaptureSequencer::onCaptureAvailable(nsecs_t timestamp) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mInputMutex);
+    mCaptureTimestamp = timestamp;
+    if (!mNewCaptureReceived) {
+        mNewCaptureReceived = true;
+        mNewCaptureSignal.signal();
+    }
+}
+
+
+void CaptureSequencer::dump(int fd, const Vector<String16>& args) {
+    String8 result;
+    if (mCaptureRequest.entryCount() != 0) {
+        result = "    Capture request:\n";
+        write(fd, result.string(), result.size());
+        mCaptureRequest.dump(fd, 2, 6);
+    } else {
+        result = "    Capture request: undefined\n";
+        write(fd, result.string(), result.size());
+    }
+    result = String8::format("    Current capture state: %s\n",
+            kStateNames[mCaptureState]);
+    result.append("    Latest captured frame:\n");
+    write(fd, result.string(), result.size());
+    mNewFrame.dump(fd, 2, 6);
+}
+
+/** Private members */
+
+const char* CaptureSequencer::kStateNames[CaptureSequencer::NUM_CAPTURE_STATES+1] =
+{
+    "IDLE",
+    "START",
+    "ZSL_START",
+    "ZSL_WAITING",
+    "ZSL_REPROCESSING",
+    "STANDARD_START",
+    "STANDARD_PRECAPTURE",
+    "STANDARD_CAPTURING",
+    "DONE",
+    "ERROR",
+    "UNKNOWN"
+};
+
+const CaptureSequencer::StateManager
+        CaptureSequencer::kStateManagers[CaptureSequencer::NUM_CAPTURE_STATES-1] = {
+    &CaptureSequencer::manageIdle,
+    &CaptureSequencer::manageStart,
+    &CaptureSequencer::manageZslStart,
+    &CaptureSequencer::manageZslWaiting,
+    &CaptureSequencer::manageZslReprocessing,
+    &CaptureSequencer::manageStandardStart,
+    &CaptureSequencer::manageStandardPrecaptureWait,
+    &CaptureSequencer::manageStandardCapture,
+    &CaptureSequencer::manageStandardCaptureWait,
+    &CaptureSequencer::manageDone,
+};
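+
+// threadLoop() below drives the capture state machine: it dispatches
+// through this table with a pointer-to-member call, and the state returned
+// by each handler becomes the next state.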
+
+bool CaptureSequencer::threadLoop() {
+
+    sp<Camera2Client> client = mClient.promote();
+    if (client == 0) return false;
+
+    if (mCaptureState < ERROR) {
+        mCaptureState = (this->*kStateManagers[mCaptureState])(client);
+    } else {
+        ALOGE("%s: Bad capture state: %s",
+                __FUNCTION__, kStateNames[mCaptureState]);
+        return false;
+    }
+
+    return true;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageIdle(sp<Camera2Client> &client) {
+    status_t res;
+    ATRACE_CALL();
+    Mutex::Autolock l(mInputMutex);
+    while (!mStartCapture) {
+        res = mStartCaptureSignal.waitRelative(mInputMutex,
+                kWaitDuration);
+        if (res == TIMED_OUT) break;
+    }
+    if (mStartCapture) {
+        mStartCapture = false;
+        mBusy = true;
+        return START;
+    }
+    return IDLE;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageDone(sp<Camera2Client> &client) {
+    ATRACE_CALL();
+    mCaptureId++;
+
+    {
+        Mutex::Autolock l(mInputMutex);
+        mBusy = false;
+    }
+
+    SharedParameters::Lock l(client->getParameters());
+    switch (l.mParameters.state) {
+        case Parameters::STILL_CAPTURE:
+            l.mParameters.state = Parameters::STOPPED;
+            break;
+        case Parameters::VIDEO_SNAPSHOT:
+            l.mParameters.state = Parameters::RECORD;
+            break;
+        default:
+            ALOGE("%s: Camera %d: Still image produced unexpectedly "
+                    "in state %s!",
+                    __FUNCTION__, client->getCameraId(),
+                    Parameters::getStateName(l.mParameters.state));
+    }
+
+    return IDLE;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageStart(
+        sp<Camera2Client> &client) {
+    status_t res;
+    ATRACE_CALL();
+    SharedParameters::Lock l(client->getParameters());
+    CaptureState nextState = DONE;
+
+    res = updateCaptureRequest(l.mParameters, client);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Can't update still image capture request: %s (%d)",
+                __FUNCTION__, client->getCameraId(), strerror(-res), res);
+        return DONE;
+    }
+
+    if (l.mParameters.zslMode &&
+            l.mParameters.state == Parameters::STILL_CAPTURE) {
+        nextState = ZSL_START;
+    } else {
+        nextState = STANDARD_START;
+    }
+
+    return nextState;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageZslStart(
+        sp<Camera2Client> &client) {
+    status_t res;
+    sp<ZslProcessor> processor = mZslProcessor.promote();
+    if (processor == 0) {
+        ALOGE("%s: No ZSL queue to use!", __FUNCTION__);
+        return DONE;
+    }
+
+    client->registerFrameListener(mCaptureId, this);
+
+    res = client->getCameraDevice()->clearStreamingRequest();
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to stop preview for ZSL capture: "
+                "%s (%d)",
+                __FUNCTION__, client->getCameraId(), strerror(-res), res);
+        return DONE;
+    }
+    // TODO: Actually select the right thing here.
+    processor->pushToReprocess(mCaptureId);
+
+    mTimeoutCount = kMaxTimeoutsForCaptureEnd;
+    return STANDARD_CAPTURE_WAIT;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageZslWaiting(
+        sp<Camera2Client> &client) {
+    return DONE;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageZslReprocessing(
+        sp<Camera2Client> &client) {
+    return START;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageStandardStart(
+        sp<Camera2Client> &client) {
+    ATRACE_CALL();
+    client->registerFrameListener(mCaptureId, this);
+    {
+        SharedParameters::Lock l(client->getParameters());
+        mTriggerId = l.mParameters.precaptureTriggerCounter++;
+    }
+    client->getCameraDevice()->triggerPrecaptureMetering(mTriggerId);
+
+    mAeInPrecapture = false;
+    mTimeoutCount = kMaxTimeoutsForPrecaptureStart;
+    return STANDARD_PRECAPTURE_WAIT;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageStandardPrecaptureWait(
+        sp<Camera2Client> &client) {
+    status_t res;
+    ATRACE_CALL();
+    Mutex::Autolock l(mInputMutex);
+    while (!mNewAEState) {
+        res = mNewNotifySignal.waitRelative(mInputMutex, kWaitDuration);
+        if (res == TIMED_OUT) {
+            mTimeoutCount--;
+            break;
+        }
+    }
+    if (mTimeoutCount <= 0) {
+        ALOGW("Timed out waiting for precapture %s",
+                mAeInPrecapture ? "end" : "start");
+        return STANDARD_CAPTURE;
+    }
+    if (mNewAEState) {
+        if (!mAeInPrecapture) {
+            // Waiting to see PRECAPTURE state
+            if (mAETriggerId == mTriggerId &&
+                    mAEState == ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
+                ALOGV("%s: Got precapture start", __FUNCTION__);
+                mAeInPrecapture = true;
+                mTimeoutCount = kMaxTimeoutsForPrecaptureEnd;
+            }
+        } else {
+            // Waiting to see PRECAPTURE state end
+            if (mAETriggerId == mTriggerId &&
+                    mAEState != ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
+                ALOGV("%s: Got precapture end", __FUNCTION__);
+                return STANDARD_CAPTURE;
+            }
+        }
+        mNewAEState = false;
+    }
+    return STANDARD_PRECAPTURE_WAIT;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageStandardCapture(
+        sp<Camera2Client> &client) {
+    status_t res;
+    ATRACE_CALL();
+    SharedParameters::Lock l(client->getParameters());
+    Vector<uint8_t> outputStreams;
+
+    outputStreams.push(client->getPreviewStreamId());
+    outputStreams.push(client->getCaptureStreamId());
+
+    if (l.mParameters.previewCallbackFlags &
+            CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) {
+        outputStreams.push(client->getCallbackStreamId());
+    }
+
+    if (l.mParameters.state == Parameters::VIDEO_SNAPSHOT) {
+        outputStreams.push(client->getRecordingStreamId());
+    }
+
+    res = mCaptureRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS,
+            outputStreams);
+    if (res == OK) {
+        res = mCaptureRequest.update(ANDROID_REQUEST_ID,
+                &mCaptureId, 1);
+    }
+    if (res == OK) {
+        res = mCaptureRequest.sort();
+    }
+
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to set up still capture request: %s (%d)",
+                __FUNCTION__, client->getCameraId(), strerror(-res), res);
+        return DONE;
+    }
+
+    CameraMetadata captureCopy = mCaptureRequest;
+    if (captureCopy.entryCount() == 0) {
+        ALOGE("%s: Camera %d: Unable to copy capture request for HAL device",
+                __FUNCTION__, client->getCameraId());
+        return DONE;
+    }
+
+    if (l.mParameters.state == Parameters::STILL_CAPTURE) {
+        res = client->getCameraDevice()->clearStreamingRequest();
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to stop preview for still capture: "
+                    "%s (%d)",
+                    __FUNCTION__, client->getCameraId(), strerror(-res), res);
+            return DONE;
+        }
+    }
+    // TODO: Capture should be atomic with setStreamingRequest here
+    res = client->getCameraDevice()->capture(captureCopy);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to submit still image capture request: "
+                "%s (%d)",
+                __FUNCTION__, client->getCameraId(), strerror(-res), res);
+        return DONE;
+    }
+
+    mTimeoutCount = kMaxTimeoutsForCaptureEnd;
+    return STANDARD_CAPTURE_WAIT;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageStandardCaptureWait(
+        sp<Camera2Client> &client) {
+    status_t res;
+    ATRACE_CALL();
+    Mutex::Autolock l(mInputMutex);
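+    // Wait for both the capture result metadata and the JPEG buffer; the two
+    // waits share a single timeout budget.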
+    while (!mNewFrameReceived) {
+        res = mNewFrameSignal.waitRelative(mInputMutex, kWaitDuration);
+        if (res == TIMED_OUT) {
+            mTimeoutCount--;
+            break;
+        }
+    }
+    while (!mNewCaptureReceived) {
+        res = mNewCaptureSignal.waitRelative(mInputMutex, kWaitDuration);
+        if (res == TIMED_OUT) {
+            mTimeoutCount--;
+            break;
+        }
+    }
+    if (mTimeoutCount <= 0) {
+        ALOGW("Timed out waiting for capture to complete");
+        return DONE;
+    }
+    if (mNewFrameReceived && mNewCaptureReceived) {
+        if (mNewFrameId != mCaptureId) {
+            ALOGW("Mismatched capture frame IDs: Expected %d, got %d",
+                    mCaptureId, mNewFrameId);
+        }
+        camera_metadata_entry_t entry;
+        entry = mNewFrame.find(ANDROID_SENSOR_TIMESTAMP);
+        if (entry.count == 0) {
+            ALOGE("No timestamp field in capture frame!");
+        } else if (entry.data.i64[0] != mCaptureTimestamp) {
+            ALOGW("Mismatched capture timestamps: Metadata frame %lld,"
+                    " captured buffer %lld", entry.data.i64[0], mCaptureTimestamp);
+        }
+        client->removeFrameListener(mCaptureId);
+
+        mNewFrameReceived = false;
+        mNewCaptureReceived = false;
+        return DONE;
+    }
+    return STANDARD_CAPTURE_WAIT;
+}
+
+status_t CaptureSequencer::updateCaptureRequest(const Parameters &params,
+        sp<Camera2Client> &client) {
+    ATRACE_CALL();
+    status_t res;
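+    // Create the capture request from the HAL's still-capture template on
+    // first use; afterwards only its entries are refreshed.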
+    if (mCaptureRequest.entryCount() == 0) {
+        res = client->getCameraDevice()->createDefaultRequest(
+                CAMERA2_TEMPLATE_STILL_CAPTURE,
+                &mCaptureRequest);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to create default still image request:"
+                    " %s (%d)", __FUNCTION__, client->getCameraId(),
+                    strerror(-res), res);
+            return res;
+        }
+    }
+
+    res = params.updateRequest(&mCaptureRequest);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to update common entries of capture "
+                "request: %s (%d)", __FUNCTION__, client->getCameraId(),
+                strerror(-res), res);
+        return res;
+    }
+
+    res = mCaptureRequest.update(ANDROID_JPEG_THUMBNAIL_SIZE,
+            params.jpegThumbSize, 2);
+    if (res != OK) return res;
+    res = mCaptureRequest.update(ANDROID_JPEG_THUMBNAIL_QUALITY,
+            &params.jpegThumbQuality, 1);
+    if (res != OK) return res;
+    res = mCaptureRequest.update(ANDROID_JPEG_QUALITY,
+            &params.jpegQuality, 1);
+    if (res != OK) return res;
+    res = mCaptureRequest.update(
+            ANDROID_JPEG_ORIENTATION,
+            &params.jpegRotation, 1);
+    if (res != OK) return res;
+
+    if (params.gpsEnabled) {
+        res = mCaptureRequest.update(
+                ANDROID_JPEG_GPS_COORDINATES,
+                params.gpsCoordinates, 3);
+        if (res != OK) return res;
+        res = mCaptureRequest.update(
+                ANDROID_JPEG_GPS_TIMESTAMP,
+                &params.gpsTimestamp, 1);
+        if (res != OK) return res;
+        res = mCaptureRequest.update(
+                ANDROID_JPEG_GPS_PROCESSING_METHOD,
+                params.gpsProcessingMethod);
+        if (res != OK) return res;
+    } else {
+        res = mCaptureRequest.erase(ANDROID_JPEG_GPS_COORDINATES);
+        if (res != OK) return res;
+        res = mCaptureRequest.erase(ANDROID_JPEG_GPS_TIMESTAMP);
+        if (res != OK) return res;
+        res = mCaptureRequest.erase(ANDROID_JPEG_GPS_PROCESSING_METHOD);
+        if (res != OK) return res;
+    }
+
+    return OK;
+}
+
+}; // namespace camera2
+}; // namespace android
diff --git a/services/camera/libcameraservice/camera2/CaptureSequencer.h b/services/camera/libcameraservice/camera2/CaptureSequencer.h
new file mode 100644
index 0000000..0492a43
--- /dev/null
+++ b/services/camera/libcameraservice/camera2/CaptureSequencer.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_CAPTURESEQUENCER_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_CAPTURESEQUENCER_H
+
+#include <utils/Thread.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+#include <utils/Mutex.h>
+#include <utils/Condition.h>
+#include "CameraMetadata.h"
+#include "Parameters.h"
+#include "FrameProcessor.h"
+
+namespace android {
+
+class Camera2Client;
+
+namespace camera2 {
+
+class ZslProcessor;
+
+/**
+ * Manages the still image capture process for
+ * zero-shutter-lag, regular, and video snapshots.
+ */
+class CaptureSequencer:
+            virtual public Thread,
+            virtual public FrameProcessor::FilteredListener {
+  public:
+    CaptureSequencer(wp<Camera2Client> client);
+    ~CaptureSequencer();
+
+    // Set the ZslProcessor to use, which holds the ZSL buffers and frames
+    void setZslProcessor(wp<ZslProcessor> processor);
+
+    // Begin still image capture
+    status_t startCapture();
+
+    // Notifications about AE state changes
+    void notifyAutoExposure(uint8_t newState, int triggerId);
+
+    // Notifications from the frame processor
+    virtual void onFrameAvailable(int32_t frameId, CameraMetadata &frame);
+
+    // Notifications from the capture processor
+    void onCaptureAvailable(nsecs_t timestamp);
+
+    void dump(int fd, const Vector<String16>& args);
+
+  private:
+    /**
+     * Accessed by other threads
+     */
+    Mutex mInputMutex;
+
+    bool mStartCapture;
+    bool mBusy;
+    Condition mStartCaptureSignal;
+
+    bool mNewAEState;
+    uint8_t mAEState;
+    int mAETriggerId;
+    Condition mNewNotifySignal;
+
+    bool mNewFrameReceived;
+    int32_t mNewFrameId;
+    CameraMetadata mNewFrame;
+    Condition mNewFrameSignal;
+
+    bool mNewCaptureReceived;
+    nsecs_t mCaptureTimestamp;
+    Condition mNewCaptureSignal;
+
+    /**
+     * Internal to CaptureSequencer
+     */
+    static const nsecs_t kWaitDuration = 100000000; // 100 ms
+    static const int kMaxTimeoutsForPrecaptureStart = 2; // 200 ms
+    static const int kMaxTimeoutsForPrecaptureEnd = 10;  // 1 sec
+    static const int kMaxTimeoutsForCaptureEnd    = 20;  // 2 sec
+
+    wp<Camera2Client> mClient;
+    wp<ZslProcessor> mZslProcessor;
+
+    enum CaptureState {
+        IDLE,
+        START,
+        ZSL_START,
+        ZSL_WAITING,
+        ZSL_REPROCESSING,
+        STANDARD_START,
+        STANDARD_PRECAPTURE_WAIT,
+        STANDARD_CAPTURE,
+        STANDARD_CAPTURE_WAIT,
+        DONE,
+        ERROR,
+        NUM_CAPTURE_STATES
+    } mCaptureState;
+    static const char* kStateNames[];
+
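+    // State handler table; each CaptureState above has a matching manage*
+    // method below.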
+    typedef CaptureState (CaptureSequencer::*StateManager)(sp<Camera2Client> &client);
+    static const StateManager kStateManagers[];
+
+    CameraMetadata mCaptureRequest;
+
+    int mTriggerId;
+    int mTimeoutCount;
+    bool mAeInPrecapture;
+
+    int32_t mCaptureId;
+
+    // Main internal methods
+
+    virtual bool threadLoop();
+
+    CaptureState manageIdle(sp<Camera2Client> &client);
+    CaptureState manageStart(sp<Camera2Client> &client);
+
+    CaptureState manageZslStart(sp<Camera2Client> &client);
+    CaptureState manageZslWaiting(sp<Camera2Client> &client);
+    CaptureState manageZslReprocessing(sp<Camera2Client> &client);
+
+    CaptureState manageStandardStart(sp<Camera2Client> &client);
+    CaptureState manageStandardPrecaptureWait(sp<Camera2Client> &client);
+    CaptureState manageStandardCapture(sp<Camera2Client> &client);
+    CaptureState manageStandardCaptureWait(sp<Camera2Client> &client);
+
+    CaptureState manageDone(sp<Camera2Client> &client);
+
+    // Utility methods
+
+    status_t updateCaptureRequest(const Parameters &params,
+            sp<Camera2Client> &client);
+};
+
+}; // namespace camera2
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/camera2/FrameProcessor.cpp b/services/camera/libcameraservice/camera2/FrameProcessor.cpp
index 5059754..e24db0b 100644
--- a/services/camera/libcameraservice/camera2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/FrameProcessor.cpp
@@ -36,6 +36,19 @@
     ALOGV("%s: Exit", __FUNCTION__);
 }
 
+status_t FrameProcessor::registerListener(int32_t id,
+        wp<FilteredListener> listener) {
+    Mutex::Autolock l(mInputMutex);
+    ALOGV("%s: Registering listener for frame id %d",
+            __FUNCTION__, id);
+    ssize_t res = mListeners.replaceValueFor(id, listener);
+    // replaceValueFor returns the item's index on success; map that to OK.
+    return res >= 0 ? OK : res;
+}
+
+status_t FrameProcessor::removeListener(int32_t id) {
+    Mutex::Autolock l(mInputMutex);
+    return mListeners.removeItem(id);
+}
+
 void FrameProcessor::dump(int fd, const Vector<String16>& args) {
     String8 result("    Latest received frame:\n");
     write(fd, result.string(), result.size());
@@ -50,6 +63,7 @@
         sp<Camera2Client> client = mClient.promote();
         if (client == 0) return false;
         device = client->getCameraDevice();
+        if (device == 0) return false;
     }
 
     res = device->waitForNextFrame(kWaitDuration);
@@ -67,20 +81,28 @@
 
 void FrameProcessor::processNewFrames(sp<Camera2Client> &client) {
     status_t res;
+    ATRACE_CALL();
     CameraMetadata frame;
     while ( (res = client->getCameraDevice()->getNextFrame(&frame)) == OK) {
         camera_metadata_entry_t entry;
+
         entry = frame.find(ANDROID_REQUEST_FRAME_COUNT);
         if (entry.count == 0) {
-            ALOGE("%s: Camera %d: Error reading frame number: %s (%d)",
-                    __FUNCTION__, client->getCameraId(), strerror(-res), res);
+            ALOGE("%s: Camera %d: Error reading frame number",
+                    __FUNCTION__, client->getCameraId());
             break;
         }
 
         res = processFaceDetect(frame, client);
         if (res != OK) break;
 
-        mLastFrame.acquire(frame);
+        // Must be last - listener can take ownership of frame
+        res = processListener(frame, client);
+        if (res != OK) break;
+
+        if (!frame.isEmpty()) {
+            mLastFrame.acquire(frame);
+        }
     }
     if (res != NOT_ENOUGH_DATA) {
         ALOGE("%s: Camera %d: Error getting next frame: %s (%d)",
@@ -91,9 +113,43 @@
     return;
 }
 
-status_t FrameProcessor::processFaceDetect(
-    const CameraMetadata &frame, sp<Camera2Client> &client) {
+status_t FrameProcessor::processListener(CameraMetadata &frame,
+        sp<Camera2Client> &client) {
     status_t res;
+    ATRACE_CALL();
+    camera_metadata_entry_t entry;
+
+    entry = frame.find(ANDROID_REQUEST_ID);
+    if (entry.count == 0) {
+        ALOGE("%s: Camera %d: Error reading frame id",
+                __FUNCTION__, client->getCameraId());
+        return BAD_VALUE;
+    }
+    int32_t frameId = entry.data.i32[0];
+    ALOGV("%s: Got frame with ID %d", __FUNCTION__, frameId);
+
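+    // Look up the listener registered for this frame's request ID; prune the
+    // entry if the weak reference has expired.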
+    sp<FilteredListener> listener;
+    {
+        Mutex::Autolock l(mInputMutex);
+        ssize_t listenerIndex = mListeners.indexOfKey(frameId);
+        if (listenerIndex != NAME_NOT_FOUND) {
+            listener = mListeners[listenerIndex].promote();
+            if (listener == 0) {
+                mListeners.removeItemsAt(listenerIndex, 1);
+            }
+        }
+    }
+
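+    // Call the listener outside of mInputMutex, so that it can safely
+    // re-register or remove itself from within the callback.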
+    if (listener != 0) {
+        listener->onFrameAvailable(frameId, frame);
+    }
+    return OK;
+}
+
+status_t FrameProcessor::processFaceDetect(const CameraMetadata &frame,
+        sp<Camera2Client> &client) {
+    status_t res;
+    ATRACE_CALL();
     camera_metadata_ro_entry_t entry;
     bool enableFaceDetect;
     int maxFaces;
@@ -209,6 +265,5 @@
     return OK;
 }
 
-
 }; // namespace camera2
 }; // namespace android
diff --git a/services/camera/libcameraservice/camera2/FrameProcessor.h b/services/camera/libcameraservice/camera2/FrameProcessor.h
index 2cdf7f0..25d489a 100644
--- a/services/camera/libcameraservice/camera2/FrameProcessor.h
+++ b/services/camera/libcameraservice/camera2/FrameProcessor.h
@@ -20,6 +20,7 @@
 #include <utils/Thread.h>
 #include <utils/String16.h>
 #include <utils/Vector.h>
+#include <utils/KeyedVector.h>
 #include "CameraMetadata.h"
 
 namespace android {
@@ -36,6 +37,17 @@
     FrameProcessor(wp<Camera2Client> client);
     ~FrameProcessor();
 
+    struct FilteredListener: virtual public RefBase {
+        // Listener may take ownership of frame
+        virtual void onFrameAvailable(int32_t frameId, CameraMetadata &frame) = 0;
+    };
+
+    // Register a listener for a specific frame ID (android.request.id).
+    // De-registers any existing listeners for that ID
+    status_t registerListener(int32_t id, wp<FilteredListener> listener);
+
+    status_t removeListener(int32_t id);
+
     void dump(int fd, const Vector<String16>& args);
   private:
     static const nsecs_t kWaitDuration = 10000000; // 10 ms
@@ -43,10 +55,17 @@
 
     virtual bool threadLoop();
 
+    Mutex mInputMutex;
+    KeyedVector<int32_t, wp<FilteredListener> > mListeners;
+
     void processNewFrames(sp<Camera2Client> &client);
+
     status_t processFaceDetect(const CameraMetadata &frame,
             sp<Camera2Client> &client);
 
+    status_t processListener(CameraMetadata &frame,
+            sp<Camera2Client> &client);
+
     CameraMetadata mLastFrame;
 };
 
diff --git a/services/camera/libcameraservice/camera2/CaptureProcessor.cpp b/services/camera/libcameraservice/camera2/JpegProcessor.cpp
similarity index 90%
rename from services/camera/libcameraservice/camera2/CaptureProcessor.cpp
rename to services/camera/libcameraservice/camera2/JpegProcessor.cpp
index b17f9d2..92148ca 100644
--- a/services/camera/libcameraservice/camera2/CaptureProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/JpegProcessor.cpp
@@ -14,14 +14,14 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "Camera2Client::CaptureProcessor"
+#define LOG_TAG "Camera2Client::JpegProcessor"
 #define ATRACE_TAG ATRACE_TAG_CAMERA
 //#define LOG_NDEBUG 0
 
 #include <utils/Log.h>
 #include <utils/Trace.h>
 
-#include "CaptureProcessor.h"
+#include "JpegProcessor.h"
 #include <gui/SurfaceTextureClient.h>
 #include "../Camera2Device.h"
 #include "../Camera2Client.h"
@@ -30,18 +30,21 @@
 namespace android {
 namespace camera2 {
 
-CaptureProcessor::CaptureProcessor(wp<Camera2Client> client):
+JpegProcessor::JpegProcessor(
+    wp<Camera2Client> client,
+    wp<CaptureSequencer> sequencer):
         Thread(false),
         mClient(client),
+        mSequencer(sequencer),
         mCaptureAvailable(false),
         mCaptureStreamId(NO_STREAM) {
 }
 
-CaptureProcessor::~CaptureProcessor() {
+JpegProcessor::~JpegProcessor() {
     ALOGV("%s: Exit", __FUNCTION__);
 }
 
-void CaptureProcessor::onFrameAvailable() {
+void JpegProcessor::onFrameAvailable() {
     Mutex::Autolock l(mInputMutex);
     if (!mCaptureAvailable) {
         mCaptureAvailable = true;
@@ -49,7 +52,7 @@
     }
 }
 
-status_t CaptureProcessor::updateStream(const Parameters &params) {
+status_t JpegProcessor::updateStream(const Parameters &params) {
     ATRACE_CALL();
     ALOGV("%s", __FUNCTION__);
     status_t res;
@@ -127,7 +130,7 @@
     return OK;
 }
 
-status_t CaptureProcessor::deleteStream() {
+status_t JpegProcessor::deleteStream() {
     ATRACE_CALL();
     status_t res;
 
@@ -144,15 +147,15 @@
     return OK;
 }
 
-int CaptureProcessor::getStreamId() const {
+int JpegProcessor::getStreamId() const {
     Mutex::Autolock l(mInputMutex);
     return mCaptureStreamId;
 }
 
-void CaptureProcessor::dump(int fd, const Vector<String16>& args) {
+void JpegProcessor::dump(int fd, const Vector<String16>& args) const {
 }
 
-bool CaptureProcessor::threadLoop() {
+bool JpegProcessor::threadLoop() {
     status_t res;
 
     {
@@ -174,7 +177,7 @@
     return true;
 }
 
-status_t CaptureProcessor::processNewCapture(sp<Camera2Client> &client) {
+status_t JpegProcessor::processNewCapture(sp<Camera2Client> &client) {
     ATRACE_CALL();
     status_t res;
     sp<Camera2Heap> captureHeap;
@@ -200,10 +203,7 @@
 
         switch (l.mParameters.state) {
             case Parameters::STILL_CAPTURE:
-                l.mParameters.state = Parameters::STOPPED;
-                break;
             case Parameters::VIDEO_SNAPSHOT:
-                l.mParameters.state = Parameters::RECORD;
                 break;
             default:
                 ALOGE("%s: Camera %d: Still image produced unexpectedly "
@@ -224,6 +224,11 @@
         return OK;
     }
 
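+    // Let the capture sequencer know that the JPEG buffer for this capture
+    // has arrived, keyed by its timestamp.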
+    sp<CaptureSequencer> sequencer = mSequencer.promote();
+    if (sequencer != 0) {
+        sequencer->onCaptureAvailable(imgBuffer.timestamp);
+    }
+
     // TODO: Optimize this to avoid memcopy
     void* captureMemory = mCaptureHeap->mHeap->getBase();
     size_t size = mCaptureHeap->mHeap->getSize();
diff --git a/services/camera/libcameraservice/camera2/CaptureProcessor.h b/services/camera/libcameraservice/camera2/JpegProcessor.h
similarity index 83%
rename from services/camera/libcameraservice/camera2/CaptureProcessor.h
rename to services/camera/libcameraservice/camera2/JpegProcessor.h
index 8e35739..6e7a860 100644
--- a/services/camera/libcameraservice/camera2/CaptureProcessor.h
+++ b/services/camera/libcameraservice/camera2/JpegProcessor.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_CAPTUREPROCESSOR_H
-#define ANDROID_SERVERS_CAMERA_CAMERA2_CAPTUREPROCESSOR_H
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_JPEGPROCESSOR_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_JPEGPROCESSOR_H
 
 #include <utils/Thread.h>
 #include <utils/String16.h>
@@ -33,14 +33,16 @@
 
 namespace camera2 {
 
+class CaptureSequencer;
+
 /***
  * Still image capture output image processing
  */
-class CaptureProcessor:
+class JpegProcessor:
             public Thread, public CpuConsumer::FrameAvailableListener {
   public:
-    CaptureProcessor(wp<Camera2Client> client);
-    ~CaptureProcessor();
+    JpegProcessor(wp<Camera2Client> client, wp<CaptureSequencer> sequencer);
+    ~JpegProcessor();
 
     void onFrameAvailable();
 
@@ -48,10 +50,11 @@
     status_t deleteStream();
     int getStreamId() const;
 
-    void dump(int fd, const Vector<String16>& args);
+    void dump(int fd, const Vector<String16>& args) const;
   private:
     static const nsecs_t kWaitDuration = 10000000; // 10 ms
     wp<Camera2Client> mClient;
+    wp<CaptureSequencer> mSequencer;
 
     mutable Mutex mInputMutex;
     bool mCaptureAvailable;
diff --git a/services/camera/libcameraservice/camera2/Parameters.cpp b/services/camera/libcameraservice/camera2/Parameters.cpp
index 2f7d023..f89d1e3 100644
--- a/services/camera/libcameraservice/camera2/Parameters.cpp
+++ b/services/camera/libcameraservice/camera2/Parameters.cpp
@@ -18,6 +18,9 @@
 #define ATRACE_TAG ATRACE_TAG_CAMERA
 //#define LOG_NDEBUG 0
 
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
 #include <math.h>
 #include <stdlib.h>
 
@@ -738,9 +741,11 @@
     enableFaceDetect = false;
 
     enableFocusMoveMessages = false;
-    afTriggerCounter = 0;
+    afTriggerCounter = 1;
     currentAfTriggerId = -1;
 
+    precaptureTriggerCounter = 1;
+
     previewCallbackFlags = 0;
 
     state = STOPPED;
@@ -1318,6 +1323,209 @@
     return OK;
 }
 
+status_t Parameters::updateRequest(CameraMetadata *request) const {
+    ATRACE_CALL();
+    status_t res;
+
+    uint8_t metadataMode = ANDROID_REQUEST_METADATA_FULL;
+    res = request->update(ANDROID_REQUEST_METADATA_MODE,
+            &metadataMode, 1);
+    if (res != OK) return res;
+
+    res = request->update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+            previewFpsRange, 2);
+    if (res != OK) return res;
+
+    res = request->update(ANDROID_CONTROL_AWB_MODE,
+            &wbMode, 1);
+    if (res != OK) return res;
+
+    uint8_t reqWbLock = autoWhiteBalanceLock ?
+            ANDROID_CONTROL_AWB_LOCK_ON : ANDROID_CONTROL_AWB_LOCK_OFF;
+    res = request->update(ANDROID_CONTROL_AWB_LOCK,
+            &reqWbLock, 1);
+    if (res != OK) return res;
+
+    res = request->update(ANDROID_CONTROL_EFFECT_MODE,
+            &effectMode, 1);
+    if (res != OK) return res;
+    res = request->update(ANDROID_CONTROL_AE_ANTIBANDING_MODE,
+            &antibandingMode, 1);
+    if (res != OK) return res;
+
+    uint8_t reqControlMode =
+            (sceneMode == ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED) ?
+            ANDROID_CONTROL_AUTO : ANDROID_CONTROL_USE_SCENE_MODE;
+    res = request->update(ANDROID_CONTROL_MODE,
+            &reqControlMode, 1);
+    if (res != OK) return res;
+    if (reqControlMode == ANDROID_CONTROL_USE_SCENE_MODE) {
+        res = request->update(ANDROID_CONTROL_SCENE_MODE,
+                &sceneMode, 1);
+        if (res != OK) return res;
+    }
+
+    uint8_t reqFlashMode = ANDROID_FLASH_OFF;
+    uint8_t reqAeMode;
+    switch (flashMode) {
+        case Parameters::FLASH_MODE_OFF:
+            reqAeMode = ANDROID_CONTROL_AE_ON; break;
+        case Parameters::FLASH_MODE_AUTO:
+            reqAeMode = ANDROID_CONTROL_AE_ON_AUTO_FLASH; break;
+        case Parameters::FLASH_MODE_ON:
+            reqAeMode = ANDROID_CONTROL_AE_ON_ALWAYS_FLASH; break;
+        case Parameters::FLASH_MODE_TORCH:
+            reqAeMode = ANDROID_CONTROL_AE_ON;
+            reqFlashMode = ANDROID_FLASH_TORCH;
+            break;
+        case Parameters::FLASH_MODE_RED_EYE:
+            reqAeMode = ANDROID_CONTROL_AE_ON_AUTO_FLASH_REDEYE; break;
+        default:
+            ALOGE("%s: Camera %d: Unknown flash mode %d", __FUNCTION__,
+                    cameraId, flashMode);
+            return BAD_VALUE;
+    }
+    res = request->update(ANDROID_FLASH_MODE,
+            &reqFlashMode, 1);
+    if (res != OK) return res;
+    res = request->update(ANDROID_CONTROL_AE_MODE,
+            &reqAeMode, 1);
+    if (res != OK) return res;
+
+    uint8_t reqAeLock = autoExposureLock ?
+            ANDROID_CONTROL_AE_LOCK_ON : ANDROID_CONTROL_AE_LOCK_OFF;
+    res = request->update(ANDROID_CONTROL_AE_LOCK,
+            &reqAeLock, 1);
+    if (res != OK) return res;
+
+    float reqFocusDistance = 0; // infinity focus in diopters
+    uint8_t reqFocusMode;
+    switch (focusMode) {
+        case Parameters::FOCUS_MODE_AUTO:
+        case Parameters::FOCUS_MODE_MACRO:
+        case Parameters::FOCUS_MODE_CONTINUOUS_VIDEO:
+        case Parameters::FOCUS_MODE_CONTINUOUS_PICTURE:
+        case Parameters::FOCUS_MODE_EDOF:
+            reqFocusMode = focusMode;
+            break;
+        case Parameters::FOCUS_MODE_INFINITY:
+        case Parameters::FOCUS_MODE_FIXED:
+            reqFocusMode = ANDROID_CONTROL_AF_OFF;
+            break;
+        default:
+            ALOGE("%s: Camera %d: Unknown focus mode %d", __FUNCTION__,
+                    cameraId, focusMode);
+            return BAD_VALUE;
+    }
+    res = request->update(ANDROID_LENS_FOCUS_DISTANCE,
+            &reqFocusDistance, 1);
+    if (res != OK) return res;
+    res = request->update(ANDROID_CONTROL_AF_MODE,
+            &reqFocusMode, 1);
+    if (res != OK) return res;
+
+    size_t reqFocusingAreasSize = focusingAreas.size() * 5;
+    int32_t *reqFocusingAreas = new int32_t[reqFocusingAreasSize];
+    for (size_t i = 0, j = 0; i < reqFocusingAreasSize; i += 5, j++) {
+        if (focusingAreas[j].weight != 0) {
+            reqFocusingAreas[i + 0] =
+                    normalizedXToArray(focusingAreas[j].left);
+            reqFocusingAreas[i + 1] =
+                    normalizedYToArray(focusingAreas[j].top);
+            reqFocusingAreas[i + 2] =
+                    normalizedXToArray(focusingAreas[j].right);
+            reqFocusingAreas[i + 3] =
+                    normalizedYToArray(focusingAreas[j].bottom);
+        } else {
+            reqFocusingAreas[i + 0] = 0;
+            reqFocusingAreas[i + 1] = 0;
+            reqFocusingAreas[i + 2] = 0;
+            reqFocusingAreas[i + 3] = 0;
+        }
+        reqFocusingAreas[i + 4] = focusingAreas[j].weight;
+    }
+    res = request->update(ANDROID_CONTROL_AF_REGIONS,
+            reqFocusingAreas, reqFocusingAreasSize);
+    delete[] reqFocusingAreas;
+    if (res != OK) return res;
+
+    res = request->update(ANDROID_CONTROL_AE_EXP_COMPENSATION,
+            &exposureCompensation, 1);
+    if (res != OK) return res;
+
+    size_t reqMeteringAreasSize = meteringAreas.size() * 5;
+    int32_t *reqMeteringAreas = new int32_t[reqMeteringAreasSize];
+    for (size_t i = 0, j = 0; i < reqMeteringAreasSize; i += 5, j++) {
+        if (meteringAreas[j].weight != 0) {
+            reqMeteringAreas[i + 0] =
+                normalizedXToArray(meteringAreas[j].left);
+            reqMeteringAreas[i + 1] =
+                normalizedYToArray(meteringAreas[j].top);
+            reqMeteringAreas[i + 2] =
+                normalizedXToArray(meteringAreas[j].right);
+            reqMeteringAreas[i + 3] =
+                normalizedYToArray(meteringAreas[j].bottom);
+        } else {
+            reqMeteringAreas[i + 0] = 0;
+            reqMeteringAreas[i + 1] = 0;
+            reqMeteringAreas[i + 2] = 0;
+            reqMeteringAreas[i + 3] = 0;
+        }
+        reqMeteringAreas[i + 4] = meteringAreas[j].weight;
+    }
+    res = request->update(ANDROID_CONTROL_AE_REGIONS,
+            reqMeteringAreas, reqMeteringAreasSize);
+    if (res != OK) {
+        delete[] reqMeteringAreas;
+        return res;
+    }
+
+    res = request->update(ANDROID_CONTROL_AWB_REGIONS,
+            reqMeteringAreas, reqMeteringAreasSize);
+    delete[] reqMeteringAreas;
+    if (res != OK) return res;
+
+    // Need to convert zoom index into a crop rectangle. The rectangle is
+    // chosen to maximize its area on the sensor
+
+    camera_metadata_ro_entry_t maxDigitalZoom =
+            staticInfo(ANDROID_SCALER_AVAILABLE_MAX_ZOOM);
+    float zoomIncrement = (maxDigitalZoom.data.f[0] - 1) /
+            (NUM_ZOOM_STEPS-1);
+    float zoomRatio = 1 + zoomIncrement * zoom;
+
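+    // The crop rectangle preserves the preview aspect ratio; the larger
+    // preview dimension maps onto the full (zoomed) sensor array dimension.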
+    float zoomLeft, zoomTop, zoomWidth, zoomHeight;
+    if (previewWidth >= previewHeight) {
+        zoomWidth =  fastInfo.arrayWidth / zoomRatio;
+        zoomHeight = zoomWidth *
+                previewHeight / previewWidth;
+    } else {
+        zoomHeight = fastInfo.arrayHeight / zoomRatio;
+        zoomWidth = zoomHeight *
+                previewWidth / previewHeight;
+    }
+    zoomLeft = (fastInfo.arrayWidth - zoomWidth) / 2;
+    zoomTop = (fastInfo.arrayHeight - zoomHeight) / 2;
+
+    int32_t reqCropRegion[3] = { static_cast<int32_t>(zoomLeft),
+            static_cast<int32_t>(zoomTop), static_cast<int32_t>(zoomWidth) };
+    res = request->update(ANDROID_SCALER_CROP_REGION,
+            reqCropRegion, 3);
+    if (res != OK) return res;
+
+    // TODO: Decide how to map recordingHint, or whether just to ignore it
+
+    uint8_t reqVstabMode = videoStabilization ?
+            ANDROID_CONTROL_VIDEO_STABILIZATION_ON :
+            ANDROID_CONTROL_VIDEO_STABILIZATION_OFF;
+    res = request->update(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE,
+            &reqVstabMode, 1);
+    if (res != OK) return res;
+
+    uint8_t reqFaceDetectMode = enableFaceDetect ?
+            fastInfo.bestFaceDetectMode :
+            (uint8_t)ANDROID_STATS_FACE_DETECTION_OFF;
+    res = request->update(ANDROID_STATS_FACE_DETECT_MODE,
+            &reqFaceDetectMode, 1);
+    if (res != OK) return res;
+
+    return OK;
+}
+
 const char* Parameters::getStateName(State state) {
 #define CASE_ENUM_TO_CHAR(x) case x: return(#x); break;
     switch(state) {
diff --git a/services/camera/libcameraservice/camera2/Parameters.h b/services/camera/libcameraservice/camera2/Parameters.h
index 817d001..e71d086 100644
--- a/services/camera/libcameraservice/camera2/Parameters.h
+++ b/services/camera/libcameraservice/camera2/Parameters.h
@@ -29,12 +29,17 @@
 namespace android {
 namespace camera2 {
 
-// Current camera state; this is the full state of the Camera under the old
-// camera API (contents of the CameraParameters object in a more-efficient
-// format, plus other state). The enum values are mostly based off the
-// corresponding camera2 enums, not the camera1 strings. A few are defined here
-// if they don't cleanly map to camera2 values.
+/**
+ * Current camera state; this is the full state of the Camera under the old
+ * camera API (contents of the CameraParameters object in a more-efficient
+ * format, plus other state). The enum values are mostly based off the
+ * corresponding camera2 enums, not the camera1 strings. A few are defined here
+ * if they don't cleanly map to camera2 values.
+ */
 struct Parameters {
+    /**
+     * Parameters and other state
+     */
     int cameraId;
     int cameraFacing;
 
@@ -117,9 +122,13 @@
     int currentAfTriggerId;
     bool afInMotion;
 
+    int precaptureTriggerCounter;
+
     uint32_t previewCallbackFlags;
     bool previewCallbackOneShot;
 
+    bool zslMode;
+
     // Overall camera state
     enum State {
         DISCONNECTED,
@@ -149,7 +158,9 @@
         int32_t maxFaces;
     } fastInfo;
 
-    // Parameter manipulation and setup methods
+    /**
+     * Parameter manipulation and setup methods
+     */
 
     Parameters(int cameraId, int cameraFacing);
     ~Parameters();
@@ -170,6 +181,9 @@
     // Validate and update camera parameters based on new settings
     status_t set(const String8 &params);
 
+    // Update passed-in request for common parameters
+    status_t updateRequest(CameraMetadata *request) const;
+
     // Static methods for debugging and converting between camera1 and camera2
     // parameters
 
diff --git a/services/camera/libcameraservice/camera2/ZslProcessor.cpp b/services/camera/libcameraservice/camera2/ZslProcessor.cpp
new file mode 100644
index 0000000..a39585e
--- /dev/null
+++ b/services/camera/libcameraservice/camera2/ZslProcessor.cpp
@@ -0,0 +1,378 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera2Client::ZslProcessor"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+//#define LOG_NNDEBUG 0
+
+#ifdef LOG_NNDEBUG
+#define ALOGVV(...) ALOGV(__VA_ARGS__)
+#else
+#define ALOGVV(...) ((void)0)
+#endif
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include "ZslProcessor.h"
+#include <gui/SurfaceTextureClient.h>
+#include "../Camera2Device.h"
+#include "../Camera2Client.h"
+
+
+namespace android {
+namespace camera2 {
+
+ZslProcessor::ZslProcessor(
+    wp<Camera2Client> client,
+    wp<CaptureSequencer> sequencer):
+        Thread(false),
+        mState(RUNNING),
+        mClient(client),
+        mSequencer(sequencer),
+        mZslBufferAvailable(false),
+        mZslStreamId(NO_STREAM),
+        mZslReprocessStreamId(NO_STREAM),
+        mFrameListHead(0),
+        mZslQueueHead(0),
+        mZslQueueTail(0) {
+    mZslQueue.insertAt(0, kZslBufferDepth);
+    mFrameList.insertAt(0, kFrameListDepth);
+    sp<CaptureSequencer> captureSequencer = mSequencer.promote();
+    if (captureSequencer != 0) captureSequencer->setZslProcessor(this);
+}
+
+ZslProcessor::~ZslProcessor() {
+    ALOGV("%s: Exit", __FUNCTION__);
+}
+
+void ZslProcessor::onFrameAvailable() {
+    Mutex::Autolock l(mInputMutex);
+    if (!mZslBufferAvailable) {
+        mZslBufferAvailable = true;
+        mZslBufferAvailableSignal.signal();
+    }
+}
+
+void ZslProcessor::onFrameAvailable(int32_t frameId, CameraMetadata &frame) {
+    Mutex::Autolock l(mInputMutex);
+    camera_metadata_entry_t entry;
+    entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
+    if (entry.count == 0) {
+        ALOGE("%s: No timestamp in frame!", __FUNCTION__);
+        return;
+    }
+    nsecs_t timestamp = entry.data.i64[0];
+    ALOGVV("Got preview frame for timestamp %lld", timestamp);
+
+    if (mState != RUNNING) return;
+
+    mFrameList.editItemAt(mFrameListHead).acquire(frame);
+    mFrameListHead = (mFrameListHead + 1) % kFrameListDepth;
+
+    findMatchesLocked();
+}
+
+void ZslProcessor::onBufferReleased(buffer_handle_t *handle) {
+    Mutex::Autolock l(mInputMutex);
+
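+    // Reprocess buffers are submitted one at a time from the queue tail, so
+    // the buffer being released is expected to be the current tail entry.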
+    buffer_handle_t *expectedHandle =
+            &(mZslQueue[mZslQueueTail].buffer.mGraphicBuffer->handle);
+
+    if (handle != expectedHandle) {
+        ALOGE("%s: Expected buffer %p, got buffer %p",
+                __FUNCTION__, expectedHandle, handle);
+    }
+
+    mState = RUNNING;
+}
+
+status_t ZslProcessor::updateStream(const Parameters &params) {
+    ATRACE_CALL();
+    ALOGV("%s: Configuring ZSL streams", __FUNCTION__);
+    status_t res;
+
+    Mutex::Autolock l(mInputMutex);
+
+    sp<Camera2Client> client = mClient.promote();
+    if (client == 0) return OK;
+    sp<Camera2Device> device = client->getCameraDevice();
+
+    if (mZslConsumer == 0) {
+        // Create CPU buffer queue endpoint
+        mZslConsumer = new BufferItemConsumer(
+            GRALLOC_USAGE_HW_CAMERA_ZSL,
+            kZslBufferDepth,
+            true);
+        mZslConsumer->setFrameAvailableListener(this);
+        mZslConsumer->setName(String8("Camera2Client::ZslConsumer"));
+        mZslWindow = new SurfaceTextureClient(
+            mZslConsumer->getProducerInterface());
+    }
+
+    if (mZslStreamId != NO_STREAM) {
+        // Check if stream parameters have to change
+        uint32_t currentWidth, currentHeight;
+        res = device->getStreamInfo(mZslStreamId,
+                &currentWidth, &currentHeight, 0);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Error querying capture output stream info: "
+                    "%s (%d)", __FUNCTION__,
+                    client->getCameraId(), strerror(-res), res);
+            return res;
+        }
+        if (currentWidth != (uint32_t)params.pictureWidth ||
+                currentHeight != (uint32_t)params.pictureHeight) {
+            res = device->deleteStream(mZslReprocessStreamId);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to delete old reprocess stream "
+                        "for ZSL: %s (%d)", __FUNCTION__,
+                        client->getCameraId(), strerror(-res), res);
+                return res;
+            }
+            res = device->deleteStream(mZslStreamId);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to delete old output stream "
+                        "for ZSL: %s (%d)", __FUNCTION__,
+                        client->getCameraId(), strerror(-res), res);
+                return res;
+            }
+            mZslStreamId = NO_STREAM;
+        }
+    }
+
+    if (mZslStreamId == NO_STREAM) {
+        // Create stream for HAL production
+        res = device->createStream(mZslWindow,
+                params.pictureWidth, params.pictureHeight,
+                HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 0,
+                &mZslStreamId);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't create output stream for ZSL: "
+                    "%s (%d)", __FUNCTION__, client->getCameraId(),
+                    strerror(-res), res);
+            return res;
+        }
+        res = device->createReprocessStreamFromStream(mZslStreamId,
+                &mZslReprocessStreamId);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't create reprocess stream for ZSL: "
+                    "%s (%d)", __FUNCTION__, client->getCameraId(),
+                    strerror(-res), res);
+            return res;
+        }
+    }
+    client->registerFrameListener(Camera2Client::kPreviewRequestId, this);
+
+    return OK;
+}
+
+status_t ZslProcessor::deleteStream() {
+    ATRACE_CALL();
+    status_t res;
+
+    Mutex::Autolock l(mInputMutex);
+
+    if (mZslStreamId != NO_STREAM) {
+        sp<Camera2Client> client = mClient.promote();
+        if (client == 0) return OK;
+        sp<Camera2Device> device = client->getCameraDevice();
+
+        device->deleteStream(mZslReprocessStreamId);
+        mZslReprocessStreamId = NO_STREAM;
+        device->deleteStream(mZslStreamId);
+        mZslStreamId = NO_STREAM;
+    }
+    return OK;
+}
+
+int ZslProcessor::getStreamId() const {
+    Mutex::Autolock l(mInputMutex);
+    return mZslStreamId;
+}
+
+int ZslProcessor::getReprocessStreamId() const {
+    Mutex::Autolock l(mInputMutex);
+    return mZslReprocessStreamId;
+}
+
+status_t ZslProcessor::pushToReprocess(int32_t requestId) {
+    ALOGV("%s: Send in reprocess request with id %d",
+            __FUNCTION__, requestId);
+    Mutex::Autolock l(mInputMutex);
+    status_t res;
+    sp<Camera2Client> client = mClient.promote();
+
+    if (client == 0) return INVALID_OPERATION;
+
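+    // Submit the matched buffer/frame pair at the queue tail as a reprocess
+    // request, then lock the queue until the buffer is released.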
+    if (mZslQueueTail != mZslQueueHead) {
+        buffer_handle_t *handle =
+            &(mZslQueue[mZslQueueTail].buffer.mGraphicBuffer->handle);
+        CameraMetadata request = mZslQueue[mZslQueueTail].frame;
+        uint8_t requestType = ANDROID_REQUEST_TYPE_REPROCESS;
+        res = request.update(ANDROID_REQUEST_TYPE,
+                &requestType, 1);
+        uint8_t inputStreams[1] = {
+                static_cast<uint8_t>(mZslReprocessStreamId) };
+        if (res == OK) res = request.update(ANDROID_REQUEST_INPUT_STREAMS,
+                inputStreams, 1);
+        uint8_t outputStreams[1] = {
+                static_cast<uint8_t>(client->getCaptureStreamId()) };
+        if (res == OK) res = request.update(ANDROID_REQUEST_OUTPUT_STREAMS,
+                outputStreams, 1);
+        if (res == OK) res = request.update(ANDROID_REQUEST_ID,
+                &requestId, 1);
+
+        if (res != OK ) {
+            ALOGE("%s: Unable to update frame to a reprocess request", __FUNCTION__);
+            return INVALID_OPERATION;
+        }
+
+        res = client->getCameraDevice()->pushReprocessBuffer(mZslReprocessStreamId,
+                handle, this);
+        if (res != OK) {
+            ALOGE("%s: Unable to push buffer for reprocessing: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+            return res;
+        }
+
+        res = client->getCameraDevice()->capture(request);
+        if (res != OK ) {
+            ALOGE("%s: Unable to send ZSL reprocess request to capture: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+            return res;
+        }
+
+        mState = LOCKED;
+    } else {
+        ALOGE("%s: Nothing to push", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    return OK;
+}
+
+void ZslProcessor::dump(int fd, const Vector<String16>& args) const {
+}
+
+bool ZslProcessor::threadLoop() {
+    status_t res;
+
+    {
+        Mutex::Autolock l(mInputMutex);
+        while (!mZslBufferAvailable) {
+            res = mZslBufferAvailableSignal.waitRelative(mInputMutex,
+                    kWaitDuration);
+            if (res == TIMED_OUT) return true;
+        }
+        mZslBufferAvailable = false;
+    }
+
+    do {
+        sp<Camera2Client> client = mClient.promote();
+        if (client == 0) return false;
+        res = processNewZslBuffer(client);
+    } while (res == OK);
+
+    return true;
+}
+
+status_t ZslProcessor::processNewZslBuffer(sp<Camera2Client> &client) {
+    ATRACE_CALL();
+    status_t res;
+    Mutex::Autolock l(mInputMutex);
+
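+    // While a reprocess request is in flight (LOCKED), acquire and
+    // immediately release incoming buffers so the queued ZSL buffers stay
+    // untouched and the producer does not stall.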
+    if (mState == LOCKED) {
+        BufferItemConsumer::BufferItem item;
+        res = mZslConsumer->acquireBuffer(&item);
+        if (res != OK) {
+            if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
+                ALOGE("%s: Camera %d: Error receiving ZSL image buffer: "
+                        "%s (%d)", __FUNCTION__,
+                        client->getCameraId(), strerror(-res), res);
+            }
+            return res;
+        }
+        mZslConsumer->releaseBuffer(item);
+        return OK;
+    }
+
+    ALOGVV("Got ZSL buffer: head: %d, tail: %d", mZslQueueHead, mZslQueueTail);
+
+    if ( (mZslQueueHead + 1) % kZslBufferDepth == mZslQueueTail) {
+        mZslConsumer->releaseBuffer(mZslQueue[mZslQueueTail].buffer);
+        mZslQueue.replaceAt(mZslQueueTail);
+        mZslQueueTail = (mZslQueueTail + 1) % kZslBufferDepth;
+    }
+
+    ZslPair &queueHead = mZslQueue.editItemAt(mZslQueueHead);
+
+    res = mZslConsumer->acquireBuffer(&(queueHead.buffer));
+    if (res != OK) {
+        if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
+            ALOGE("%s: Camera %d: Error receiving ZSL image buffer: "
+                    "%s (%d)", __FUNCTION__,
+                    client->getCameraId(), strerror(-res), res);
+        }
+        return res;
+    }
+    queueHead.frame.release();
+
+    mZslQueueHead = (mZslQueueHead + 1) % kZslBufferDepth;
+
+    ALOGVV("  Added buffer, timestamp %lld", queueHead.buffer.mTimestamp);
+
+    findMatchesLocked();
+
+    return OK;
+}
+
+void ZslProcessor::findMatchesLocked() {
+    for (size_t i = 0; i < mZslQueue.size(); i++) {
+        ZslPair &queueEntry = mZslQueue.editItemAt(i);
+        nsecs_t bufferTimestamp = queueEntry.buffer.mTimestamp;
+        if (queueEntry.frame.isEmpty() && bufferTimestamp != 0) {
+            // Have buffer, no matching frame. Look for one
+            for (size_t j = 0; j < mFrameList.size(); j++) {
+                bool match = false;
+                CameraMetadata &frame = mFrameList.editItemAt(j);
+                if (!frame.isEmpty()) {
+                    camera_metadata_entry_t entry;
+                    entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
+                    if (entry.count == 0) {
+                        ALOGE("%s: Can't find timestamp in frame!",
+                                __FUNCTION__);
+                        continue;
+                    }
+                    nsecs_t frameTimestamp = entry.data.i64[0];
+                    if (bufferTimestamp == frameTimestamp) {
+                        ALOGVV("%s: Found match %lld", __FUNCTION__,
+                                frameTimestamp);
+                        match = true;
+                    } else {
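+                        // Accept timestamps within 1 ms of each other as
+                        // referring to the same capture.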
+                        nsecs_t delta = bufferTimestamp > frameTimestamp ?
+                                bufferTimestamp - frameTimestamp :
+                                frameTimestamp - bufferTimestamp;
+                        if (delta < 1000000) { // 1 ms
+                            ALOGVV("%s: Found close match %lld (delta %lld)",
+                                    __FUNCTION__, bufferTimestamp, delta);
+                            match = true;
+                        }
+                    }
+                }
+                if (match) {
+                    queueEntry.frame.acquire(frame);
+                    break;
+                }
+            }
+        }
+    }
+}
+
+}; // namespace camera2
+}; // namespace android
diff --git a/services/camera/libcameraservice/camera2/ZslProcessor.h b/services/camera/libcameraservice/camera2/ZslProcessor.h
new file mode 100644
index 0000000..74921a3
--- /dev/null
+++ b/services/camera/libcameraservice/camera2/ZslProcessor.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSOR_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSOR_H
+
+#include <utils/Thread.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+#include <utils/Mutex.h>
+#include <utils/Condition.h>
+#include <gui/BufferItemConsumer.h>
+#include "Parameters.h"
+#include "FrameProcessor.h"
+#include "CameraMetadata.h"
+#include "Camera2Heap.h"
+#include "../Camera2Device.h"
+
+namespace android {
+
+class Camera2Client;
+
+namespace camera2 {
+
+class CaptureSequencer;
+
+/***
+ * ZSL queue processing
+ */
+class ZslProcessor:
+            virtual public Thread,
+            virtual public BufferItemConsumer::FrameAvailableListener,
+            virtual public FrameProcessor::FilteredListener,
+            virtual public Camera2Device::BufferReleasedListener {
+  public:
+    ZslProcessor(wp<Camera2Client> client, wp<CaptureSequencer> sequencer);
+    ~ZslProcessor();
+
+    // From mZslConsumer
+    virtual void onFrameAvailable();
+    // From FrameProcessor
+    virtual void onFrameAvailable(int32_t frameId, CameraMetadata &frame);
+
+    virtual void onBufferReleased(buffer_handle_t *handle);
+
+    status_t updateStream(const Parameters &params);
+    status_t deleteStream();
+    int getStreamId() const;
+    int getReprocessStreamId() const;
+
+    status_t pushToReprocess(int32_t requestId);
+
+    void dump(int fd, const Vector<String16>& args) const;
+  private:
+    static const nsecs_t kWaitDuration = 10000000; // 10 ms
+
+    enum {
+        RUNNING,
+        LOCKED
+    } mState;
+
+    wp<Camera2Client> mClient;
+    wp<CaptureSequencer> mSequencer;
+
+    mutable Mutex mInputMutex;
+    bool mZslBufferAvailable;
+    Condition mZslBufferAvailableSignal;
+
+    enum {
+        NO_STREAM = -1
+    };
+
+    int mZslStreamId;
+    int mZslReprocessStreamId;
+    sp<BufferItemConsumer> mZslConsumer;
+    sp<ANativeWindow>      mZslWindow;
+
+    struct ZslPair {
+        BufferItemConsumer::BufferItem buffer;
+        CameraMetadata frame;
+    };
+
+    static const size_t kZslBufferDepth = 3;
+    static const size_t kFrameListDepth = kZslBufferDepth * 2;
+    Vector<CameraMetadata> mFrameList;
+    size_t mFrameListHead;
+
+    ZslPair mNextPair;
+
+    Vector<ZslPair> mZslQueue;
+    size_t mZslQueueHead;
+    size_t mZslQueueTail;
+
+    virtual bool threadLoop();
+
+    status_t processNewZslBuffer(sp<Camera2Client> &client);
+
+    // Match up entries from frame list to buffers in ZSL queue
+    void findMatchesLocked();
+};
+
+
+}; //namespace camera2
+}; //namespace android
+
+#endif