Camera: Rename new API to camera2, rearrange camera service

 - Support API rename from photography to camera2
 - Reorganize camera service files
   - API support files to api1/, api2/, api_pro/
   - HAL device support files into device{1,2,3}/
   - Common files into common/
   - Camera service remains at top-level

Change-Id: Ie474c12536f543832fba0a2dc936ac4fd39fe6a9
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
new file mode 100644
index 0000000..46aa60c
--- /dev/null
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -0,0 +1,1779 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera2"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include <cutils/properties.h>
+#include <gui/Surface.h>
+
+#include "api1/Camera2Client.h"
+
+#include "api1/client2/StreamingProcessor.h"
+#include "api1/client2/JpegProcessor.h"
+#include "api1/client2/CaptureSequencer.h"
+#include "api1/client2/CallbackProcessor.h"
+#include "api1/client2/ZslProcessor.h"
+#include "api1/client2/ZslProcessor3.h"
+
+#define ALOG1(...) ALOGD_IF(gLogLevel >= 1, __VA_ARGS__);
+#define ALOG2(...) ALOGD_IF(gLogLevel >= 2, __VA_ARGS__);
+
+namespace android {
+using namespace camera2;
+
+// Returns the PID of the binder caller for the current IPC transaction.
+// Used throughout to enforce per-process ownership of this camera client.
+static int getCallingPid() {
+    return IPCThreadState::self()->getCallingPid();
+}
+
+// Interface used by CameraService
+
+// Constructs an API1 client backed by a camera2/3 HAL device.
+// Heavy setup (device init, processor threads) is deferred to initialize();
+// here we only record identity/ownership state and mark the parameter
+// state machine as DISCONNECTED.
+Camera2Client::Camera2Client(const sp<CameraService>& cameraService,
+        const sp<ICameraClient>& cameraClient,
+        const String16& clientPackageName,
+        int cameraId,
+        int cameraFacing,
+        int clientPid,
+        uid_t clientUid,
+        int servicePid,
+        int deviceVersion):
+        Camera2ClientBase(cameraService, cameraClient, clientPackageName,
+                cameraId, cameraFacing, clientPid, clientUid, servicePid),
+        mParameters(cameraId, cameraFacing),
+        mDeviceVersion(deviceVersion)
+{
+    ATRACE_CALL();
+
+    SharedParameters::Lock l(mParameters);
+    // Not usable until initialize() succeeds.
+    l.mParameters.state = Parameters::DISCONNECTED;
+}
+
+/**
+ * Finishes client setup after construction: initializes the base class /
+ * HAL device, builds the default API1 parameter set, and spins up the
+ * processing threads (streaming, frame, capture sequencer, JPEG, ZSL,
+ * callback).
+ *
+ * Returns OK on success, NO_INIT if defaults cannot be built or the HAL
+ * device version is unsupported, or the base-class initialization error.
+ */
+status_t Camera2Client::initialize(camera_module_t *module)
+{
+    ATRACE_CALL();
+    ALOGV("%s: Initializing client for camera %d", __FUNCTION__, mCameraId);
+    status_t res;
+
+    res = Camera2ClientBase::initialize(module);
+    if (res != OK) {
+        return res;
+    }
+
+    SharedParameters::Lock l(mParameters);
+
+    // Build the API1 default parameters from the device's static metadata.
+    res = l.mParameters.initialize(&(mDevice->info()));
+    if (res != OK) {
+        ALOGE("%s: Camera %d: unable to build defaults: %s (%d)",
+                __FUNCTION__, mCameraId, strerror(-res), res);
+        return NO_INIT;
+    }
+
+    String8 threadName;
+
+    mStreamingProcessor = new StreamingProcessor(this);
+    threadName = String8::format("C2-%d-StreamProc",
+            mCameraId);
+    mStreamingProcessor->run(threadName.string());
+
+    mFrameProcessor = new FrameProcessor(mDevice, this);
+    threadName = String8::format("C2-%d-FrameProc",
+            mCameraId);
+    mFrameProcessor->run(threadName.string());
+
+    mCaptureSequencer = new CaptureSequencer(this);
+    threadName = String8::format("C2-%d-CaptureSeq",
+            mCameraId);
+    mCaptureSequencer->run(threadName.string());
+
+    mJpegProcessor = new JpegProcessor(this, mCaptureSequencer);
+    threadName = String8::format("C2-%d-JpegProc",
+            mCameraId);
+    mJpegProcessor->run(threadName.string());
+
+    // Pick the ZSL implementation that matches the HAL device version.
+    switch (mDeviceVersion) {
+        case CAMERA_DEVICE_API_VERSION_2_0: {
+            sp<ZslProcessor> zslProc =
+                    new ZslProcessor(this, mCaptureSequencer);
+            mZslProcessor = zslProc;
+            mZslProcessorThread = zslProc;
+            break;
+        }
+        case CAMERA_DEVICE_API_VERSION_3_0:{
+            sp<ZslProcessor3> zslProc =
+                    new ZslProcessor3(this, mCaptureSequencer);
+            mZslProcessor = zslProc;
+            mZslProcessorThread = zslProc;
+            break;
+        }
+        default:
+            // Fix: previously this fell through with mZslProcessorThread
+            // left NULL and crashed on the run() call below whenever an
+            // unrecognized device version was reported by the HAL.
+            ALOGE("%s: Camera %d: Unknown device version %d, cannot create "
+                    "ZSL processor", __FUNCTION__, mCameraId, mDeviceVersion);
+            return NO_INIT;
+    }
+    threadName = String8::format("C2-%d-ZslProc",
+            mCameraId);
+    mZslProcessorThread->run(threadName.string());
+
+    mCallbackProcessor = new CallbackProcessor(this);
+    threadName = String8::format("C2-%d-CallbkProc",
+            mCameraId);
+    mCallbackProcessor->run(threadName.string());
+
+    // At verbose log levels, record the full default parameter string.
+    if (gLogLevel >= 1) {
+        ALOGD("%s: Default parameters converted from camera %d:", __FUNCTION__,
+              mCameraId);
+        ALOGD("%s", l.mParameters.paramsFlattened.string());
+    }
+
+    return OK;
+}
+
+// Destructor: tears down the client via disconnect(), which is written to
+// be safe at destruction time (it never promotes wp<Camera2Client>).
+Camera2Client::~Camera2Client() {
+    ATRACE_CALL();
+    ALOGV("~Camera2Client");
+
+    // Presumably consulted by callback paths to drop work during teardown;
+    // defined in Camera2ClientBase — confirm its exact use there.
+    mDestructionStarted = true;
+
+    disconnect();
+
+    ALOGI("Camera %d: Closed", mCameraId);
+}
+
+// Writes a human-readable dump of this client's state (identity, parameter
+// state machine, current parameter values, stream IDs, quirks) to fd, then
+// chains into each processor's dump and finally the device dump.
+status_t Camera2Client::dump(int fd, const Vector<String16>& args) {
+    String8 result;
+    result.appendFormat("Client2[%d] (%p) Client: %s PID: %d, dump:\n",
+            mCameraId,
+            getRemoteCallback()->asBinder().get(),
+            String8(mClientPackageName).string(),
+            mClientPid);
+    result.append("  State: ");
+// Expands an enum case label into a line of dump output with its own name.
+#define CASE_APPEND_ENUM(x) case x: result.append(#x "\n"); break;
+
+    // NOTE(review): unsafeAccess() reads the parameters without taking the
+    // shared lock — presumably so dump() cannot block behind a busy client
+    // thread; confirm that possibly-torn reads are acceptable here.
+    const Parameters& p = mParameters.unsafeAccess();
+
+    result.append(Parameters::getStateName(p.state));
+
+    result.append("\n  Current parameters:\n");
+    result.appendFormat("    Preview size: %d x %d\n",
+            p.previewWidth, p.previewHeight);
+    result.appendFormat("    Preview FPS range: %d - %d\n",
+            p.previewFpsRange[0], p.previewFpsRange[1]);
+    result.appendFormat("    Preview HAL pixel format: 0x%x\n",
+            p.previewFormat);
+    result.appendFormat("    Preview transform: %x\n",
+            p.previewTransform);
+    result.appendFormat("    Picture size: %d x %d\n",
+            p.pictureWidth, p.pictureHeight);
+    result.appendFormat("    Jpeg thumbnail size: %d x %d\n",
+            p.jpegThumbSize[0], p.jpegThumbSize[1]);
+    result.appendFormat("    Jpeg quality: %d, thumbnail quality: %d\n",
+            p.jpegQuality, p.jpegThumbQuality);
+    result.appendFormat("    Jpeg rotation: %d\n", p.jpegRotation);
+    result.appendFormat("    GPS tags %s\n",
+            p.gpsEnabled ? "enabled" : "disabled");
+    if (p.gpsEnabled) {
+        result.appendFormat("    GPS lat x long x alt: %f x %f x %f\n",
+                p.gpsCoordinates[0], p.gpsCoordinates[1],
+                p.gpsCoordinates[2]);
+        result.appendFormat("    GPS timestamp: %lld\n",
+                p.gpsTimestamp);
+        result.appendFormat("    GPS processing method: %s\n",
+                p.gpsProcessingMethod.string());
+    }
+
+    result.append("    White balance mode: ");
+    switch (p.wbMode) {
+        CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_AUTO)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_INCANDESCENT)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_FLUORESCENT)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_WARM_FLUORESCENT)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_DAYLIGHT)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_CLOUDY_DAYLIGHT)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_TWILIGHT)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_SHADE)
+        default: result.append("UNKNOWN\n");
+    }
+
+    result.append("    Effect mode: ");
+    switch (p.effectMode) {
+        CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_OFF)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_MONO)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_NEGATIVE)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_SOLARIZE)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_SEPIA)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_POSTERIZE)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_WHITEBOARD)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_BLACKBOARD)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_AQUA)
+        default: result.append("UNKNOWN\n");
+    }
+
+    result.append("    Antibanding mode: ");
+    switch (p.antibandingMode) {
+        CASE_APPEND_ENUM(ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ)
+        default: result.append("UNKNOWN\n");
+    }
+
+    result.append("    Scene mode: ");
+    switch (p.sceneMode) {
+        // UNSUPPORTED doubles as "auto" in the API1 mapping.
+        case ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED:
+            result.append("AUTO\n"); break;
+        CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_ACTION)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_PORTRAIT)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_LANDSCAPE)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_NIGHT)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_NIGHT_PORTRAIT)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_THEATRE)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_BEACH)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_SNOW)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_SUNSET)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_STEADYPHOTO)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_FIREWORKS)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_SPORTS)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_PARTY)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_CANDLELIGHT)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_BARCODE)
+        default: result.append("UNKNOWN\n");
+    }
+
+    result.append("    Flash mode: ");
+    switch (p.flashMode) {
+        CASE_APPEND_ENUM(Parameters::FLASH_MODE_OFF)
+        CASE_APPEND_ENUM(Parameters::FLASH_MODE_AUTO)
+        CASE_APPEND_ENUM(Parameters::FLASH_MODE_ON)
+        CASE_APPEND_ENUM(Parameters::FLASH_MODE_TORCH)
+        CASE_APPEND_ENUM(Parameters::FLASH_MODE_RED_EYE)
+        CASE_APPEND_ENUM(Parameters::FLASH_MODE_INVALID)
+        default: result.append("UNKNOWN\n");
+    }
+
+    result.append("    Focus mode: ");
+    switch (p.focusMode) {
+        CASE_APPEND_ENUM(Parameters::FOCUS_MODE_AUTO)
+        CASE_APPEND_ENUM(Parameters::FOCUS_MODE_MACRO)
+        CASE_APPEND_ENUM(Parameters::FOCUS_MODE_CONTINUOUS_VIDEO)
+        CASE_APPEND_ENUM(Parameters::FOCUS_MODE_CONTINUOUS_PICTURE)
+        CASE_APPEND_ENUM(Parameters::FOCUS_MODE_EDOF)
+        CASE_APPEND_ENUM(Parameters::FOCUS_MODE_INFINITY)
+        CASE_APPEND_ENUM(Parameters::FOCUS_MODE_FIXED)
+        CASE_APPEND_ENUM(Parameters::FOCUS_MODE_INVALID)
+        default: result.append("UNKNOWN\n");
+    }
+
+    result.append("   Focus state: ");
+    switch (p.focusState) {
+        CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_INACTIVE)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED)
+        CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED)
+        default: result.append("UNKNOWN\n");
+    }
+
+    result.append("    Focusing areas:\n");
+    for (size_t i = 0; i < p.focusingAreas.size(); i++) {
+        result.appendFormat("      [ (%d, %d, %d, %d), weight %d ]\n",
+                p.focusingAreas[i].left,
+                p.focusingAreas[i].top,
+                p.focusingAreas[i].right,
+                p.focusingAreas[i].bottom,
+                p.focusingAreas[i].weight);
+    }
+
+    result.appendFormat("    Exposure compensation index: %d\n",
+            p.exposureCompensation);
+
+    result.appendFormat("    AE lock %s, AWB lock %s\n",
+            p.autoExposureLock ? "enabled" : "disabled",
+            p.autoWhiteBalanceLock ? "enabled" : "disabled" );
+
+    result.appendFormat("    Metering areas:\n");
+    for (size_t i = 0; i < p.meteringAreas.size(); i++) {
+        result.appendFormat("      [ (%d, %d, %d, %d), weight %d ]\n",
+                p.meteringAreas[i].left,
+                p.meteringAreas[i].top,
+                p.meteringAreas[i].right,
+                p.meteringAreas[i].bottom,
+                p.meteringAreas[i].weight);
+    }
+
+    result.appendFormat("    Zoom index: %d\n", p.zoom);
+    result.appendFormat("    Video size: %d x %d\n", p.videoWidth,
+            p.videoHeight);
+
+    result.appendFormat("    Recording hint is %s\n",
+            p.recordingHint ? "set" : "not set");
+
+    result.appendFormat("    Video stabilization is %s\n",
+            p.videoStabilization ? "enabled" : "disabled");
+
+    result.append("  Current streams:\n");
+    result.appendFormat("    Preview stream ID: %d\n",
+            getPreviewStreamId());
+    result.appendFormat("    Capture stream ID: %d\n",
+            getCaptureStreamId());
+    result.appendFormat("    Recording stream ID: %d\n",
+            getRecordingStreamId());
+
+    result.append("  Quirks for this camera:\n");
+    bool haveQuirk = false;
+    if (p.quirks.triggerAfWithAuto) {
+        result.appendFormat("    triggerAfWithAuto\n");
+        haveQuirk = true;
+    }
+    if (p.quirks.useZslFormat) {
+        result.appendFormat("    useZslFormat\n");
+        haveQuirk = true;
+    }
+    if (p.quirks.meteringCropRegion) {
+        result.appendFormat("    meteringCropRegion\n");
+        haveQuirk = true;
+    }
+    if (!haveQuirk) {
+        result.appendFormat("    none\n");
+    }
+
+    // Flush the accumulated text, then chain into the processors' dumps.
+    write(fd, result.string(), result.size());
+
+    mStreamingProcessor->dump(fd, args);
+
+    mCaptureSequencer->dump(fd, args);
+
+    mFrameProcessor->dump(fd, args);
+
+    mZslProcessor->dump(fd, args);
+
+    return dumpDevice(fd, args);
+#undef CASE_APPEND_ENUM
+}
+
+// ICamera interface
+
+// ICamera::disconnect — shuts down the client: stops preview, deletes all
+// streams, winds down every processing thread, and releases the HAL device.
+// Idempotent: returns early once the state is already DISCONNECTED or the
+// device has been cleared.
+void Camera2Client::disconnect() {
+    ATRACE_CALL();
+    Mutex::Autolock icl(mBinderSerializationLock);
+
+    // Allow both client and the media server to disconnect at all times
+    int callingPid = getCallingPid();
+    if (callingPid != mClientPid && callingPid != mServicePid) return;
+
+    if (mDevice == 0) return;
+
+    ALOGV("Camera %d: Shutting down", mCameraId);
+
+    /**
+     * disconnect() cannot call any methods that might need to promote a
+     * wp<Camera2Client>, since disconnect can be called from the destructor, at
+     * which point all such promotions will fail.
+     */
+
+    stopPreviewL();
+
+    {
+        SharedParameters::Lock l(mParameters);
+        if (l.mParameters.state == Parameters::DISCONNECTED) return;
+        l.mParameters.state = Parameters::DISCONNECTED;
+    }
+
+    // Tear down every output stream before stopping the threads.
+    mStreamingProcessor->deletePreviewStream();
+    mStreamingProcessor->deleteRecordingStream();
+    mJpegProcessor->deleteStream();
+    mCallbackProcessor->deleteStream();
+    mZslProcessor->deleteStream();
+
+    // Signal all threads to exit first so they can wind down in parallel,
+    // then join each one below.
+    mStreamingProcessor->requestExit();
+    mFrameProcessor->requestExit();
+    mCaptureSequencer->requestExit();
+    mJpegProcessor->requestExit();
+    mZslProcessorThread->requestExit();
+    mCallbackProcessor->requestExit();
+
+    ALOGV("Camera %d: Waiting for threads", mCameraId);
+
+    mStreamingProcessor->join();
+    mFrameProcessor->join();
+    mCaptureSequencer->join();
+    mJpegProcessor->join();
+    mZslProcessorThread->join();
+    mCallbackProcessor->join();
+
+    ALOGV("Camera %d: Disconnecting device", mCameraId);
+
+    // Only after all our threads are gone is it safe to close the device.
+    mDevice->disconnect();
+
+    mDevice.clear();
+
+    CameraService::Client::disconnect();
+}
+
+// Re-binds the client callback interface. Only the process that currently
+// owns the client may reconnect; an unowned client (mClientPid == 0) is
+// claimed by the caller.
+status_t Camera2Client::connect(const sp<ICameraClient>& client) {
+    ATRACE_CALL();
+    ALOGV("%s: E", __FUNCTION__);
+    Mutex::Autolock icl(mBinderSerializationLock);
+
+    const int callingPid = getCallingPid();
+    const bool ownedByOther = (mClientPid != 0) && (callingPid != mClientPid);
+    if (ownedByOther) {
+        ALOGE("%s: Camera %d: Connection attempt from pid %d; "
+                "current locked to pid %d", __FUNCTION__,
+                mCameraId, callingPid, mClientPid);
+        return BAD_VALUE;
+    }
+
+    mClientPid = callingPid;
+
+    mRemoteCallback = client;
+    mSharedCameraCallbacks = client;
+
+    return OK;
+}
+
+// ICamera::lock — claims exclusive ownership of the client for the calling
+// process. Idempotent for the current owner; EBUSY for anyone else.
+status_t Camera2Client::lock() {
+    ATRACE_CALL();
+    ALOGV("%s: E", __FUNCTION__);
+    Mutex::Autolock icl(mBinderSerializationLock);
+    const int callingPid = getCallingPid();
+    ALOGV("%s: Camera %d: Lock call from pid %d; current client pid %d",
+            __FUNCTION__, mCameraId, callingPid, mClientPid);
+
+    // An unowned client is claimed by the first caller.
+    if (mClientPid == 0) {
+        mClientPid = callingPid;
+        return OK;
+    }
+
+    // Re-locking by the current owner is a no-op success.
+    if (mClientPid == callingPid) {
+        return OK;
+    }
+
+    ALOGE("%s: Camera %d: Lock call from pid %d; currently locked to pid %d",
+            __FUNCTION__, mCameraId, callingPid, mClientPid);
+    return EBUSY;
+}
+
+// ICamera::unlock — releases ownership so another process can connect.
+// Rejected for non-owners (EBUSY) and while recording (INVALID_OPERATION).
+status_t Camera2Client::unlock() {
+    ATRACE_CALL();
+    ALOGV("%s: E", __FUNCTION__);
+    Mutex::Autolock icl(mBinderSerializationLock);
+    const int callingPid = getCallingPid();
+    ALOGV("%s: Camera %d: Unlock call from pid %d; current client pid %d",
+            __FUNCTION__, mCameraId, callingPid, mClientPid);
+
+    if (callingPid != mClientPid) {
+        ALOGE("%s: Camera %d: Unlock call from pid %d; currently locked to pid %d",
+                __FUNCTION__, mCameraId, callingPid, mClientPid);
+        return EBUSY;
+    }
+
+    SharedParameters::Lock l(mParameters);
+    const Parameters::State state = l.mParameters.state;
+    if (state == Parameters::RECORD ||
+            state == Parameters::VIDEO_SNAPSHOT) {
+        ALOGD("Not allowed to unlock camera during recording.");
+        return INVALID_OPERATION;
+    }
+
+    // Drop ownership and all callback bindings.
+    mClientPid = 0;
+    mRemoteCallback.clear();
+    mSharedCameraCallbacks.clear();
+    return OK;
+}
+
+// ICamera::setPreviewDisplay — routes a Surface preview target into the
+// shared setPreviewWindowL() path. A NULL surface detaches the window.
+status_t Camera2Client::setPreviewDisplay(
+        const sp<Surface>& surface) {
+    ATRACE_CALL();
+    ALOGV("%s: E", __FUNCTION__);
+    Mutex::Autolock icl(mBinderSerializationLock);
+    status_t res = checkPid(__FUNCTION__);
+    if (res != OK) return res;
+
+    sp<IBinder> binder;
+    sp<ANativeWindow> window;
+    if (surface != 0) {
+        // Key the window by the producer's binder so setPreviewWindowL can
+        // detect a no-op swap to the same target.
+        binder = surface->getIGraphicBufferProducer()->asBinder();
+        window = surface;
+    }
+
+    return setPreviewWindowL(binder, window);
+}
+
+// ICamera::setPreviewTexture — wraps a buffer producer in a Surface and
+// routes it into the shared setPreviewWindowL() path. A NULL producer
+// detaches the current preview window.
+status_t Camera2Client::setPreviewTexture(
+        const sp<IGraphicBufferProducer>& bufferProducer) {
+    ATRACE_CALL();
+    ALOGV("%s: E", __FUNCTION__);
+    Mutex::Autolock icl(mBinderSerializationLock);
+    status_t res = checkPid(__FUNCTION__);
+    if (res != OK) return res;
+
+    sp<IBinder> binder;
+    sp<ANativeWindow> window;
+    if (bufferProducer != 0) {
+        // Identify the target by the producer's binder for dedup purposes.
+        binder = bufferProducer->asBinder();
+        window = new Surface(bufferProducer);
+    }
+    return setPreviewWindowL(binder, window);
+}
+
+// Shared implementation behind setPreviewDisplay/setPreviewTexture: swaps
+// the preview output window. If preview is running it is stopped first and
+// restarted on the new window; in capture/record states the swap is
+// rejected. binder identifies the producer so re-setting the same window
+// is a no-op.
+status_t Camera2Client::setPreviewWindowL(const sp<IBinder>& binder,
+        sp<ANativeWindow> window) {
+    ATRACE_CALL();
+    status_t res;
+
+    if (binder == mPreviewSurface) {
+        ALOGV("%s: Camera %d: New window is same as old window",
+                __FUNCTION__, mCameraId);
+        return NO_ERROR;
+    }
+
+    Parameters::State state;
+    {
+        SharedParameters::Lock l(mParameters);
+        state = l.mParameters.state;
+    }
+    switch (state) {
+        case Parameters::DISCONNECTED:
+        case Parameters::RECORD:
+        case Parameters::STILL_CAPTURE:
+        case Parameters::VIDEO_SNAPSHOT:
+            ALOGE("%s: Camera %d: Cannot set preview display while in state %s",
+                    __FUNCTION__, mCameraId,
+                    Parameters::getStateName(state));
+            return INVALID_OPERATION;
+        case Parameters::STOPPED:
+        case Parameters::WAITING_FOR_PREVIEW_WINDOW:
+            // OK
+            break;
+        case Parameters::PREVIEW:
+            // Already running preview - need to stop and create a new stream
+            res = stopStream();
+            if (res != OK) {
+                ALOGE("%s: Unable to stop preview to swap windows: %s (%d)",
+                        __FUNCTION__, strerror(-res), res);
+                return res;
+            }
+            // Remember to restart preview once the new window is attached.
+            state = Parameters::WAITING_FOR_PREVIEW_WINDOW;
+            break;
+    }
+
+    mPreviewSurface = binder;
+    res = mStreamingProcessor->setPreviewWindow(window);
+    if (res != OK) {
+        ALOGE("%s: Unable to set new preview window: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    // If preview was running (or was only waiting on a window), start it
+    // now on the freshly attached window.
+    if (state == Parameters::WAITING_FOR_PREVIEW_WINDOW) {
+        SharedParameters::Lock l(mParameters);
+        l.mParameters.state = state;
+        return startPreviewL(l.mParameters, false);
+    }
+
+    return OK;
+}
+
+// ICamera::setPreviewCallbackFlag — locked wrapper that forwards the new
+// preview-callback flags to setPreviewCallbackFlagL() for the owner pid.
+void Camera2Client::setPreviewCallbackFlag(int flag) {
+    ATRACE_CALL();
+    ALOGV("%s: Camera %d: Flag 0x%x", __FUNCTION__, mCameraId, flag);
+    Mutex::Autolock icl(mBinderSerializationLock);
+
+    if (checkPid(__FUNCTION__) == OK) {
+        SharedParameters::Lock l(mParameters);
+        setPreviewCallbackFlagL(l.mParameters, flag);
+    }
+}
+
+// Applies new preview-callback flags. Caller holds the parameters lock.
+// Enabling flags clears any callback surface target (the two mechanisms
+// are mutually exclusive), and a flag change during active preview
+// triggers a request refresh via startPreviewL(restart=true).
+void Camera2Client::setPreviewCallbackFlagL(Parameters &params, int flag) {
+    status_t res = OK;
+
+    switch(params.state) {
+        case Parameters::STOPPED:
+        case Parameters::WAITING_FOR_PREVIEW_WINDOW:
+        case Parameters::PREVIEW:
+        case Parameters::STILL_CAPTURE:
+            // OK
+            break;
+        default:
+            // In RECORD/VIDEO_SNAPSHOT etc., enabling callbacks is refused;
+            // disabling (no ENABLE bit) is still allowed to fall through.
+            if (flag & CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) {
+                ALOGE("%s: Camera %d: Can't use preview callbacks "
+                        "in state %d", __FUNCTION__, mCameraId, params.state);
+                return;
+            }
+    }
+
+    if (flag & CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK) {
+        ALOGV("%s: setting oneshot", __FUNCTION__);
+        params.previewCallbackOneShot = true;
+    }
+    if (params.previewCallbackFlags != (uint32_t)flag) {
+
+        if (flag != CAMERA_FRAME_CALLBACK_FLAG_NOOP) {
+            // Disable any existing preview callback window when enabling
+            // preview callback flags
+            res = mCallbackProcessor->setCallbackWindow(NULL);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to clear preview callback surface:"
+                        " %s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
+                return;
+            }
+            params.previewCallbackSurface = false;
+        }
+
+        params.previewCallbackFlags = flag;
+
+        // Restart preview so the streaming request picks up the new flags.
+        if (params.state == Parameters::PREVIEW) {
+            res = startPreviewL(params, true);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to refresh request in state %s",
+                        __FUNCTION__, mCameraId,
+                        Parameters::getStateName(params.state));
+            }
+        }
+    }
+}
+
+// Sets (or clears, with a NULL producer) a dedicated surface to receive
+// preview callback frames. A valid target disables the traditional
+// copy-to-app callback flags; active preview/recording is refreshed so the
+// streaming request includes the new target.
+status_t Camera2Client::setPreviewCallbackTarget(
+        const sp<IGraphicBufferProducer>& callbackProducer) {
+    ATRACE_CALL();
+    ALOGV("%s: E", __FUNCTION__);
+    Mutex::Autolock icl(mBinderSerializationLock);
+    status_t res;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    sp<ANativeWindow> window;
+    if (callbackProducer != 0) {
+        window = new Surface(callbackProducer);
+    }
+
+    res = mCallbackProcessor->setCallbackWindow(window);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to set preview callback surface: %s (%d)",
+                __FUNCTION__, mCameraId, strerror(-res), res);
+        return res;
+    }
+
+    SharedParameters::Lock l(mParameters);
+
+    if (window != NULL) {
+        // Disable traditional callbacks when a valid callback target is given
+        l.mParameters.previewCallbackFlags = CAMERA_FRAME_CALLBACK_FLAG_NOOP;
+        l.mParameters.previewCallbackOneShot = false;
+        l.mParameters.previewCallbackSurface = true;
+    } else {
+        // Disable callback target if given a NULL interface.
+        l.mParameters.previewCallbackSurface = false;
+    }
+
+    // Refresh any active streaming so the request list matches the new
+    // callback configuration.
+    switch(l.mParameters.state) {
+        case Parameters::PREVIEW:
+            res = startPreviewL(l.mParameters, true);
+            break;
+        case Parameters::RECORD:
+        case Parameters::VIDEO_SNAPSHOT:
+            res = startRecordingL(l.mParameters, true);
+            break;
+        default:
+            break;
+    }
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to refresh request in state %s",
+                __FUNCTION__, mCameraId,
+                Parameters::getStateName(l.mParameters.state));
+    }
+
+    // NOTE(review): a failed refresh above is logged but not returned — the
+    // callback target itself was applied successfully. Confirm callers are
+    // meant to see OK in that case.
+    return OK;
+}
+
+
+// ICamera::startPreview — locked entry point; delegates to startPreviewL()
+// with restart disabled.
+status_t Camera2Client::startPreview() {
+    ATRACE_CALL();
+    ALOGV("%s: E", __FUNCTION__);
+    Mutex::Autolock icl(mBinderSerializationLock);
+    status_t res = checkPid(__FUNCTION__);
+    if (res != OK) return res;
+
+    SharedParameters::Lock l(mParameters);
+    return startPreviewL(l.mParameters, false);
+}
+
+// Starts (or, with restart=true, reconfigures) streaming. Caller holds the
+// parameters lock. Sets up the preview stream, pre-creates the JPEG stream,
+// optionally adds callback and ZSL streams, then starts streaming in either
+// PREVIEW or RECORD mode depending on the recording hint. On success the
+// state machine moves to PREVIEW (or WAITING_FOR_PREVIEW_WINDOW if no
+// window is attached yet).
+status_t Camera2Client::startPreviewL(Parameters &params, bool restart) {
+    ATRACE_CALL();
+    status_t res;
+
+    ALOGV("%s: state == %d, restart = %d", __FUNCTION__, params.state, restart);
+
+    if ( (params.state == Parameters::PREVIEW ||
+                    params.state == Parameters::RECORD ||
+                    params.state == Parameters::VIDEO_SNAPSHOT)
+            && !restart) {
+        // Succeed attempt to re-enter a streaming state
+        ALOGI("%s: Camera %d: Preview already active, ignoring restart",
+                __FUNCTION__, mCameraId);
+        return OK;
+    }
+    if (params.state > Parameters::PREVIEW && !restart) {
+        ALOGE("%s: Can't start preview in state %s",
+                __FUNCTION__,
+                Parameters::getStateName(params.state));
+        return INVALID_OPERATION;
+    }
+
+    // No window attached yet: park in WAITING state; setPreviewWindowL will
+    // call back into here once a window arrives.
+    if (!mStreamingProcessor->haveValidPreviewWindow()) {
+        params.state = Parameters::WAITING_FOR_PREVIEW_WINDOW;
+        return OK;
+    }
+    params.state = Parameters::STOPPED;
+
+    res = mStreamingProcessor->updatePreviewStream(params);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to update preview stream: %s (%d)",
+                __FUNCTION__, mCameraId, strerror(-res), res);
+        return res;
+    }
+
+    // We could wait to create the JPEG output stream until first actual use
+    // (first takePicture call). However, this would substantially increase the
+    // first capture latency on HAL3 devices, and potentially on some HAL2
+    // devices. So create it unconditionally at preview start. As a drawback,
+    // this increases gralloc memory consumption for applications that don't
+    // ever take a picture.
+    // TODO: Find a better compromise, though this likely would involve HAL
+    // changes.
+    res = updateProcessorStream(mJpegProcessor, params);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Can't pre-configure still image "
+                "stream: %s (%d)",
+                __FUNCTION__, mCameraId, strerror(-res), res);
+        return res;
+    }
+
+    // Assemble the output stream list for the streaming request.
+    Vector<uint8_t> outputStreams;
+    bool callbacksEnabled = (params.previewCallbackFlags &
+            CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) ||
+            params.previewCallbackSurface;
+
+    if (callbacksEnabled) {
+        // Can't have recording stream hanging around when enabling callbacks,
+        // since it exceeds the max stream count on some devices.
+        if (mStreamingProcessor->getRecordingStreamId() != NO_STREAM) {
+            ALOGV("%s: Camera %d: Clearing out recording stream before "
+                    "creating callback stream", __FUNCTION__, mCameraId);
+            res = mStreamingProcessor->stopStream();
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Can't stop streaming to delete "
+                        "recording stream", __FUNCTION__, mCameraId);
+                return res;
+            }
+            res = mStreamingProcessor->deleteRecordingStream();
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to delete recording stream before "
+                        "enabling callbacks: %s (%d)", __FUNCTION__, mCameraId,
+                        strerror(-res), res);
+                return res;
+            }
+        }
+
+        res = mCallbackProcessor->updateStream(params);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to update callback stream: %s (%d)",
+                    __FUNCTION__, mCameraId, strerror(-res), res);
+            return res;
+        }
+        outputStreams.push(getCallbackStreamId());
+    }
+    // ZSL only applies to still-capture use; a recording hint disables it.
+    if (params.zslMode && !params.recordingHint) {
+        res = updateProcessorStream(mZslProcessor, params);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to update ZSL stream: %s (%d)",
+                    __FUNCTION__, mCameraId, strerror(-res), res);
+            return res;
+        }
+        outputStreams.push(getZslStreamId());
+    }
+
+    outputStreams.push(getPreviewStreamId());
+
+    // With a recording hint, stream using the recording request template so
+    // the transition into actual recording is seamless.
+    if (!params.recordingHint) {
+        if (!restart) {
+            res = mStreamingProcessor->updatePreviewRequest(params);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Can't set up preview request: "
+                        "%s (%d)", __FUNCTION__, mCameraId,
+                        strerror(-res), res);
+                return res;
+            }
+        }
+        res = mStreamingProcessor->startStream(StreamingProcessor::PREVIEW,
+                outputStreams);
+    } else {
+        if (!restart) {
+            res = mStreamingProcessor->updateRecordingRequest(params);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Can't set up preview request with "
+                        "record hint: %s (%d)", __FUNCTION__, mCameraId,
+                        strerror(-res), res);
+                return res;
+            }
+        }
+        res = mStreamingProcessor->startStream(StreamingProcessor::RECORD,
+                outputStreams);
+    }
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to start streaming preview: %s (%d)",
+                __FUNCTION__, mCameraId, strerror(-res), res);
+        return res;
+    }
+
+    params.state = Parameters::PREVIEW;
+    return OK;
+}
+
+// ICamera::stopPreview — locked entry point; delegates to stopPreviewL().
+void Camera2Client::stopPreview() {
+    ATRACE_CALL();
+    ALOGV("%s: E", __FUNCTION__);
+    Mutex::Autolock icl(mBinderSerializationLock);
+    if (checkPid(__FUNCTION__) != OK) return;
+
+    stopPreviewL();
+}
+
+// Stops any active streaming and moves the state machine to STOPPED.
+// Called with mBinderSerializationLock held (see stopPreview/disconnect).
+// The switch falls through deliberately: each state performs its own
+// teardown step and then continues into the next state's steps.
+void Camera2Client::stopPreviewL() {
+    ATRACE_CALL();
+    status_t res;
+    const nsecs_t kStopCaptureTimeout = 3000000000LL; // 3 seconds
+    Parameters::State state;
+    {
+        SharedParameters::Lock l(mParameters);
+        state = l.mParameters.state;
+    }
+
+    switch (state) {
+        case Parameters::DISCONNECTED:
+            // Nothing to do.
+            break;
+        case Parameters::STOPPED:
+        case Parameters::VIDEO_SNAPSHOT:
+        case Parameters::STILL_CAPTURE:
+            // Let any in-flight capture finish first.
+            // NOTE(review): the result of waitUntilIdle is ignored; on
+            // timeout we proceed to stop streams anyway — confirm intended.
+            mCaptureSequencer->waitUntilIdle(kStopCaptureTimeout);
+            // no break
+        case Parameters::RECORD:
+        case Parameters::PREVIEW:
+            res = stopStream();
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Can't stop streaming: %s (%d)",
+                        __FUNCTION__, mCameraId, strerror(-res), res);
+            }
+            res = mDevice->waitUntilDrained();
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
+                        __FUNCTION__, mCameraId, strerror(-res), res);
+            }
+            // no break
+        case Parameters::WAITING_FOR_PREVIEW_WINDOW: {
+            SharedParameters::Lock l(mParameters);
+            l.mParameters.state = Parameters::STOPPED;
+            commandStopFaceDetectionL(l.mParameters);
+            break;
+        }
+        default:
+            ALOGE("%s: Camera %d: Unknown state %d", __FUNCTION__, mCameraId,
+                    state);
+    }
+}
+
+// Binder entry point: reports whether preview is currently streaming.
+// Returns false on a permission failure as well, since the caller cannot
+// legitimately have preview running in that case.
+bool Camera2Client::previewEnabled() {
+    ATRACE_CALL();
+    Mutex::Autolock icl(mBinderSerializationLock);
+    if (checkPid(__FUNCTION__) != OK) {
+        return false;
+    }
+
+    SharedParameters::Lock lock(mParameters);
+    return (lock.mParameters.state == Parameters::PREVIEW);
+}
+
+// Enables or disables metadata-in-buffers mode for recording frames.
+// Rejected while recording (or taking a video snapshot) is active, since the
+// recording stream has already been configured with the previous mode.
+status_t Camera2Client::storeMetaDataInBuffers(bool enabled) {
+    ATRACE_CALL();
+    Mutex::Autolock icl(mBinderSerializationLock);
+    status_t res;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    SharedParameters::Lock l(mParameters);
+    switch (l.mParameters.state) {
+        case Parameters::RECORD:
+        case Parameters::VIDEO_SNAPSHOT:
+            ALOGE("%s: Camera %d: Can't be called in state %s",
+                    __FUNCTION__, mCameraId,
+                    Parameters::getStateName(l.mParameters.state));
+            return INVALID_OPERATION;
+        default:
+            // OK
+            break;
+    }
+
+    // Takes effect the next time the recording stream is set up.
+    l.mParameters.storeMetadataInBuffers = enabled;
+
+    return OK;
+}
+
+// Binder entry point for starting video recording; forwards to
+// startRecordingL() with restart == false under the parameter lock.
+status_t Camera2Client::startRecording() {
+    ATRACE_CALL();
+    ALOGV("%s: E", __FUNCTION__);
+    Mutex::Autolock icl(mBinderSerializationLock);
+    status_t res = checkPid(__FUNCTION__);
+    if (res != OK) {
+        return res;
+    }
+    SharedParameters::Lock lock(mParameters);
+
+    return startRecordingL(lock.mParameters, false);
+}
+
+// Starts the recording stream, first starting preview if necessary. When
+// restart is true the recording request/stream are rebuilt even if recording
+// is already active (used by updateRequests() to apply parameter changes).
+// Callers hold mBinderSerializationLock and the lock on params.
+// Returns OK on success, INVALID_OPERATION for bad states or non-metadata
+// mode, or the error from the streaming/callback processors.
+status_t Camera2Client::startRecordingL(Parameters &params, bool restart) {
+    status_t res;
+
+    ALOGV("%s: state == %d, restart = %d", __FUNCTION__, params.state, restart);
+
+    switch (params.state) {
+        case Parameters::STOPPED:
+            // Recording needs a live preview; bring it up first.
+            res = startPreviewL(params, false);
+            if (res != OK) return res;
+            break;
+        case Parameters::PREVIEW:
+            // Ready to go
+            break;
+        case Parameters::RECORD:
+        case Parameters::VIDEO_SNAPSHOT:
+            // OK to call this when recording is already on, just skip unless
+            // we're looking to restart
+            if (!restart) return OK;
+            break;
+        default:
+            ALOGE("%s: Camera %d: Can't start recording in state %s",
+                    __FUNCTION__, mCameraId,
+                    Parameters::getStateName(params.state));
+            return INVALID_OPERATION;
+    };
+
+    if (!params.storeMetadataInBuffers) {
+        ALOGE("%s: Camera %d: Recording only supported in metadata mode, but "
+                "non-metadata recording mode requested!", __FUNCTION__,
+                mCameraId);
+        return INVALID_OPERATION;
+    }
+
+    if (!restart) {
+        mCameraService->playSound(CameraService::SOUND_RECORDING);
+        // BUGFIX: previously the return value of updateRecordingRequest()
+        // was discarded, leaving res uninitialized for the check below
+        // (undefined behavior) and silently dropping request-setup failures.
+        res = mStreamingProcessor->updateRecordingRequest(params);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to update recording request: %s (%d)",
+                    __FUNCTION__, mCameraId, strerror(-res), res);
+            return res;
+        }
+    }
+
+    // Not all devices can support a preview callback stream and a recording
+    // stream at the same time, so assume none of them can.
+    if (mCallbackProcessor->getStreamId() != NO_STREAM) {
+        ALOGV("%s: Camera %d: Clearing out callback stream before "
+                "creating recording stream", __FUNCTION__, mCameraId);
+        res = mStreamingProcessor->stopStream();
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't stop streaming to delete callback stream",
+                    __FUNCTION__, mCameraId);
+            return res;
+        }
+        res = mCallbackProcessor->deleteStream();
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to delete callback stream before "
+                    "record: %s (%d)", __FUNCTION__, mCameraId,
+                    strerror(-res), res);
+            return res;
+        }
+    }
+    // Disable callbacks if they're enabled; can't record and use callbacks,
+    // and we can't fail record start without stagefright asserting.
+    params.previewCallbackFlags = 0;
+
+    res = updateProcessorStream<
+            StreamingProcessor,
+            &StreamingProcessor::updateRecordingStream>(mStreamingProcessor,
+                                                        params);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to update recording stream: %s (%d)",
+                __FUNCTION__, mCameraId, strerror(-res), res);
+        return res;
+    }
+
+    // Stream both preview and recording output while in RECORD state.
+    Vector<uint8_t> outputStreams;
+    outputStreams.push(getPreviewStreamId());
+    outputStreams.push(getRecordingStreamId());
+
+    res = mStreamingProcessor->startStream(StreamingProcessor::RECORD,
+            outputStreams);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to start recording stream: %s (%d)",
+                __FUNCTION__, mCameraId, strerror(-res), res);
+        return res;
+    }
+
+    // Don't downgrade VIDEO_SNAPSHOT back to RECORD on a restart.
+    if (params.state < Parameters::RECORD) {
+        params.state = Parameters::RECORD;
+    }
+
+    return OK;
+}
+
+// Binder entry point: stops video recording. Only valid in RECORD state;
+// plays the recording-stop sound and restarts plain preview streaming via
+// startPreviewL(restart=true). Errors are logged only, as the method is void.
+void Camera2Client::stopRecording() {
+    ATRACE_CALL();
+    ALOGV("%s: E", __FUNCTION__);
+    Mutex::Autolock icl(mBinderSerializationLock);
+    SharedParameters::Lock l(mParameters);
+
+    status_t res;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return;
+
+    switch (l.mParameters.state) {
+        case Parameters::RECORD:
+            // OK to stop
+            break;
+        case Parameters::STOPPED:
+        case Parameters::PREVIEW:
+        case Parameters::STILL_CAPTURE:
+        case Parameters::VIDEO_SNAPSHOT:
+        default:
+            ALOGE("%s: Camera %d: Can't stop recording in state %s",
+                    __FUNCTION__, mCameraId,
+                    Parameters::getStateName(l.mParameters.state));
+            return;
+    };
+
+    mCameraService->playSound(CameraService::SOUND_RECORDING);
+
+    // Switch the streaming request back to preview-only.
+    res = startPreviewL(l.mParameters, true);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to return to preview",
+                __FUNCTION__, mCameraId);
+    }
+}
+
+// Binder entry point: reports whether recording is active; false on a
+// permission failure as well.
+bool Camera2Client::recordingEnabled() {
+    ATRACE_CALL();
+    Mutex::Autolock icl(mBinderSerializationLock);
+
+    if (checkPid(__FUNCTION__) != OK) {
+        return false;
+    }
+
+    return recordingEnabledL();
+}
+
+// Internal helper: true while in RECORD or VIDEO_SNAPSHOT state.
+bool Camera2Client::recordingEnabledL() {
+    ATRACE_CALL();
+    SharedParameters::Lock lock(mParameters);
+
+    switch (lock.mParameters.state) {
+        case Parameters::RECORD:
+        case Parameters::VIDEO_SNAPSHOT:
+            return true;
+        default:
+            return false;
+    }
+}
+
+// Returns a recording buffer previously handed to the client back to the
+// streaming processor so it can be recycled.
+void Camera2Client::releaseRecordingFrame(const sp<IMemory>& mem) {
+    ATRACE_CALL();
+    Mutex::Autolock icl(mBinderSerializationLock);
+    if (checkPid(__FUNCTION__) != OK) {
+        return;
+    }
+
+    mStreamingProcessor->releaseRecordingFrame(mem);
+}
+
+// Binder entry point: triggers a single autofocus cycle. For FIXED/INFINITY
+// focus modes, or when CAF has already locked, the CAMERA_MSG_FOCUS callback
+// is fired immediately without touching the HAL. Otherwise the request
+// settings are synced to the device and an AF trigger with a fresh trigger
+// ID is sent; the result arrives later via notifyAutoFocus().
+status_t Camera2Client::autoFocus() {
+    ATRACE_CALL();
+    Mutex::Autolock icl(mBinderSerializationLock);
+    ALOGV("%s: Camera %d", __FUNCTION__, mCameraId);
+    status_t res;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    int triggerId;
+    bool notifyImmediately = false;
+    bool notifySuccess = false;
+    {
+        SharedParameters::Lock l(mParameters);
+        // AF requires an active preview (or later) state.
+        if (l.mParameters.state < Parameters::PREVIEW) {
+            return INVALID_OPERATION;
+        }
+
+        /**
+          * If the camera does not support auto-focus, it is a no-op and
+          * onAutoFocus(boolean, Camera) callback will be called immediately
+          * with a fake value of success set to true.
+          *
+          * Similarly, if focus mode is set to INFINITY, there's no reason to
+          * bother the HAL.
+          */
+        if (l.mParameters.focusMode == Parameters::FOCUS_MODE_FIXED ||
+                l.mParameters.focusMode == Parameters::FOCUS_MODE_INFINITY) {
+            notifyImmediately = true;
+            notifySuccess = true;
+        }
+        /**
+         * If we're in CAF mode, and AF has already been locked, just fire back
+         * the callback right away; the HAL would not send a notification since
+         * no state change would happen on a AF trigger.
+         */
+        if ( (l.mParameters.focusMode == Parameters::FOCUS_MODE_CONTINUOUS_PICTURE ||
+                l.mParameters.focusMode == Parameters::FOCUS_MODE_CONTINUOUS_VIDEO) &&
+                l.mParameters.focusState == ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED ) {
+            notifyImmediately = true;
+            notifySuccess = true;
+        }
+        /**
+         * Send immediate notification back to client
+         */
+        if (notifyImmediately) {
+            SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
+            if (l.mRemoteCallback != 0) {
+                l.mRemoteCallback->notifyCallback(CAMERA_MSG_FOCUS,
+                        notifySuccess ? 1 : 0, 0);
+            }
+            return OK;
+        }
+        /**
+         * Handle quirk mode for AF in scene modes
+         */
+        if (l.mParameters.quirks.triggerAfWithAuto &&
+                l.mParameters.sceneMode != ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED &&
+                l.mParameters.focusMode != Parameters::FOCUS_MODE_AUTO &&
+                !l.mParameters.focusingAreas[0].isEmpty()) {
+            ALOGV("%s: Quirk: Switching from focusMode %d to AUTO",
+                    __FUNCTION__, l.mParameters.focusMode);
+            // Remember the real focus mode so cancelAutoFocus can restore it.
+            l.mParameters.shadowFocusMode = l.mParameters.focusMode;
+            l.mParameters.focusMode = Parameters::FOCUS_MODE_AUTO;
+            updateRequests(l.mParameters);
+        }
+
+        l.mParameters.currentAfTriggerId = ++l.mParameters.afTriggerCounter;
+        triggerId = l.mParameters.currentAfTriggerId;
+    }
+    // Make sure the HAL has the latest request settings before triggering.
+    syncWithDevice();
+
+    mDevice->triggerAutofocus(triggerId);
+
+    return OK;
+}
+
+// Binder entry point: cancels an in-progress autofocus cycle. A no-op in
+// FIXED/INFINITY modes. If the triggerAfWithAuto quirk had temporarily
+// forced AUTO focus mode, restoring the real mode implicitly cancels AF,
+// so no explicit cancel trigger is sent in that case.
+status_t Camera2Client::cancelAutoFocus() {
+    ATRACE_CALL();
+    Mutex::Autolock icl(mBinderSerializationLock);
+    ALOGV("%s: Camera %d", __FUNCTION__, mCameraId);
+    status_t res;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    int triggerId;
+    {
+        SharedParameters::Lock l(mParameters);
+        // Canceling does nothing in FIXED or INFINITY modes
+        if (l.mParameters.focusMode == Parameters::FOCUS_MODE_FIXED ||
+                l.mParameters.focusMode == Parameters::FOCUS_MODE_INFINITY) {
+            return OK;
+        }
+        triggerId = ++l.mParameters.afTriggerCounter;
+
+        // When using triggerAfWithAuto quirk, may need to reset focus mode to
+        // the real state at this point. No need to cancel explicitly if
+        // changing the AF mode.
+        if (l.mParameters.shadowFocusMode != Parameters::FOCUS_MODE_INVALID) {
+            ALOGV("%s: Quirk: Restoring focus mode to %d", __FUNCTION__,
+                    l.mParameters.shadowFocusMode);
+            l.mParameters.focusMode = l.mParameters.shadowFocusMode;
+            l.mParameters.shadowFocusMode = Parameters::FOCUS_MODE_INVALID;
+            updateRequests(l.mParameters);
+
+            return OK;
+        }
+    }
+    // Ensure the HAL has the latest settings before sending the cancel.
+    syncWithDevice();
+
+    mDevice->triggerCancelAutofocus(triggerId);
+
+    return OK;
+}
+
+// Binder entry point: starts a still capture (from PREVIEW) or a video
+// snapshot (from RECORD). Face detection is stopped for still capture, the
+// JPEG stream is (re)configured, the latest settings are synced to the HAL,
+// and then the capture sequencer drives the actual capture. msgType selects
+// which CAMERA_MSG_* callbacks the client expects.
+status_t Camera2Client::takePicture(int msgType) {
+    ATRACE_CALL();
+    Mutex::Autolock icl(mBinderSerializationLock);
+    status_t res;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    {
+        SharedParameters::Lock l(mParameters);
+        switch (l.mParameters.state) {
+            case Parameters::DISCONNECTED:
+            case Parameters::STOPPED:
+            case Parameters::WAITING_FOR_PREVIEW_WINDOW:
+                ALOGE("%s: Camera %d: Cannot take picture without preview enabled",
+                        __FUNCTION__, mCameraId);
+                return INVALID_OPERATION;
+            case Parameters::PREVIEW:
+                // Good to go for takePicture
+                res = commandStopFaceDetectionL(l.mParameters);
+                if (res != OK) {
+                    ALOGE("%s: Camera %d: Unable to stop face detection for still capture",
+                            __FUNCTION__, mCameraId);
+                    return res;
+                }
+                l.mParameters.state = Parameters::STILL_CAPTURE;
+                break;
+            case Parameters::RECORD:
+                // Good to go for video snapshot
+                l.mParameters.state = Parameters::VIDEO_SNAPSHOT;
+                break;
+            case Parameters::STILL_CAPTURE:
+            case Parameters::VIDEO_SNAPSHOT:
+                ALOGE("%s: Camera %d: Already taking a picture",
+                        __FUNCTION__, mCameraId);
+                return INVALID_OPERATION;
+        }
+
+        ALOGV("%s: Camera %d: Starting picture capture", __FUNCTION__, mCameraId);
+
+        // (Re)create the JPEG output stream if the picture size changed.
+        res = updateProcessorStream(mJpegProcessor, l.mParameters);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't set up still image stream: %s (%d)",
+                    __FUNCTION__, mCameraId, strerror(-res), res);
+            return res;
+        }
+    }
+
+    // Need HAL to have correct settings before (possibly) triggering precapture
+    syncWithDevice();
+
+    res = mCaptureSequencer->startCapture(msgType);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to start capture: %s (%d)",
+                __FUNCTION__, mCameraId, strerror(-res), res);
+    }
+
+    return res;
+}
+
+// Binder entry point: parses and applies a new parameter string from the
+// client, then pushes the resulting settings to any active streaming
+// requests via updateRequests().
+status_t Camera2Client::setParameters(const String8& params) {
+    ATRACE_CALL();
+    ALOGV("%s: Camera %d", __FUNCTION__, mCameraId);
+    Mutex::Autolock icl(mBinderSerializationLock);
+    status_t res = checkPid(__FUNCTION__);
+    if (res != OK) {
+        return res;
+    }
+
+    SharedParameters::Lock lock(mParameters);
+
+    res = lock.mParameters.set(params);
+    if (res != OK) {
+        return res;
+    }
+
+    return updateRequests(lock.mParameters);
+}
+
+// Binder entry point: returns the current parameters as a flattened string;
+// an empty string on permission failure.
+String8 Camera2Client::getParameters() const {
+    ATRACE_CALL();
+    ALOGV("%s: Camera %d", __FUNCTION__, mCameraId);
+    Mutex::Autolock icl(mBinderSerializationLock);
+    if (checkPid(__FUNCTION__) != OK) {
+        return String8();
+    }
+
+    SharedParameters::ReadLock lock(mParameters);
+
+    return lock.mParameters.get();
+}
+
+// Binder entry point: dispatches CAMERA_CMD_* commands to their locked
+// command*L() handlers. Unknown commands return BAD_VALUE.
+status_t Camera2Client::sendCommand(int32_t cmd, int32_t arg1, int32_t arg2) {
+    ATRACE_CALL();
+    Mutex::Autolock icl(mBinderSerializationLock);
+    status_t res;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    ALOGV("%s: Camera %d: Command %d (%d, %d)", __FUNCTION__, mCameraId,
+            cmd, arg1, arg2);
+
+    switch (cmd) {
+        case CAMERA_CMD_START_SMOOTH_ZOOM:
+            return commandStartSmoothZoomL();
+        case CAMERA_CMD_STOP_SMOOTH_ZOOM:
+            return commandStopSmoothZoomL();
+        case CAMERA_CMD_SET_DISPLAY_ORIENTATION:
+            return commandSetDisplayOrientationL(arg1);
+        case CAMERA_CMD_ENABLE_SHUTTER_SOUND:
+            return commandEnableShutterSoundL(arg1 == 1);
+        case CAMERA_CMD_PLAY_RECORDING_SOUND:
+            return commandPlayRecordingSoundL();
+        case CAMERA_CMD_START_FACE_DETECTION:
+            return commandStartFaceDetectionL(arg1);
+        case CAMERA_CMD_STOP_FACE_DETECTION: {
+            SharedParameters::Lock l(mParameters);
+            return commandStopFaceDetectionL(l.mParameters);
+        }
+        case CAMERA_CMD_ENABLE_FOCUS_MOVE_MSG:
+            return commandEnableFocusMoveMsgL(arg1 == 1);
+        case CAMERA_CMD_PING:
+            return commandPingL();
+        case CAMERA_CMD_SET_VIDEO_BUFFER_COUNT:
+            return commandSetVideoBufferCountL(arg1);
+        default:
+            ALOGE("%s: Unknown command %d (arguments %d, %d)",
+                    __FUNCTION__, cmd, arg1, arg2);
+            return BAD_VALUE;
+    }
+}
+
+// Smooth zoom is not implemented for this client; logs and reports success
+// so callers don't treat the missing feature as a hard failure.
+status_t Camera2Client::commandStartSmoothZoomL() {
+    ALOGE("%s: Unimplemented!", __FUNCTION__);
+    return OK;
+}
+
+// Smooth zoom is not implemented for this client; logs and reports success
+// so callers don't treat the missing feature as a hard failure.
+status_t Camera2Client::commandStopSmoothZoomL() {
+    ALOGE("%s: Unimplemented!", __FUNCTION__);
+    return OK;
+}
+
+// Sets the preview display orientation in degrees. The angle is converted
+// to a surface transform (mirrored for front-facing cameras); the transform
+// is applied immediately only if it changed and a preview stream exists,
+// and is always recorded for future stream setup.
+status_t Camera2Client::commandSetDisplayOrientationL(int degrees) {
+    int transform = Parameters::degToTransform(degrees,
+            mCameraFacing == CAMERA_FACING_FRONT);
+    if (transform == -1) {
+        ALOGE("%s: Camera %d: Error setting %d as display orientation value",
+                __FUNCTION__, mCameraId, degrees);
+        return BAD_VALUE;
+    }
+    SharedParameters::Lock l(mParameters);
+    if (transform != l.mParameters.previewTransform &&
+            getPreviewStreamId() != NO_STREAM) {
+        mDevice->setStreamTransform(getPreviewStreamId(), transform);
+    }
+    l.mParameters.previewTransform = transform;
+    return OK;
+}
+
+// Enables or disables the shutter sound. Enabling is always allowed.
+// Disabling may be forbidden by the ro.camera.sound.forced property, in
+// which case only the mediaserver process itself may disable it.
+status_t Camera2Client::commandEnableShutterSoundL(bool enable) {
+    SharedParameters::Lock l(mParameters);
+    if (enable) {
+        l.mParameters.playShutterSound = true;
+        return OK;
+    }
+
+    // Disabling shutter sound may not be allowed. In that case only
+    // allow the mediaserver process to disable the sound.
+    char value[PROPERTY_VALUE_MAX];
+    property_get("ro.camera.sound.forced", value, "0");
+    if (strncmp(value, "0", 2) != 0) {
+        // Disabling shutter sound is not allowed. Deny if the current
+        // process is not mediaserver.
+        if (getCallingPid() != getpid()) {
+            ALOGE("Failed to disable shutter sound. Permission denied (pid %d)",
+                    getCallingPid());
+            return PERMISSION_DENIED;
+        }
+    }
+
+    l.mParameters.playShutterSound = false;
+    return OK;
+}
+
+// Plays the recording start/stop sound on behalf of the client.
+status_t Camera2Client::commandPlayRecordingSoundL() {
+    mCameraService->playSound(CameraService::SOUND_RECORDING);
+    return OK;
+}
+
+// Enables face detection. Requires an active preview/record state and a
+// device that supports a face detect mode; the requested detection type is
+// ignored and the device's best supported mode is used. Idempotent if face
+// detection is already on.
+status_t Camera2Client::commandStartFaceDetectionL(int /*type*/) {
+    ALOGV("%s: Camera %d: Starting face detection",
+          __FUNCTION__, mCameraId);
+    status_t res;
+    SharedParameters::Lock l(mParameters);
+    switch (l.mParameters.state) {
+        case Parameters::DISCONNECTED:
+        case Parameters::STOPPED:
+        case Parameters::WAITING_FOR_PREVIEW_WINDOW:
+        case Parameters::STILL_CAPTURE:
+            ALOGE("%s: Camera %d: Cannot start face detection without preview active",
+                    __FUNCTION__, mCameraId);
+            return INVALID_OPERATION;
+        case Parameters::PREVIEW:
+        case Parameters::RECORD:
+        case Parameters::VIDEO_SNAPSHOT:
+            // Good to go for starting face detect
+            break;
+    }
+    // Ignoring type
+    if (l.mParameters.fastInfo.bestFaceDetectMode ==
+            ANDROID_STATISTICS_FACE_DETECT_MODE_OFF) {
+        ALOGE("%s: Camera %d: Face detection not supported",
+                __FUNCTION__, mCameraId);
+        return BAD_VALUE;
+    }
+    if (l.mParameters.enableFaceDetect) return OK;
+
+    l.mParameters.enableFaceDetect = true;
+
+    // Push the new face-detect setting into the streaming requests.
+    res = updateRequests(l.mParameters);
+
+    return res;
+}
+
+// Disables face detection; params is the already-locked parameter state.
+// Idempotent if face detection is off. Streaming requests are only updated
+// when a stream is actually running.
+status_t Camera2Client::commandStopFaceDetectionL(Parameters &params) {
+    status_t res = OK;
+    ALOGV("%s: Camera %d: Stopping face detection",
+          __FUNCTION__, mCameraId);
+
+    if (!params.enableFaceDetect) return OK;
+
+    params.enableFaceDetect = false;
+
+    if (params.state == Parameters::PREVIEW
+            || params.state == Parameters::RECORD
+            || params.state == Parameters::VIDEO_SNAPSHOT) {
+        res = updateRequests(params);
+    }
+
+    return res;
+}
+
+// Toggles delivery of CAMERA_MSG_FOCUS_MOVE callbacks to the client.
+status_t Camera2Client::commandEnableFocusMoveMsgL(bool enable) {
+    SharedParameters::Lock lock(mParameters);
+    lock.mParameters.enableFocusMoveMessages = enable;
+    return OK;
+}
+
+// Liveness check: succeeds as long as the device has not been disconnected.
+status_t Camera2Client::commandPingL() {
+    // Always ping back if access is proper and device is alive
+    SharedParameters::Lock lock(mParameters);
+    return (lock.mParameters.state == Parameters::DISCONNECTED) ?
+            NO_INIT : OK;
+}
+
+// Sets the number of recording buffers. Rejected once recording has started,
+// since the recording buffer queue is already in use.
+status_t Camera2Client::commandSetVideoBufferCountL(size_t count) {
+    if (recordingEnabledL()) {
+        ALOGE("%s: Camera %d: Error setting video buffer count after "
+                "recording was started", __FUNCTION__, mCameraId);
+        return INVALID_OPERATION;
+    }
+
+    return mStreamingProcessor->setRecordingBufferCount(count);
+}
+
+/** Device-related methods */
+// HAL notification handler for AF state transitions. Translates the
+// android.control.afState value into the API-1 CAMERA_MSG_FOCUS /
+// CAMERA_MSG_FOCUS_MOVE callbacks, filtered by the current focus mode and by
+// whether the transition belongs to the client's current AF trigger. The
+// callback lock is taken only after releasing the parameter lock to avoid
+// holding both at once. Fallthroughs in the inner switches are intentional.
+void Camera2Client::notifyAutoFocus(uint8_t newState, int triggerId) {
+    ALOGV("%s: Autofocus state now %d, last trigger %d",
+            __FUNCTION__, newState, triggerId);
+    bool sendCompletedMessage = false;
+    bool sendMovingMessage = false;
+
+    bool success = false;
+    bool afInMotion = false;
+    {
+        SharedParameters::Lock l(mParameters);
+        l.mParameters.focusState = newState;
+        switch (l.mParameters.focusMode) {
+            case Parameters::FOCUS_MODE_AUTO:
+            case Parameters::FOCUS_MODE_MACRO:
+                // Don't send notifications upstream if they're not for the current AF
+                // trigger. For example, if cancel was called in between, or if we
+                // already sent a notification about this AF call.
+                if (triggerId != l.mParameters.currentAfTriggerId) break;
+                switch (newState) {
+                    case ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED:
+                        success = true;
+                        // no break
+                    case ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED:
+                        sendCompletedMessage = true;
+                        // Consume the trigger so duplicates are ignored.
+                        l.mParameters.currentAfTriggerId = -1;
+                        break;
+                    case ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN:
+                        // Just starting focusing, ignore
+                        break;
+                    case ANDROID_CONTROL_AF_STATE_INACTIVE:
+                    case ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN:
+                    case ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED:
+                    default:
+                        // Unexpected in AUTO/MACRO mode
+                        ALOGE("%s: Unexpected AF state transition in AUTO/MACRO mode: %d",
+                                __FUNCTION__, newState);
+                        break;
+                }
+                break;
+            case Parameters::FOCUS_MODE_CONTINUOUS_VIDEO:
+            case Parameters::FOCUS_MODE_CONTINUOUS_PICTURE:
+                switch (newState) {
+                    case ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED:
+                        success = true;
+                        // no break
+                    case ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED:
+                        // Don't send notifications upstream if they're not for
+                        // the current AF trigger. For example, if cancel was
+                        // called in between, or if we already sent a
+                        // notification about this AF call.
+                        // Send both a 'AF done' callback and a 'AF move' callback
+                        if (triggerId != l.mParameters.currentAfTriggerId) break;
+                        sendCompletedMessage = true;
+                        afInMotion = false;
+                        if (l.mParameters.enableFocusMoveMessages &&
+                                l.mParameters.afInMotion) {
+                            sendMovingMessage = true;
+                        }
+                        l.mParameters.currentAfTriggerId = -1;
+                        break;
+                    case ANDROID_CONTROL_AF_STATE_INACTIVE:
+                        // Cancel was called, or we switched state; care if
+                        // currently moving
+                        afInMotion = false;
+                        if (l.mParameters.enableFocusMoveMessages &&
+                                l.mParameters.afInMotion) {
+                            sendMovingMessage = true;
+                        }
+                        break;
+                    case ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN:
+                        // Start passive scan, inform upstream
+                        afInMotion = true;
+                        // no break
+                    case ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED:
+                        // Stop passive scan, inform upstream
+                        if (l.mParameters.enableFocusMoveMessages) {
+                            sendMovingMessage = true;
+                        }
+                        break;
+                }
+                l.mParameters.afInMotion = afInMotion;
+                break;
+            case Parameters::FOCUS_MODE_EDOF:
+            case Parameters::FOCUS_MODE_INFINITY:
+            case Parameters::FOCUS_MODE_FIXED:
+            default:
+                if (newState != ANDROID_CONTROL_AF_STATE_INACTIVE) {
+                    ALOGE("%s: Unexpected AF state change %d "
+                            "(ID %d) in focus mode %d",
+                          __FUNCTION__, newState, triggerId,
+                            l.mParameters.focusMode);
+                }
+        }
+    }
+    if (sendMovingMessage) {
+        SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
+        if (l.mRemoteCallback != 0) {
+            l.mRemoteCallback->notifyCallback(CAMERA_MSG_FOCUS_MOVE,
+                    afInMotion ? 1 : 0, 0);
+        }
+    }
+    if (sendCompletedMessage) {
+        SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
+        if (l.mRemoteCallback != 0) {
+            l.mRemoteCallback->notifyCallback(CAMERA_MSG_FOCUS,
+                    success ? 1 : 0, 0);
+        }
+    }
+}
+
+// HAL notification handler: forwards AE state transitions to the capture
+// sequencer, which uses them during still-capture sequencing.
+void Camera2Client::notifyAutoExposure(uint8_t newState, int triggerId) {
+    ALOGV("%s: Autoexposure state now %d, last trigger %d",
+            __FUNCTION__, newState, triggerId);
+    mCaptureSequencer->notifyAutoExposure(newState, triggerId);
+}
+
+// Gives processors direct access to the shared parameter state.
+camera2::SharedParameters& Camera2Client::getParameters() {
+    return mParameters;
+}
+
+// Stream ID accessor for the preview stream.
+int Camera2Client::getPreviewStreamId() const {
+    return mStreamingProcessor->getPreviewStreamId();
+}
+
+// Stream ID accessor for the JPEG still-capture stream.
+int Camera2Client::getCaptureStreamId() const {
+    return mJpegProcessor->getStreamId();
+}
+
+// Stream ID accessor for the preview-callback stream.
+int Camera2Client::getCallbackStreamId() const {
+    return mCallbackProcessor->getStreamId();
+}
+
+// Stream ID accessor for the recording stream.
+int Camera2Client::getRecordingStreamId() const {
+    return mStreamingProcessor->getRecordingStreamId();
+}
+
+// Stream ID accessor for the zero-shutter-lag stream.
+int Camera2Client::getZslStreamId() const {
+    return mZslProcessor->getStreamId();
+}
+
+// Registers a listener for capture-result frames whose request IDs fall in
+// [minId, maxId]; forwarded to the frame processor.
+status_t Camera2Client::registerFrameListener(int32_t minId, int32_t maxId,
+        wp<camera2::FrameProcessor::FilteredListener> listener) {
+    return mFrameProcessor->registerListener(minId, maxId, listener);
+}
+
+// Removes a previously registered frame listener for the given ID range.
+status_t Camera2Client::removeFrameListener(int32_t minId, int32_t maxId,
+        wp<camera2::FrameProcessor::FilteredListener> listener) {
+    return mFrameProcessor->removeListener(minId, maxId, listener);
+}
+
+// Stops the active streaming request (preview or record).
+status_t Camera2Client::stopStream() {
+    return mStreamingProcessor->stopStream();
+}
+
+// Out-of-line definitions for the class's static const request-ID range
+// members (required pre-C++17 when the members are ODR-used).
+const int32_t Camera2Client::kPreviewRequestIdStart;
+const int32_t Camera2Client::kPreviewRequestIdEnd;
+const int32_t Camera2Client::kRecordingRequestIdStart;
+const int32_t Camera2Client::kRecordingRequestIdEnd;
+const int32_t Camera2Client::kCaptureRequestIdStart;
+const int32_t Camera2Client::kCaptureRequestIdEnd;
+
+/** Utility methods */
+
+// Rebuilds the preview and recording requests from the current parameters
+// and, if a stream is active, restarts it so the new settings take effect.
+// Request IDs are bumped first so stale in-flight results can be told apart
+// from results produced by the updated requests.
+status_t Camera2Client::updateRequests(Parameters &params) {
+    status_t res;
+
+    ALOGV("%s: Camera %d: state = %d", __FUNCTION__, getCameraId(), params.state);
+
+    res = mStreamingProcessor->incrementStreamingIds();
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to increment request IDs: %s (%d)",
+                __FUNCTION__, mCameraId, strerror(-res), res);
+        return res;
+    }
+
+    res = mStreamingProcessor->updatePreviewRequest(params);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to update preview request: %s (%d)",
+                __FUNCTION__, mCameraId, strerror(-res), res);
+        return res;
+    }
+    res = mStreamingProcessor->updateRecordingRequest(params);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to update recording request: %s (%d)",
+                __FUNCTION__, mCameraId, strerror(-res), res);
+        return res;
+    }
+
+    // Restart whichever stream type matches the current state.
+    if (params.state == Parameters::PREVIEW) {
+        res = startPreviewL(params, true);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Error streaming new preview request: %s (%d)",
+                    __FUNCTION__, mCameraId, strerror(-res), res);
+            return res;
+        }
+    } else if (params.state == Parameters::RECORD ||
+            params.state == Parameters::VIDEO_SNAPSHOT) {
+        res = startRecordingL(params, true);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Error streaming new record request: %s (%d)",
+                    __FUNCTION__, mCameraId, strerror(-res), res);
+            return res;
+        }
+    }
+    return res;
+}
+
+
+// Computes the size in bytes of one image buffer for the given pixel format
+// and dimensions. The stride parameter only matters for YV12, whose chroma
+// rows must be 16-byte aligned. Returns 0 (and logs) for unknown formats.
+size_t Camera2Client::calculateBufferSize(int width, int height,
+        int format, int stride) {
+    switch (format) {
+        case HAL_PIXEL_FORMAT_YCbCr_422_SP: // NV16
+        case HAL_PIXEL_FORMAT_YCbCr_422_I:  // YUY2
+        case HAL_PIXEL_FORMAT_RGB_565:
+        case HAL_PIXEL_FORMAT_RAW_SENSOR:
+            // All of these pack 16 bits per pixel.
+            return width * height * 2;
+        case HAL_PIXEL_FORMAT_YCrCb_420_SP: // NV21
+            return width * height * 3 / 2;
+        case HAL_PIXEL_FORMAT_YV12: {
+            // Full-resolution Y plane plus two quarter-size chroma planes,
+            // each chroma row aligned up to a 16-byte boundary.
+            size_t ySize = stride * height;
+            size_t uvStride = (stride / 2 + 0xF) & ~0xF;
+            size_t uvSize = uvStride * height / 2;
+            return ySize + uvSize * 2;
+        }
+        case HAL_PIXEL_FORMAT_RGBA_8888:
+            return width * height * 4;
+        default:
+            ALOGE("%s: Unknown preview format: %x",
+                    __FUNCTION__,  format);
+            return 0;
+    }
+}
+
+// Waits (up to 500 ms) until the HAL has picked up the currently active
+// streaming request, so subsequent triggers see the latest settings. A
+// no-op when no streaming request is active (activeRequestId == 0).
+status_t Camera2Client::syncWithDevice() {
+    ATRACE_CALL();
+    const nsecs_t kMaxSyncTimeout = 500000000; // 500 ms
+    status_t res;
+
+    int32_t activeRequestId = mStreamingProcessor->getActiveRequestId();
+    if (activeRequestId == 0) return OK;
+
+    res = mDevice->waitUntilRequestReceived(activeRequestId, kMaxSyncTimeout);
+    if (res == TIMED_OUT) {
+        ALOGE("%s: Camera %d: Timed out waiting sync with HAL",
+                __FUNCTION__, mCameraId);
+    } else if (res != OK) {
+        ALOGE("%s: Camera %d: Error while waiting to sync with HAL",
+                __FUNCTION__, mCameraId);
+    }
+    return res;
+}
+
+// Convenience overload that updates a processor's stream via its default
+// updateStream() member.
+template <typename ProcessorT>
+status_t Camera2Client::updateProcessorStream(sp<ProcessorT> processor,
+                                              camera2::Parameters params) {
+    // No default template arguments until C++11, so we need this overload
+    return updateProcessorStream<ProcessorT, &ProcessorT::updateStream>(
+            processor, params);
+}
+
+// Updates a processor's output stream with the given parameters via the
+// member function pointer updateStreamF. If the stream is busy (-EBUSY),
+// streaming is paused, the HAL drained, the update retried, and streaming
+// resumed. Returns the result of the (possibly retried) update.
+template <typename ProcessorT,
+          status_t (ProcessorT::*updateStreamF)(const Parameters &)>
+status_t Camera2Client::updateProcessorStream(sp<ProcessorT> processor,
+                                              Parameters params) {
+    status_t res;
+
+    // Get raw pointer since sp<T> doesn't have operator->*
+    ProcessorT *processorPtr = processor.get();
+    res = (processorPtr->*updateStreamF)(params);
+
+    /**
+     * Can't update the stream if it's busy?
+     *
+     * Then we need to stop the device (by temporarily clearing the request
+     * queue) and then try again. Resume streaming once we're done.
+     */
+    if (res == -EBUSY) {
+        ALOGV("%s: Camera %d: Pausing to update stream", __FUNCTION__,
+                mCameraId);
+        res = mStreamingProcessor->togglePauseStream(/*pause*/true);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't pause streaming: %s (%d)",
+                    __FUNCTION__, mCameraId, strerror(-res), res);
+        }
+
+        res = mDevice->waitUntilDrained();
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
+                    __FUNCTION__, mCameraId, strerror(-res), res);
+        }
+
+        res = (processorPtr->*updateStreamF)(params);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Failed to update processing stream "
+                  " despite having halted streaming first: %s (%d)",
+                  __FUNCTION__, mCameraId, strerror(-res), res);
+        }
+
+        res = mStreamingProcessor->togglePauseStream(/*pause*/false);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't unpause streaming: %s (%d)",
+                    __FUNCTION__, mCameraId, strerror(-res), res);
+        }
+    }
+
+    return res;
+}
+
+} // namespace android
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
new file mode 100644
index 0000000..ed448f3
--- /dev/null
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2CLIENT_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2CLIENT_H
+
+#include "CameraService.h"
+#include "common/CameraDeviceBase.h"
+#include "common/Camera2ClientBase.h"
+#include "api1/client2/Parameters.h"
+#include "api1/client2/FrameProcessor.h"
+//#include "api1/client2/StreamingProcessor.h"
+//#include "api1/client2/JpegProcessor.h"
+//#include "api1/client2/ZslProcessorInterface.h"
+//#include "api1/client2/CaptureSequencer.h"
+//#include "api1/client2/CallbackProcessor.h"
+
+namespace android {
+
+namespace camera2 {
+
+class StreamingProcessor;
+class JpegProcessor;
+class ZslProcessorInterface;
+class CaptureSequencer;
+class CallbackProcessor;
+
+}
+
+class IMemory;
+/**
+ * Interface between android.hardware.Camera API and Camera HAL device for versions
+ * CAMERA_DEVICE_API_VERSION_2_0 and 3_0.
+ */
+class Camera2Client :
+        public Camera2ClientBase<CameraService::Client>
+{
+public:
+    /**
+     * ICamera interface (see ICamera for details)
+     */
+
+    virtual void            disconnect();
+    virtual status_t        connect(const sp<ICameraClient>& client);
+    virtual status_t        lock();
+    virtual status_t        unlock();
+    virtual status_t        setPreviewDisplay(const sp<Surface>& surface);
+    virtual status_t        setPreviewTexture(
+        const sp<IGraphicBufferProducer>& bufferProducer);
+    virtual void            setPreviewCallbackFlag(int flag);
+    virtual status_t        setPreviewCallbackTarget(
+        const sp<IGraphicBufferProducer>& callbackProducer);
+
+    virtual status_t        startPreview();
+    virtual void            stopPreview();
+    virtual bool            previewEnabled();
+    virtual status_t        storeMetaDataInBuffers(bool enabled);
+    virtual status_t        startRecording();
+    virtual void            stopRecording();
+    virtual bool            recordingEnabled();
+    virtual void            releaseRecordingFrame(const sp<IMemory>& mem);
+    virtual status_t        autoFocus();
+    virtual status_t        cancelAutoFocus();
+    virtual status_t        takePicture(int msgType);
+    virtual status_t        setParameters(const String8& params);
+    virtual String8         getParameters() const;
+    virtual status_t        sendCommand(int32_t cmd, int32_t arg1, int32_t arg2);
+
+    /**
+     * Interface used by CameraService
+     */
+
+    Camera2Client(const sp<CameraService>& cameraService,
+            const sp<ICameraClient>& cameraClient,
+            const String16& clientPackageName,
+            int cameraId,
+            int cameraFacing,
+            int clientPid,
+            uid_t clientUid,
+            int servicePid,
+            int deviceVersion);
+
+    virtual ~Camera2Client();
+
+    status_t initialize(camera_module_t *module);
+
+    virtual status_t dump(int fd, const Vector<String16>& args);
+
+    /**
+     * Interface used by CameraDeviceBase
+     */
+
+    virtual void notifyAutoFocus(uint8_t newState, int triggerId);
+    virtual void notifyAutoExposure(uint8_t newState, int triggerId);
+
+    /**
+     * Interface used by independent components of Camera2Client.
+     */
+
+    camera2::SharedParameters& getParameters();
+
+    int getPreviewStreamId() const;
+    int getCaptureStreamId() const;
+    int getCallbackStreamId() const;
+    int getRecordingStreamId() const;
+    int getZslStreamId() const;
+
+    status_t registerFrameListener(int32_t minId, int32_t maxId,
+            wp<camera2::FrameProcessor::FilteredListener> listener);
+    status_t removeFrameListener(int32_t minId, int32_t maxId,
+            wp<camera2::FrameProcessor::FilteredListener> listener);
+
+    status_t stopStream();
+
+    static size_t calculateBufferSize(int width, int height,
+            int format, int stride);
+
+    // Request ID ranges used to tag capture requests by use case. The three
+    // ranges are adjacent and non-overlapping (each "End" equals the next
+    // range's "Start"), so a request ID identifies which pipeline issued it.
+    static const int32_t kPreviewRequestIdStart = 10000000;
+    static const int32_t kPreviewRequestIdEnd   = 20000000;
+
+    static const int32_t kRecordingRequestIdStart  = 20000000;
+    static const int32_t kRecordingRequestIdEnd    = 30000000;
+
+    static const int32_t kCaptureRequestIdStart = 30000000;
+    static const int32_t kCaptureRequestIdEnd   = 40000000;
+
+private:
+    /** ICamera interface-related private members */
+    typedef camera2::Parameters Parameters;
+
+    status_t setPreviewWindowL(const sp<IBinder>& binder,
+            sp<ANativeWindow> window);
+    status_t startPreviewL(Parameters &params, bool restart);
+    void     stopPreviewL();
+    status_t startRecordingL(Parameters &params, bool restart);
+    bool     recordingEnabledL();
+
+    // Individual commands for sendCommand()
+    status_t commandStartSmoothZoomL();
+    status_t commandStopSmoothZoomL();
+    status_t commandSetDisplayOrientationL(int degrees);
+    status_t commandEnableShutterSoundL(bool enable);
+    status_t commandPlayRecordingSoundL();
+    status_t commandStartFaceDetectionL(int type);
+    status_t commandStopFaceDetectionL(Parameters &params);
+    status_t commandEnableFocusMoveMsgL(bool enable);
+    status_t commandPingL();
+    status_t commandSetVideoBufferCountL(size_t count);
+
+    // Current camera device configuration
+    camera2::SharedParameters mParameters;
+
+    /** Camera device-related private members */
+
+    void     setPreviewCallbackFlagL(Parameters &params, int flag);
+    status_t updateRequests(Parameters &params);
+    // CAMERA_DEVICE_API_VERSION_* of the underlying HAL device
+    int mDeviceVersion;
+
+    // Used with stream IDs
+    static const int NO_STREAM = -1;
+
+    // Two overloads: the first picks the processor's default updateStream
+    // method; the second lets the caller select a specific member function
+    // via the non-type template parameter.
+    template <typename ProcessorT>
+    status_t updateProcessorStream(sp<ProcessorT> processor, Parameters params);
+    template <typename ProcessorT,
+              status_t (ProcessorT::*updateStreamF)(const Parameters &)>
+    status_t updateProcessorStream(sp<ProcessorT> processor, Parameters params);
+
+    sp<camera2::FrameProcessor> mFrameProcessor;
+
+    /* Preview/Recording related members */
+
+    sp<IBinder> mPreviewSurface;
+    sp<camera2::StreamingProcessor> mStreamingProcessor;
+
+    /** Preview callback related members */
+
+    sp<camera2::CallbackProcessor> mCallbackProcessor;
+
+    /* Still image capture related members */
+
+    sp<camera2::CaptureSequencer> mCaptureSequencer;
+    sp<camera2::JpegProcessor> mJpegProcessor;
+    sp<camera2::ZslProcessorInterface> mZslProcessor;
+    sp<Thread> mZslProcessorThread;
+
+    /** Notification-related members */
+
+    bool mAfInMotion;
+
+    /** Utility members */
+
+    // Wait until the camera device has received the latest control settings
+    status_t syncWithDevice();
+};
+
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
new file mode 100644
index 0000000..ad8856b
--- /dev/null
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -0,0 +1,972 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "CameraClient"
+//#define LOG_NDEBUG 0
+
+#include <cutils/properties.h>
+#include <gui/Surface.h>
+
+#include "api1/CameraClient.h"
+#include "device1/CameraHardwareInterface.h"
+#include "CameraService.h"
+
+namespace android {
+
+#define LOG1(...) ALOGD_IF(gLogLevel >= 1, __VA_ARGS__);
+#define LOG2(...) ALOGD_IF(gLogLevel >= 2, __VA_ARGS__);
+
+static int getCallingPid() {
+    return IPCThreadState::self()->getCallingPid();
+}
+
+// Constructor: records client identity in the base Client and initializes all
+// members to their disconnected defaults. No HAL work happens here; the
+// hardware is opened later in initialize().
+CameraClient::CameraClient(const sp<CameraService>& cameraService,
+        const sp<ICameraClient>& cameraClient,
+        const String16& clientPackageName,
+        int cameraId, int cameraFacing,
+        int clientPid, int clientUid,
+        int servicePid):
+        Client(cameraService, cameraClient, clientPackageName,
+                cameraId, cameraFacing, clientPid, clientUid, servicePid)
+{
+    int callingPid = getCallingPid();
+    LOG1("CameraClient::CameraClient E (pid %d, id %d)", callingPid, cameraId);
+
+    mHardware = NULL;
+    mMsgEnabled = 0;
+    mSurface = 0;
+    mPreviewWindow = 0;
+    mDestructionStarted = false;
+
+    // Callback is disabled by default
+    mPreviewCallbackFlag = CAMERA_FRAME_CALLBACK_FLAG_NOOP;
+    mOrientation = getOrientation(0, mCameraFacing == CAMERA_FACING_FRONT);
+    mPlayShutterSound = true;
+    LOG1("CameraClient::CameraClient X (pid %d, id %d)", callingPid, cameraId);
+}
+
+// Opens the HAL v1 device for mCameraId, wires up the notify/data callbacks,
+// and enables the default message set. Returns OK on success; NO_INIT if the
+// hardware fails to initialize (mHardware is cleared in that case); or the
+// error from startCameraOps() if app-ops permission checks fail.
+status_t CameraClient::initialize(camera_module_t *module) {
+    int callingPid = getCallingPid();
+    status_t res;
+
+    LOG1("CameraClient::initialize E (pid %d, id %d)", callingPid, mCameraId);
+
+    // Verify ops permissions
+    res = startCameraOps();
+    if (res != OK) {
+        return res;
+    }
+
+    // HAL v1 identifies devices by a decimal-string name ("0", "1", ...)
+    char camera_device_name[10];
+    snprintf(camera_device_name, sizeof(camera_device_name), "%d", mCameraId);
+
+    mHardware = new CameraHardwareInterface(camera_device_name);
+    res = mHardware->initialize(&module->common);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: unable to initialize device: %s (%d)",
+                __FUNCTION__, mCameraId, strerror(-res), res);
+        mHardware.clear();
+        return NO_INIT;
+    }
+
+    // The camera ID is smuggled through the opaque cookie so the static
+    // callbacks can recover this client instance.
+    mHardware->setCallbacks(notifyCallback,
+            dataCallback,
+            dataCallbackTimestamp,
+            (void *)mCameraId);
+
+    // Enable zoom, error, focus, and metadata messages by default
+    enableMsgType(CAMERA_MSG_ERROR | CAMERA_MSG_ZOOM | CAMERA_MSG_FOCUS |
+                  CAMERA_MSG_PREVIEW_METADATA | CAMERA_MSG_FOCUS_MOVE);
+
+    LOG1("CameraClient::initialize X (pid %d, id %d)", callingPid, mCameraId);
+    return OK;
+}
+
+
+// tear down the client
+CameraClient::~CameraClient() {
+    // this lock should never be NULL
+    Mutex* lock = mCameraService->getClientLockById(mCameraId);
+    lock->lock();
+    mDestructionStarted = true;
+    // client will not be accessed from callback. should unlock to prevent dead-lock in disconnect
+    lock->unlock();
+    int callingPid = getCallingPid();
+    LOG1("CameraClient::~CameraClient E (pid %d, this %p)", callingPid, this);
+
+    disconnect();
+    LOG1("CameraClient::~CameraClient X (pid %d, this %p)", callingPid, this);
+}
+
+// Writes a one-line client summary to fd, then delegates to the HAL's dump.
+// NOTE(review): getRemoteCallback() is dereferenced without a null check here
+// — presumably a connected client always has a callback; confirm.
+status_t CameraClient::dump(int fd, const Vector<String16>& args) {
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    size_t len = snprintf(buffer, SIZE, "Client[%d] (%p) PID: %d\n",
+            mCameraId,
+            getRemoteCallback()->asBinder().get(),
+            mClientPid);
+    // Clamp in case snprintf reported truncation (it returns the would-be length).
+    len = (len > SIZE - 1) ? SIZE - 1 : len;
+    write(fd, buffer, len);
+    return mHardware->dump(fd, args);
+}
+
+// ----------------------------------------------------------------------------
+
+// Returns NO_ERROR if the binder caller is the process that owns (locked)
+// this camera, EBUSY otherwise.
+status_t CameraClient::checkPid() const {
+    int callingPid = getCallingPid();
+    if (callingPid == mClientPid) return NO_ERROR;
+
+    ALOGW("attempt to use a locked camera from a different process"
+         " (old pid %d, new pid %d)", mClientPid, callingPid);
+    return EBUSY;
+}
+
+// Combined precondition check used by most ICamera entry points: the caller
+// must own the camera (checkPid) and the HAL must still be open. Returns
+// INVALID_OPERATION if used after disconnect().
+status_t CameraClient::checkPidAndHardware() const {
+    status_t result = checkPid();
+    if (result != NO_ERROR) return result;
+    if (mHardware == 0) {
+        ALOGE("attempt to use a camera after disconnect() (pid %d)", getCallingPid());
+        return INVALID_OPERATION;
+    }
+    return NO_ERROR;
+}
+
+// ICamera::lock — claims the camera for the calling process. mClientPid == 0
+// means "unlocked"; the first caller becomes the owner.
+status_t CameraClient::lock() {
+    int callingPid = getCallingPid();
+    LOG1("lock (pid %d)", callingPid);
+    Mutex::Autolock lock(mLock);
+
+    // lock camera to this client if the the camera is unlocked
+    if (mClientPid == 0) {
+        mClientPid = callingPid;
+        return NO_ERROR;
+    }
+
+    // returns NO_ERROR if the client already owns the camera, EBUSY otherwise
+    return checkPid();
+}
+
+// ICamera::unlock — releases ownership so another process may connect.
+// Refused (INVALID_OPERATION) while recording is active.
+status_t CameraClient::unlock() {
+    int callingPid = getCallingPid();
+    LOG1("unlock (pid %d)", callingPid);
+    Mutex::Autolock lock(mLock);
+
+    // allow anyone to use camera (after they lock the camera)
+    status_t result = checkPid();
+    if (result == NO_ERROR) {
+        if (mHardware->recordingEnabled()) {
+            ALOGE("Not allowed to unlock camera during recording.");
+            return INVALID_OPERATION;
+        }
+        mClientPid = 0;
+        LOG1("clear mRemoteCallback (pid %d)", callingPid);
+        // we need to remove the reference to ICameraClient so that when the app
+        // goes away, the reference count goes to 0.
+        mRemoteCallback.clear();
+    }
+    return result;
+}
+
+// connect a new client to the camera
+status_t CameraClient::connect(const sp<ICameraClient>& client) {
+    int callingPid = getCallingPid();
+    LOG1("connect E (pid %d)", callingPid);
+    Mutex::Autolock lock(mLock);
+
+    if (mClientPid != 0 && checkPid() != NO_ERROR) {
+        ALOGW("Tried to connect to a locked camera (old pid %d, new pid %d)",
+                mClientPid, callingPid);
+        return EBUSY;
+    }
+
+    if (mRemoteCallback != 0 &&
+        (client->asBinder() == mRemoteCallback->asBinder())) {
+        LOG1("Connect to the same client");
+        return NO_ERROR;
+    }
+
+    mPreviewCallbackFlag = CAMERA_FRAME_CALLBACK_FLAG_NOOP;
+    mClientPid = callingPid;
+    mRemoteCallback = client;
+
+    LOG1("connect X (pid %d)", callingPid);
+    return NO_ERROR;
+}
+
+// File-local helper: disconnects the camera API from an ANativeWindow,
+// logging (but otherwise ignoring) any failure. Safe to call with null.
+static void disconnectWindow(const sp<ANativeWindow>& window) {
+    if (window != 0) {
+        status_t result = native_window_api_disconnect(window.get(),
+                NATIVE_WINDOW_API_CAMERA);
+        if (result != NO_ERROR) {
+            ALOGW("native_window_api_disconnect failed: %s (%d)", strerror(-result),
+                    result);
+        }
+    }
+}
+
+// ICamera::disconnect — stops all camera activity and releases the HAL.
+// Callable by the owning client or by the media server process; idempotent
+// (the mHardware == 0 check makes repeat calls a no-op).
+void CameraClient::disconnect() {
+    int callingPid = getCallingPid();
+    LOG1("disconnect E (pid %d)", callingPid);
+    Mutex::Autolock lock(mLock);
+
+    // Allow both client and the media server to disconnect at all times
+    if (callingPid != mClientPid && callingPid != mServicePid) {
+        ALOGW("different client - don't disconnect");
+        return;
+    }
+
+    if (mClientPid <= 0) {
+        LOG1("camera is unlocked (mClientPid = %d), don't tear down hardware", mClientPid);
+        return;
+    }
+
+    // Make sure disconnect() is done once and once only, whether it is called
+    // from the user directly, or called by the destructor.
+    if (mHardware == 0) return;
+
+    LOG1("hardware teardown");
+    // Before destroying mHardware, we must make sure it's in the
+    // idle state.
+    // Turn off all messages.
+    disableMsgType(CAMERA_MSG_ALL_MSGS);
+    mHardware->stopPreview();
+    mHardware->cancelPicture();
+    // Release the hardware resources.
+    mHardware->release();
+
+    // Release the held ANativeWindow resources.
+    if (mPreviewWindow != 0) {
+        disconnectWindow(mPreviewWindow);
+        mPreviewWindow = 0;
+        // Tell the HAL the window is gone before dropping mHardware.
+        mHardware->setPreviewWindow(mPreviewWindow);
+    }
+    mHardware.clear();
+
+    CameraService::Client::disconnect();
+
+    LOG1("disconnect X (pid %d)", callingPid);
+}
+
+// ----------------------------------------------------------------------------
+
+// Shared implementation for setPreviewDisplay/setPreviewTexture: connects to
+// the new window, points the HAL at it if preview is already running, and
+// only swaps out (and disconnects) the old window once everything succeeded.
+// 'binder' identifies the producer so redundant calls can be detected.
+status_t CameraClient::setPreviewWindow(const sp<IBinder>& binder,
+        const sp<ANativeWindow>& window) {
+    Mutex::Autolock lock(mLock);
+    status_t result = checkPidAndHardware();
+    if (result != NO_ERROR) return result;
+
+    // return if no change in surface.
+    if (binder == mSurface) {
+        return NO_ERROR;
+    }
+
+    if (window != 0) {
+        result = native_window_api_connect(window.get(), NATIVE_WINDOW_API_CAMERA);
+        if (result != NO_ERROR) {
+            ALOGE("native_window_api_connect failed: %s (%d)", strerror(-result),
+                    result);
+            return result;
+        }
+    }
+
+    // If preview has been already started, register preview buffers now.
+    if (mHardware->previewEnabled()) {
+        if (window != 0) {
+            native_window_set_scaling_mode(window.get(),
+                    NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
+            native_window_set_buffers_transform(window.get(), mOrientation);
+            result = mHardware->setPreviewWindow(window);
+        }
+    }
+
+    if (result == NO_ERROR) {
+        // Everything has succeeded.  Disconnect the old window and remember the
+        // new window.
+        disconnectWindow(mPreviewWindow);
+        mSurface = binder;
+        mPreviewWindow = window;
+    } else {
+        // Something went wrong after we connected to the new window, so
+        // disconnect here.
+        disconnectWindow(window);
+    }
+
+    return result;
+}
+
+// set the Surface that the preview will use
+status_t CameraClient::setPreviewDisplay(const sp<Surface>& surface) {
+    LOG1("setPreviewDisplay(%p) (pid %d)", surface.get(), getCallingPid());
+
+    sp<IBinder> binder(surface != 0 ? surface->getIGraphicBufferProducer()->asBinder() : 0);
+    sp<ANativeWindow> window(surface);
+    return setPreviewWindow(binder, window);
+}
+
+// set the SurfaceTextureClient that the preview will use
+status_t CameraClient::setPreviewTexture(
+        const sp<IGraphicBufferProducer>& bufferProducer) {
+    LOG1("setPreviewTexture(%p) (pid %d)", bufferProducer.get(),
+            getCallingPid());
+
+    sp<IBinder> binder;
+    sp<ANativeWindow> window;
+    if (bufferProducer != 0) {
+        binder = bufferProducer->asBinder();
+        window = new Surface(bufferProducer);
+    }
+    return setPreviewWindow(binder, window);
+}
+
+// set the preview callback flag to affect how the received frames from
+// preview are handled.
+void CameraClient::setPreviewCallbackFlag(int callback_flag) {
+    LOG1("setPreviewCallbackFlag(%d) (pid %d)", callback_flag, getCallingPid());
+    Mutex::Autolock lock(mLock);
+    if (checkPidAndHardware() != NO_ERROR) return;
+
+    mPreviewCallbackFlag = callback_flag;
+    if (mPreviewCallbackFlag & CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) {
+        enableMsgType(CAMERA_MSG_PREVIEW_FRAME);
+    } else {
+        disableMsgType(CAMERA_MSG_PREVIEW_FRAME);
+    }
+}
+
+// Not supported on the HAL v1 path; always returns INVALID_OPERATION.
+status_t CameraClient::setPreviewCallbackTarget(
+        const sp<IGraphicBufferProducer>& callbackProducer) {
+    (void)callbackProducer;
+    ALOGE("%s: Unimplemented!", __FUNCTION__);
+    return INVALID_OPERATION;
+}
+
+// start preview mode
+status_t CameraClient::startPreview() {
+    LOG1("startPreview (pid %d)", getCallingPid());
+    return startCameraMode(CAMERA_PREVIEW_MODE);
+}
+
+// start recording mode
+status_t CameraClient::startRecording() {
+    LOG1("startRecording (pid %d)", getCallingPid());
+    return startCameraMode(CAMERA_RECORDING_MODE);
+}
+
+// start preview or recording
+status_t CameraClient::startCameraMode(camera_mode mode) {
+    LOG1("startCameraMode(%d)", mode);
+    Mutex::Autolock lock(mLock);
+    status_t result = checkPidAndHardware();
+    if (result != NO_ERROR) return result;
+
+    switch(mode) {
+        case CAMERA_PREVIEW_MODE:
+            if (mSurface == 0 && mPreviewWindow == 0) {
+                LOG1("mSurface is not set yet.");
+                // still able to start preview in this case.
+            }
+            return startPreviewMode();
+        case CAMERA_RECORDING_MODE:
+            if (mSurface == 0 && mPreviewWindow == 0) {
+                ALOGE("mSurface or mPreviewWindow must be set before startRecordingMode.");
+                return INVALID_OPERATION;
+            }
+            return startRecordingMode();
+        default:
+            return UNKNOWN_ERROR;
+    }
+}
+
+// Starts HAL preview. No-op if preview is already running. Configures the
+// preview window's scaling/transform before handing it to the HAL.
+// Caller must hold mLock.
+status_t CameraClient::startPreviewMode() {
+    LOG1("startPreviewMode");
+    status_t result = NO_ERROR;
+
+    // if preview has been enabled, nothing needs to be done
+    if (mHardware->previewEnabled()) {
+        return NO_ERROR;
+    }
+
+    if (mPreviewWindow != 0) {
+        native_window_set_scaling_mode(mPreviewWindow.get(),
+                NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
+        native_window_set_buffers_transform(mPreviewWindow.get(),
+                mOrientation);
+    }
+    mHardware->setPreviewWindow(mPreviewWindow);
+    result = mHardware->startPreview();
+
+    return result;
+}
+
+// Starts HAL recording, first starting preview if needed. No-op if recording
+// is already enabled. Enables video-frame messages and plays the recording
+// sound before asking the HAL to record. Caller must hold mLock.
+status_t CameraClient::startRecordingMode() {
+    LOG1("startRecordingMode");
+    status_t result = NO_ERROR;
+
+    // if recording has been enabled, nothing needs to be done
+    if (mHardware->recordingEnabled()) {
+        return NO_ERROR;
+    }
+
+    // if preview has not been started, start preview first
+    if (!mHardware->previewEnabled()) {
+        result = startPreviewMode();
+        if (result != NO_ERROR) {
+            return result;
+        }
+    }
+
+    // start recording mode
+    enableMsgType(CAMERA_MSG_VIDEO_FRAME);
+    mCameraService->playSound(CameraService::SOUND_RECORDING);
+    result = mHardware->startRecording();
+    if (result != NO_ERROR) {
+        ALOGE("mHardware->startRecording() failed with status %d", result);
+    }
+    return result;
+}
+
+// stop preview mode
+void CameraClient::stopPreview() {
+    LOG1("stopPreview (pid %d)", getCallingPid());
+    Mutex::Autolock lock(mLock);
+    if (checkPidAndHardware() != NO_ERROR) return;
+
+
+    disableMsgType(CAMERA_MSG_PREVIEW_FRAME);
+    mHardware->stopPreview();
+
+    mPreviewBuffer.clear();
+}
+
+// stop recording mode
+void CameraClient::stopRecording() {
+    LOG1("stopRecording (pid %d)", getCallingPid());
+    Mutex::Autolock lock(mLock);
+    if (checkPidAndHardware() != NO_ERROR) return;
+
+    disableMsgType(CAMERA_MSG_VIDEO_FRAME);
+    mHardware->stopRecording();
+    mCameraService->playSound(CameraService::SOUND_RECORDING);
+
+    mPreviewBuffer.clear();
+}
+
+// release a recording frame
+void CameraClient::releaseRecordingFrame(const sp<IMemory>& mem) {
+    Mutex::Autolock lock(mLock);
+    if (checkPidAndHardware() != NO_ERROR) return;
+    mHardware->releaseRecordingFrame(mem);
+}
+
+// Toggles metadata-in-buffers mode for video frames; forwarded to the HAL.
+status_t CameraClient::storeMetaDataInBuffers(bool enabled)
+{
+    LOG1("storeMetaDataInBuffers: %s", enabled? "true": "false");
+    Mutex::Autolock lock(mLock);
+    if (checkPidAndHardware() != NO_ERROR) {
+        return UNKNOWN_ERROR;
+    }
+    return mHardware->storeMetaDataInBuffers(enabled);
+}
+
+// Returns whether HAL preview is active; false if the caller fails the
+// pid/hardware check.
+bool CameraClient::previewEnabled() {
+    LOG1("previewEnabled (pid %d)", getCallingPid());
+
+    Mutex::Autolock lock(mLock);
+    if (checkPidAndHardware() != NO_ERROR) return false;
+    return mHardware->previewEnabled();
+}
+
+// Returns whether HAL recording is active; false if the caller fails the
+// pid/hardware check.
+bool CameraClient::recordingEnabled() {
+    LOG1("recordingEnabled (pid %d)", getCallingPid());
+
+    Mutex::Autolock lock(mLock);
+    if (checkPidAndHardware() != NO_ERROR) return false;
+    return mHardware->recordingEnabled();
+}
+
+// Triggers an autofocus sweep; completion is reported asynchronously via
+// CAMERA_MSG_FOCUS.
+status_t CameraClient::autoFocus() {
+    LOG1("autoFocus (pid %d)", getCallingPid());
+
+    Mutex::Autolock lock(mLock);
+    status_t result = checkPidAndHardware();
+    if (result != NO_ERROR) return result;
+
+    return mHardware->autoFocus();
+}
+
+// Cancels an in-progress autofocus sweep.
+status_t CameraClient::cancelAutoFocus() {
+    LOG1("cancelAutoFocus (pid %d)", getCallingPid());
+
+    Mutex::Autolock lock(mLock);
+    status_t result = checkPidAndHardware();
+    if (result != NO_ERROR) return result;
+
+    return mHardware->cancelAutoFocus();
+}
+
+// take a picture - image is returned in callback
+status_t CameraClient::takePicture(int msgType) {
+    LOG1("takePicture (pid %d): 0x%x", getCallingPid(), msgType);
+
+    Mutex::Autolock lock(mLock);
+    status_t result = checkPidAndHardware();
+    if (result != NO_ERROR) return result;
+
+    if ((msgType & CAMERA_MSG_RAW_IMAGE) &&
+        (msgType & CAMERA_MSG_RAW_IMAGE_NOTIFY)) {
+        ALOGE("CAMERA_MSG_RAW_IMAGE and CAMERA_MSG_RAW_IMAGE_NOTIFY"
+                " cannot be both enabled");
+        return BAD_VALUE;
+    }
+
+    // We only accept picture related message types
+    // and ignore other types of messages for takePicture().
+    int picMsgType = msgType
+                        & (CAMERA_MSG_SHUTTER |
+                           CAMERA_MSG_POSTVIEW_FRAME |
+                           CAMERA_MSG_RAW_IMAGE |
+                           CAMERA_MSG_RAW_IMAGE_NOTIFY |
+                           CAMERA_MSG_COMPRESSED_IMAGE);
+
+    enableMsgType(picMsgType);
+
+    return mHardware->takePicture();
+}
+
+// set preview/capture parameters - key/value pairs
+status_t CameraClient::setParameters(const String8& params) {
+    LOG1("setParameters (pid %d) (%s)", getCallingPid(), params.string());
+
+    Mutex::Autolock lock(mLock);
+    status_t result = checkPidAndHardware();
+    if (result != NO_ERROR) return result;
+
+    CameraParameters p(params);
+    return mHardware->setParameters(p);
+}
+
+// get preview/capture parameters - key/value pairs
+String8 CameraClient::getParameters() const {
+    Mutex::Autolock lock(mLock);
+    if (checkPidAndHardware() != NO_ERROR) return String8();
+
+    String8 params(mHardware->getParameters().flatten());
+    LOG1("getParameters (pid %d) (%s)", getCallingPid(), params.string());
+    return params;
+}
+
+// enable shutter sound
+status_t CameraClient::enableShutterSound(bool enable) {
+    LOG1("enableShutterSound (pid %d)", getCallingPid());
+
+    status_t result = checkPidAndHardware();
+    if (result != NO_ERROR) return result;
+
+    if (enable) {
+        mPlayShutterSound = true;
+        return OK;
+    }
+
+    // Disabling shutter sound may not be allowed. In that case only
+    // allow the mediaserver process to disable the sound.
+    char value[PROPERTY_VALUE_MAX];
+    property_get("ro.camera.sound.forced", value, "0");
+    if (strcmp(value, "0") != 0) {
+        // Disabling shutter sound is not allowed. Deny if the current
+        // process is not mediaserver.
+        if (getCallingPid() != getpid()) {
+            ALOGE("Failed to disable shutter sound. Permission denied (pid %d)", getCallingPid());
+            return PERMISSION_DENIED;
+        }
+    }
+
+    mPlayShutterSound = false;
+    return OK;
+}
+
+// ICamera::sendCommand — handles a few commands locally (display
+// orientation, shutter sound, recording sound, ping) and forwards everything
+// else to the HAL.
+status_t CameraClient::sendCommand(int32_t cmd, int32_t arg1, int32_t arg2) {
+    LOG1("sendCommand (pid %d)", getCallingPid());
+    int orientation;
+    Mutex::Autolock lock(mLock);
+    status_t result = checkPidAndHardware();
+    if (result != NO_ERROR) return result;
+
+    if (cmd == CAMERA_CMD_SET_DISPLAY_ORIENTATION) {
+        // Mirror the preview if the camera is front-facing.
+        orientation = getOrientation(arg1, mCameraFacing == CAMERA_FACING_FRONT);
+        if (orientation == -1) return BAD_VALUE;
+
+        if (mOrientation != orientation) {
+            mOrientation = orientation;
+            if (mPreviewWindow != 0) {
+                native_window_set_buffers_transform(mPreviewWindow.get(),
+                        mOrientation);
+            }
+        }
+        return OK;
+    } else if (cmd == CAMERA_CMD_ENABLE_SHUTTER_SOUND) {
+        switch (arg1) {
+            case 0:
+                return enableShutterSound(false);
+            case 1:
+                return enableShutterSound(true);
+            default:
+                return BAD_VALUE;
+        }
+        return OK;
+    } else if (cmd == CAMERA_CMD_PLAY_RECORDING_SOUND) {
+        mCameraService->playSound(CameraService::SOUND_RECORDING);
+    } else if (cmd == CAMERA_CMD_SET_VIDEO_BUFFER_COUNT) {
+        // Not supported on this path; report INVALID_OPERATION rather than
+        // forwarding the command to the HAL.
+        return INVALID_OPERATION;
+    } else if (cmd == CAMERA_CMD_PING) {
+        // If mHardware is 0, checkPidAndHardware will return error.
+        return OK;
+    }
+
+    return mHardware->sendCommand(cmd, arg1, arg2);
+}
+
+// ----------------------------------------------------------------------------
+
+// Atomically adds msgType bits to the enabled set, then tells the HAL.
+void CameraClient::enableMsgType(int32_t msgType) {
+    android_atomic_or(msgType, &mMsgEnabled);
+    mHardware->enableMsgType(msgType);
+}
+
+// Atomically clears msgType bits from the enabled set, then tells the HAL.
+void CameraClient::disableMsgType(int32_t msgType) {
+    android_atomic_and(~msgType, &mMsgEnabled);
+    mHardware->disableMsgType(msgType);
+}
+
+#define CHECK_MESSAGE_INTERVAL 10 // 10ms
+// Tries to acquire mLock as long as msgType is still wanted, polling every
+// CHECK_MESSAGE_INTERVAL ms. Returns true with mLock HELD (caller/handler
+// must release it), or false — message no longer wanted — with mLock not
+// held, in which case the callback should drop the message.
+bool CameraClient::lockIfMessageWanted(int32_t msgType) {
+    int sleepCount = 0;
+    while (mMsgEnabled & msgType) {
+        if (mLock.tryLock() == NO_ERROR) {
+            if (sleepCount > 0) {
+                LOG1("lockIfMessageWanted(%d): waited for %d ms",
+                    msgType, sleepCount * CHECK_MESSAGE_INTERVAL);
+            }
+            return true;
+        }
+        if (sleepCount++ == 0) {
+            LOG1("lockIfMessageWanted(%d): enter sleep", msgType);
+        }
+        usleep(CHECK_MESSAGE_INTERVAL * 1000);
+    }
+    ALOGW("lockIfMessageWanted(%d): dropped unwanted message", msgType);
+    return false;
+}
+
+// Callback messages can be dispatched to internal handlers or pass to our
+// client's callback functions, depending on the message type.
+//
+// notifyCallback:
+//      CAMERA_MSG_SHUTTER              handleShutter
+//      (others)                        c->notifyCallback
+// dataCallback:
+//      CAMERA_MSG_PREVIEW_FRAME        handlePreviewData
+//      CAMERA_MSG_POSTVIEW_FRAME       handlePostview
+//      CAMERA_MSG_RAW_IMAGE            handleRawPicture
+//      CAMERA_MSG_COMPRESSED_IMAGE     handleCompressedPicture
+//      (others)                        c->dataCallback
+// dataCallbackTimestamp
+//      (others)                        c->dataCallbackTimestamp
+//
+// NOTE: the *Callback functions grab mLock of the client before passing
+// control to handle* functions. So the handle* functions must release the
+// lock before calling the ICameraClient's callbacks, so those callbacks can
+// invoke methods in the Client class again (For example, the preview frame
+// callback may want to releaseRecordingFrame). The handle* functions must
+// release the lock after all accesses to member variables, so it must be
+// handled very carefully.
+
+// Static HAL notify callback. Recovers the client from the cookie under the
+// per-camera service lock, takes mLock via lockIfMessageWanted (the handle*
+// function releases it — see the lock-discipline comment above), then
+// dispatches: shutter events to handleShutter, everything else to
+// handleGenericNotify.
+void CameraClient::notifyCallback(int32_t msgType, int32_t ext1,
+        int32_t ext2, void* user) {
+    LOG2("notifyCallback(%d)", msgType);
+
+    Mutex* lock = getClientLockFromCookie(user);
+    if (lock == NULL) return;
+    Mutex::Autolock alock(*lock);
+
+    CameraClient* client =
+            static_cast<CameraClient*>(getClientFromCookie(user));
+    if (client == NULL) return;
+
+    if (!client->lockIfMessageWanted(msgType)) return;
+
+    switch (msgType) {
+        case CAMERA_MSG_SHUTTER:
+            // ext1 is the dimension of the yuv picture.
+            client->handleShutter();
+            break;
+        default:
+            client->handleGenericNotify(msgType, ext1, ext2);
+            break;
+    }
+}
+
+// Static HAL data callback. Same cookie/lock recovery as notifyCallback;
+// mLock is held on entry to the handle* functions (they release it).
+// The preview-metadata bit is masked off before dispatch so frames carrying
+// metadata still route to their base message handler.
+void CameraClient::dataCallback(int32_t msgType,
+        const sp<IMemory>& dataPtr, camera_frame_metadata_t *metadata, void* user) {
+    LOG2("dataCallback(%d)", msgType);
+
+    Mutex* lock = getClientLockFromCookie(user);
+    if (lock == NULL) return;
+    Mutex::Autolock alock(*lock);
+
+    CameraClient* client =
+            static_cast<CameraClient*>(getClientFromCookie(user));
+    if (client == NULL) return;
+
+    if (!client->lockIfMessageWanted(msgType)) return;
+    if (dataPtr == 0 && metadata == NULL) {
+        ALOGE("Null data returned in data callback");
+        // Surface the HAL error to the client instead of dispatching.
+        client->handleGenericNotify(CAMERA_MSG_ERROR, UNKNOWN_ERROR, 0);
+        return;
+    }
+
+    switch (msgType & ~CAMERA_MSG_PREVIEW_METADATA) {
+        case CAMERA_MSG_PREVIEW_FRAME:
+            client->handlePreviewData(msgType, dataPtr, metadata);
+            break;
+        case CAMERA_MSG_POSTVIEW_FRAME:
+            client->handlePostview(dataPtr);
+            break;
+        case CAMERA_MSG_RAW_IMAGE:
+            client->handleRawPicture(dataPtr);
+            break;
+        case CAMERA_MSG_COMPRESSED_IMAGE:
+            client->handleCompressedPicture(dataPtr);
+            break;
+        default:
+            client->handleGenericData(msgType, dataPtr, metadata);
+            break;
+    }
+}
+
+// Static HAL timestamped-data callback (recording frames); same cookie and
+// locking protocol as dataCallback(). All messages funnel through the
+// generic handler, which releases mLock before calling out.
+void CameraClient::dataCallbackTimestamp(nsecs_t timestamp,
+        int32_t msgType, const sp<IMemory>& dataPtr, void* user) {
+    LOG2("dataCallbackTimestamp(%d)", msgType);
+
+    Mutex* lock = getClientLockFromCookie(user);
+    if (lock == NULL) return;
+    Mutex::Autolock alock(*lock);
+
+    CameraClient* client =
+            static_cast<CameraClient*>(getClientFromCookie(user));
+    if (client == NULL) return;
+
+    if (!client->lockIfMessageWanted(msgType)) return;
+
+    // Timestamped frames must always carry data; report a HAL error otherwise.
+    if (dataPtr == 0) {
+        ALOGE("Null data returned in data with timestamp callback");
+        client->handleGenericNotify(CAMERA_MSG_ERROR, UNKNOWN_ERROR, 0);
+        return;
+    }
+
+    client->handleGenericDataTimestamp(timestamp, msgType, dataPtr);
+}
+
+// snapshot taken callback
+// Entered with mLock held; mLock must be released before calling into the
+// remote client and is re-acquired afterwards via lockIfMessageWanted().
+// Every exit path leaves mLock unlocked.
+void CameraClient::handleShutter(void) {
+    if (mPlayShutterSound) {
+        mCameraService->playSound(CameraService::SOUND_SHUTTER);
+    }
+
+    sp<ICameraClient> c = mRemoteCallback;
+    if (c != 0) {
+        mLock.unlock();
+        c->notifyCallback(CAMERA_MSG_SHUTTER, 0, 0);
+        // Re-grab mLock; give up if SHUTTER was disabled while unlocked.
+        if (!lockIfMessageWanted(CAMERA_MSG_SHUTTER)) return;
+    }
+    // Shutter is a one-shot message per takePicture().
+    disableMsgType(CAMERA_MSG_SHUTTER);
+
+    mLock.unlock();
+}
+
+// preview callback - frame buffer update
+// Entered with mLock held; every exit path (disabled, copy-out, forward, or
+// no client) must leave mLock unlocked before returning or calling out.
+void CameraClient::handlePreviewData(int32_t msgType,
+                                              const sp<IMemory>& mem,
+                                              camera_frame_metadata_t *metadata) {
+    ssize_t offset;
+    size_t size;
+    sp<IMemoryHeap> heap = mem->getMemory(&offset, &size);
+
+    // local copy of the callback flags, taken while mLock is still held
+    int flags = mPreviewCallbackFlag;
+
+    // is callback enabled?
+    if (!(flags & CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK)) {
+        // If the enable bit is off, the copy-out and one-shot bits are ignored
+        LOG2("frame callback is disabled");
+        mLock.unlock();
+        return;
+    }
+
+    // hold a strong pointer to the client
+    sp<ICameraClient> c = mRemoteCallback;
+
+    // clear callback flags if no client or one-shot mode
+    if (c == 0 || (mPreviewCallbackFlag & CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK)) {
+        LOG2("Disable preview callback");
+        mPreviewCallbackFlag &= ~(CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK |
+                                  CAMERA_FRAME_CALLBACK_FLAG_COPY_OUT_MASK |
+                                  CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK);
+        disableMsgType(CAMERA_MSG_PREVIEW_FRAME);
+    }
+
+    if (c != 0) {
+        // Is the received frame copied out or not?
+        if (flags & CAMERA_FRAME_CALLBACK_FLAG_COPY_OUT_MASK) {
+            LOG2("frame is copied");
+            // copyFrameAndPostCopiedFrame() unlocks mLock on all its paths.
+            copyFrameAndPostCopiedFrame(msgType, c, heap, offset, size, metadata);
+        } else {
+            LOG2("frame is forwarded");
+            mLock.unlock();
+            c->dataCallback(msgType, mem, metadata);
+        }
+    } else {
+        mLock.unlock();
+    }
+}
+
+// picture callback - postview image ready
+// Entered with mLock held; unlocks before invoking the remote callback.
+void CameraClient::handlePostview(const sp<IMemory>& mem) {
+    // One-shot per capture: disable further postview messages.
+    disableMsgType(CAMERA_MSG_POSTVIEW_FRAME);
+
+    sp<ICameraClient> c = mRemoteCallback;
+    mLock.unlock();
+    if (c != 0) {
+        c->dataCallback(CAMERA_MSG_POSTVIEW_FRAME, mem, NULL);
+    }
+}
+
+// picture callback - raw image ready
+// Entered with mLock held; unlocks before invoking the remote callback.
+// (The previous version fetched the underlying IMemoryHeap/offset/size here,
+// but never used them — that dead lookup is removed, matching the other
+// picture handlers.)
+void CameraClient::handleRawPicture(const sp<IMemory>& mem) {
+    // One-shot per capture: disable further raw-image messages.
+    disableMsgType(CAMERA_MSG_RAW_IMAGE);
+
+    sp<ICameraClient> c = mRemoteCallback;
+    mLock.unlock();
+    if (c != 0) {
+        c->dataCallback(CAMERA_MSG_RAW_IMAGE, mem, NULL);
+    }
+}
+
+// picture callback - compressed picture ready
+// Entered with mLock held; unlocks before invoking the remote callback.
+void CameraClient::handleCompressedPicture(const sp<IMemory>& mem) {
+    // One-shot per capture: disable further compressed-image messages.
+    disableMsgType(CAMERA_MSG_COMPRESSED_IMAGE);
+
+    sp<ICameraClient> c = mRemoteCallback;
+    mLock.unlock();
+    if (c != 0) {
+        c->dataCallback(CAMERA_MSG_COMPRESSED_IMAGE, mem, NULL);
+    }
+}
+
+
+// Fallback for notify messages with no dedicated handler.
+// Entered with mLock held; unlocks before invoking the remote callback.
+void CameraClient::handleGenericNotify(int32_t msgType,
+    int32_t ext1, int32_t ext2) {
+    sp<ICameraClient> c = mRemoteCallback;
+    mLock.unlock();
+    if (c != 0) {
+        c->notifyCallback(msgType, ext1, ext2);
+    }
+}
+
+// Fallback for data messages with no dedicated handler.
+// Entered with mLock held; unlocks before invoking the remote callback.
+void CameraClient::handleGenericData(int32_t msgType,
+    const sp<IMemory>& dataPtr, camera_frame_metadata_t *metadata) {
+    sp<ICameraClient> c = mRemoteCallback;
+    mLock.unlock();
+    if (c != 0) {
+        c->dataCallback(msgType, dataPtr, metadata);
+    }
+}
+
+// Forwards timestamped data (e.g. recording frames) to the remote client.
+// Entered with mLock held; unlocks before invoking the remote callback.
+void CameraClient::handleGenericDataTimestamp(nsecs_t timestamp,
+    int32_t msgType, const sp<IMemory>& dataPtr) {
+    sp<ICameraClient> c = mRemoteCallback;
+    mLock.unlock();
+    if (c != 0) {
+        c->dataCallbackTimestamp(timestamp, msgType, dataPtr);
+    }
+}
+
+// Copies the preview frame out of the HAL's heap into a service-owned buffer
+// and posts the copy to the client. Entered with mLock held; every path
+// (allocation failure or success) unlocks mLock before returning/calling out.
+void CameraClient::copyFrameAndPostCopiedFrame(
+        int32_t msgType, const sp<ICameraClient>& client,
+        const sp<IMemoryHeap>& heap, size_t offset, size_t size,
+        camera_frame_metadata_t *metadata) {
+    LOG2("copyFrameAndPostCopiedFrame");
+    // It is necessary to copy out of pmem before sending this to
+    // the callback. For efficiency, reuse the same MemoryHeapBase
+    // provided it's big enough. Don't allocate the memory or
+    // perform the copy if there's no callback.
+    // hold the preview lock while we grab a reference to the preview buffer
+    sp<MemoryHeapBase> previewBuffer;
+
+    // Lazily allocate, or grow, the reusable copy buffer.
+    if (mPreviewBuffer == 0) {
+        mPreviewBuffer = new MemoryHeapBase(size, 0, NULL);
+    } else if (size > mPreviewBuffer->virtualSize()) {
+        mPreviewBuffer.clear();
+        mPreviewBuffer = new MemoryHeapBase(size, 0, NULL);
+    }
+    if (mPreviewBuffer == 0) {
+        ALOGE("failed to allocate space for preview buffer");
+        mLock.unlock();
+        return;
+    }
+    // Keep a local strong reference so the buffer survives after mLock is
+    // dropped, even if mPreviewBuffer is replaced concurrently.
+    previewBuffer = mPreviewBuffer;
+
+    memcpy(previewBuffer->base(), (uint8_t *)heap->base() + offset, size);
+
+    sp<MemoryBase> frame = new MemoryBase(previewBuffer, 0, size);
+    if (frame == 0) {
+        ALOGE("failed to allocate space for frame callback");
+        mLock.unlock();
+        return;
+    }
+
+    mLock.unlock();
+    client->dataCallback(msgType, frame, metadata);
+}
+
+// Maps a clockwise display rotation (0/90/180/270 degrees), optionally with a
+// horizontal mirror for front-facing cameras, onto the HAL transform flags.
+// Note FLIP_H + ROT_180 collapses to FLIP_V, and FLIP_H + ROT_270 to
+// FLIP_V + ROT_90. Returns -1 for any unsupported angle.
+int CameraClient::getOrientation(int degrees, bool mirror) {
+    switch (degrees) {
+        case 0:
+            return mirror ? HAL_TRANSFORM_FLIP_H : 0;
+        case 90:
+            return mirror ? (HAL_TRANSFORM_FLIP_H | HAL_TRANSFORM_ROT_90)
+                          : HAL_TRANSFORM_ROT_90;
+        case 180:
+            return mirror ? HAL_TRANSFORM_FLIP_V : HAL_TRANSFORM_ROT_180;
+        case 270:
+            return mirror ? (HAL_TRANSFORM_FLIP_V | HAL_TRANSFORM_ROT_90)
+                          : HAL_TRANSFORM_ROT_270;
+        default:
+            ALOGE("Invalid setDisplayOrientation degrees=%d", degrees);
+            return -1;
+    }
+}
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/api1/CameraClient.h b/services/camera/libcameraservice/api1/CameraClient.h
new file mode 100644
index 0000000..abde75a
--- /dev/null
+++ b/services/camera/libcameraservice/api1/CameraClient.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERACLIENT_H
+#define ANDROID_SERVERS_CAMERA_CAMERACLIENT_H
+
+#include "CameraService.h"
+
+namespace android {
+
+class MemoryHeapBase;
+class CameraHardwareInterface;
+
+/**
+ * Interface between android.hardware.Camera API and Camera HAL device for version
+ * CAMERA_DEVICE_API_VERSION_1_0.
+ */
+
+class CameraClient : public CameraService::Client
+{
+public:
+    // ICamera interface (see ICamera for details)
+    virtual void            disconnect();
+    virtual status_t        connect(const sp<ICameraClient>& client);
+    virtual status_t        lock();
+    virtual status_t        unlock();
+    virtual status_t        setPreviewDisplay(const sp<Surface>& surface);
+    virtual status_t        setPreviewTexture(const sp<IGraphicBufferProducer>& bufferProducer);
+    virtual void            setPreviewCallbackFlag(int flag);
+    virtual status_t        setPreviewCallbackTarget(
+            const sp<IGraphicBufferProducer>& callbackProducer);
+    virtual status_t        startPreview();
+    virtual void            stopPreview();
+    virtual bool            previewEnabled();
+    virtual status_t        storeMetaDataInBuffers(bool enabled);
+    virtual status_t        startRecording();
+    virtual void            stopRecording();
+    virtual bool            recordingEnabled();
+    virtual void            releaseRecordingFrame(const sp<IMemory>& mem);
+    virtual status_t        autoFocus();
+    virtual status_t        cancelAutoFocus();
+    virtual status_t        takePicture(int msgType);
+    virtual status_t        setParameters(const String8& params);
+    virtual String8         getParameters() const;
+    virtual status_t        sendCommand(int32_t cmd, int32_t arg1, int32_t arg2);
+
+    // Interface used by CameraService
+    CameraClient(const sp<CameraService>& cameraService,
+            const sp<ICameraClient>& cameraClient,
+            const String16& clientPackageName,
+            int cameraId,
+            int cameraFacing,
+            int clientPid,
+            int clientUid,
+            int servicePid);
+    ~CameraClient();
+
+    // Opens the HAL v1 device; must succeed before any ICamera call is made.
+    status_t initialize(camera_module_t *module);
+
+    status_t dump(int fd, const Vector<String16>& args);
+
+private:
+
+    // check whether the calling process matches mClientPid.
+    status_t                checkPid() const;
+    status_t                checkPidAndHardware() const;  // also check mHardware != 0
+
+    // these are internal functions used to set up preview buffers
+    status_t                registerPreviewBuffers();
+
+    // camera operation mode
+    enum camera_mode {
+        CAMERA_PREVIEW_MODE   = 0,  // frame automatically released
+        CAMERA_RECORDING_MODE = 1,  // frame has to be explicitly released by releaseRecordingFrame()
+    };
+    // these are internal functions used for preview/recording
+    status_t                startCameraMode(camera_mode mode);
+    status_t                startPreviewMode();
+    status_t                startRecordingMode();
+
+    // internal function used by sendCommand to enable/disable shutter sound.
+    status_t                enableShutterSound(bool enable);
+
+    // these are static callback functions registered with the HAL; "user" is
+    // the client cookie, resolved back to a CameraClient* inside each one
+    static void             notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2, void* user);
+    static void             dataCallback(int32_t msgType, const sp<IMemory>& dataPtr,
+            camera_frame_metadata_t *metadata, void* user);
+    static void             dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr, void* user);
+    // handlers for messages; entered with mLock held, and each one releases
+    // mLock before calling back into the remote ICameraClient
+    void                    handleShutter(void);
+    void                    handlePreviewData(int32_t msgType, const sp<IMemory>& mem,
+            camera_frame_metadata_t *metadata);
+    void                    handlePostview(const sp<IMemory>& mem);
+    void                    handleRawPicture(const sp<IMemory>& mem);
+    void                    handleCompressedPicture(const sp<IMemory>& mem);
+    void                    handleGenericNotify(int32_t msgType, int32_t ext1, int32_t ext2);
+    void                    handleGenericData(int32_t msgType, const sp<IMemory>& dataPtr,
+            camera_frame_metadata_t *metadata);
+    void                    handleGenericDataTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr);
+
+    // copies a preview frame into a service-owned buffer before posting it
+    void                    copyFrameAndPostCopiedFrame(
+        int32_t msgType,
+        const sp<ICameraClient>& client,
+        const sp<IMemoryHeap>& heap,
+        size_t offset, size_t size,
+        camera_frame_metadata_t *metadata);
+
+    // maps display rotation + mirror flag to HAL_TRANSFORM_* bits, -1 if invalid
+    int                     getOrientation(int orientation, bool mirror);
+
+    status_t                setPreviewWindow(
+        const sp<IBinder>& binder,
+        const sp<ANativeWindow>& window);
+
+
+    // these are initialized in the constructor.
+    sp<CameraHardwareInterface>     mHardware;       // cleared after disconnect()
+    int                             mPreviewCallbackFlag;
+    int                             mOrientation;     // Current display orientation
+    bool                            mPlayShutterSound;
+
+    // Ensures atomicity among the public methods
+    mutable Mutex                   mLock;
+    // Binder token identifying the current preview target (the Surface or
+    // the IGraphicBufferProducer passed by the app).
+    sp<IBinder>                     mSurface;
+    sp<ANativeWindow>               mPreviewWindow;
+
+    // If the user want us to return a copy of the preview frame (instead
+    // of the original one), we allocate mPreviewBuffer and reuse it if possible.
+    sp<MemoryHeapBase>              mPreviewBuffer;
+
+    // We need to avoid the deadlock when the incoming command thread and
+    // the CameraHardwareInterface callback thread both want to grab mLock.
+    // An extra flag is used to tell the callback thread that it should stop
+    // trying to deliver the callback messages if the client is not
+    // interested in it anymore. For example, if the client is calling
+    // stopPreview(), the preview frame messages do not need to be delivered
+    // anymore.
+
+    // This function takes the same parameter as the enableMsgType() and
+    // disableMsgType() functions in CameraHardwareInterface.
+    void                    enableMsgType(int32_t msgType);
+    void                    disableMsgType(int32_t msgType);
+    // Bitmask of currently-enabled message types; read/written across threads.
+    volatile int32_t        mMsgEnabled;
+
+    // This function keeps trying to grab mLock, or give up if the message
+    // is found to be disabled. It returns true if mLock is grabbed.
+    bool                    lockIfMessageWanted(int32_t msgType);
+};
+
+}
+
+#endif
diff --git a/services/camera/libcameraservice/api1/client2/BurstCapture.cpp b/services/camera/libcameraservice/api1/client2/BurstCapture.cpp
new file mode 100644
index 0000000..0bfdfd4
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/BurstCapture.cpp
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Camera2-BurstCapture"
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include "BurstCapture.h"
+
+#include "api1/Camera2Client.h"
+#include "api1/client2/JpegCompressor.h"
+
+namespace android {
+namespace camera2 {
+
+// Constructor. Holds only weak references to the client and sequencer so the
+// burst-capture thread never keeps them alive on its own.
+// Fix: mInputChanged was previously left uninitialized, so the first
+// threadLoop() iteration read an indeterminate value; initialize it to false
+// (initializer order matches the member declaration order in the header).
+BurstCapture::BurstCapture(wp<Camera2Client> client, wp<CaptureSequencer> sequencer):
+    mInputChanged(false),
+    mCaptureStreamId(NO_STREAM),
+    mClient(client),
+    mSequencer(sequencer)
+{
+}
+
+// Trivial destructor; no burst-specific resources to release here.
+BurstCapture::~BurstCapture() {
+}
+
+// Base-class stub: starting a burst is left to a concrete subclass, so
+// calling this directly always fails.
+status_t BurstCapture::start(Vector<CameraMetadata> &/*metadatas*/,
+                             int32_t /*firstCaptureId*/) {
+    ALOGE("Not completely implemented");
+    return INVALID_OPERATION;
+}
+
+// CpuConsumer listener: flag that new input is pending and wake the
+// processing thread. Collapses repeated notifications into a single flag.
+void BurstCapture::onFrameAvailable() {
+    ALOGV("%s", __FUNCTION__);
+    Mutex::Autolock lock(mInputMutex);
+    if (mInputChanged) {
+        // Already flagged; the processing thread will pick this frame up too.
+        return;
+    }
+    mInputChanged = true;
+    mInputSignal.signal();
+}
+
+// Worker loop: wait (with timeout, so the thread can be stopped) for
+// onFrameAvailable() to flag new input, then drain frames until
+// processFrameAvailable() stops returning OK. Returning false ends the thread
+// when the owning client has gone away.
+bool BurstCapture::threadLoop() {
+    status_t err;
+
+    {
+        Mutex::Autolock lock(mInputMutex);
+        // Re-check the flag after each wakeup to guard against spurious wakes.
+        while (!mInputChanged) {
+            err = mInputSignal.waitRelative(mInputMutex, kWaitDuration);
+            if (err == TIMED_OUT) return true;
+        }
+        mInputChanged = false;
+    }
+
+    for (;;) {
+        sp<Camera2Client> owner = mClient.promote();
+        if (owner == 0) return false;
+        ALOGV("%s: Calling processFrameAvailable()", __FUNCTION__);
+        err = processFrameAvailable(owner);
+        if (err != OK) break;
+    }
+
+    return true;
+}
+
+// Synchronously JPEG-encodes imgBuffer into a freshly allocated output
+// buffer. On success the caller takes ownership of the returned
+// LockedBuffer and its data array; on timeout returns NULL.
+// The quality parameter is currently ignored.
+CpuConsumer::LockedBuffer* BurstCapture::jpegEncode(
+    CpuConsumer::LockedBuffer *imgBuffer,
+    int /*quality*/)
+{
+    ALOGV("%s", __FUNCTION__);
+
+    // Output buffer sized for the worst-case JPEG.
+    CpuConsumer::LockedBuffer *imgEncoded = new CpuConsumer::LockedBuffer;
+    uint8_t *data = new uint8_t[ANDROID_JPEG_MAX_SIZE];
+    imgEncoded->data = data;
+    imgEncoded->width = imgBuffer->width;
+    imgEncoded->height = imgBuffer->height;
+    imgEncoded->stride = imgBuffer->stride;
+
+    // buffers[0] is the input image, buffers[1] the encode destination
+    // (buffers[1] is what gets returned below).
+    Vector<CpuConsumer::LockedBuffer*> buffers;
+    buffers.push_back(imgBuffer);
+    buffers.push_back(imgEncoded);
+
+    sp<JpegCompressor> jpeg = new JpegCompressor();
+    jpeg->start(buffers, 1);
+
+    // Wait up to 10 seconds for the encoder to finish.
+    bool success = jpeg->waitForDone(10 * 1e9);
+    if(success) {
+        return buffers[1];
+    }
+    else {
+        ALOGE("%s: JPEG encode timed out", __FUNCTION__);
+        // NOTE(review): imgEncoded and its data array leak here. They cannot
+        // simply be deleted, since the compressor thread may still be writing
+        // into them after the timeout; a proper fix needs to cancel/join the
+        // compressor first — TODO.
+        return NULL;  // TODO: maybe change function return value to status_t
+    }
+}
+
+// Base-class stub invoked from threadLoop(); concrete subclasses override
+// this to consume the newly available frame. Always fails here.
+status_t BurstCapture::processFrameAvailable(sp<Camera2Client> &/*client*/) {
+    ALOGE("Not implemented");
+    return INVALID_OPERATION;
+}
+
+} // namespace camera2
+} // namespace android
diff --git a/services/camera/libcameraservice/api1/client2/BurstCapture.h b/services/camera/libcameraservice/api1/client2/BurstCapture.h
new file mode 100644
index 0000000..ea321fd
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/BurstCapture.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_BURST_CAPTURE_H
+#define ANDROID_SERVERS_CAMERA_BURST_CAPTURE_H
+
+#include <camera/CameraMetadata.h>
+#include <binder/MemoryBase.h>
+#include <binder/MemoryHeapBase.h>
+#include <gui/CpuConsumer.h>
+
+#include "device2/Camera2Device.h"
+
+namespace android {
+
+class Camera2Client;
+
+namespace camera2 {
+
+class CaptureSequencer;
+
+class BurstCapture : public virtual Thread,
+                     public virtual CpuConsumer::FrameAvailableListener
+{
+public:
+    BurstCapture(wp<Camera2Client> client, wp<CaptureSequencer> sequencer);
+    virtual ~BurstCapture();
+
+    // CpuConsumer listener: wakes the processing thread when input arrives.
+    virtual void onFrameAvailable();
+    // Kicks off a burst; base-class implementation is a failing stub that
+    // subclasses are expected to override.
+    virtual status_t start(Vector<CameraMetadata> &metadatas, int32_t firstCaptureId);
+
+protected:
+    // Guards mInputChanged and mInputSignal, shared between the listener
+    // callback and the processing thread.
+    Mutex mInputMutex;
+    bool mInputChanged;
+    Condition mInputSignal;
+    int mCaptureStreamId;
+    // Weak references: the burst thread must not keep the client or
+    // sequencer alive on its own.
+    wp<Camera2Client> mClient;
+    wp<CaptureSequencer> mSequencer;
+
+    // Should only be accessed by processing thread
+    enum {
+        NO_STREAM = -1
+    };
+
+    // Synchronously encodes imgBuffer to JPEG; returns a heap-allocated
+    // buffer the caller owns, or NULL on timeout.
+    CpuConsumer::LockedBuffer* jpegEncode(
+        CpuConsumer::LockedBuffer *imgBuffer,
+        int quality);
+
+    // Called from threadLoop() for each pending frame; stub in this class.
+    virtual status_t processFrameAvailable(sp<Camera2Client> &client);
+
+private:
+    virtual bool threadLoop();
+    // Poll interval for the input wait, so the thread can notice exit requests.
+    static const nsecs_t kWaitDuration = 10000000; // 10 ms
+};
+
+} // namespace camera2
+} // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
new file mode 100644
index 0000000..12d0859
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
@@ -0,0 +1,539 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera2-CallbackProcessor"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <gui/Surface.h>
+
+#include "common/CameraDeviceBase.h"
+#include "api1/Camera2Client.h"
+#include "api1/client2/CallbackProcessor.h"
+
+#define ALIGN(x, mask) ( ((x) + (mask) - 1) & ~((mask) - 1) )
+
+namespace android {
+namespace camera2 {
+
+// Constructor. Snapshots the camera id and takes weak references to the
+// client and its device, so the processor never keeps either alive by itself.
+CallbackProcessor::CallbackProcessor(sp<Camera2Client> client):
+        Thread(false),
+        mClient(client),
+        mDevice(client->getCameraDevice()),
+        mId(client->getCameraId()),
+        mCallbackAvailable(false),
+        mCallbackToApp(false),
+        mCallbackStreamId(NO_STREAM) {
+}
+
+CallbackProcessor::~CallbackProcessor() {
+    ALOGV("%s: Exit", __FUNCTION__);
+    // Tear down the callback stream if one was created (no-op otherwise).
+    deleteStream();
+}
+
+// CpuConsumer listener: flag that a callback buffer is pending and wake the
+// processing thread. Repeated notifications collapse into a single flag.
+void CallbackProcessor::onFrameAvailable() {
+    Mutex::Autolock lock(mInputMutex);
+    if (mCallbackAvailable) {
+        // Already flagged; threadLoop() will drain every pending buffer.
+        return;
+    }
+    mCallbackAvailable = true;
+    mCallbackAvailableSignal.signal();
+}
+
+// Points the processor at an app-supplied callback surface (or back to the
+// internal CPU consumer when callbackWindow is NULL). If the target window
+// changes while a stream exists, the old stream is deleted first.
+status_t CallbackProcessor::setCallbackWindow(
+        sp<ANativeWindow> callbackWindow) {
+    ATRACE_CALL();
+    status_t res;
+
+    Mutex::Autolock l(mInputMutex);
+
+    // If the client is already gone there is nothing to reconfigure.
+    sp<Camera2Client> client = mClient.promote();
+    if (client == 0) return OK;
+    sp<CameraDeviceBase> device = client->getCameraDevice();
+
+    // If the window is changing, clear out stream if it already exists
+    if (mCallbackWindow != callbackWindow && mCallbackStreamId != NO_STREAM) {
+        res = device->deleteStream(mCallbackStreamId);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to delete old stream "
+                    "for callbacks: %s (%d)", __FUNCTION__,
+                    client->getCameraId(), strerror(-res), res);
+            return res;
+        }
+        mCallbackStreamId = NO_STREAM;
+        mCallbackConsumer.clear();
+    }
+    mCallbackWindow = callbackWindow;
+    // Non-NULL window means callbacks go straight to the app's surface.
+    mCallbackToApp = (mCallbackWindow != NULL);
+
+    return OK;
+}
+
+// Creates or reconfigures the callback output stream to match the current
+// preview parameters: picks the callback pixel format, lazily creates the
+// internal CPU consumer when the app has not supplied a target, deletes a
+// stale stream whose size/format no longer matches, and finally creates the
+// stream if none exists. Holds mInputMutex throughout.
+status_t CallbackProcessor::updateStream(const Parameters &params) {
+    ATRACE_CALL();
+    status_t res;
+
+    Mutex::Autolock l(mInputMutex);
+
+    sp<CameraDeviceBase> device = mDevice.promote();
+    if (device == 0) {
+        ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+
+    // If possible, use the flexible YUV format
+    int32_t callbackFormat = params.previewFormat;
+    if (mCallbackToApp) {
+        // TODO: etalvala: This should use the flexible YUV format as well, but
+        // need to reconcile HAL2/HAL3 requirements.
+        callbackFormat = HAL_PIXEL_FORMAT_YV12;
+    } else if(params.fastInfo.useFlexibleYuv &&
+            (params.previewFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP ||
+             params.previewFormat == HAL_PIXEL_FORMAT_YV12) ) {
+        callbackFormat = HAL_PIXEL_FORMAT_YCbCr_420_888;
+    }
+
+    if (!mCallbackToApp && mCallbackConsumer == 0) {
+        // Create CPU buffer queue endpoint, since app hasn't given us one
+        // Make it async to avoid disconnect deadlocks
+        sp<BufferQueue> bq = new BufferQueue();
+        mCallbackConsumer = new CpuConsumer(bq, kCallbackHeapCount);
+        mCallbackConsumer->setFrameAvailableListener(this);
+        mCallbackConsumer->setName(String8("Camera2Client::CallbackConsumer"));
+        mCallbackWindow = new Surface(
+            mCallbackConsumer->getProducerInterface());
+    }
+
+    if (mCallbackStreamId != NO_STREAM) {
+        // Check if stream parameters have to change
+        uint32_t currentWidth, currentHeight, currentFormat;
+        res = device->getStreamInfo(mCallbackStreamId,
+                &currentWidth, &currentHeight, &currentFormat);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Error querying callback output stream info: "
+                    "%s (%d)", __FUNCTION__, mId,
+                    strerror(-res), res);
+            return res;
+        }
+        if (currentWidth != (uint32_t)params.previewWidth ||
+                currentHeight != (uint32_t)params.previewHeight ||
+                currentFormat != (uint32_t)callbackFormat) {
+            // Since size should only change while preview is not running,
+            // assuming that all existing use of old callback stream is
+            // completed.
+            ALOGV("%s: Camera %d: Deleting stream %d since the buffer "
+                    "parameters changed", __FUNCTION__, mId, mCallbackStreamId);
+            res = device->deleteStream(mCallbackStreamId);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to delete old output stream "
+                        "for callbacks: %s (%d)", __FUNCTION__,
+                        mId, strerror(-res), res);
+                return res;
+            }
+            mCallbackStreamId = NO_STREAM;
+        }
+    }
+
+    if (mCallbackStreamId == NO_STREAM) {
+        ALOGV("Creating callback stream: %d x %d, format 0x%x, API format 0x%x",
+                params.previewWidth, params.previewHeight,
+                callbackFormat, params.previewFormat);
+        res = device->createStream(mCallbackWindow,
+                params.previewWidth, params.previewHeight,
+                callbackFormat, 0, &mCallbackStreamId);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't create output stream for callbacks: "
+                    "%s (%d)", __FUNCTION__, mId,
+                    strerror(-res), res);
+            return res;
+        }
+    }
+
+    return OK;
+}
+
+// Tears down the callback stream: waits for the HAL to drain in-flight
+// requests, deletes the stream, then clears the local heap/window/consumer
+// state. mInputMutex is deliberately NOT held across the blocking device
+// calls — it is taken only to read and then to reset the member state.
+status_t CallbackProcessor::deleteStream() {
+    ATRACE_CALL();
+    sp<CameraDeviceBase> device;
+    status_t res;
+    {
+        Mutex::Autolock l(mInputMutex);
+
+        // Nothing to do if no stream was ever created.
+        if (mCallbackStreamId == NO_STREAM) {
+            return OK;
+        }
+        device = mDevice.promote();
+        if (device == 0) {
+            ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+            return INVALID_OPERATION;
+        }
+    }
+    res = device->waitUntilDrained();
+    if (res != OK) {
+        ALOGE("%s: Error waiting for HAL to drain: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    res = device->deleteStream(mCallbackStreamId);
+    if (res != OK) {
+        ALOGE("%s: Unable to delete callback stream: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    {
+        Mutex::Autolock l(mInputMutex);
+
+        mCallbackHeap.clear();
+        mCallbackWindow.clear();
+        mCallbackConsumer.clear();
+
+        mCallbackStreamId = NO_STREAM;
+    }
+    return OK;
+}
+
+// Returns the current callback stream id (NO_STREAM if none). Guarded by
+// mInputMutex since the stream can be created/deleted from other threads.
+int CallbackProcessor::getStreamId() const {
+    Mutex::Autolock lock(mInputMutex);
+    return mCallbackStreamId;
+}
+
+void CallbackProcessor::dump(int /*fd*/, const Vector<String16>& /*args*/) const {
+    // Intentionally empty: no callback-processor state is dumped yet.
+}
+
+// Worker loop: wait (with timeout, so the thread stays stoppable) for
+// onFrameAvailable() to flag pending buffers, then drain them — forwarding
+// to the client when it is still alive, or discarding otherwise.
+bool CallbackProcessor::threadLoop() {
+    status_t res;
+
+    {
+        Mutex::Autolock l(mInputMutex);
+        // Loop guards against spurious condition-variable wakeups.
+        while (!mCallbackAvailable) {
+            res = mCallbackAvailableSignal.waitRelative(mInputMutex,
+                    kWaitDuration);
+            if (res == TIMED_OUT) return true;
+        }
+        mCallbackAvailable = false;
+    }
+
+    do {
+        sp<Camera2Client> client = mClient.promote();
+        if (client == 0) {
+            // Client is gone; drop buffers so the consumer doesn't back up.
+            res = discardNewCallback();
+        } else {
+            res = processNewCallback(client);
+        }
+    } while (res == OK);
+
+    return true;
+}
+
+// Drops the next pending callback buffer without processing it; used when
+// the owning client no longer exists. BAD_VALUE (no buffer pending) is the
+// normal loop-termination case and is not logged.
+// NOTE(review): assumes mCallbackConsumer is non-NULL; in callback-to-app
+// mode the internal consumer is never created — confirm this path cannot be
+// reached in that configuration.
+status_t CallbackProcessor::discardNewCallback() {
+    ATRACE_CALL();
+    status_t res;
+    CpuConsumer::LockedBuffer imgBuffer;
+    res = mCallbackConsumer->lockNextBuffer(&imgBuffer);
+    if (res != OK) {
+        if (res != BAD_VALUE) {
+            ALOGE("%s: Camera %d: Error receiving next callback buffer: "
+                    "%s (%d)", __FUNCTION__, mId, strerror(-res), res);
+        }
+        return res;
+    }
+    mCallbackConsumer->unlockBuffer(imgBuffer);
+    return OK;
+}
+
+status_t CallbackProcessor::processNewCallback(sp<Camera2Client> &client) {
+    ATRACE_CALL();
+    status_t res;
+
+    sp<Camera2Heap> callbackHeap;
+    bool useFlexibleYuv = false;
+    int32_t previewFormat = 0;
+    size_t heapIdx;
+
+    {
+        /* acquire SharedParameters before mMutex so we don't dead lock
+            with Camera2Client code calling into StreamingProcessor */
+        SharedParameters::Lock l(client->getParameters());
+        Mutex::Autolock m(mInputMutex);
+        CpuConsumer::LockedBuffer imgBuffer;
+        if (mCallbackStreamId == NO_STREAM) {
+            ALOGV("%s: Camera %d:No stream is available"
+                    , __FUNCTION__, mId);
+            return INVALID_OPERATION;
+        }
+
+        ALOGV("%s: Getting buffer", __FUNCTION__);
+        res = mCallbackConsumer->lockNextBuffer(&imgBuffer);
+        if (res != OK) {
+            if (res != BAD_VALUE) {
+                ALOGE("%s: Camera %d: Error receiving next callback buffer: "
+                        "%s (%d)", __FUNCTION__, mId, strerror(-res), res);
+            }
+            return res;
+        }
+        ALOGV("%s: Camera %d: Preview callback available", __FUNCTION__,
+                mId);
+
+        if ( l.mParameters.state != Parameters::PREVIEW
+                && l.mParameters.state != Parameters::RECORD
+                && l.mParameters.state != Parameters::VIDEO_SNAPSHOT) {
+            ALOGV("%s: Camera %d: No longer streaming",
+                    __FUNCTION__, mId);
+            mCallbackConsumer->unlockBuffer(imgBuffer);
+            return OK;
+        }
+
+        if (! (l.mParameters.previewCallbackFlags &
+                CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) ) {
+            ALOGV("%s: No longer enabled, dropping", __FUNCTION__);
+            mCallbackConsumer->unlockBuffer(imgBuffer);
+            return OK;
+        }
+        if ((l.mParameters.previewCallbackFlags &
+                        CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK) &&
+                !l.mParameters.previewCallbackOneShot) {
+            ALOGV("%s: One shot mode, already sent, dropping", __FUNCTION__);
+            mCallbackConsumer->unlockBuffer(imgBuffer);
+            return OK;
+        }
+
+        previewFormat = l.mParameters.previewFormat;
+        useFlexibleYuv = l.mParameters.fastInfo.useFlexibleYuv &&
+                (previewFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP ||
+                 previewFormat == HAL_PIXEL_FORMAT_YV12);
+
+        int32_t expectedFormat = useFlexibleYuv ?
+                HAL_PIXEL_FORMAT_YCbCr_420_888 : previewFormat;
+
+        if (imgBuffer.format != expectedFormat) {
+            ALOGE("%s: Camera %d: Unexpected format for callback: "
+                    "0x%x, expected 0x%x", __FUNCTION__, mId,
+                    imgBuffer.format, expectedFormat);
+            mCallbackConsumer->unlockBuffer(imgBuffer);
+            return INVALID_OPERATION;
+        }
+
+        // In one-shot mode, stop sending callbacks after the first one
+        if (l.mParameters.previewCallbackFlags &
+                CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK) {
+            ALOGV("%s: clearing oneshot", __FUNCTION__);
+            l.mParameters.previewCallbackOneShot = false;
+        }
+
+        uint32_t destYStride = 0;
+        uint32_t destCStride = 0;
+        if (useFlexibleYuv) {
+            if (previewFormat == HAL_PIXEL_FORMAT_YV12) {
+                // Strides must align to 16 for YV12
+                destYStride = ALIGN(imgBuffer.width, 16);
+                destCStride = ALIGN(destYStride / 2, 16);
+            } else {
+                // No padding for NV21
+                ALOG_ASSERT(previewFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP,
+                        "Unexpected preview format 0x%x", previewFormat);
+                destYStride = imgBuffer.width;
+                destCStride = destYStride / 2;
+            }
+        } else {
+            destYStride = imgBuffer.stride;
+            // don't care about cStride
+        }
+
+        size_t bufferSize = Camera2Client::calculateBufferSize(
+                imgBuffer.width, imgBuffer.height,
+                previewFormat, destYStride);
+        size_t currentBufferSize = (mCallbackHeap == 0) ?
+                0 : (mCallbackHeap->mHeap->getSize() / kCallbackHeapCount);
+        if (bufferSize != currentBufferSize) {
+            mCallbackHeap.clear();
+            mCallbackHeap = new Camera2Heap(bufferSize, kCallbackHeapCount,
+                    "Camera2Client::CallbackHeap");
+            if (mCallbackHeap->mHeap->getSize() == 0) {
+                ALOGE("%s: Camera %d: Unable to allocate memory for callbacks",
+                        __FUNCTION__, mId);
+                mCallbackConsumer->unlockBuffer(imgBuffer);
+                return INVALID_OPERATION;
+            }
+
+            mCallbackHeapHead = 0;
+            mCallbackHeapFree = kCallbackHeapCount;
+        }
+
+        if (mCallbackHeapFree == 0) {
+            ALOGE("%s: Camera %d: No free callback buffers, dropping frame",
+                    __FUNCTION__, mId);
+            mCallbackConsumer->unlockBuffer(imgBuffer);
+            return OK;
+        }
+
+        heapIdx = mCallbackHeapHead;
+
+        mCallbackHeapHead = (mCallbackHeapHead + 1) & kCallbackHeapCount;
+        mCallbackHeapFree--;
+
+        // TODO: Get rid of this copy by passing the gralloc queue all the way
+        // to app
+
+        ssize_t offset;
+        size_t size;
+        sp<IMemoryHeap> heap =
+                mCallbackHeap->mBuffers[heapIdx]->getMemory(&offset,
+                        &size);
+        uint8_t *data = (uint8_t*)heap->getBase() + offset;
+
+        if (!useFlexibleYuv) {
+            // Can just memcpy when HAL format matches API format
+            memcpy(data, imgBuffer.data, bufferSize);
+        } else {
+            res = convertFromFlexibleYuv(previewFormat, data, imgBuffer,
+                    destYStride, destCStride);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Can't convert between 0x%x and 0x%x formats!",
+                        __FUNCTION__, mId, imgBuffer.format, previewFormat);
+                mCallbackConsumer->unlockBuffer(imgBuffer);
+                return BAD_VALUE;
+            }
+        }
+
+        ALOGV("%s: Freeing buffer", __FUNCTION__);
+        mCallbackConsumer->unlockBuffer(imgBuffer);
+
+        // mCallbackHeap may get freed up once input mutex is released
+        callbackHeap = mCallbackHeap;
+    }
+
+    // Call outside parameter lock to allow re-entrancy from notification
+    {
+        Camera2Client::SharedCameraCallbacks::Lock
+            l(client->mSharedCameraCallbacks);
+        if (l.mRemoteCallback != 0) {
+            ALOGV("%s: Camera %d: Invoking client data callback",
+                    __FUNCTION__, mId);
+            l.mRemoteCallback->dataCallback(CAMERA_MSG_PREVIEW_FRAME,
+                    callbackHeap->mBuffers[heapIdx], NULL);
+        }
+    }
+
+    // Only increment free if we're still using the same heap
+    mCallbackHeapFree++;
+
+    ALOGV("%s: exit", __FUNCTION__);
+
+    return OK;
+}
+
/**
 * Convert a flexible-YUV (HAL_PIXEL_FORMAT_YCbCr_420_888) locked buffer into
 * the layout the API-1 client requested: NV21 (YCrCb_420_SP) or YV12.
 *
 * previewFormat: destination format; must be NV21 or YV12.
 * dst:           destination buffer, sized for the strides below.
 * src:           locked CPU-accessible source with per-plane pointers,
 *                chromaStep (bytes between successive chroma samples of the
 *                same plane) and chromaStride filled in.
 * dstYStride:    destination luma row stride, in bytes.
 * dstCStride:    destination chroma row stride, in bytes (per plane).
 *
 * Returns INVALID_OPERATION for unsupported destination formats, OK otherwise.
 */
status_t CallbackProcessor::convertFromFlexibleYuv(int32_t previewFormat,
        uint8_t *dst,
        const CpuConsumer::LockedBuffer &src,
        uint32_t dstYStride,
        uint32_t dstCStride) const {

    if (previewFormat != HAL_PIXEL_FORMAT_YCrCb_420_SP &&
            previewFormat != HAL_PIXEL_FORMAT_YV12) {
        ALOGE("%s: Camera %d: Unexpected preview format when using "
                "flexible YUV: 0x%x", __FUNCTION__, mId, previewFormat);
        return INVALID_OPERATION;
    }

    // Copy Y plane, adjusting for stride
    const uint8_t *ySrc = src.data;
    uint8_t *yDst = dst;
    for (size_t row = 0; row < src.height; row++) {
        memcpy(yDst, ySrc, src.width);
        ySrc += src.stride;
        yDst += dstYStride;
    }
    // After the loop, yDst points just past the Y plane, i.e. at the start
    // of the destination chroma data.

    // Copy/swizzle chroma planes, 4:2:0 subsampling
    const uint8_t *cbSrc = src.dataCb;
    const uint8_t *crSrc = src.dataCr;
    size_t chromaHeight = src.height / 2;
    size_t chromaWidth = src.width / 2;
    // Bytes left over at the end of each source chroma row after the samples
    ssize_t chromaGap = src.chromaStride -
            (chromaWidth * src.chromaStep);
    // Destination per-plane row padding (planar destinations only)
    size_t dstChromaGap = dstCStride - chromaWidth;

    if (previewFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP) {
        // Flexible YUV chroma to NV21 chroma (interleaved Cr/Cb)
        uint8_t *crcbDst = yDst;
        // Check for shortcuts
        if (cbSrc == crSrc + 1 && src.chromaStep == 2) {
            ALOGV("%s: Fast NV21->NV21", __FUNCTION__);
            // Source has semiplanar CrCb chroma layout, can copy by rows
            for (size_t row = 0; row < chromaHeight; row++) {
                memcpy(crcbDst, crSrc, src.width);
                crcbDst += src.width;
                crSrc += src.chromaStride;
            }
        } else {
            ALOGV("%s: Generic->NV21", __FUNCTION__);
            // Generic copy, always works but not very efficient
            for (size_t row = 0; row < chromaHeight; row++) {
                for (size_t col = 0; col < chromaWidth; col++) {
                    // NV21 interleaves Cr first, then Cb
                    *(crcbDst++) = *crSrc;
                    *(crcbDst++) = *cbSrc;
                    crSrc += src.chromaStep;
                    cbSrc += src.chromaStep;
                }
                crSrc += chromaGap;
                cbSrc += chromaGap;
            }
        }
    } else {
        // flexible YUV chroma to YV12 chroma (planar: Cr plane then Cb plane)
        ALOG_ASSERT(previewFormat == HAL_PIXEL_FORMAT_YV12,
                "Unexpected preview format 0x%x", previewFormat);
        uint8_t *crDst = yDst;
        uint8_t *cbDst = yDst + chromaHeight * dstCStride;
        if (src.chromaStep == 1) {
            ALOGV("%s: Fast YV12->YV12", __FUNCTION__);
            // Source has planar chroma layout, can copy by row
            for (size_t row = 0; row < chromaHeight; row++) {
                memcpy(crDst, crSrc, chromaWidth);
                crDst += dstCStride;
                crSrc += src.chromaStride;
            }
            for (size_t row = 0; row < chromaHeight; row++) {
                memcpy(cbDst, cbSrc, chromaWidth);
                cbDst += dstCStride;
                cbSrc += src.chromaStride;
            }
        } else {
            ALOGV("%s: Generic->YV12", __FUNCTION__);
            // Generic copy, always works but not very efficient
            for (size_t row = 0; row < chromaHeight; row++) {
                for (size_t col = 0; col < chromaWidth; col++) {
                    *(crDst++) = *crSrc;
                    *(cbDst++) = *cbSrc;
                    crSrc += src.chromaStep;
                    cbSrc += src.chromaStep;
                }
                crSrc += chromaGap;
                cbSrc += chromaGap;
                crDst += dstChromaGap;
                cbDst += dstChromaGap;
            }
        }
    }

    return OK;
}
+
+}; // namespace camera2
+}; // namespace android
diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.h b/services/camera/libcameraservice/api1/client2/CallbackProcessor.h
new file mode 100644
index 0000000..613f5be
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_CALLBACKPROCESSOR_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_CALLBACKPROCESSOR_H
+
+#include <utils/Thread.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+#include <utils/Mutex.h>
+#include <utils/Condition.h>
+#include <gui/CpuConsumer.h>
+
+#include "api1/client2/Camera2Heap.h"
+
+namespace android {
+
+class Camera2Client;
+class CameraDeviceBase;
+
+namespace camera2 {
+
+class Parameters;
+
/***
 * Preview callback output processing: copies (or converts from flexible YUV)
 * frames from the callback stream into IMemory heap buffers handed to the
 * API-1 client's CAMERA_MSG_PREVIEW_FRAME data callback.
 */
class CallbackProcessor:
            public Thread, public CpuConsumer::FrameAvailableListener {
  public:
    CallbackProcessor(sp<Camera2Client> client);
    ~CallbackProcessor();

    // CpuConsumer::FrameAvailableListener: a new callback frame is ready
    void onFrameAvailable();

    // Set to NULL to disable the direct-to-app callback window
    status_t setCallbackWindow(sp<ANativeWindow> callbackWindow);
    // Create/recreate the callback stream from the current parameters
    status_t updateStream(const Parameters &params);
    status_t deleteStream();
    int getStreamId() const;

    void dump(int fd, const Vector<String16>& args) const;
  private:
    static const nsecs_t kWaitDuration = 10000000; // 10 ms
    wp<Camera2Client> mClient;
    wp<CameraDeviceBase> mDevice;
    int mId;    // camera ID, used for log messages

    mutable Mutex mInputMutex;
    // Guarded by mInputMutex: set when a frame is pending processing
    bool mCallbackAvailable;
    Condition mCallbackAvailableSignal;

    enum {
        NO_STREAM = -1
    };

    // True if mCallbackWindow is a remote consumer, false if just the local
    // mCallbackConsumer
    bool mCallbackToApp;
    int mCallbackStreamId;
    // Number of slots in the callback heap ring buffer
    static const size_t kCallbackHeapCount = 6;
    sp<CpuConsumer>    mCallbackConsumer;
    sp<ANativeWindow>  mCallbackWindow;
    sp<Camera2Heap>    mCallbackHeap;
    int mCallbackHeapId;
    // Ring-buffer head index and count of free slots in mCallbackHeap
    size_t mCallbackHeapHead, mCallbackHeapFree;

    virtual bool threadLoop();

    // Copy/convert one buffer from the stream and invoke the client callback
    status_t processNewCallback(sp<Camera2Client> &client);
    // Used when shutting down
    status_t discardNewCallback();

    // Convert from flexible YUV to NV21 or YV12
    status_t convertFromFlexibleYuv(int32_t previewFormat,
            uint8_t *dst,
            const CpuConsumer::LockedBuffer &src,
            uint32_t dstYStride,
            uint32_t dstCStride) const;
};
+
+
+}; //namespace camera2
+}; //namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/api1/client2/Camera2Heap.h b/services/camera/libcameraservice/api1/client2/Camera2Heap.h
new file mode 100644
index 0000000..9c72d76
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/Camera2Heap.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROiD_SERVERS_CAMERA_CAMERA2HEAP_H
+#define ANDROiD_SERVERS_CAMERA_CAMERA2HEAP_H
+
+#include <binder/MemoryBase.h>
+#include <binder/MemoryHeapBase.h>
+
+namespace android {
+namespace camera2 {
+
+// Utility class for managing a set of IMemory blocks
+class Camera2Heap : public RefBase {
+  public:
+    Camera2Heap(size_t buf_size, uint_t num_buffers = 1,
+            const char *name = NULL) :
+            mBufSize(buf_size),
+            mNumBufs(num_buffers) {
+        mHeap = new MemoryHeapBase(buf_size * num_buffers, 0, name);
+        mBuffers = new sp<MemoryBase>[mNumBufs];
+        for (uint_t i = 0; i < mNumBufs; i++)
+            mBuffers[i] = new MemoryBase(mHeap,
+                    i * mBufSize,
+                    mBufSize);
+    }
+
+    virtual ~Camera2Heap()
+    {
+        delete [] mBuffers;
+    }
+
+    size_t mBufSize;
+    uint_t mNumBufs;
+    sp<MemoryHeapBase> mHeap;
+    sp<MemoryBase> *mBuffers;
+};
+
+}; // namespace camera2
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
new file mode 100644
index 0000000..ad1590a
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -0,0 +1,710 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera2-CaptureSequencer"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <utils/Vector.h>
+
+#include "api1/Camera2Client.h"
+#include "api1/client2/CaptureSequencer.h"
+#include "api1/client2/BurstCapture.h"
+#include "api1/client2/Parameters.h"
+#include "api1/client2/ZslProcessorInterface.h"
+
+namespace android {
+namespace camera2 {
+
+/** Public members */
+
/**
 * Constructor: starts in IDLE with no capture pending, no notifications
 * latched, and the capture request ID at the start of the API-1 capture
 * request ID range. The thread itself is created non-running (Thread(false))
 * and must be started by the owner.
 *
 * Note: initializer order must match the member declaration order in the
 * header (not visible here).
 */
CaptureSequencer::CaptureSequencer(wp<Camera2Client> client):
        Thread(false),
        mStartCapture(false),
        mBusy(false),
        mNewAEState(false),
        mNewFrameReceived(false),
        mNewCaptureReceived(false),
        mShutterNotified(false),
        mClient(client),
        mCaptureState(IDLE),
        mTriggerId(0),
        mTimeoutCount(0),
        mCaptureId(Camera2Client::kCaptureRequestIdStart),
        mMsgType(0) {
    ALOGV("%s", __FUNCTION__);
}
+
// Destructor: no explicit cleanup needed; members release themselves.
CaptureSequencer::~CaptureSequencer() {
    ALOGV("%s: Exit", __FUNCTION__);
}
+
+void CaptureSequencer::setZslProcessor(wp<ZslProcessorInterface> processor) {
+    Mutex::Autolock l(mInputMutex);
+    mZslProcessor = processor;
+}
+
+status_t CaptureSequencer::startCapture(int msgType) {
+    ALOGV("%s", __FUNCTION__);
+    ATRACE_CALL();
+    Mutex::Autolock l(mInputMutex);
+    if (mBusy) {
+        ALOGE("%s: Already busy capturing!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+    if (!mStartCapture) {
+        mMsgType = msgType;
+        mStartCapture = true;
+        mStartCaptureSignal.signal();
+    }
+    return OK;
+}
+
/**
 * Block until the capture state machine returns to IDLE, or until the
 * (relative, in ns) timeout expires. Returns OK once idle, or the
 * waitRelative() error (e.g. TIMED_OUT) on failure.
 *
 * NOTE(review): the remaining timeout is decremented by the elapsed wait
 * each wakeup and is not clamped at zero — presumably waitRelative() treats
 * a non-positive timeout as an immediate TIMED_OUT; confirm against the
 * Condition contract.
 */
status_t CaptureSequencer::waitUntilIdle(nsecs_t timeout) {
    ATRACE_CALL();
    ALOGV("%s: Waiting for idle", __FUNCTION__);
    Mutex::Autolock l(mStateMutex);
    status_t res = -1;
    while (mCaptureState != IDLE) {
        nsecs_t startTime = systemTime();

        res = mStateChanged.waitRelative(mStateMutex, timeout);
        if (res != OK) return res;

        // Charge the time already waited against the remaining timeout
        timeout -= (systemTime() - startTime);
    }
    ALOGV("%s: Now idle", __FUNCTION__);
    return OK;
}
+
+void CaptureSequencer::notifyAutoExposure(uint8_t newState, int triggerId) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mInputMutex);
+    mAEState = newState;
+    mAETriggerId = triggerId;
+    if (!mNewAEState) {
+        mNewAEState = true;
+        mNewNotifySignal.signal();
+    }
+}
+
+void CaptureSequencer::onFrameAvailable(int32_t frameId,
+        const CameraMetadata &frame) {
+    ALOGV("%s: Listener found new frame", __FUNCTION__);
+    ATRACE_CALL();
+    Mutex::Autolock l(mInputMutex);
+    mNewFrameId = frameId;
+    mNewFrame = frame;
+    if (!mNewFrameReceived) {
+        mNewFrameReceived = true;
+        mNewFrameSignal.signal();
+    }
+}
+
+void CaptureSequencer::onCaptureAvailable(nsecs_t timestamp,
+        sp<MemoryBase> captureBuffer) {
+    ATRACE_CALL();
+    ALOGV("%s", __FUNCTION__);
+    Mutex::Autolock l(mInputMutex);
+    mCaptureTimestamp = timestamp;
+    mCaptureBuffer = captureBuffer;
+    if (!mNewCaptureReceived) {
+        mNewCaptureReceived = true;
+        mNewCaptureSignal.signal();
+    }
+}
+
+
+void CaptureSequencer::dump(int fd, const Vector<String16>& /*args*/) {
+    String8 result;
+    if (mCaptureRequest.entryCount() != 0) {
+        result = "    Capture request:\n";
+        write(fd, result.string(), result.size());
+        mCaptureRequest.dump(fd, 2, 6);
+    } else {
+        result = "    Capture request: undefined\n";
+        write(fd, result.string(), result.size());
+    }
+    result = String8::format("    Current capture state: %s\n",
+            kStateNames[mCaptureState]);
+    result.append("    Latest captured frame:\n");
+    write(fd, result.string(), result.size());
+    mNewFrame.dump(fd, 2, 6);
+}
+
+/** Private members */
+
/**
 * Human-readable names for CaptureState values, indexed by state. Must stay
 * in order with the CaptureState enum; the extra final entry ("UNKNOWN")
 * covers out-of-range values.
 */
const char* CaptureSequencer::kStateNames[CaptureSequencer::NUM_CAPTURE_STATES+1] =
{
    "IDLE",
    "START",
    "ZSL_START",
    "ZSL_WAITING",
    "ZSL_REPROCESSING",
    "STANDARD_START",
    "STANDARD_PRECAPTURE_WAIT",
    "STANDARD_CAPTURE",
    "STANDARD_CAPTURE_WAIT",
    "BURST_CAPTURE_START",
    "BURST_CAPTURE_WAIT",
    "DONE",
    "ERROR",
    "UNKNOWN"
};
+
/**
 * Per-state handler table, indexed by CaptureState; order must match the
 * CaptureState enum. There are NUM_CAPTURE_STATES-1 entries because the
 * final state (ERROR) has no handler — threadLoop() exits on it instead.
 */
const CaptureSequencer::StateManager
        CaptureSequencer::kStateManagers[CaptureSequencer::NUM_CAPTURE_STATES-1] = {
    &CaptureSequencer::manageIdle,
    &CaptureSequencer::manageStart,
    &CaptureSequencer::manageZslStart,
    &CaptureSequencer::manageZslWaiting,
    &CaptureSequencer::manageZslReprocessing,
    &CaptureSequencer::manageStandardStart,
    &CaptureSequencer::manageStandardPrecaptureWait,
    &CaptureSequencer::manageStandardCapture,
    &CaptureSequencer::manageStandardCaptureWait,
    &CaptureSequencer::manageBurstCaptureStart,
    &CaptureSequencer::manageBurstCaptureWait,
    &CaptureSequencer::manageDone,
};
+
+bool CaptureSequencer::threadLoop() {
+
+    sp<Camera2Client> client = mClient.promote();
+    if (client == 0) return false;
+
+    CaptureState currentState;
+    {
+        Mutex::Autolock l(mStateMutex);
+        currentState = mCaptureState;
+    }
+
+    currentState = (this->*kStateManagers[currentState])(client);
+
+    Mutex::Autolock l(mStateMutex);
+    if (currentState != mCaptureState) {
+        mCaptureState = currentState;
+        ATRACE_INT("cam2_capt_state", mCaptureState);
+        ALOGV("Camera %d: New capture state %s",
+                client->getCameraId(), kStateNames[mCaptureState]);
+        mStateChanged.signal();
+    }
+
+    if (mCaptureState == ERROR) {
+        ALOGE("Camera %d: Stopping capture sequencer due to error",
+                client->getCameraId());
+        return false;
+    }
+
+    return true;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageIdle(
+        sp<Camera2Client> &/*client*/) {
+    status_t res;
+    Mutex::Autolock l(mInputMutex);
+    while (!mStartCapture) {
+        res = mStartCaptureSignal.waitRelative(mInputMutex,
+                kWaitDuration);
+        if (res == TIMED_OUT) break;
+    }
+    if (mStartCapture) {
+        mStartCapture = false;
+        mBusy = true;
+        return START;
+    }
+    return IDLE;
+}
+
/**
 * DONE: finish the capture sequence. Advances the capture request ID
 * (wrapping within the API-1 range), clears busy state, restores the
 * parameter state according to how the capture was taken, flushes the ZSL
 * queue, and delivers the JPEG buffer to the client. Always returns IDLE.
 */
CaptureSequencer::CaptureState CaptureSequencer::manageDone(sp<Camera2Client> &client) {
    status_t res = OK;
    ATRACE_CALL();
    // Move to the next capture ID, wrapping within the API-1 request range
    mCaptureId++;
    if (mCaptureId >= Camera2Client::kCaptureRequestIdEnd) {
        mCaptureId = Camera2Client::kCaptureRequestIdStart;
    }
    {
        Mutex::Autolock l(mInputMutex);
        mBusy = false;
    }

    {
        SharedParameters::Lock l(client->getParameters());
        switch (l.mParameters.state) {
            case Parameters::DISCONNECTED:
                ALOGW("%s: Camera %d: Discarding image data during shutdown ",
                        __FUNCTION__, client->getCameraId());
                res = INVALID_OPERATION;
                break;
            case Parameters::STILL_CAPTURE:
                // Wait for the device to finish all in-flight work before
                // declaring the still capture complete
                res = client->getCameraDevice()->waitUntilDrained();
                if (res != OK) {
                    ALOGE("%s: Camera %d: Can't idle after still capture: "
                            "%s (%d)", __FUNCTION__, client->getCameraId(),
                            strerror(-res), res);
                }
                l.mParameters.state = Parameters::STOPPED;
                break;
            case Parameters::VIDEO_SNAPSHOT:
                // A video snapshot leaves recording running
                l.mParameters.state = Parameters::RECORD;
                break;
            default:
                ALOGE("%s: Camera %d: Still image produced unexpectedly "
                        "in state %s!",
                        __FUNCTION__, client->getCameraId(),
                        Parameters::getStateName(l.mParameters.state));
                res = INVALID_OPERATION;
        }
    }
    // Release ZSL queue buffers that are no longer needed
    sp<ZslProcessorInterface> processor = mZslProcessor.promote();
    if (processor != 0) {
        ALOGV("%s: Memory optimization, clearing ZSL queue",
              __FUNCTION__);
        processor->clearZslQueue();
    }

    /**
     * Fire the jpegCallback in Camera#takePicture(..., jpegCallback)
     */
    if (mCaptureBuffer != 0 && res == OK) {
        Camera2Client::SharedCameraCallbacks::Lock
            l(client->mSharedCameraCallbacks);
        ALOGV("%s: Sending still image to client", __FUNCTION__);
        if (l.mRemoteCallback != 0) {
            l.mRemoteCallback->dataCallback(CAMERA_MSG_COMPRESSED_IMAGE,
                    mCaptureBuffer, NULL);
        } else {
            ALOGV("%s: No client!", __FUNCTION__);
        }
    }
    mCaptureBuffer.clear();

    return IDLE;
}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageStart(
+        sp<Camera2Client> &client) {
+    ALOGV("%s", __FUNCTION__);
+    status_t res;
+    ATRACE_CALL();
+    SharedParameters::Lock l(client->getParameters());
+    CaptureState nextState = DONE;
+
+    res = updateCaptureRequest(l.mParameters, client);
+    if (res != OK ) {
+        ALOGE("%s: Camera %d: Can't update still image capture request: %s (%d)",
+                __FUNCTION__, client->getCameraId(), strerror(-res), res);
+        return DONE;
+    }
+
+    if(l.mParameters.lightFx != Parameters::LIGHTFX_NONE &&
+            l.mParameters.state == Parameters::STILL_CAPTURE) {
+        nextState = BURST_CAPTURE_START;
+    }
+    else if (l.mParameters.zslMode &&
+            l.mParameters.state == Parameters::STILL_CAPTURE &&
+            l.mParameters.flashMode != Parameters::FLASH_MODE_ON) {
+        nextState = ZSL_START;
+    } else {
+        nextState = STANDARD_START;
+    }
+    mShutterNotified = false;
+
+    return nextState;
+}
+
/**
 * ZSL_START: try to satisfy the capture from the zero-shutter-lag queue by
 * pushing an already-captured frame to reprocessing. Falls back to the
 * standard capture path if the queue has no usable frame or fails.
 */
CaptureSequencer::CaptureState CaptureSequencer::manageZslStart(
        sp<Camera2Client> &client) {
    ALOGV("%s", __FUNCTION__);
    status_t res;
    sp<ZslProcessorInterface> processor = mZslProcessor.promote();
    if (processor == 0) {
        ALOGE("%s: No ZSL queue to use!", __FUNCTION__);
        return DONE;
    }

    // Listen for the result metadata of the reprocess request
    client->registerFrameListener(mCaptureId, mCaptureId + 1,
            this);

    // TODO: Actually select the right thing here.
    res = processor->pushToReprocess(mCaptureId);
    if (res != OK) {
        if (res == NOT_ENOUGH_DATA) {
            ALOGV("%s: Camera %d: ZSL queue doesn't have good frame, "
                    "falling back to normal capture", __FUNCTION__,
                    client->getCameraId());
        } else {
            ALOGE("%s: Camera %d: Error in ZSL queue: %s (%d)",
                    __FUNCTION__, client->getCameraId(), strerror(-res), res);
        }
        return STANDARD_START;
    }

    // ZSL reuses an already-exposed frame, so fire the shutter notification
    // immediately rather than waiting for a new exposure
    SharedParameters::Lock l(client->getParameters());
    /* warning: this also locks a SharedCameraCallbacks */
    shutterNotifyLocked(l.mParameters, client, mMsgType);
    mShutterNotified = true;
    mTimeoutCount = kMaxTimeoutsForCaptureEnd;
    return STANDARD_CAPTURE_WAIT;
}
+
// ZSL_WAITING: no work is performed in this state; it transitions straight
// to DONE.
CaptureSequencer::CaptureState CaptureSequencer::manageZslWaiting(
        sp<Camera2Client> &/*client*/) {
    ALOGV("%s", __FUNCTION__);
    return DONE;
}
+
// ZSL_REPROCESSING: no work is performed in this state; it transitions
// straight back to START.
CaptureSequencer::CaptureState CaptureSequencer::manageZslReprocessing(
        sp<Camera2Client> &/*client*/) {
    ALOGV("%s", __FUNCTION__);
    return START;
}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageStandardStart(
+        sp<Camera2Client> &client) {
+    ATRACE_CALL();
+
+    // Get the onFrameAvailable callback when the requestID == mCaptureId
+    client->registerFrameListener(mCaptureId, mCaptureId + 1,
+            this);
+    {
+        SharedParameters::Lock l(client->getParameters());
+        mTriggerId = l.mParameters.precaptureTriggerCounter++;
+    }
+    client->getCameraDevice()->triggerPrecaptureMetering(mTriggerId);
+
+    mAeInPrecapture = false;
+    mTimeoutCount = kMaxTimeoutsForPrecaptureStart;
+    return STANDARD_PRECAPTURE_WAIT;
+}
+
/**
 * STANDARD_PRECAPTURE_WAIT: wait for the AE precapture sequence triggered in
 * manageStandardStart to first enter and then leave the PRECAPTURE state,
 * as reported via notifyAutoExposure(). Falls through to STANDARD_CAPTURE
 * once the timeout budget (mTimeoutCount) is exhausted.
 */
CaptureSequencer::CaptureState CaptureSequencer::manageStandardPrecaptureWait(
        sp<Camera2Client> &/*client*/) {
    status_t res;
    ATRACE_CALL();
    Mutex::Autolock l(mInputMutex);
    // Bounded wait for a new AE state notification
    while (!mNewAEState) {
        res = mNewNotifySignal.waitRelative(mInputMutex, kWaitDuration);
        if (res == TIMED_OUT) {
            mTimeoutCount--;
            break;
        }
    }
    if (mTimeoutCount <= 0) {
        ALOGW("Timed out waiting for precapture %s",
                mAeInPrecapture ? "end" : "start");
        return STANDARD_CAPTURE;
    }
    if (mNewAEState) {
        if (!mAeInPrecapture) {
            // Waiting to see PRECAPTURE state
            if (mAETriggerId == mTriggerId &&
                    mAEState == ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
                ALOGV("%s: Got precapture start", __FUNCTION__);
                mAeInPrecapture = true;
                mTimeoutCount = kMaxTimeoutsForPrecaptureEnd;
            }
        } else {
            // Waiting to see PRECAPTURE state end
            if (mAETriggerId == mTriggerId &&
                    mAEState != ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
                ALOGV("%s: Got precapture end", __FUNCTION__);
                return STANDARD_CAPTURE;
            }
        }
        mNewAEState = false;
    }
    return STANDARD_PRECAPTURE_WAIT;
}
+
/**
 * STANDARD_CAPTURE: assemble the still-capture request (output streams and
 * request ID), stop the streaming request for plain still captures per the
 * takePicture() contract, and submit the capture to the HAL device.
 * Moves to STANDARD_CAPTURE_WAIT on success, DONE on any failure.
 */
CaptureSequencer::CaptureState CaptureSequencer::manageStandardCapture(
        sp<Camera2Client> &client) {
    status_t res;
    ATRACE_CALL();
    SharedParameters::Lock l(client->getParameters());
    Vector<uint8_t> outputStreams;

    /**
     * Set up output streams in the request
     *  - preview
     *  - capture/jpeg
     *  - callback (if preview callbacks enabled)
     *  - recording (if recording enabled)
     */
    outputStreams.push(client->getPreviewStreamId());
    outputStreams.push(client->getCaptureStreamId());

    if (l.mParameters.previewCallbackFlags &
            CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) {
        outputStreams.push(client->getCallbackStreamId());
    }

    if (l.mParameters.state == Parameters::VIDEO_SNAPSHOT) {
        outputStreams.push(client->getRecordingStreamId());
    }

    res = mCaptureRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS,
            outputStreams);
    if (res == OK) {
        res = mCaptureRequest.update(ANDROID_REQUEST_ID,
                &mCaptureId, 1);
    }
    if (res == OK) {
        res = mCaptureRequest.sort();
    }

    if (res != OK) {
        ALOGE("%s: Camera %d: Unable to set up still capture request: %s (%d)",
                __FUNCTION__, client->getCameraId(), strerror(-res), res);
        return DONE;
    }

    // Create a capture copy since CameraDeviceBase#capture takes ownership
    CameraMetadata captureCopy = mCaptureRequest;
    if (captureCopy.entryCount() == 0) {
        ALOGE("%s: Camera %d: Unable to copy capture request for HAL device",
                __FUNCTION__, client->getCameraId());
        return DONE;
    }

    /**
     * Clear the streaming request for still-capture pictures
     *   (as opposed to i.e. video snapshots)
     */
    if (l.mParameters.state == Parameters::STILL_CAPTURE) {
        // API definition of takePicture() - stop preview before taking pic
        res = client->stopStream();
        if (res != OK) {
            ALOGE("%s: Camera %d: Unable to stop preview for still capture: "
                    "%s (%d)",
                    __FUNCTION__, client->getCameraId(), strerror(-res), res);
            return DONE;
        }
    }
    // TODO: Capture should be atomic with setStreamingRequest here
    res = client->getCameraDevice()->capture(captureCopy);
    if (res != OK) {
        ALOGE("%s: Camera %d: Unable to submit still image capture request: "
                "%s (%d)",
                __FUNCTION__, client->getCameraId(), strerror(-res), res);
        return DONE;
    }

    mTimeoutCount = kMaxTimeoutsForCaptureEnd;
    return STANDARD_CAPTURE_WAIT;
}
+
+/**
+ * Wait for the capture result metadata and then the JPEG buffer for a
+ * standard (non-ZSL, non-burst) capture.  Emits the shutter callback as
+ * soon as the result metadata arrives, then waits for the JPEG from
+ * JpegProcessor.
+ */
+CaptureSequencer::CaptureState CaptureSequencer::manageStandardCaptureWait(
+        sp<Camera2Client> &client) {
+    status_t res;
+    ATRACE_CALL();
+    Mutex::Autolock l(mInputMutex);
+
+    // Wait for new metadata result (mNewFrame)
+    while (!mNewFrameReceived) {
+        res = mNewFrameSignal.waitRelative(mInputMutex, kWaitDuration);
+        if (res == TIMED_OUT) {
+            mTimeoutCount--;
+            break;
+        }
+    }
+
+    // Approximation of the shutter being closed
+    // - TODO: use the hal3 exposure callback in Camera3Device instead
+    if (mNewFrameReceived && !mShutterNotified) {
+        SharedParameters::Lock l(client->getParameters());
+        /* warning: this also locks a SharedCameraCallbacks */
+        shutterNotifyLocked(l.mParameters, client, mMsgType);
+        mShutterNotified = true;
+    }
+
+    // Wait until jpeg was captured by JpegProcessor
+    while (mNewFrameReceived && !mNewCaptureReceived) {
+        res = mNewCaptureSignal.waitRelative(mInputMutex, kWaitDuration);
+        if (res == TIMED_OUT) {
+            mTimeoutCount--;
+            break;
+        }
+    }
+    if (mTimeoutCount <= 0) {
+        ALOGW("Timed out waiting for capture to complete");
+        return DONE;
+    }
+    if (mNewFrameReceived && mNewCaptureReceived) {
+        if (mNewFrameId != mCaptureId) {
+            ALOGW("Mismatched capture frame IDs: Expected %d, got %d",
+                    mCaptureId, mNewFrameId);
+        }
+        camera_metadata_entry_t entry;
+        entry = mNewFrame.find(ANDROID_SENSOR_TIMESTAMP);
+        if (entry.count == 0) {
+            // Don't touch entry.data below when the tag is absent; find()
+            // returns an empty entry whose data pointer must not be read
+            ALOGE("No timestamp field in capture frame!");
+        } else if (entry.data.i64[0] != mCaptureTimestamp) {
+            ALOGW("Mismatched capture timestamps: Metadata frame %lld,"
+                    " captured buffer %lld",
+                    entry.data.i64[0],
+                    mCaptureTimestamp);
+        }
+        client->removeFrameListener(mCaptureId, mCaptureId + 1, this);
+
+        mNewFrameReceived = false;
+        mNewCaptureReceived = false;
+        return DONE;
+    }
+    return STANDARD_CAPTURE_WAIT;
+}
+
+/**
+ * Build and validate the burst capture request, then hand it to the
+ * BurstCapture processor.  Transitions to BURST_CAPTURE_WAIT on success,
+ * DONE on any failure.
+ */
+CaptureSequencer::CaptureState CaptureSequencer::manageBurstCaptureStart(
+        sp<Camera2Client> &client) {
+    ALOGV("%s", __FUNCTION__);
+    status_t res;
+    ATRACE_CALL();
+
+    // check which burst mode is set, create respective burst object
+    {
+        SharedParameters::Lock l(client->getParameters());
+
+        res = updateCaptureRequest(l.mParameters, client);
+        if (res != OK) {
+            return DONE;
+        }
+
+        //
+        // check for burst mode type in mParameters here
+        //
+        mBurstCapture = new BurstCapture(client, this);
+    }
+
+    // Tag the request with this capture's ID so results can be matched up
+    res = mCaptureRequest.update(ANDROID_REQUEST_ID, &mCaptureId, 1);
+    if (res == OK) {
+        res = mCaptureRequest.sort();
+    }
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to set up still capture request: %s (%d)",
+                __FUNCTION__, client->getCameraId(), strerror(-res), res);
+        return DONE;
+    }
+
+    // Validate that the request copy actually carries entries before
+    // handing anything to the burst processor
+    CameraMetadata captureCopy = mCaptureRequest;
+    if (captureCopy.entryCount() == 0) {
+        ALOGE("%s: Camera %d: Unable to copy capture request for HAL device",
+                __FUNCTION__, client->getCameraId());
+        return DONE;
+    }
+
+    Vector<CameraMetadata> requests;
+    requests.push(mCaptureRequest);
+    res = mBurstCapture->start(requests, mCaptureId);
+    if (res != OK) {
+        // Previously ignored; without this check the state machine would
+        // sit in BURST_CAPTURE_WAIT until timeout after a failed start
+        ALOGE("%s: Camera %d: Unable to start burst capture: %s (%d)",
+                __FUNCTION__, client->getCameraId(), strerror(-res), res);
+        return DONE;
+    }
+    // Bursts may take considerably longer than a single still capture
+    mTimeoutCount = kMaxTimeoutsForCaptureEnd * 10;
+    return BURST_CAPTURE_WAIT;
+}
+
+/**
+ * Wait for the burst capture to complete (signalled via
+ * onCaptureAvailable -> mNewCaptureSignal), decrementing the timeout
+ * budget on every 100ms wakeup without progress.
+ */
+CaptureSequencer::CaptureState CaptureSequencer::manageBurstCaptureWait(
+        sp<Camera2Client> &/*client*/) {
+    status_t res;
+    ATRACE_CALL();
+
+    // mNewCaptureReceived / mNewCaptureSignal are guarded by mInputMutex,
+    // and Condition::waitRelative requires the caller to hold the mutex it
+    // is given.  The original code waited without taking the lock; take it
+    // here, matching manageStandardCaptureWait.
+    Mutex::Autolock l(mInputMutex);
+
+    while (!mNewCaptureReceived) {
+        res = mNewCaptureSignal.waitRelative(mInputMutex, kWaitDuration);
+        if (res == TIMED_OUT) {
+            mTimeoutCount--;
+            break;
+        }
+    }
+
+    if (mTimeoutCount <= 0) {
+        ALOGW("Timed out waiting for burst capture to complete");
+        return DONE;
+    }
+    if (mNewCaptureReceived) {
+        mNewCaptureReceived = false;
+        // TODO: update mCaptureId to last burst's capture ID + 1?
+        return DONE;
+    }
+
+    return BURST_CAPTURE_WAIT;
+}
+
+/**
+ * (Re)build mCaptureRequest for a still capture: lazily create the
+ * STILL_CAPTURE template on first use, then apply the current common and
+ * JPEG-specific parameters to it.  Returns the first failing status.
+ */
+status_t CaptureSequencer::updateCaptureRequest(const Parameters &params,
+        sp<Camera2Client> &client) {
+    ATRACE_CALL();
+    status_t res;
+    // Only create the base template once; later calls just refresh entries
+    if (mCaptureRequest.entryCount() == 0) {
+        res = client->getCameraDevice()->createDefaultRequest(
+                CAMERA2_TEMPLATE_STILL_CAPTURE,
+                &mCaptureRequest);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to create default still image request:"
+                    " %s (%d)", __FUNCTION__, client->getCameraId(),
+                    strerror(-res), res);
+            return res;
+        }
+    }
+
+    // Common (non-JPEG) request fields from the current parameters
+    res = params.updateRequest(&mCaptureRequest);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to update common entries of capture "
+                "request: %s (%d)", __FUNCTION__, client->getCameraId(),
+                strerror(-res), res);
+        return res;
+    }
+
+    // JPEG-specific fields (quality, thumbnail, EXIF, etc.)
+    res = params.updateRequestJpeg(&mCaptureRequest);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to update JPEG entries of capture "
+                "request: %s (%d)", __FUNCTION__, client->getCameraId(),
+                strerror(-res), res);
+        return res;
+    }
+
+    return OK;
+}
+
+/**
+ * Play the shutter sound (if enabled and requested) and emit the
+ * CAMERA_MSG_SHUTTER / CAMERA_MSG_RAW_IMAGE_NOTIFY callbacks to the client.
+ *
+ * "Locked" suffix: callers obtain params under a SharedParameters::Lock
+ * (see manageStandardCaptureWait), so params is assumed locked on entry.
+ * Note this additionally locks the client's SharedCameraCallbacks.
+ */
+/*static*/ void CaptureSequencer::shutterNotifyLocked(const Parameters &params,
+            sp<Camera2Client> client, int msgType) {
+    ATRACE_CALL();
+
+    // Sound is only played for real still captures (not video snapshots),
+    // and only when the app asked for the shutter message
+    if (params.state == Parameters::STILL_CAPTURE
+        && params.playShutterSound
+        && (msgType & CAMERA_MSG_SHUTTER)) {
+        client->getCameraService()->playSound(CameraService::SOUND_SHUTTER);
+    }
+
+    {
+        Camera2Client::SharedCameraCallbacks::Lock
+            l(client->mSharedCameraCallbacks);
+
+        ALOGV("%s: Notifying of shutter close to client", __FUNCTION__);
+        if (l.mRemoteCallback != 0) {
+            // ShutterCallback
+            l.mRemoteCallback->notifyCallback(CAMERA_MSG_SHUTTER,
+                                            /*ext1*/0, /*ext2*/0);
+
+            // RawCallback with null buffer
+            l.mRemoteCallback->notifyCallback(CAMERA_MSG_RAW_IMAGE_NOTIFY,
+                                            /*ext1*/0, /*ext2*/0);
+        } else {
+            ALOGV("%s: No client!", __FUNCTION__);
+        }
+    }
+}
+
+
+}; // namespace camera2
+}; // namespace android
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
new file mode 100644
index 0000000..76750aa
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_CAPTURESEQUENCER_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_CAPTURESEQUENCER_H
+
+#include <binder/MemoryBase.h>
+#include <utils/Thread.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+#include <utils/Mutex.h>
+#include <utils/Condition.h>
+#include "camera/CameraMetadata.h"
+#include "Parameters.h"
+#include "FrameProcessor.h"
+
+namespace android {
+
+class Camera2Client;
+
+namespace camera2 {
+
+class ZslProcessorInterface;
+class BurstCapture;
+
+/**
+ * Manages the still image capture process for
+ * zero-shutter-lag, regular, and video snapshots.
+ */
+class CaptureSequencer:
+            virtual public Thread,
+            virtual public FrameProcessor::FilteredListener {
+  public:
+    CaptureSequencer(wp<Camera2Client> client);
+    ~CaptureSequencer();
+
+    // Get reference to the ZslProcessor, which holds the ZSL buffers and frames
+    void setZslProcessor(wp<ZslProcessorInterface> processor);
+
+    // Begin still image capture
+    status_t startCapture(int msgType);
+
+    // Wait until current image capture completes; returns immediately if no
+    // capture is active. Returns TIMED_OUT if capture does not complete during
+    // the specified duration.
+    status_t waitUntilIdle(nsecs_t timeout);
+
+    // Notifications about AE state changes
+    void notifyAutoExposure(uint8_t newState, int triggerId);
+
+    // Notifications from the frame processor
+    virtual void onFrameAvailable(int32_t frameId, const CameraMetadata &frame);
+
+    // Notifications from the JPEG processor
+    void onCaptureAvailable(nsecs_t timestamp, sp<MemoryBase> captureBuffer);
+
+    void dump(int fd, const Vector<String16>& args);
+
+  private:
+    /**
+     * Accessed by other threads
+     */
+    Mutex mInputMutex;
+
+    // Pending startCapture() request, consumed by manageIdle()
+    bool mStartCapture;
+    bool mBusy;
+    Condition mStartCaptureSignal;
+
+    // Latest AE notification from notifyAutoExposure()
+    bool mNewAEState;
+    uint8_t mAEState;
+    int mAETriggerId;
+    Condition mNewNotifySignal;
+
+    // Latest result metadata from onFrameAvailable()
+    bool mNewFrameReceived;
+    int32_t mNewFrameId;
+    CameraMetadata mNewFrame;
+    Condition mNewFrameSignal;
+
+    // Latest JPEG buffer from onCaptureAvailable()
+    bool mNewCaptureReceived;
+    nsecs_t mCaptureTimestamp;
+    sp<MemoryBase> mCaptureBuffer;
+    Condition mNewCaptureSignal;
+
+    // Whether the shutter callback was already emitted for this capture
+    bool mShutterNotified;
+
+    /**
+     * Internal to CaptureSequencer
+     */
+    static const nsecs_t kWaitDuration = 100000000; // 100 ms
+    static const int kMaxTimeoutsForPrecaptureStart = 2; // 200 ms
+    static const int kMaxTimeoutsForPrecaptureEnd = 20;  // 2 sec
+    static const int kMaxTimeoutsForCaptureEnd    = 40;  // 4 sec
+
+    wp<Camera2Client> mClient;
+    wp<ZslProcessorInterface> mZslProcessor;
+    sp<BurstCapture> mBurstCapture;
+
+    // Capture state machine; threadLoop() dispatches each state through
+    // kStateManagers to the matching manage* method below
+    enum CaptureState {
+        IDLE,
+        START,
+        ZSL_START,
+        ZSL_WAITING,
+        ZSL_REPROCESSING,
+        STANDARD_START,
+        STANDARD_PRECAPTURE_WAIT,
+        STANDARD_CAPTURE,
+        STANDARD_CAPTURE_WAIT,
+        BURST_CAPTURE_START,
+        BURST_CAPTURE_WAIT,
+        DONE,
+        ERROR,
+        NUM_CAPTURE_STATES
+    } mCaptureState;
+    static const char* kStateNames[];
+    Mutex mStateMutex; // Guards mCaptureState
+    Condition mStateChanged;
+
+    typedef CaptureState (CaptureSequencer::*StateManager)(sp<Camera2Client> &client);
+    static const StateManager kStateManagers[];
+
+    // Still-capture request, built by updateCaptureRequest()
+    CameraMetadata mCaptureRequest;
+
+    int mTriggerId;
+    // Remaining 100ms wait slots before the current state gives up
+    int mTimeoutCount;
+    bool mAeInPrecapture;
+
+    // Request ID used to match result frames to this capture
+    int32_t mCaptureId;
+    // CAMERA_MSG_* bits requested by the client for this capture
+    int mMsgType;
+
+    // Main internal methods
+
+    virtual bool threadLoop();
+
+    CaptureState manageIdle(sp<Camera2Client> &client);
+    CaptureState manageStart(sp<Camera2Client> &client);
+
+    CaptureState manageZslStart(sp<Camera2Client> &client);
+    CaptureState manageZslWaiting(sp<Camera2Client> &client);
+    CaptureState manageZslReprocessing(sp<Camera2Client> &client);
+
+    CaptureState manageStandardStart(sp<Camera2Client> &client);
+    CaptureState manageStandardPrecaptureWait(sp<Camera2Client> &client);
+    CaptureState manageStandardCapture(sp<Camera2Client> &client);
+    CaptureState manageStandardCaptureWait(sp<Camera2Client> &client);
+
+    CaptureState manageBurstCaptureStart(sp<Camera2Client> &client);
+    CaptureState manageBurstCaptureWait(sp<Camera2Client> &client);
+
+    CaptureState manageDone(sp<Camera2Client> &client);
+
+    // Utility methods
+
+    status_t updateCaptureRequest(const Parameters &params,
+            sp<Camera2Client> &client);
+
+    // Emit Shutter/Raw callback to java, and maybe play a shutter sound
+    static void shutterNotifyLocked(const Parameters &params,
+            sp<Camera2Client> client, int msgType);
+};
+
+}; // namespace camera2
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
new file mode 100644
index 0000000..c34cb12
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -0,0 +1,315 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera2-FrameProcessor"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include "common/CameraDeviceBase.h"
+#include "api1/Camera2Client.h"
+#include "api1/client2/FrameProcessor.h"
+
+namespace android {
+namespace camera2 {
+
+FrameProcessor::FrameProcessor(wp<CameraDeviceBase> device,
+                               wp<Camera2Client> client) :
+    FrameProcessorBase(device),
+    mClient(client),
+    mLastFrameNumberOfFaces(0) {
+
+    sp<CameraDeviceBase> d = device.promote();
+    // The device may already be gone by the time we are constructed; only
+    // query it if promotion succeeded.  Default to not synthesizing 3A
+    // notifications otherwise — a dead device produces no frames anyway.
+    mSynthesize3ANotify = (d != 0) && !(d->willNotify3A());
+}
+
+FrameProcessor::~FrameProcessor() {
+    // No resources owned directly by this subclass (only weak refs and PODs)
+}
+
+/**
+ * Process one result frame: forward face-detection metadata, optionally
+ * synthesize 3A state-change notifications, then let the base class run
+ * its registered listeners.  Returns false to stop frame processing.
+ */
+bool FrameProcessor::processSingleFrame(CameraMetadata &frame,
+                                        const sp<CameraDeviceBase> &device) {
+
+    // No client left to deliver results to; give up on this frame
+    sp<Camera2Client> strongClient = mClient.promote();
+    if (strongClient == 0) {
+        return false;
+    }
+
+    if (processFaceDetect(frame, strongClient) != OK) {
+        return false;
+    }
+
+    if (mSynthesize3ANotify) {
+        // Ignoring missing fields for now
+        process3aState(frame, strongClient);
+    }
+
+    return FrameProcessorBase::processSingleFrame(frame, device);
+}
+
+/**
+ * Extract face-detection results from the frame metadata, convert them to
+ * API1 camera_face_t entries (normalized coordinates), and emit them to the
+ * client via callbackFaceDetection.  Returns OK when there is nothing to
+ * report; BAD_VALUE when expected face fields are missing or inconsistent.
+ */
+status_t FrameProcessor::processFaceDetect(const CameraMetadata &frame,
+        const sp<Camera2Client> &client) {
+    status_t res = BAD_VALUE;
+    ATRACE_CALL();
+    camera_metadata_ro_entry_t entry;
+    bool enableFaceDetect;
+
+    {
+        SharedParameters::Lock l(client->getParameters());
+        enableFaceDetect = l.mParameters.enableFaceDetect;
+    }
+    entry = frame.find(ANDROID_STATISTICS_FACE_DETECT_MODE);
+
+    // TODO: This should be an error once implementations are compliant
+    if (entry.count == 0) {
+        return OK;
+    }
+
+    uint8_t faceDetectMode = entry.data.u8[0];
+
+    camera_frame_metadata metadata;
+    Vector<camera_face_t> faces;
+    metadata.number_of_faces = 0;
+
+    if (enableFaceDetect &&
+        faceDetectMode != ANDROID_STATISTICS_FACE_DETECT_MODE_OFF) {
+
+        // Hold the parameters lock across coordinate normalization below
+        SharedParameters::Lock l(client->getParameters());
+        entry = frame.find(ANDROID_STATISTICS_FACE_RECTANGLES);
+        if (entry.count == 0) {
+            // No faces this frame
+            /* warning: locks SharedCameraCallbacks */
+            callbackFaceDetection(client, metadata);
+            return OK;
+        }
+        // Four int32 coordinates per face rectangle
+        metadata.number_of_faces = entry.count / 4;
+        if (metadata.number_of_faces >
+                l.mParameters.fastInfo.maxFaces) {
+            ALOGE("%s: Camera %d: More faces than expected! (Got %d, max %d)",
+                    __FUNCTION__, client->getCameraId(),
+                    metadata.number_of_faces, l.mParameters.fastInfo.maxFaces);
+            return res;
+        }
+        const int32_t *faceRects = entry.data.i32;
+
+        entry = frame.find(ANDROID_STATISTICS_FACE_SCORES);
+        if (entry.count == 0) {
+            ALOGE("%s: Camera %d: Unable to read face scores",
+                    __FUNCTION__, client->getCameraId());
+            return res;
+        }
+        const uint8_t *faceScores = entry.data.u8;
+
+        const int32_t *faceLandmarks = NULL;
+        const int32_t *faceIds = NULL;
+
+        // Landmarks and IDs are only present in FULL face-detect mode
+        if (faceDetectMode == ANDROID_STATISTICS_FACE_DETECT_MODE_FULL) {
+            entry = frame.find(ANDROID_STATISTICS_FACE_LANDMARKS);
+            if (entry.count == 0) {
+                ALOGE("%s: Camera %d: Unable to read face landmarks",
+                        __FUNCTION__, client->getCameraId());
+                return res;
+            }
+            faceLandmarks = entry.data.i32;
+
+            entry = frame.find(ANDROID_STATISTICS_FACE_IDS);
+
+            if (entry.count == 0) {
+                ALOGE("%s: Camera %d: Unable to read face IDs",
+                        __FUNCTION__, client->getCameraId());
+                return res;
+            }
+            faceIds = entry.data.i32;
+        }
+
+        faces.setCapacity(metadata.number_of_faces);
+
+        size_t maxFaces = metadata.number_of_faces;
+        for (size_t i = 0; i < maxFaces; i++) {
+            // Score 0 marks an invalid/unused face slot; drop it
+            if (faceScores[i] == 0) {
+                metadata.number_of_faces--;
+                continue;
+            }
+            if (faceScores[i] > 100) {
+                ALOGW("%s: Face index %d with out of range score %d",
+                        __FUNCTION__, i, faceScores[i]);
+            }
+
+            camera_face_t face;
+
+            // Map active-array pixel coordinates to API1 [-1000, 1000] space
+            face.rect[0] = l.mParameters.arrayXToNormalized(faceRects[i*4 + 0]);
+            face.rect[1] = l.mParameters.arrayYToNormalized(faceRects[i*4 + 1]);
+            face.rect[2] = l.mParameters.arrayXToNormalized(faceRects[i*4 + 2]);
+            face.rect[3] = l.mParameters.arrayYToNormalized(faceRects[i*4 + 3]);
+
+            face.score = faceScores[i];
+            if (faceDetectMode == ANDROID_STATISTICS_FACE_DETECT_MODE_FULL) {
+                face.id = faceIds[i];
+                face.left_eye[0] =
+                    l.mParameters.arrayXToNormalized(faceLandmarks[i*6 + 0]);
+                face.left_eye[1] =
+                    l.mParameters.arrayYToNormalized(faceLandmarks[i*6 + 1]);
+                face.right_eye[0] =
+                    l.mParameters.arrayXToNormalized(faceLandmarks[i*6 + 2]);
+                face.right_eye[1] =
+                    l.mParameters.arrayYToNormalized(faceLandmarks[i*6 + 3]);
+                face.mouth[0] =
+                    l.mParameters.arrayXToNormalized(faceLandmarks[i*6 + 4]);
+                face.mouth[1] =
+                    l.mParameters.arrayYToNormalized(faceLandmarks[i*6 + 5]);
+            } else {
+                // SIMPLE mode: landmarks unavailable; use sentinel values
+                face.id = 0;
+                face.left_eye[0] = face.left_eye[1] = -2000;
+                face.right_eye[0] = face.right_eye[1] = -2000;
+                face.mouth[0] = face.mouth[1] = -2000;
+            }
+            faces.push_back(face);
+        }
+
+        metadata.faces = faces.editArray();
+    }
+
+    /* warning: locks SharedCameraCallbacks */
+    callbackFaceDetection(client, metadata);
+
+    return OK;
+}
+
+/**
+ * Derive AE/AF/AWB state-change notifications from the frame metadata and
+ * forward the transitions to the client.  Used when the HAL does not emit
+ * 3A notify callbacks itself (mSynthesize3ANotify).  Returns BAD_VALUE if
+ * any expected 3A field is missing from the frame.
+ */
+status_t FrameProcessor::process3aState(const CameraMetadata &frame,
+        const sp<Camera2Client> &client) {
+
+    ATRACE_CALL();
+    camera_metadata_ro_entry_t entry;
+    int mId = client->getCameraId();
+
+    entry = frame.find(ANDROID_REQUEST_FRAME_COUNT);
+    if (entry.count == 0) {
+        // find() returns an empty entry when the tag is absent; the
+        // original code dereferenced entry.data.i32 unconditionally
+        ALOGE("%s: Camera %d: No frame count provided by HAL!",
+                __FUNCTION__, mId);
+        return BAD_VALUE;
+    }
+    int32_t frameNumber = entry.data.i32[0];
+
+    // Get 3A states from result metadata
+    bool gotAllStates = true;
+
+    AlgState new3aState;
+
+    entry = frame.find(ANDROID_CONTROL_AE_STATE);
+    if (entry.count == 0) {
+        ALOGE("%s: Camera %d: No AE state provided by HAL for frame %d!",
+                __FUNCTION__, mId, frameNumber);
+        gotAllStates = false;
+    } else {
+        new3aState.aeState =
+                static_cast<camera_metadata_enum_android_control_ae_state>(
+                    entry.data.u8[0]);
+    }
+
+    entry = frame.find(ANDROID_CONTROL_AF_STATE);
+    if (entry.count == 0) {
+        ALOGE("%s: Camera %d: No AF state provided by HAL for frame %d!",
+                __FUNCTION__, mId, frameNumber);
+        gotAllStates = false;
+    } else {
+        new3aState.afState =
+                static_cast<camera_metadata_enum_android_control_af_state>(
+                    entry.data.u8[0]);
+    }
+
+    entry = frame.find(ANDROID_CONTROL_AWB_STATE);
+    if (entry.count == 0) {
+        ALOGE("%s: Camera %d: No AWB state provided by HAL for frame %d!",
+                __FUNCTION__, mId, frameNumber);
+        gotAllStates = false;
+    } else {
+        new3aState.awbState =
+                static_cast<camera_metadata_enum_android_control_awb_state>(
+                    entry.data.u8[0]);
+    }
+
+    int32_t afTriggerId = 0;
+    entry = frame.find(ANDROID_CONTROL_AF_TRIGGER_ID);
+    if (entry.count == 0) {
+        ALOGE("%s: Camera %d: No AF trigger ID provided by HAL for frame %d!",
+                __FUNCTION__, mId, frameNumber);
+        gotAllStates = false;
+    } else {
+        afTriggerId = entry.data.i32[0];
+    }
+
+    int32_t aeTriggerId = 0;
+    entry = frame.find(ANDROID_CONTROL_AE_PRECAPTURE_ID);
+    if (entry.count == 0) {
+        ALOGE("%s: Camera %d: No AE precapture trigger ID provided by HAL"
+                " for frame %d!",
+                __FUNCTION__, mId, frameNumber);
+        gotAllStates = false;
+    } else {
+        aeTriggerId = entry.data.i32[0];
+    }
+
+    if (!gotAllStates) return BAD_VALUE;
+
+    // Only notify the client about actual state transitions
+    if (new3aState.aeState != m3aState.aeState) {
+        ALOGV("%s: AE state changed from 0x%x to 0x%x",
+                __FUNCTION__, m3aState.aeState, new3aState.aeState);
+        client->notifyAutoExposure(new3aState.aeState, aeTriggerId);
+    }
+    if (new3aState.afState != m3aState.afState) {
+        ALOGV("%s: AF state changed from 0x%x to 0x%x",
+                __FUNCTION__, m3aState.afState, new3aState.afState);
+        client->notifyAutoFocus(new3aState.afState, afTriggerId);
+    }
+    if (new3aState.awbState != m3aState.awbState) {
+        ALOGV("%s: AWB state changed from 0x%x to 0x%x",
+                __FUNCTION__, m3aState.awbState, new3aState.awbState);
+        client->notifyAutoWhitebalance(new3aState.awbState, aeTriggerId);
+    }
+
+    m3aState = new3aState;
+
+    return OK;
+}
+
+
+// Deliver a CAMERA_MSG_PREVIEW_METADATA (face detection) callback to the
+// client, suppressing callbacks for consecutive zero-face frames.
+void FrameProcessor::callbackFaceDetection(sp<Camera2Client> client,
+                                     const camera_frame_metadata &metadata) {
+
+    // The remote callback interface takes a non-const pointer; the metadata
+    // is not modified here
+    camera_frame_metadata *metadata_ptr =
+        const_cast<camera_frame_metadata*>(&metadata);
+
+    /**
+     * Filter out repeated 0-face callbacks,
+     * but not when the last frame was >0
+     */
+    if (metadata.number_of_faces != 0 ||
+        mLastFrameNumberOfFaces != metadata.number_of_faces) {
+
+        Camera2Client::SharedCameraCallbacks::Lock
+            l(client->mSharedCameraCallbacks);
+        if (l.mRemoteCallback != NULL) {
+            l.mRemoteCallback->dataCallback(CAMERA_MSG_PREVIEW_METADATA,
+                                            NULL,
+                                            metadata_ptr);
+        }
+    }
+
+    mLastFrameNumberOfFaces = metadata.number_of_faces;
+}
+
+}; // namespace camera2
+}; // namespace android
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
new file mode 100644
index 0000000..2a17d45
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_FRAMEPROCESSOR_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_FRAMEPROCESSOR_H
+
+#include <utils/Thread.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+#include <utils/KeyedVector.h>
+#include <utils/List.h>
+#include <camera/CameraMetadata.h>
+
+#include "common/FrameProcessorBase.h"
+
+struct camera_frame_metadata;
+
+namespace android {
+
+class Camera2Client;
+
+namespace camera2 {
+
+/* Output frame metadata processing thread.  This thread waits for new
+ * frames from the device, and analyzes them as necessary.
+ */
+class FrameProcessor : public FrameProcessorBase {
+  public:
+    FrameProcessor(wp<CameraDeviceBase> device, wp<Camera2Client> client);
+    ~FrameProcessor();
+
+  private:
+    wp<Camera2Client> mClient;
+
+    // True when the HAL does not emit 3A notify callbacks itself, so 3A
+    // transitions must be synthesized from frame metadata (process3aState)
+    bool mSynthesize3ANotify;
+
+    // Face count reported in the previous frame; used to suppress repeated
+    // zero-face callbacks
+    int mLastFrameNumberOfFaces;
+
+    void processNewFrames(const sp<Camera2Client> &client);
+
+    // Process one result frame: face detection, optional synthesized 3A
+    // notifications, then the base class's registered listeners
+    virtual bool processSingleFrame(CameraMetadata &frame,
+                                    const sp<CameraDeviceBase> &device);
+
+    // Convert face-detection metadata into API1 camera_face_t entries and
+    // emit them to the client as a preview-metadata callback
+    status_t processFaceDetect(const CameraMetadata &frame,
+            const sp<Camera2Client> &client);
+
+    // Send 3A state change notifications to client based on frame metadata
+    status_t process3aState(const CameraMetadata &frame,
+            const sp<Camera2Client> &client);
+
+    // Last-seen 3A states, used to detect transitions between frames
+    struct AlgState {
+        camera_metadata_enum_android_control_ae_state  aeState;
+        camera_metadata_enum_android_control_af_state  afState;
+        camera_metadata_enum_android_control_awb_state awbState;
+
+        AlgState() :
+                aeState(ANDROID_CONTROL_AE_STATE_INACTIVE),
+                afState(ANDROID_CONTROL_AF_STATE_INACTIVE),
+                awbState(ANDROID_CONTROL_AWB_STATE_INACTIVE) {
+        }
+    } m3aState;
+
+    // Emit FaceDetection event to java if faces changed
+    void callbackFaceDetection(sp<Camera2Client> client,
+                               const camera_frame_metadata &metadata);
+};
+
+
+}; //namespace camera2
+}; //namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/api1/client2/JpegCompressor.cpp b/services/camera/libcameraservice/api1/client2/JpegCompressor.cpp
new file mode 100644
index 0000000..2f0c67d
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/JpegCompressor.cpp
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Camera2-JpegCompressor"
+
+#include <utils/Log.h>
+#include <ui/GraphicBufferMapper.h>
+
+#include "JpegCompressor.h"
+
+namespace android {
+namespace camera2 {
+
+// Thread(false): created in "not running" state; the worker is launched
+// explicitly from start() via run()
+JpegCompressor::JpegCompressor():
+        Thread(false),
+        mIsBusy(false),
+        mCaptureTime(0) {
+}
+
+JpegCompressor::~JpegCompressor() {
+    ALOGV("%s", __FUNCTION__);
+    // Briefly acquire mMutex so destruction serializes with any caller
+    // still inside a locked section
+    Mutex::Autolock lock(mMutex);
+}
+
+/**
+ * Kick off compression of buffers[0] (input) into buffers[1] (output) on
+ * the worker thread.  Returns INVALID_OPERATION if a compression is
+ * already in flight, or the thread-launch error otherwise.
+ */
+status_t JpegCompressor::start(Vector<CpuConsumer::LockedBuffer*> buffers,
+        nsecs_t captureTime) {
+    ALOGV("%s", __FUNCTION__);
+    Mutex::Autolock busyLock(mBusyMutex);
+
+    if (mIsBusy) {
+        ALOGE("%s: Already processing a buffer!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    mIsBusy = true;
+
+    mBuffers = buffers;
+    mCaptureTime = captureTime;
+
+    status_t res;
+    res = run("JpegCompressor");
+    if (res != OK) {
+        ALOGE("%s: Unable to start up compression thread: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        // Roll back the busy flag: leaving it set after a failed thread
+        // launch would make every later start() fail with INVALID_OPERATION
+        // and waitForDone() block until timeout.
+        mIsBusy = false;
+        //delete mBuffers;  // necessary?
+    }
+    return res;
+}
+
+// Abort an in-flight compression: the worker checks exitPending() between
+// scanline chunks, so this blocks until the thread has exited.
+status_t JpegCompressor::cancel() {
+    ALOGV("%s", __FUNCTION__);
+    requestExitAndWait();
+    return OK;
+}
+
+// Thread hook called before threadLoop(); no per-run setup is needed here
+status_t JpegCompressor::readyToRun() {
+    ALOGV("%s", __FUNCTION__);
+    return OK;
+}
+
+/**
+ * Worker thread body: compress mBuffers[0] (grayscale input) into
+ * mBuffers[1] (JPEG output) using libjpeg.  Runs exactly once per start();
+ * always returns false so the thread exits after one pass.
+ */
+bool JpegCompressor::threadLoop() {
+    ALOGV("%s", __FUNCTION__);
+
+    // Need an input and an output buffer; bail out cleanly instead of
+    // indexing past the end of mBuffers.
+    if (mBuffers.size() < 2) {
+        ALOGE("%s: Expected 2 buffers (input, output), got %zu",
+                __FUNCTION__, mBuffers.size());
+        Mutex::Autolock lock(mBusyMutex);
+        mIsBusy = false;
+        mDone.signal();
+        return false;
+    }
+
+    mAuxBuffer = mBuffers[0];    // input
+    mJpegBuffer = mBuffers[1];    // output
+
+    // Set up error management
+    mJpegErrorInfo = NULL;
+    JpegError error;
+    error.parent = this;
+
+    mCInfo.err = jpeg_std_error(&error);
+    mCInfo.err->error_exit = jpegErrorHandler;
+
+    jpeg_create_compress(&mCInfo);
+    if (checkError("Error initializing compression")) return false;
+
+    // Route compressed data straight to output stream buffer
+    JpegDestination jpegDestMgr;
+    jpegDestMgr.parent = this;
+    jpegDestMgr.init_destination = jpegInitDestination;
+    jpegDestMgr.empty_output_buffer = jpegEmptyOutputBuffer;
+    jpegDestMgr.term_destination = jpegTermDestination;
+
+    mCInfo.dest = &jpegDestMgr;
+
+    // Set up compression parameters
+    mCInfo.image_width = mAuxBuffer->width;
+    mCInfo.image_height = mAuxBuffer->height;
+    mCInfo.input_components = 1; // 3;
+    mCInfo.in_color_space = JCS_GRAYSCALE; // JCS_RGB
+
+    ALOGV("%s: image_width = %d, image_height = %d", __FUNCTION__, mCInfo.image_width, mCInfo.image_height);
+
+    jpeg_set_defaults(&mCInfo);
+    if (checkError("Error configuring defaults")) return false;
+
+    // Do compression
+    jpeg_start_compress(&mCInfo, TRUE);
+    if (checkError("Error starting compression")) return false;
+
+    size_t rowStride = mAuxBuffer->stride;// * 3;
+    const size_t kChunkSize = 32;
+    while (mCInfo.next_scanline < mCInfo.image_height) {
+        JSAMPROW chunk[kChunkSize];
+        // Clamp the batch to the rows actually remaining: the original code
+        // always built and wrote kChunkSize rows, reading past the end of
+        // the input buffer whenever image_height was not a multiple of 32.
+        size_t rowCount = mCInfo.image_height - mCInfo.next_scanline;
+        if (rowCount > kChunkSize) {
+            rowCount = kChunkSize;
+        }
+        for (size_t i = 0 ; i < rowCount; i++) {
+            chunk[i] = (JSAMPROW)
+                    (mAuxBuffer->data + (i + mCInfo.next_scanline) * rowStride);
+        }
+        jpeg_write_scanlines(&mCInfo, chunk, rowCount);
+        if (checkError("Error while compressing")) return false;
+        if (exitPending()) {
+            ALOGV("%s: Cancel called, exiting early", __FUNCTION__);
+            cleanUp();
+            return false;
+        }
+    }
+
+    jpeg_finish_compress(&mCInfo);
+    if (checkError("Error while finishing compression")) return false;
+
+    cleanUp();
+    return false;
+}
+
+// Report whether a compression pass is currently in flight.
+bool JpegCompressor::isBusy() {
+    ALOGV("%s", __FUNCTION__);
+    // Snapshot the flag under its guard and return the copy
+    bool busy;
+    {
+        Mutex::Autolock busyLock(mBusyMutex);
+        busy = mIsBusy;
+    }
+    return busy;
+}
+
+// old function -- TODO: update for new buffer type
+// Currently always returns false: the per-buffer stream-ID comparison is
+// commented out pending migration to the new buffer type.
+bool JpegCompressor::isStreamInUse(uint32_t /*id*/) {
+    ALOGV("%s", __FUNCTION__);
+    Mutex::Autolock lock(mBusyMutex);
+
+    if (mBuffers.size() && mIsBusy) {
+        for (size_t i = 0; i < mBuffers.size(); i++) {
+//            if ( mBuffers[i].streamId == (int)id ) return true;
+        }
+    }
+    return false;
+}
+
+// Block until the current compression completes or timeout elapses.
+// Returns true if the compressor is idle (immediately, or after the worker
+// signalled completion); false if the wait timed out.
+bool JpegCompressor::waitForDone(nsecs_t timeout) {
+    ALOGV("%s", __FUNCTION__);
+    Mutex::Autolock lock(mBusyMutex);
+    if (!mIsBusy) {
+        return true;
+    }
+    return mDone.waitRelative(mBusyMutex, timeout) == OK;
+}
+
+// Returns true (after logging and tearing down the compressor via
+// cleanUp()) if the libjpeg error handler recorded a failure since the
+// last check; msg provides context for the log line.
+bool JpegCompressor::checkError(const char *msg) {
+    ALOGV("%s", __FUNCTION__);
+    if (mJpegErrorInfo) {
+        char errBuffer[JMSG_LENGTH_MAX];
+        mJpegErrorInfo->err->format_message(mJpegErrorInfo, errBuffer);
+        ALOGE("%s: %s: %s",
+                __FUNCTION__, msg, errBuffer);
+        cleanUp();
+        mJpegErrorInfo = NULL;
+        return true;
+    }
+    return false;
+}
+
+// Tear down the libjpeg compression object, mark the compressor idle, and
+// wake any waitForDone() caller.  Invoked on both the success and error
+// paths of threadLoop().
+void JpegCompressor::cleanUp() {
+    ALOGV("%s", __FUNCTION__);
+    jpeg_destroy_compress(&mCInfo);
+    Mutex::Autolock lock(mBusyMutex);
+    mIsBusy = false;
+    mDone.signal();
+}
+
+// libjpeg error_exit callback: record the failing cinfo on the owning
+// compressor so checkError() can report and recover from it.
+void JpegCompressor::jpegErrorHandler(j_common_ptr cinfo) {
+    ALOGV("%s", __FUNCTION__);
+    JpegError *error = static_cast<JpegError*>(cinfo->err);
+    error->parent->mJpegErrorInfo = cinfo;
+}
+
+// libjpeg init_destination callback: point the destination manager at the
+// fixed-size JPEG output buffer.
+void JpegCompressor::jpegInitDestination(j_compress_ptr cinfo) {
+    ALOGV("%s", __FUNCTION__);
+    JpegDestination *dest= static_cast<JpegDestination*>(cinfo->dest);
+    ALOGV("%s: Setting destination to %p, size %d",
+            __FUNCTION__, dest->parent->mJpegBuffer->data, kMaxJpegSize);
+    dest->next_output_byte = (JOCTET*)(dest->parent->mJpegBuffer->data);
+    dest->free_in_buffer = kMaxJpegSize;
+}
+
+// libjpeg empty_output_buffer callback, invoked when the fixed-size output
+// buffer fills up.  Overflow is unrecoverable for this destination.
+// NOTE(review): returning TRUE without resetting next_output_byte /
+// free_in_buffer tells libjpeg the buffer was emptied, so it will keep
+// writing at the stale pointer — confirm overflow cannot occur in practice
+// (kMaxJpegSize sizing), or abort the compression here instead.
+boolean JpegCompressor::jpegEmptyOutputBuffer(j_compress_ptr /*cinfo*/) {
+    ALOGV("%s", __FUNCTION__);
+    ALOGE("%s: JPEG destination buffer overflow!",
+            __FUNCTION__);
+    return true;
+}
+
+// libjpeg term_destination callback, invoked when compression completes.
+// The (void) cast silences the unused-parameter warning when ALOGV is
+// compiled out (LOG_NDEBUG), since cinfo is only referenced inside ALOGV.
+void JpegCompressor::jpegTermDestination(j_compress_ptr cinfo) {
+    (void) cinfo; // TODO: clean up
+    ALOGV("%s", __FUNCTION__);
+    ALOGV("%s: Done writing JPEG data. %d bytes left in buffer",
+            __FUNCTION__, cinfo->dest->free_in_buffer);
+}
+
+}; // namespace camera2
+}; // namespace android
diff --git a/services/camera/libcameraservice/api1/client2/JpegCompressor.h b/services/camera/libcameraservice/api1/client2/JpegCompressor.h
new file mode 100644
index 0000000..945b1de
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/JpegCompressor.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/**
+ * This class simulates a hardware JPEG compressor.  It receives image buffers
+ * in RGBA_8888 format, processes them in a worker thread, and then pushes them
+ * out to their destination stream.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_JPEGCOMPRESSOR_H
+#define ANDROID_SERVERS_CAMERA_JPEGCOMPRESSOR_H
+
+#include "utils/Thread.h"
+#include "utils/Mutex.h"
+#include "utils/Timers.h"
+#include "utils/Vector.h"
+//#include "Base.h"
+#include <stdio.h>
+#include <gui/CpuConsumer.h>
+
+extern "C" {
+#include <jpeglib.h>
+}
+
+
+namespace android {
+namespace camera2 {
+
+// Software JPEG encoder running on its own worker thread.  Input buffers
+// arrive via start(); the result is written into the buffer flagged as
+// the JPEG output (see mJpegBuffer/mAuxBuffer below).
+class JpegCompressor: private Thread, public virtual RefBase {
+  public:
+
+    JpegCompressor();
+    ~JpegCompressor();
+
+    // Start compressing COMPRESSED format buffers; JpegCompressor takes
+    // ownership of the Buffers vector.
+    status_t start(Vector<CpuConsumer::LockedBuffer*> buffers,
+            nsecs_t captureTime);
+
+    // Request that the in-progress compression stop.
+    status_t cancel();
+
+    bool isBusy();
+    bool isStreamInUse(uint32_t id);
+
+    // Block until the current compression finishes, or timeout elapses.
+    bool waitForDone(nsecs_t timeout);
+
+    // Upper bound on the compressed output size, in bytes.
+    // TODO: Measure this
+    static const size_t kMaxJpegSize = 300000;
+
+  private:
+    Mutex mBusyMutex;       // guards mIsBusy / mDone
+    Mutex mMutex;           // guards compression state
+    bool mIsBusy;
+    Condition mDone;        // signalled by cleanUp() when a run finishes
+    nsecs_t mCaptureTime;
+
+    // Input buffers owned by this object once start() is called
+    Vector<CpuConsumer::LockedBuffer*> mBuffers;
+    CpuConsumer::LockedBuffer *mJpegBuffer;  // destination buffer
+    CpuConsumer::LockedBuffer *mAuxBuffer;   // source buffer
+    bool mFoundJpeg, mFoundAux;
+
+    jpeg_compress_struct mCInfo;
+
+    // libjpeg error manager extended with a back-pointer to the owner
+    struct JpegError : public jpeg_error_mgr {
+        JpegCompressor *parent;
+    };
+    // Non-NULL when the error handler has recorded a failure
+    j_common_ptr mJpegErrorInfo;
+
+    // libjpeg destination manager extended with a back-pointer to the owner
+    struct JpegDestination : public jpeg_destination_mgr {
+        JpegCompressor *parent;
+    };
+
+    // libjpeg C callbacks (must be static; use the parent back-pointers)
+    static void jpegErrorHandler(j_common_ptr cinfo);
+
+    static void jpegInitDestination(j_compress_ptr cinfo);
+    static boolean jpegEmptyOutputBuffer(j_compress_ptr cinfo);
+    static void jpegTermDestination(j_compress_ptr cinfo);
+
+    // Returns true (and cleans up) if a libjpeg error is pending
+    bool checkError(const char *msg);
+    void cleanUp();
+
+    /**
+     * Inherited Thread virtual overrides
+     */
+  private:
+    virtual status_t readyToRun();
+    virtual bool threadLoop();
+};
+
+}; // namespace camera2
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
new file mode 100644
index 0000000..b920edf
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -0,0 +1,388 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera2-JpegProcessor"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <netinet/in.h>
+
+#include <binder/MemoryBase.h>
+#include <binder/MemoryHeapBase.h>
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <gui/Surface.h>
+
+#include "common/CameraDeviceBase.h"
+#include "api1/Camera2Client.h"
+#include "api1/client2/Camera2Heap.h"
+#include "api1/client2/CaptureSequencer.h"
+#include "api1/client2/JpegProcessor.h"
+
+namespace android {
+namespace camera2 {
+
+// Construct the JPEG still-capture processor for the given client.
+// No stream is created yet; updateStream() must be called before the
+// first capture.  The sequencer is held weakly to avoid a cycle.
+JpegProcessor::JpegProcessor(
+    sp<Camera2Client> client,
+    wp<CaptureSequencer> sequencer):
+        Thread(false),
+        mDevice(client->getCameraDevice()),
+        mSequencer(sequencer),
+        mId(client->getCameraId()),
+        mCaptureAvailable(false),
+        mCaptureStreamId(NO_STREAM) {
+}
+
+// Tear down the capture stream (if one was created) on destruction.
+JpegProcessor::~JpegProcessor() {
+    ALOGV("%s: Exit", __FUNCTION__);
+    deleteStream();
+}
+
+// CpuConsumer callback: note that a capture buffer is ready and wake the
+// processing thread.  A signal already pending is left as-is.
+void JpegProcessor::onFrameAvailable() {
+    Mutex::Autolock lock(mInputMutex);
+    if (mCaptureAvailable) return;
+    mCaptureAvailable = true;
+    mCaptureAvailableSignal.signal();
+}
+
+// Create or reconfigure the JPEG (BLOB) capture output stream to match
+// the picture dimensions in params.  Lazily creates the CpuConsumer
+// endpoint and the client-visible capture heap on first call.  Returns
+// -EBUSY if the device must go idle before the old stream can be
+// deleted (caller should retry), or another error code on failure.
+status_t JpegProcessor::updateStream(const Parameters &params) {
+    ATRACE_CALL();
+    ALOGV("%s", __FUNCTION__);
+    status_t res;
+
+    Mutex::Autolock l(mInputMutex);
+
+    sp<CameraDeviceBase> device = mDevice.promote();
+    if (device == 0) {
+        ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+
+    // Find out buffer size for JPEG
+    camera_metadata_ro_entry_t maxJpegSize =
+            params.staticInfo(ANDROID_JPEG_MAX_SIZE);
+    if (maxJpegSize.count == 0) {
+        ALOGE("%s: Camera %d: Can't find ANDROID_JPEG_MAX_SIZE!",
+                __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+
+    // One-time setup of the consumer endpoint and API-visible heap
+    if (mCaptureConsumer == 0) {
+        // Create CPU buffer queue endpoint
+        sp<BufferQueue> bq = new BufferQueue();
+        mCaptureConsumer = new CpuConsumer(bq, 1);
+        mCaptureConsumer->setFrameAvailableListener(this);
+        mCaptureConsumer->setName(String8("Camera2Client::CaptureConsumer"));
+        mCaptureWindow = new Surface(
+            mCaptureConsumer->getProducerInterface());
+        // Create memory for API consumption
+        mCaptureHeap = new MemoryHeapBase(maxJpegSize.data.i32[0], 0,
+                                       "Camera2Client::CaptureHeap");
+        if (mCaptureHeap->getSize() == 0) {
+            ALOGE("%s: Camera %d: Unable to allocate memory for capture",
+                    __FUNCTION__, mId);
+            return NO_MEMORY;
+        }
+    }
+
+    if (mCaptureStreamId != NO_STREAM) {
+        // Check if stream parameters have to change
+        uint32_t currentWidth, currentHeight;
+        res = device->getStreamInfo(mCaptureStreamId,
+                &currentWidth, &currentHeight, 0);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Error querying capture output stream info: "
+                    "%s (%d)", __FUNCTION__,
+                    mId, strerror(-res), res);
+            return res;
+        }
+        // Picture size changed: drop the old stream so a new one can be made
+        if (currentWidth != (uint32_t)params.pictureWidth ||
+                currentHeight != (uint32_t)params.pictureHeight) {
+            ALOGV("%s: Camera %d: Deleting stream %d since the buffer dimensions changed",
+                __FUNCTION__, mId, mCaptureStreamId);
+            res = device->deleteStream(mCaptureStreamId);
+            if (res == -EBUSY) {
+                ALOGV("%s: Camera %d: Device is busy, call updateStream again "
+                      " after it becomes idle", __FUNCTION__, mId);
+                return res;
+            } else if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to delete old output stream "
+                        "for capture: %s (%d)", __FUNCTION__,
+                        mId, strerror(-res), res);
+                return res;
+            }
+            mCaptureStreamId = NO_STREAM;
+        }
+    }
+
+    if (mCaptureStreamId == NO_STREAM) {
+        // Create stream for HAL production
+        res = device->createStream(mCaptureWindow,
+                params.pictureWidth, params.pictureHeight,
+                HAL_PIXEL_FORMAT_BLOB, maxJpegSize.data.i32[0],
+                &mCaptureStreamId);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't create output stream for capture: "
+                    "%s (%d)", __FUNCTION__, mId,
+                    strerror(-res), res);
+            return res;
+        }
+
+    }
+    return OK;
+}
+
+// Delete the capture stream and drop the consumer, window, and heap.
+// A no-op when no stream has been created yet.
+status_t JpegProcessor::deleteStream() {
+    ATRACE_CALL();
+
+    Mutex::Autolock lock(mInputMutex);
+
+    if (mCaptureStreamId == NO_STREAM) {
+        return OK;
+    }
+
+    sp<CameraDeviceBase> device = mDevice.promote();
+    if (device == 0) {
+        ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+
+    device->deleteStream(mCaptureStreamId);
+
+    // Release resources tied to the now-deleted stream
+    mCaptureHeap.clear();
+    mCaptureWindow.clear();
+    mCaptureConsumer.clear();
+
+    mCaptureStreamId = NO_STREAM;
+    return OK;
+}
+
+// Returns the HAL stream ID of the capture stream, or NO_STREAM (-1)
+// when updateStream() has not created one yet.
+int JpegProcessor::getStreamId() const {
+    Mutex::Autolock l(mInputMutex);
+    return mCaptureStreamId;
+}
+
+// Intentionally empty: no JPEG-processor-specific state is dumped yet.
+void JpegProcessor::dump(int /*fd*/, const Vector<String16>& /*args*/) const {
+}
+
+// Worker loop: wait (with timeout) for onFrameAvailable() to flag a new
+// capture, then drain all pending captures.  Always returns true so the
+// thread keeps running; a wait timeout simply re-enters the loop.
+bool JpegProcessor::threadLoop() {
+    status_t err;
+
+    {
+        Mutex::Autolock lock(mInputMutex);
+        while (!mCaptureAvailable) {
+            err = mCaptureAvailableSignal.waitRelative(mInputMutex,
+                    kWaitDuration);
+            if (err == TIMED_OUT) return true;
+        }
+        mCaptureAvailable = false;
+    }
+
+    // Process captures until none remain (processNewCapture returns non-OK)
+    while (processNewCapture() == OK) {
+    }
+
+    return true;
+}
+
+// Pull the next completed JPEG buffer from the consumer, copy it into the
+// API-visible capture heap, and hand it to the capture sequencer.
+// Returns OK if a buffer was consumed (caller should poll again), or the
+// consumer's error (BAD_VALUE when no buffer is pending).
+status_t JpegProcessor::processNewCapture() {
+    ATRACE_CALL();
+    status_t res;
+
+    CpuConsumer::LockedBuffer imgBuffer;
+
+    res = mCaptureConsumer->lockNextBuffer(&imgBuffer);
+    if (res != OK) {
+        // BAD_VALUE just means no buffer is currently available
+        if (res != BAD_VALUE) {
+            ALOGE("%s: Camera %d: Error receiving still image buffer: "
+                    "%s (%d)", __FUNCTION__,
+                    mId, strerror(-res), res);
+        }
+        return res;
+    }
+
+    ALOGV("%s: Camera %d: Still capture available", __FUNCTION__,
+            mId);
+
+    if (imgBuffer.format != HAL_PIXEL_FORMAT_BLOB) {
+        ALOGE("%s: Camera %d: Unexpected format for still image: "
+                "%x, expected %x", __FUNCTION__, mId,
+                imgBuffer.format,
+                HAL_PIXEL_FORMAT_BLOB);
+        mCaptureConsumer->unlockBuffer(imgBuffer);
+        return OK;
+    }
+
+    // Find size of JPEG image
+    size_t jpegSize = findJpegSize(imgBuffer.data, imgBuffer.width);
+    if (jpegSize == 0) { // failed to find size, default to whole buffer
+        jpegSize = imgBuffer.width;
+    }
+    size_t heapSize = mCaptureHeap->getSize();
+    if (jpegSize > heapSize) {
+        ALOGW("%s: JPEG image is larger than expected, truncating "
+                "(got %zu, expected at most %zu bytes)",
+                __FUNCTION__, jpegSize, heapSize);
+        jpegSize = heapSize;
+    }
+
+    // Save the timestamp before the buffer is released back to the
+    // consumer; previously it was read from imgBuffer after unlockBuffer()
+    nsecs_t timestamp = imgBuffer.timestamp;
+
+    // TODO: Optimize this to avoid memcopy
+    sp<MemoryBase> captureBuffer = new MemoryBase(mCaptureHeap, 0, jpegSize);
+    void* captureMemory = mCaptureHeap->getBase();
+    memcpy(captureMemory, imgBuffer.data, jpegSize);
+
+    mCaptureConsumer->unlockBuffer(imgBuffer);
+
+    sp<CaptureSequencer> sequencer = mSequencer.promote();
+    if (sequencer != 0) {
+        sequencer->onCaptureAvailable(timestamp, captureBuffer);
+    }
+
+    return OK;
+}
+
+/*
+ * JPEG FILE FORMAT OVERVIEW.
+ * http://www.jpeg.org/public/jfif.pdf
+ * (JPEG is the image compression algorithm, actual file format is called JFIF)
+ *
+ * "Markers" are 2-byte patterns used to distinguish parts of JFIF files.  The
+ * first byte is always 0xFF, and the second byte is between 0x01 and 0xFE
+ * (inclusive).  Because every marker begins with the same byte, they are
+ * referred to by the second byte's value.
+ *
+ * JFIF files all begin with the Start of Image (SOI) marker, which is 0xD8.
+ * Following it, "segment" sections begin with other markers, followed by a
+ * 2-byte length (in network byte order), then the segment data.
+ *
+ * For our purposes we will ignore the data, and just use the length to skip to
+ * the next segment.  This is necessary because the data inside segments are
+ * allowed to contain the End of Image marker (0xFF 0xD9), preventing us from
+ * naively scanning until the end.
+ *
+ * After all the segments are processed, the jpeg compressed image stream begins.
+ * This can be considered an opaque format with one requirement: all 0xFF bytes
+ * in this stream must be followed with a 0x00 byte.  This prevents any of the
+ * image data to be interpreted as a segment.  The only exception to this is at
+ * the end of the image stream there is an End of Image (EOI) marker, which is
+ * 0xFF followed by a non-zero (0xD9) byte.
+ */
+
+const uint8_t MARK = 0xFF; // First byte of every JFIF marker
+const uint8_t SOI = 0xD8; // Start of Image
+const uint8_t EOI = 0xD9; // End of Image
+const size_t MARKER_LENGTH = 2; // length of a marker
+
+// Packed so the struct overlays the raw byte stream exactly
+#pragma pack(push)
+#pragma pack(1)
+typedef struct segment {
+    uint8_t marker[MARKER_LENGTH];
+    uint16_t length;
+} segment_t;
+#pragma pack(pop)
+
+/* HELPER FUNCTIONS */
+
+// Returns true iff buf points at a Start of Image (SOI) marker
+bool checkJpegStart(uint8_t* buf) {
+    return (buf[0] == MARK) && (buf[1] == SOI);
+}
+// Returns true iff buf points at an End of Image (EOI) marker
+bool checkJpegEnd(uint8_t *buf) {
+    return (buf[0] == MARK) && (buf[1] == EOI);
+}
+// Returns the marker type (second byte) if buf points at any valid
+// marker, 0 otherwise. Note: 0x00 is not a valid marker type.
+uint8_t checkJpegMarker(uint8_t *buf) {
+    if (buf[0] != MARK) {
+        return 0;
+    }
+    uint8_t type = buf[1];
+    return (type > 0 && type < 0xFF) ? type : 0;
+}
+
+// Return the size of the JPEG, 0 indicates failure
+size_t JpegProcessor::findJpegSize(uint8_t* jpegBuffer, size_t maxSize) {
+    size_t size;
+
+    // First check for JPEG transport header at the end of the buffer
+    uint8_t *header = jpegBuffer + (maxSize - sizeof(struct camera2_jpeg_blob));
+    struct camera2_jpeg_blob *blob = (struct camera2_jpeg_blob*)(header);
+    if (blob->jpeg_blob_id == CAMERA2_JPEG_BLOB_ID) {
+        size = blob->jpeg_size;
+        if (size > 0 && size <= maxSize - sizeof(struct camera2_jpeg_blob)) {
+            // Verify SOI and EOI markers
+            size_t offset = size - MARKER_LENGTH;
+            uint8_t *end = jpegBuffer + offset;
+            if (checkJpegStart(jpegBuffer) && checkJpegEnd(end)) {
+                ALOGV("Found JPEG transport header, img size %d", size);
+                return size;
+            } else {
+                ALOGW("Found JPEG transport header with bad Image Start/End");
+            }
+        } else {
+            ALOGW("Found JPEG transport header with bad size %d", size);
+        }
+    }
+
+    // Check Start of Image
+    if ( !checkJpegStart(jpegBuffer) ) {
+        ALOGE("Could not find start of JPEG marker");
+        return 0;
+    }
+
+    // Read JFIF segment markers, skip over segment data
+    size = 0;
+    while (size <= maxSize - MARKER_LENGTH) {
+        segment_t *segment = (segment_t*)(jpegBuffer + size);
+        uint8_t type = checkJpegMarker(segment->marker);
+        if (type == 0) { // invalid marker, no more segments, begin JPEG data
+            ALOGV("JPEG stream found beginning at offset %d", size);
+            break;
+        }
+        if (type == EOI || size > maxSize - sizeof(segment_t)) {
+            ALOGE("Got premature End before JPEG data, offset %d", size);
+            return 0;
+        }
+        size_t length = ntohs(segment->length);
+        ALOGV("JFIF Segment, type %x length %x", type, length);
+        size += length + MARKER_LENGTH;
+    }
+
+    // Find End of Image
+    // Scan JPEG buffer until End of Image (EOI)
+    bool foundEnd = false;
+    for ( ; size <= maxSize - MARKER_LENGTH; size++) {
+        if ( checkJpegEnd(jpegBuffer + size) ) {
+            foundEnd = true;
+            size += MARKER_LENGTH;
+            break;
+        }
+    }
+    if (!foundEnd) {
+        ALOGE("Could not find end of JPEG marker");
+        return 0;
+    }
+
+    if (size > maxSize) {
+        ALOGW("JPEG size %d too large, reducing to maxSize %d", size, maxSize);
+        size = maxSize;
+    }
+    ALOGV("Final JPEG size %d", size);
+    return size;
+}
+
+}; // namespace camera2
+}; // namespace android
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.h b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
new file mode 100644
index 0000000..b2c05df
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_JPEGPROCESSOR_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_JPEGPROCESSOR_H
+
+#include <utils/Thread.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+#include <utils/Mutex.h>
+#include <utils/Condition.h>
+#include <gui/CpuConsumer.h>
+
+#include "camera/CameraMetadata.h"
+
+namespace android {
+
+class Camera2Client;
+class CameraDeviceBase;
+class MemoryHeapBase;
+
+namespace camera2 {
+
+class CaptureSequencer;
+class Parameters;
+
+/***
+ * Still image capture output image processing
+ */
+class JpegProcessor:
+            public Thread, public CpuConsumer::FrameAvailableListener {
+  public:
+    JpegProcessor(sp<Camera2Client> client, wp<CaptureSequencer> sequencer);
+    ~JpegProcessor();
+
+    // CpuConsumer listener implementation
+    void onFrameAvailable();
+
+    status_t updateStream(const Parameters &params);
+    status_t deleteStream();
+    int getStreamId() const;
+
+    void dump(int fd, const Vector<String16>& args) const;
+  private:
+    static const nsecs_t kWaitDuration = 10000000; // 10 ms
+    wp<CameraDeviceBase> mDevice;
+    wp<CaptureSequencer> mSequencer;
+    int mId;
+
+    mutable Mutex mInputMutex;
+    bool mCaptureAvailable;
+    Condition mCaptureAvailableSignal;
+
+    enum {
+        NO_STREAM = -1
+    };
+
+    int mCaptureStreamId;
+    sp<CpuConsumer>    mCaptureConsumer;
+    sp<ANativeWindow>  mCaptureWindow;
+    sp<MemoryHeapBase>    mCaptureHeap;
+
+    virtual bool threadLoop();
+
+    status_t processNewCapture();
+    size_t findJpegSize(uint8_t* jpegBuffer, size_t maxSize);
+
+};
+
+
+}; //namespace camera2
+}; //namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
new file mode 100644
index 0000000..0459866
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -0,0 +1,2645 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera2-Parameters"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <utils/Vector.h>
+#include <utils/SortedVector.h>
+
+#include <math.h>
+#include <stdlib.h>
+#include <cutils/properties.h>
+
+#include "Parameters.h"
+#include "system/camera.h"
+
+namespace android {
+namespace camera2 {
+
+// Construct parameter state for the given camera; static metadata is not
+// available until initialize() is called with it.
+Parameters::Parameters(int cameraId,
+        int cameraFacing) :
+        cameraId(cameraId),
+        cameraFacing(cameraFacing),
+        info(NULL) {
+}
+
+// Nothing owned directly; 'info' is a borrowed pointer to static metadata.
+Parameters::~Parameters() {
+}
+
+status_t Parameters::initialize(const CameraMetadata *info) {
+    status_t res;
+
+    if (info->entryCount() == 0) {
+        ALOGE("%s: No static information provided!", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    Parameters::info = info;
+
+    res = buildFastInfo();
+    if (res != OK) return res;
+
+    res = buildQuirks();
+    if (res != OK) return res;
+
+    camera_metadata_ro_entry_t availableProcessedSizes =
+        staticInfo(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES, 2);
+    if (!availableProcessedSizes.count) return NO_INIT;
+
+    // TODO: Pick more intelligently
+    previewWidth = availableProcessedSizes.data.i32[0];
+    previewHeight = availableProcessedSizes.data.i32[1];
+    videoWidth = previewWidth;
+    videoHeight = previewHeight;
+
+    params.setPreviewSize(previewWidth, previewHeight);
+    params.setVideoSize(videoWidth, videoHeight);
+    params.set(CameraParameters::KEY_PREFERRED_PREVIEW_SIZE_FOR_VIDEO,
+            String8::format("%dx%d",
+                    previewWidth, previewHeight));
+    {
+        String8 supportedPreviewSizes;
+        for (size_t i=0; i < availableProcessedSizes.count; i += 2) {
+            if (i != 0) supportedPreviewSizes += ",";
+            supportedPreviewSizes += String8::format("%dx%d",
+                    availableProcessedSizes.data.i32[i],
+                    availableProcessedSizes.data.i32[i+1]);
+        }
+        params.set(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES,
+                supportedPreviewSizes);
+        params.set(CameraParameters::KEY_SUPPORTED_VIDEO_SIZES,
+                supportedPreviewSizes);
+    }
+
+    camera_metadata_ro_entry_t availableFpsRanges =
+        staticInfo(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, 2);
+    if (!availableFpsRanges.count) return NO_INIT;
+
+    previewFpsRange[0] = availableFpsRanges.data.i32[0];
+    previewFpsRange[1] = availableFpsRanges.data.i32[1];
+
+    params.set(CameraParameters::KEY_PREVIEW_FPS_RANGE,
+            String8::format("%d,%d",
+                    previewFpsRange[0] * kFpsToApiScale,
+                    previewFpsRange[1] * kFpsToApiScale));
+
+    {
+        String8 supportedPreviewFpsRange;
+        for (size_t i=0; i < availableFpsRanges.count; i += 2) {
+            if (i != 0) supportedPreviewFpsRange += ",";
+            supportedPreviewFpsRange += String8::format("(%d,%d)",
+                    availableFpsRanges.data.i32[i] * kFpsToApiScale,
+                    availableFpsRanges.data.i32[i+1] * kFpsToApiScale);
+        }
+        params.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FPS_RANGE,
+                supportedPreviewFpsRange);
+    }
+
+    previewFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP;
+    params.set(CameraParameters::KEY_PREVIEW_FORMAT,
+            formatEnumToString(previewFormat)); // NV21
+
+    previewTransform = degToTransform(0,
+            cameraFacing == CAMERA_FACING_FRONT);
+
+    camera_metadata_ro_entry_t availableFormats =
+        staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS);
+
+    {
+        String8 supportedPreviewFormats;
+        bool addComma = false;
+        for (size_t i=0; i < availableFormats.count; i++) {
+            if (addComma) supportedPreviewFormats += ",";
+            addComma = true;
+            switch (availableFormats.data.i32[i]) {
+            case HAL_PIXEL_FORMAT_YCbCr_422_SP:
+                supportedPreviewFormats +=
+                    CameraParameters::PIXEL_FORMAT_YUV422SP;
+                break;
+            case HAL_PIXEL_FORMAT_YCrCb_420_SP:
+                supportedPreviewFormats +=
+                    CameraParameters::PIXEL_FORMAT_YUV420SP;
+                break;
+            case HAL_PIXEL_FORMAT_YCbCr_422_I:
+                supportedPreviewFormats +=
+                    CameraParameters::PIXEL_FORMAT_YUV422I;
+                break;
+            case HAL_PIXEL_FORMAT_YV12:
+                supportedPreviewFormats +=
+                    CameraParameters::PIXEL_FORMAT_YUV420P;
+                break;
+            case HAL_PIXEL_FORMAT_RGB_565:
+                supportedPreviewFormats +=
+                    CameraParameters::PIXEL_FORMAT_RGB565;
+                break;
+            case HAL_PIXEL_FORMAT_RGBA_8888:
+                supportedPreviewFormats +=
+                    CameraParameters::PIXEL_FORMAT_RGBA8888;
+                break;
+            case HAL_PIXEL_FORMAT_YCbCr_420_888:
+                // Flexible YUV allows both YV12 and NV21
+                supportedPreviewFormats +=
+                    CameraParameters::PIXEL_FORMAT_YUV420P;
+                supportedPreviewFormats += ",";
+                supportedPreviewFormats +=
+                    CameraParameters::PIXEL_FORMAT_YUV420SP;
+                break;
+            // Not advertising JPEG, RAW_SENSOR, etc, for preview formats
+            case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
+            case HAL_PIXEL_FORMAT_RAW_SENSOR:
+            case HAL_PIXEL_FORMAT_BLOB:
+                addComma = false;
+                break;
+
+            default:
+                ALOGW("%s: Camera %d: Unknown preview format: %x",
+                        __FUNCTION__, cameraId, availableFormats.data.i32[i]);
+                addComma = false;
+                break;
+            }
+        }
+        params.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS,
+                supportedPreviewFormats);
+    }
+
+    // PREVIEW_FRAME_RATE / SUPPORTED_PREVIEW_FRAME_RATES are deprecated, but
+    // still have to do something sane for them
+
+    // NOTE: Not scaled like FPS range values are.
+    previewFps = fpsFromRange(previewFpsRange[0], previewFpsRange[1]);
+    params.set(CameraParameters::KEY_PREVIEW_FRAME_RATE,
+            previewFps);
+
+    {
+        SortedVector<int32_t> sortedPreviewFrameRates;
+
+        String8 supportedPreviewFrameRates;
+        for (size_t i=0; i < availableFpsRanges.count; i += 2) {
+            // from the [min, max] fps range use the max value
+            int fps = fpsFromRange(availableFpsRanges.data.i32[i],
+                                   availableFpsRanges.data.i32[i+1]);
+
+            // de-dupe frame rates
+            if (sortedPreviewFrameRates.indexOf(fps) == NAME_NOT_FOUND) {
+                sortedPreviewFrameRates.add(fps);
+            }
+            else {
+                continue;
+            }
+
+            if (sortedPreviewFrameRates.size() > 1) {
+                supportedPreviewFrameRates += ",";
+            }
+
+            supportedPreviewFrameRates += String8::format("%d",
+                    fps);
+
+            ALOGV("%s: Supported preview frame rates: %s",
+                    __FUNCTION__, supportedPreviewFrameRates.string());
+        }
+        params.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FRAME_RATES,
+                supportedPreviewFrameRates);
+    }
+
+    camera_metadata_ro_entry_t availableJpegSizes =
+        staticInfo(ANDROID_SCALER_AVAILABLE_JPEG_SIZES, 2);
+    if (!availableJpegSizes.count) return NO_INIT;
+
+    // TODO: Pick maximum
+    pictureWidth = availableJpegSizes.data.i32[0];
+    pictureHeight = availableJpegSizes.data.i32[1];
+
+    params.setPictureSize(pictureWidth,
+            pictureHeight);
+
+    {
+        String8 supportedPictureSizes;
+        for (size_t i=0; i < availableJpegSizes.count; i += 2) {
+            if (i != 0) supportedPictureSizes += ",";
+            supportedPictureSizes += String8::format("%dx%d",
+                    availableJpegSizes.data.i32[i],
+                    availableJpegSizes.data.i32[i+1]);
+        }
+        params.set(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES,
+                supportedPictureSizes);
+    }
+
+    params.setPictureFormat(CameraParameters::PIXEL_FORMAT_JPEG);
+    params.set(CameraParameters::KEY_SUPPORTED_PICTURE_FORMATS,
+            CameraParameters::PIXEL_FORMAT_JPEG);
+
+    camera_metadata_ro_entry_t availableJpegThumbnailSizes =
+        staticInfo(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES, 4);
+    if (!availableJpegThumbnailSizes.count) return NO_INIT;
+
+    // TODO: Pick default thumbnail size sensibly
+    jpegThumbSize[0] = availableJpegThumbnailSizes.data.i32[0];
+    jpegThumbSize[1] = availableJpegThumbnailSizes.data.i32[1];
+
+    params.set(CameraParameters::KEY_JPEG_THUMBNAIL_WIDTH,
+            jpegThumbSize[0]);
+    params.set(CameraParameters::KEY_JPEG_THUMBNAIL_HEIGHT,
+            jpegThumbSize[1]);
+
+    {
+        String8 supportedJpegThumbSizes;
+        for (size_t i=0; i < availableJpegThumbnailSizes.count; i += 2) {
+            if (i != 0) supportedJpegThumbSizes += ",";
+            supportedJpegThumbSizes += String8::format("%dx%d",
+                    availableJpegThumbnailSizes.data.i32[i],
+                    availableJpegThumbnailSizes.data.i32[i+1]);
+        }
+        params.set(CameraParameters::KEY_SUPPORTED_JPEG_THUMBNAIL_SIZES,
+                supportedJpegThumbSizes);
+    }
+
+    jpegThumbQuality = 90;
+    params.set(CameraParameters::KEY_JPEG_THUMBNAIL_QUALITY,
+            jpegThumbQuality);
+    jpegQuality = 90;
+    params.set(CameraParameters::KEY_JPEG_QUALITY,
+            jpegQuality);
+    jpegRotation = 0;
+    params.set(CameraParameters::KEY_ROTATION,
+            jpegRotation);
+
+    gpsEnabled = false;
+    gpsCoordinates[0] = 0.0;
+    gpsCoordinates[1] = 0.0;
+    gpsCoordinates[2] = 0.0;
+    gpsTimestamp = 0;
+    gpsProcessingMethod = "unknown";
+    // GPS fields in CameraParameters are not set by implementation
+
+    wbMode = ANDROID_CONTROL_AWB_MODE_AUTO;
+    params.set(CameraParameters::KEY_WHITE_BALANCE,
+            CameraParameters::WHITE_BALANCE_AUTO);
+
+    camera_metadata_ro_entry_t availableWhiteBalanceModes =
+        staticInfo(ANDROID_CONTROL_AWB_AVAILABLE_MODES, 0, 0, false);
+    if (!availableWhiteBalanceModes.count) {
+        params.set(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE,
+                CameraParameters::WHITE_BALANCE_AUTO);
+    } else {
+        String8 supportedWhiteBalance;
+        bool addComma = false;
+        for (size_t i=0; i < availableWhiteBalanceModes.count; i++) {
+            if (addComma) supportedWhiteBalance += ",";
+            addComma = true;
+            switch (availableWhiteBalanceModes.data.u8[i]) {
+            case ANDROID_CONTROL_AWB_MODE_AUTO:
+                supportedWhiteBalance +=
+                    CameraParameters::WHITE_BALANCE_AUTO;
+                break;
+            case ANDROID_CONTROL_AWB_MODE_INCANDESCENT:
+                supportedWhiteBalance +=
+                    CameraParameters::WHITE_BALANCE_INCANDESCENT;
+                break;
+            case ANDROID_CONTROL_AWB_MODE_FLUORESCENT:
+                supportedWhiteBalance +=
+                    CameraParameters::WHITE_BALANCE_FLUORESCENT;
+                break;
+            case ANDROID_CONTROL_AWB_MODE_WARM_FLUORESCENT:
+                supportedWhiteBalance +=
+                    CameraParameters::WHITE_BALANCE_WARM_FLUORESCENT;
+                break;
+            case ANDROID_CONTROL_AWB_MODE_DAYLIGHT:
+                supportedWhiteBalance +=
+                    CameraParameters::WHITE_BALANCE_DAYLIGHT;
+                break;
+            case ANDROID_CONTROL_AWB_MODE_CLOUDY_DAYLIGHT:
+                supportedWhiteBalance +=
+                    CameraParameters::WHITE_BALANCE_CLOUDY_DAYLIGHT;
+                break;
+            case ANDROID_CONTROL_AWB_MODE_TWILIGHT:
+                supportedWhiteBalance +=
+                    CameraParameters::WHITE_BALANCE_TWILIGHT;
+                break;
+            case ANDROID_CONTROL_AWB_MODE_SHADE:
+                supportedWhiteBalance +=
+                    CameraParameters::WHITE_BALANCE_SHADE;
+                break;
+            // Skipping values not mappable to v1 API
+            case ANDROID_CONTROL_AWB_MODE_OFF:
+                addComma = false;
+                break;
+            default:
+                ALOGW("%s: Camera %d: Unknown white balance value: %d",
+                        __FUNCTION__, cameraId,
+                        availableWhiteBalanceModes.data.u8[i]);
+                addComma = false;
+                break;
+            }
+        }
+        params.set(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE,
+                supportedWhiteBalance);
+    }
+
+    effectMode = ANDROID_CONTROL_EFFECT_MODE_OFF;
+    params.set(CameraParameters::KEY_EFFECT,
+            CameraParameters::EFFECT_NONE);
+
+    camera_metadata_ro_entry_t availableEffects =
+        staticInfo(ANDROID_CONTROL_AVAILABLE_EFFECTS, 0, 0, false);
+    if (!availableEffects.count) {
+        params.set(CameraParameters::KEY_SUPPORTED_EFFECTS,
+                CameraParameters::EFFECT_NONE);
+    } else {
+        String8 supportedEffects;
+        bool addComma = false;
+        for (size_t i=0; i < availableEffects.count; i++) {
+            if (addComma) supportedEffects += ",";
+            addComma = true;
+            switch (availableEffects.data.u8[i]) {
+                case ANDROID_CONTROL_EFFECT_MODE_OFF:
+                    supportedEffects +=
+                        CameraParameters::EFFECT_NONE;
+                    break;
+                case ANDROID_CONTROL_EFFECT_MODE_MONO:
+                    supportedEffects +=
+                        CameraParameters::EFFECT_MONO;
+                    break;
+                case ANDROID_CONTROL_EFFECT_MODE_NEGATIVE:
+                    supportedEffects +=
+                        CameraParameters::EFFECT_NEGATIVE;
+                    break;
+                case ANDROID_CONTROL_EFFECT_MODE_SOLARIZE:
+                    supportedEffects +=
+                        CameraParameters::EFFECT_SOLARIZE;
+                    break;
+                case ANDROID_CONTROL_EFFECT_MODE_SEPIA:
+                    supportedEffects +=
+                        CameraParameters::EFFECT_SEPIA;
+                    break;
+                case ANDROID_CONTROL_EFFECT_MODE_POSTERIZE:
+                    supportedEffects +=
+                        CameraParameters::EFFECT_POSTERIZE;
+                    break;
+                case ANDROID_CONTROL_EFFECT_MODE_WHITEBOARD:
+                    supportedEffects +=
+                        CameraParameters::EFFECT_WHITEBOARD;
+                    break;
+                case ANDROID_CONTROL_EFFECT_MODE_BLACKBOARD:
+                    supportedEffects +=
+                        CameraParameters::EFFECT_BLACKBOARD;
+                    break;
+                case ANDROID_CONTROL_EFFECT_MODE_AQUA:
+                    supportedEffects +=
+                        CameraParameters::EFFECT_AQUA;
+                    break;
+                default:
+                    ALOGW("%s: Camera %d: Unknown effect value: %d",
+                        __FUNCTION__, cameraId, availableEffects.data.u8[i]);
+                    addComma = false;
+                    break;
+            }
+        }
+        params.set(CameraParameters::KEY_SUPPORTED_EFFECTS, supportedEffects);
+    }
+
+    antibandingMode = ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO;
+    params.set(CameraParameters::KEY_ANTIBANDING,
+            CameraParameters::ANTIBANDING_AUTO);
+
+    camera_metadata_ro_entry_t availableAntibandingModes =
+        staticInfo(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES, 0, 0, false);
+    if (!availableAntibandingModes.count) {
+        params.set(CameraParameters::KEY_SUPPORTED_ANTIBANDING,
+                CameraParameters::ANTIBANDING_OFF);
+    } else {
+        String8 supportedAntibanding;
+        bool addComma = false;
+        for (size_t i=0; i < availableAntibandingModes.count; i++) {
+            if (addComma) supportedAntibanding += ",";
+            addComma = true;
+            switch (availableAntibandingModes.data.u8[i]) {
+                case ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF:
+                    supportedAntibanding +=
+                        CameraParameters::ANTIBANDING_OFF;
+                    break;
+                case ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ:
+                    supportedAntibanding +=
+                        CameraParameters::ANTIBANDING_50HZ;
+                    break;
+                case ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ:
+                    supportedAntibanding +=
+                        CameraParameters::ANTIBANDING_60HZ;
+                    break;
+                case ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO:
+                    supportedAntibanding +=
+                        CameraParameters::ANTIBANDING_AUTO;
+                    break;
+                default:
+                    ALOGW("%s: Camera %d: Unknown antibanding value: %d",
+                        __FUNCTION__, cameraId,
+                            availableAntibandingModes.data.u8[i]);
+                    addComma = false;
+                    break;
+            }
+        }
+        params.set(CameraParameters::KEY_SUPPORTED_ANTIBANDING,
+                supportedAntibanding);
+    }
+
+    sceneMode = ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED;
+    params.set(CameraParameters::KEY_SCENE_MODE,
+            CameraParameters::SCENE_MODE_AUTO);
+
+    camera_metadata_ro_entry_t availableSceneModes =
+        staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES, 0, 0, false);
+    if (!availableSceneModes.count) {
+        params.remove(CameraParameters::KEY_SCENE_MODE);
+    } else {
+        String8 supportedSceneModes(CameraParameters::SCENE_MODE_AUTO);
+        bool addComma = true;
+        bool noSceneModes = false;
+        for (size_t i=0; i < availableSceneModes.count; i++) {
+            if (addComma) supportedSceneModes += ",";
+            addComma = true;
+            switch (availableSceneModes.data.u8[i]) {
+                case ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED:
+                    noSceneModes = true;
+                    break;
+                case ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY:
+                    // Not in old API
+                    addComma = false;
+                    break;
+                case ANDROID_CONTROL_SCENE_MODE_ACTION:
+                    supportedSceneModes +=
+                        CameraParameters::SCENE_MODE_ACTION;
+                    break;
+                case ANDROID_CONTROL_SCENE_MODE_PORTRAIT:
+                    supportedSceneModes +=
+                        CameraParameters::SCENE_MODE_PORTRAIT;
+                    break;
+                case ANDROID_CONTROL_SCENE_MODE_LANDSCAPE:
+                    supportedSceneModes +=
+                        CameraParameters::SCENE_MODE_LANDSCAPE;
+                    break;
+                case ANDROID_CONTROL_SCENE_MODE_NIGHT:
+                    supportedSceneModes +=
+                        CameraParameters::SCENE_MODE_NIGHT;
+                    break;
+                case ANDROID_CONTROL_SCENE_MODE_NIGHT_PORTRAIT:
+                    supportedSceneModes +=
+                        CameraParameters::SCENE_MODE_NIGHT_PORTRAIT;
+                    break;
+                case ANDROID_CONTROL_SCENE_MODE_THEATRE:
+                    supportedSceneModes +=
+                        CameraParameters::SCENE_MODE_THEATRE;
+                    break;
+                case ANDROID_CONTROL_SCENE_MODE_BEACH:
+                    supportedSceneModes +=
+                        CameraParameters::SCENE_MODE_BEACH;
+                    break;
+                case ANDROID_CONTROL_SCENE_MODE_SNOW:
+                    supportedSceneModes +=
+                        CameraParameters::SCENE_MODE_SNOW;
+                    break;
+                case ANDROID_CONTROL_SCENE_MODE_SUNSET:
+                    supportedSceneModes +=
+                        CameraParameters::SCENE_MODE_SUNSET;
+                    break;
+                case ANDROID_CONTROL_SCENE_MODE_STEADYPHOTO:
+                    supportedSceneModes +=
+                        CameraParameters::SCENE_MODE_STEADYPHOTO;
+                    break;
+                case ANDROID_CONTROL_SCENE_MODE_FIREWORKS:
+                    supportedSceneModes +=
+                        CameraParameters::SCENE_MODE_FIREWORKS;
+                    break;
+                case ANDROID_CONTROL_SCENE_MODE_SPORTS:
+                    supportedSceneModes +=
+                        CameraParameters::SCENE_MODE_SPORTS;
+                    break;
+                case ANDROID_CONTROL_SCENE_MODE_PARTY:
+                    supportedSceneModes +=
+                        CameraParameters::SCENE_MODE_PARTY;
+                    break;
+                case ANDROID_CONTROL_SCENE_MODE_CANDLELIGHT:
+                    supportedSceneModes +=
+                        CameraParameters::SCENE_MODE_CANDLELIGHT;
+                    break;
+                case ANDROID_CONTROL_SCENE_MODE_BARCODE:
+                    supportedSceneModes +=
+                        CameraParameters::SCENE_MODE_BARCODE;
+                    break;
+                default:
+                    ALOGW("%s: Camera %d: Unknown scene mode value: %d",
+                        __FUNCTION__, cameraId,
+                            availableSceneModes.data.u8[i]);
+                    addComma = false;
+                    break;
+            }
+        }
+        if (!noSceneModes) {
+            params.set(CameraParameters::KEY_SUPPORTED_SCENE_MODES,
+                    supportedSceneModes);
+        } else {
+            params.remove(CameraParameters::KEY_SCENE_MODE);
+        }
+    }
+
+    bool isFlashAvailable = false;
+    camera_metadata_ro_entry_t flashAvailable =
+        staticInfo(ANDROID_FLASH_INFO_AVAILABLE, 0, 1, false);
+    if (flashAvailable.count) {
+        isFlashAvailable = flashAvailable.data.u8[0];
+    }
+
+    camera_metadata_ro_entry_t availableAeModes =
+        staticInfo(ANDROID_CONTROL_AE_AVAILABLE_MODES, 0, 0, false);
+
+    if (isFlashAvailable) {
+        flashMode = Parameters::FLASH_MODE_OFF;
+        params.set(CameraParameters::KEY_FLASH_MODE,
+                CameraParameters::FLASH_MODE_OFF);
+
+        String8 supportedFlashModes(CameraParameters::FLASH_MODE_OFF);
+        supportedFlashModes = supportedFlashModes +
+            "," + CameraParameters::FLASH_MODE_AUTO +
+            "," + CameraParameters::FLASH_MODE_ON +
+            "," + CameraParameters::FLASH_MODE_TORCH;
+        for (size_t i=0; i < availableAeModes.count; i++) {
+            if (availableAeModes.data.u8[i] ==
+                    ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE) {
+                supportedFlashModes = supportedFlashModes + "," +
+                    CameraParameters::FLASH_MODE_RED_EYE;
+                break;
+            }
+        }
+        params.set(CameraParameters::KEY_SUPPORTED_FLASH_MODES,
+                supportedFlashModes);
+    } else {
+        flashMode = Parameters::FLASH_MODE_OFF;
+        params.set(CameraParameters::KEY_FLASH_MODE,
+                CameraParameters::FLASH_MODE_OFF);
+        params.set(CameraParameters::KEY_SUPPORTED_FLASH_MODES,
+                CameraParameters::FLASH_MODE_OFF);
+    }
+
+    camera_metadata_ro_entry_t minFocusDistance =
+        staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, 0, 1, false);
+
+    camera_metadata_ro_entry_t availableAfModes =
+        staticInfo(ANDROID_CONTROL_AF_AVAILABLE_MODES, 0, 0, false);
+
+    if (!minFocusDistance.count || minFocusDistance.data.f[0] == 0) {
+        // Fixed-focus lens
+        focusMode = Parameters::FOCUS_MODE_FIXED;
+        params.set(CameraParameters::KEY_FOCUS_MODE,
+                CameraParameters::FOCUS_MODE_FIXED);
+        params.set(CameraParameters::KEY_SUPPORTED_FOCUS_MODES,
+                CameraParameters::FOCUS_MODE_FIXED);
+    } else {
+        focusMode = Parameters::FOCUS_MODE_AUTO;
+        params.set(CameraParameters::KEY_FOCUS_MODE,
+                CameraParameters::FOCUS_MODE_AUTO);
+        String8 supportedFocusModes(CameraParameters::FOCUS_MODE_INFINITY);
+        bool addComma = true;
+
+        for (size_t i=0; i < availableAfModes.count; i++) {
+            if (addComma) supportedFocusModes += ",";
+            addComma = true;
+            switch (availableAfModes.data.u8[i]) {
+                case ANDROID_CONTROL_AF_MODE_AUTO:
+                    supportedFocusModes +=
+                        CameraParameters::FOCUS_MODE_AUTO;
+                    break;
+                case ANDROID_CONTROL_AF_MODE_MACRO:
+                    supportedFocusModes +=
+                        CameraParameters::FOCUS_MODE_MACRO;
+                    break;
+                case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+                    supportedFocusModes +=
+                        CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO;
+                    break;
+                case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+                    supportedFocusModes +=
+                        CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE;
+                    break;
+                case ANDROID_CONTROL_AF_MODE_EDOF:
+                    supportedFocusModes +=
+                        CameraParameters::FOCUS_MODE_EDOF;
+                    break;
+                // Not supported in old API
+                case ANDROID_CONTROL_AF_MODE_OFF:
+                    addComma = false;
+                    break;
+                default:
+                    ALOGW("%s: Camera %d: Unknown AF mode value: %d",
+                        __FUNCTION__, cameraId, availableAfModes.data.u8[i]);
+                    addComma = false;
+                    break;
+            }
+        }
+        params.set(CameraParameters::KEY_SUPPORTED_FOCUS_MODES,
+                supportedFocusModes);
+    }
+    focusState = ANDROID_CONTROL_AF_STATE_INACTIVE;
+    shadowFocusMode = FOCUS_MODE_INVALID;
+
+    camera_metadata_ro_entry_t max3aRegions =
+        staticInfo(ANDROID_CONTROL_MAX_REGIONS, 1, 1);
+    if (!max3aRegions.count) return NO_INIT;
+
+    int32_t maxNumFocusAreas = 0;
+    if (focusMode != Parameters::FOCUS_MODE_FIXED) {
+        maxNumFocusAreas = max3aRegions.data.i32[0];
+    }
+    params.set(CameraParameters::KEY_MAX_NUM_FOCUS_AREAS, maxNumFocusAreas);
+    params.set(CameraParameters::KEY_FOCUS_AREAS,
+            "(0,0,0,0,0)");
+    focusingAreas.clear();
+    focusingAreas.add(Parameters::Area(0,0,0,0,0));
+
+    camera_metadata_ro_entry_t availableFocalLengths =
+        staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, 0, 0, false);
+    if (!availableFocalLengths.count) return NO_INIT;
+
+    float minFocalLength = availableFocalLengths.data.f[0];
+    params.setFloat(CameraParameters::KEY_FOCAL_LENGTH, minFocalLength);
+
+    float horizFov, vertFov;
+    res = calculatePictureFovs(&horizFov, &vertFov);
+    if (res != OK) {
+        ALOGE("%s: Can't calculate field of views!", __FUNCTION__);
+        return res;
+    }
+
+    params.setFloat(CameraParameters::KEY_HORIZONTAL_VIEW_ANGLE, horizFov);
+    params.setFloat(CameraParameters::KEY_VERTICAL_VIEW_ANGLE, vertFov);
+
+    exposureCompensation = 0;
+    params.set(CameraParameters::KEY_EXPOSURE_COMPENSATION,
+                exposureCompensation);
+
+    camera_metadata_ro_entry_t exposureCompensationRange =
+        staticInfo(ANDROID_CONTROL_AE_COMPENSATION_RANGE, 2, 2);
+    if (!exposureCompensationRange.count) return NO_INIT;
+
+    params.set(CameraParameters::KEY_MAX_EXPOSURE_COMPENSATION,
+            exposureCompensationRange.data.i32[1]);
+    params.set(CameraParameters::KEY_MIN_EXPOSURE_COMPENSATION,
+            exposureCompensationRange.data.i32[0]);
+
+    camera_metadata_ro_entry_t exposureCompensationStep =
+        staticInfo(ANDROID_CONTROL_AE_COMPENSATION_STEP, 1, 1);
+    if (!exposureCompensationStep.count) return NO_INIT;
+
+    params.setFloat(CameraParameters::KEY_EXPOSURE_COMPENSATION_STEP,
+            (float)exposureCompensationStep.data.r[0].numerator /
+            exposureCompensationStep.data.r[0].denominator);
+
+    autoExposureLock = false;
+    params.set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK,
+            CameraParameters::FALSE);
+    params.set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK_SUPPORTED,
+            CameraParameters::TRUE);
+
+    autoWhiteBalanceLock = false;
+    params.set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK,
+            CameraParameters::FALSE);
+    params.set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK_SUPPORTED,
+            CameraParameters::TRUE);
+
+    meteringAreas.add(Parameters::Area(0, 0, 0, 0, 0));
+    params.set(CameraParameters::KEY_MAX_NUM_METERING_AREAS,
+            max3aRegions.data.i32[0]);
+    params.set(CameraParameters::KEY_METERING_AREAS,
+            "(0,0,0,0,0)");
+
+    zoom = 0;
+    params.set(CameraParameters::KEY_ZOOM, zoom);
+    params.set(CameraParameters::KEY_MAX_ZOOM, NUM_ZOOM_STEPS - 1);
+
+    camera_metadata_ro_entry_t maxDigitalZoom =
+        staticInfo(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM, /*minCount*/1, /*maxCount*/1);
+    if (!maxDigitalZoom.count) return NO_INIT;
+
+    {
+        String8 zoomRatios;
+        float zoom = 1.f;
+        float zoomIncrement = (maxDigitalZoom.data.f[0] - zoom) /
+                (NUM_ZOOM_STEPS-1);
+        bool addComma = false;
+        for (size_t i=0; i < NUM_ZOOM_STEPS; i++) {
+            if (addComma) zoomRatios += ",";
+            addComma = true;
+            zoomRatios += String8::format("%d", static_cast<int>(zoom * 100));
+            zoom += zoomIncrement;
+        }
+        params.set(CameraParameters::KEY_ZOOM_RATIOS, zoomRatios);
+    }
+
+    params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
+            CameraParameters::TRUE);
+    params.set(CameraParameters::KEY_SMOOTH_ZOOM_SUPPORTED,
+            CameraParameters::FALSE);
+
+    params.set(CameraParameters::KEY_FOCUS_DISTANCES,
+            "Infinity,Infinity,Infinity");
+
+    params.set(CameraParameters::KEY_MAX_NUM_DETECTED_FACES_HW,
+            fastInfo.maxFaces);
+    params.set(CameraParameters::KEY_MAX_NUM_DETECTED_FACES_SW,
+            0);
+
+    params.set(CameraParameters::KEY_VIDEO_FRAME_FORMAT,
+            CameraParameters::PIXEL_FORMAT_ANDROID_OPAQUE);
+
+    recordingHint = false;
+    params.set(CameraParameters::KEY_RECORDING_HINT,
+            CameraParameters::FALSE);
+
+    params.set(CameraParameters::KEY_VIDEO_SNAPSHOT_SUPPORTED,
+            CameraParameters::TRUE);
+
+    videoStabilization = false;
+    params.set(CameraParameters::KEY_VIDEO_STABILIZATION,
+            CameraParameters::FALSE);
+
+    camera_metadata_ro_entry_t availableVideoStabilizationModes =
+        staticInfo(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES, 0, 0,
+                false);
+
+    if (availableVideoStabilizationModes.count > 1) {
+        params.set(CameraParameters::KEY_VIDEO_STABILIZATION_SUPPORTED,
+                CameraParameters::TRUE);
+    } else {
+        params.set(CameraParameters::KEY_VIDEO_STABILIZATION_SUPPORTED,
+                CameraParameters::FALSE);
+    }
+
+    // Set up initial state for non-Camera.Parameters state variables
+
+    storeMetadataInBuffers = true;
+    playShutterSound = true;
+    enableFaceDetect = false;
+
+    enableFocusMoveMessages = false;
+    afTriggerCounter = 1;
+    currentAfTriggerId = -1;
+    afInMotion = false;
+
+    precaptureTriggerCounter = 1;
+
+    previewCallbackFlags = 0;
+    previewCallbackOneShot = false;
+    previewCallbackSurface = false;
+
+    camera_metadata_ro_entry_t supportedHardwareLevel =
+        staticInfo(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL, 0, 0, false);
+    if (!supportedHardwareLevel.count || (supportedHardwareLevel.data.u8[0] ==
+            ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED)) {
+        ALOGI("Camera %d: ZSL mode disabled for limited mode HALs", cameraId);
+        zslMode = false;
+    } else {
+        char value[PROPERTY_VALUE_MAX];
+        property_get("camera.disable_zsl_mode", value, "0");
+        if (!strcmp(value,"1")) {
+            ALOGI("Camera %d: Disabling ZSL mode", cameraId);
+            zslMode = false;
+        } else {
+            zslMode = true;
+        }
+    }
+
+    lightFx = LIGHTFX_NONE;
+
+    state = STOPPED;
+
+    paramsFlattened = params.flatten();
+
+    return OK;
+}
+
+String8 Parameters::get() const {
+    return paramsFlattened;
+}
+
+status_t Parameters::buildFastInfo() {
+
+    camera_metadata_ro_entry_t activeArraySize =
+        staticInfo(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, 2, 4);
+    if (!activeArraySize.count) return NO_INIT;
+    int32_t arrayWidth;
+    int32_t arrayHeight;
+    if (activeArraySize.count == 2) {
+        ALOGW("%s: Camera %d: activeArraySize is missing xmin/ymin!",
+                __FUNCTION__, cameraId);
+        arrayWidth = activeArraySize.data.i32[0];
+        arrayHeight = activeArraySize.data.i32[1];
+    } else if (activeArraySize.count == 4) {
+        arrayWidth = activeArraySize.data.i32[2];
+        arrayHeight = activeArraySize.data.i32[3];
+    } else return NO_INIT;
+
+    camera_metadata_ro_entry_t availableFaceDetectModes =
+        staticInfo(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES, 0, 0,
+                false);
+
+    uint8_t bestFaceDetectMode =
+        ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
+    for (size_t i = 0 ; i < availableFaceDetectModes.count; i++) {
+        switch (availableFaceDetectModes.data.u8[i]) {
+            case ANDROID_STATISTICS_FACE_DETECT_MODE_OFF:
+                break;
+            case ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE:
+                if (bestFaceDetectMode !=
+                        ANDROID_STATISTICS_FACE_DETECT_MODE_FULL) {
+                    bestFaceDetectMode =
+                        ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE;
+                }
+                break;
+            case ANDROID_STATISTICS_FACE_DETECT_MODE_FULL:
+                bestFaceDetectMode =
+                    ANDROID_STATISTICS_FACE_DETECT_MODE_FULL;
+                break;
+            default:
+                ALOGE("%s: Camera %d: Unknown face detect mode %d:",
+                        __FUNCTION__, cameraId,
+                        availableFaceDetectModes.data.u8[i]);
+                return NO_INIT;
+        }
+    }
+
+    int32_t maxFaces = 0;
+    camera_metadata_ro_entry_t maxFacesDetected =
+        staticInfo(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT, 0, 1, false);
+    if (maxFacesDetected.count) {
+        maxFaces = maxFacesDetected.data.i32[0];
+    }
+
+    camera_metadata_ro_entry_t availableSceneModes =
+        staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES, 0, 0, false);
+    camera_metadata_ro_entry_t sceneModeOverrides =
+        staticInfo(ANDROID_CONTROL_SCENE_MODE_OVERRIDES, 0, 0, false);
+    camera_metadata_ro_entry_t minFocusDistance =
+        staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, 0, 0, false);
+    bool fixedLens = minFocusDistance.count == 0 ||
+        minFocusDistance.data.f[0] == 0;
+
+    camera_metadata_ro_entry_t availableFocalLengths =
+        staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS);
+    if (!availableFocalLengths.count) return NO_INIT;
+
+    camera_metadata_ro_entry_t availableFormats =
+        staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS);
+    if (!availableFormats.count) return NO_INIT;
+
+
+    if (sceneModeOverrides.count > 0) {
+        // sceneModeOverrides is defined to have 3 entries for each scene mode,
+        // which are AE, AWB, and AF override modes the HAL wants for that scene
+        // mode.
+        const size_t kModesPerSceneMode = 3;
+        if (sceneModeOverrides.count !=
+                availableSceneModes.count * kModesPerSceneMode) {
+            ALOGE("%s: Camera %d: Scene mode override list is an "
+                    "unexpected size: %d (expected %d)", __FUNCTION__,
+                    cameraId, sceneModeOverrides.count,
+                    availableSceneModes.count);
+            return NO_INIT;
+        }
+        for (size_t i = 0; i < availableSceneModes.count; i++) {
+            DeviceInfo::OverrideModes modes;
+            uint8_t aeMode =
+                    sceneModeOverrides.data.u8[i * kModesPerSceneMode + 0];
+            switch(aeMode) {
+                case ANDROID_CONTROL_AE_MODE_ON:
+                    modes.flashMode = FLASH_MODE_OFF;
+                    break;
+                case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH:
+                    modes.flashMode = FLASH_MODE_AUTO;
+                    break;
+                case ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH:
+                    modes.flashMode = FLASH_MODE_ON;
+                    break;
+                case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE:
+                    modes.flashMode = FLASH_MODE_RED_EYE;
+                    break;
+                default:
+                    ALOGE("%s: Unknown override AE mode: %d", __FUNCTION__,
+                            aeMode);
+                    modes.flashMode = FLASH_MODE_INVALID;
+                    break;
+            }
+            modes.wbMode =
+                    sceneModeOverrides.data.u8[i * kModesPerSceneMode + 1];
+            uint8_t afMode =
+                    sceneModeOverrides.data.u8[i * kModesPerSceneMode + 2];
+            switch(afMode) {
+                case ANDROID_CONTROL_AF_MODE_OFF:
+                    modes.focusMode = fixedLens ?
+                            FOCUS_MODE_FIXED : FOCUS_MODE_INFINITY;
+                    break;
+                case ANDROID_CONTROL_AF_MODE_AUTO:
+                case ANDROID_CONTROL_AF_MODE_MACRO:
+                case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+                case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+                case ANDROID_CONTROL_AF_MODE_EDOF:
+                    modes.focusMode = static_cast<focusMode_t>(afMode);
+                    break;
+                default:
+                    ALOGE("%s: Unknown override AF mode: %d", __FUNCTION__,
+                            afMode);
+                    modes.focusMode = FOCUS_MODE_INVALID;
+                    break;
+            }
+            fastInfo.sceneModeOverrides.add(availableSceneModes.data.u8[i],
+                    modes);
+        }
+    }
+
+    fastInfo.arrayWidth = arrayWidth;
+    fastInfo.arrayHeight = arrayHeight;
+    fastInfo.bestFaceDetectMode = bestFaceDetectMode;
+    fastInfo.maxFaces = maxFaces;
+
+    // Find smallest (widest-angle) focal length to use as basis of still
+    // picture FOV reporting.
+    fastInfo.minFocalLength = availableFocalLengths.data.f[0];
+    for (size_t i = 1; i < availableFocalLengths.count; i++) {
+        if (fastInfo.minFocalLength > availableFocalLengths.data.f[i]) {
+            fastInfo.minFocalLength = availableFocalLengths.data.f[i];
+        }
+    }
+
+    // Check if the HAL supports HAL_PIXEL_FORMAT_YCbCr_420_888
+    fastInfo.useFlexibleYuv = false;
+    for (size_t i = 0; i < availableFormats.count; i++) {
+        if (availableFormats.data.i32[i] == HAL_PIXEL_FORMAT_YCbCr_420_888) {
+            fastInfo.useFlexibleYuv = true;
+            break;
+        }
+    }
+    ALOGV("Camera %d: Flexible YUV %s supported",
+            cameraId, fastInfo.useFlexibleYuv ? "is" : "is not");
+
+    return OK;
+}
+
+status_t Parameters::buildQuirks() {
+    camera_metadata_ro_entry_t entry;
+    entry = info->find(ANDROID_QUIRKS_TRIGGER_AF_WITH_AUTO);
+    quirks.triggerAfWithAuto = (entry.count != 0 && entry.data.u8[0] == 1);
+    ALOGV_IF(quirks.triggerAfWithAuto, "Camera %d: Quirk triggerAfWithAuto enabled",
+            cameraId);
+
+    entry = info->find(ANDROID_QUIRKS_USE_ZSL_FORMAT);
+    quirks.useZslFormat = (entry.count != 0 && entry.data.u8[0] == 1);
+    ALOGV_IF(quirks.useZslFormat, "Camera %d: Quirk useZslFormat enabled",
+            cameraId);
+
+    entry = info->find(ANDROID_QUIRKS_METERING_CROP_REGION);
+    quirks.meteringCropRegion = (entry.count != 0 && entry.data.u8[0] == 1);
+    ALOGV_IF(quirks.meteringCropRegion, "Camera %d: Quirk meteringCropRegion"
+                " enabled", cameraId);
+
+    return OK;
+}
+
+camera_metadata_ro_entry_t Parameters::staticInfo(uint32_t tag,
+        size_t minCount, size_t maxCount, bool required) const {
+    camera_metadata_ro_entry_t entry = info->find(tag);
+
+    if (CC_UNLIKELY( entry.count == 0 ) && required) {
+        const char* tagSection = get_camera_metadata_section_name(tag);
+        if (tagSection == NULL) tagSection = "<unknown>";
+        const char* tagName = get_camera_metadata_tag_name(tag);
+        if (tagName == NULL) tagName = "<unknown>";
+
+        ALOGE("Error finding static metadata entry '%s.%s' (%x)",
+                tagSection, tagName, tag);
+    } else if (CC_UNLIKELY(
+            (minCount != 0 && entry.count < minCount) ||
+            (maxCount != 0 && entry.count > maxCount) ) ) {
+        const char* tagSection = get_camera_metadata_section_name(tag);
+        if (tagSection == NULL) tagSection = "<unknown>";
+        const char* tagName = get_camera_metadata_tag_name(tag);
+        if (tagName == NULL) tagName = "<unknown>";
+        ALOGE("Malformed static metadata entry '%s.%s' (%x):"
+                "Expected between %d and %d values, but got %d values",
+                tagSection, tagName, tag, minCount, maxCount, entry.count);
+    }
+
+    return entry;
+}
+
+status_t Parameters::set(const String8& paramString) {
+    status_t res;
+
+    CameraParameters newParams(paramString);
+
+    // TODO: Currently ignoring any changes to supposedly read-only parameters
+    // such as supported preview sizes, etc. Should probably produce an error if
+    // they're changed.
+
+    /** Extract and verify new parameters */
+
+    size_t i;
+
+    Parameters validatedParams(*this);
+
+    // PREVIEW_SIZE
+    newParams.getPreviewSize(&validatedParams.previewWidth,
+            &validatedParams.previewHeight);
+
+    if (validatedParams.previewWidth != previewWidth ||
+            validatedParams.previewHeight != previewHeight) {
+        if (state >= PREVIEW) {
+            ALOGE("%s: Preview size cannot be updated when preview "
+                    "is active! (Currently %d x %d, requested %d x %d",
+                    __FUNCTION__,
+                    previewWidth, previewHeight,
+                    validatedParams.previewWidth, validatedParams.previewHeight);
+            return BAD_VALUE;
+        }
+        camera_metadata_ro_entry_t availablePreviewSizes =
+            staticInfo(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES);
+        for (i = 0; i < availablePreviewSizes.count; i += 2 ) {
+            if ((availablePreviewSizes.data.i32[i] ==
+                    validatedParams.previewWidth) &&
+                (availablePreviewSizes.data.i32[i+1] ==
+                    validatedParams.previewHeight)) break;
+        }
+        if (i == availablePreviewSizes.count) {
+            ALOGE("%s: Requested preview size %d x %d is not supported",
+                    __FUNCTION__, validatedParams.previewWidth,
+                    validatedParams.previewHeight);
+            return BAD_VALUE;
+        }
+    }
+
+    // RECORDING_HINT (always supported)
+    validatedParams.recordingHint = boolFromString(
+        newParams.get(CameraParameters::KEY_RECORDING_HINT) );
+    bool recordingHintChanged = validatedParams.recordingHint != recordingHint;
+    ALOGV_IF(recordingHintChanged, "%s: Recording hint changed to %d",
+            __FUNCTION__, recordingHintChanged);
+
+    // PREVIEW_FPS_RANGE
+    bool fpsRangeChanged = false;
+    newParams.getPreviewFpsRange(&validatedParams.previewFpsRange[0],
+            &validatedParams.previewFpsRange[1]);
+    validatedParams.previewFpsRange[0] /= kFpsToApiScale;
+    validatedParams.previewFpsRange[1] /= kFpsToApiScale;
+
+    if (validatedParams.previewFpsRange[0] != previewFpsRange[0] ||
+            validatedParams.previewFpsRange[1] != previewFpsRange[1]) {
+        fpsRangeChanged = true;
+        camera_metadata_ro_entry_t availablePreviewFpsRanges =
+            staticInfo(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, 2);
+        for (i = 0; i < availablePreviewFpsRanges.count; i += 2) {
+            if ((availablePreviewFpsRanges.data.i32[i] ==
+                    validatedParams.previewFpsRange[0]) &&
+                (availablePreviewFpsRanges.data.i32[i+1] ==
+                    validatedParams.previewFpsRange[1]) ) {
+                break;
+            }
+        }
+        if (i == availablePreviewFpsRanges.count) {
+            ALOGE("%s: Requested preview FPS range %d - %d is not supported",
+                __FUNCTION__, validatedParams.previewFpsRange[0],
+                    validatedParams.previewFpsRange[1]);
+            return BAD_VALUE;
+        }
+        validatedParams.previewFps =
+            fpsFromRange(validatedParams.previewFpsRange[0],
+                         validatedParams.previewFpsRange[1]);
+        newParams.setPreviewFrameRate(validatedParams.previewFps);
+    }
+
+    // PREVIEW_FORMAT
+    validatedParams.previewFormat =
+            formatStringToEnum(newParams.getPreviewFormat());
+    if (validatedParams.previewFormat != previewFormat) {
+        if (state >= PREVIEW) {
+            ALOGE("%s: Preview format cannot be updated when preview "
+                    "is active!", __FUNCTION__);
+            return BAD_VALUE;
+        }
+        camera_metadata_ro_entry_t availableFormats =
+            staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS);
+        // If using flexible YUV, always support NV21/YV12. Otherwise, check
+        // HAL's list.
+        if (! (fastInfo.useFlexibleYuv &&
+                (validatedParams.previewFormat ==
+                        HAL_PIXEL_FORMAT_YCrCb_420_SP ||
+                 validatedParams.previewFormat ==
+                        HAL_PIXEL_FORMAT_YV12) ) ) {
+            // Not using flexible YUV format, so check explicitly
+            for (i = 0; i < availableFormats.count; i++) {
+                if (availableFormats.data.i32[i] ==
+                        validatedParams.previewFormat) break;
+            }
+            if (i == availableFormats.count) {
+                ALOGE("%s: Requested preview format %s (0x%x) is not supported",
+                        __FUNCTION__, newParams.getPreviewFormat(),
+                        validatedParams.previewFormat);
+                return BAD_VALUE;
+            }
+        }
+    }
+
+    // PREVIEW_FRAME_RATE
+    // Deprecated, only use if the preview fps range is unchanged this time.
+    // The single-value FPS is the same as the minimum of the range.
+    if (!fpsRangeChanged) {
+        validatedParams.previewFps = newParams.getPreviewFrameRate();
+        if (validatedParams.previewFps != previewFps || recordingHintChanged) {
+            camera_metadata_ro_entry_t availableFrameRates =
+                staticInfo(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES);
+            /**
+              * If recording hint is set, find the range that encompasses
+              * previewFps with the largest min index.
+              *
+              * If recording hint is not set, find the range with previewFps
+              * with the smallest min index.
+              *
+              * Either way, in case of multiple ranges, break the tie by
+              * selecting the smaller range.
+              */
+            int targetFps = validatedParams.previewFps;
+            // all ranges which have targetFps
+            Vector<Range> candidateRanges;
+            for (i = 0; i < availableFrameRates.count; i+=2) {
+                Range r = {
+                            availableFrameRates.data.i32[i],
+                            availableFrameRates.data.i32[i+1]
+                };
+
+                if (r.min <= targetFps && targetFps <= r.max) {
+                    candidateRanges.push(r);
+                }
+            }
+            if (candidateRanges.isEmpty()) {
+                ALOGE("%s: Requested preview frame rate %d is not supported",
+                        __FUNCTION__, validatedParams.previewFps);
+                return BAD_VALUE;
+            }
+            // most applicable range with targetFps
+            Range bestRange = candidateRanges[0];
+            for (i = 1; i < candidateRanges.size(); ++i) {
+                Range r = candidateRanges[i];
+
+                // Find by largest minIndex in recording mode
+                if (validatedParams.recordingHint) {
+                    if (r.min > bestRange.min) {
+                        bestRange = r;
+                    }
+                    else if (r.min == bestRange.min && r.max < bestRange.max) {
+                        bestRange = r;
+                    }
+                }
+                // Find by smallest minIndex in preview mode
+                else {
+                    if (r.min < bestRange.min) {
+                        bestRange = r;
+                    }
+                    else if (r.min == bestRange.min && r.max < bestRange.max) {
+                        bestRange = r;
+                    }
+                }
+            }
+
+            validatedParams.previewFpsRange[0] =
+                    bestRange.min;
+            validatedParams.previewFpsRange[1] =
+                    bestRange.max;
+
+            ALOGV("%s: New preview FPS range: %d, %d, recordingHint = %d",
+                __FUNCTION__,
+                validatedParams.previewFpsRange[0],
+                validatedParams.previewFpsRange[1],
+                validatedParams.recordingHint);
+        }
+        newParams.set(CameraParameters::KEY_PREVIEW_FPS_RANGE,
+                String8::format("%d,%d",
+                        validatedParams.previewFpsRange[0] * kFpsToApiScale,
+                        validatedParams.previewFpsRange[1] * kFpsToApiScale));
+
+    }
+
+    // PICTURE_SIZE
+    newParams.getPictureSize(&validatedParams.pictureWidth,
+            &validatedParams.pictureHeight);
+    if (validatedParams.pictureWidth == pictureWidth ||
+            validatedParams.pictureHeight == pictureHeight) {
+        camera_metadata_ro_entry_t availablePictureSizes =
+            staticInfo(ANDROID_SCALER_AVAILABLE_JPEG_SIZES);
+        for (i = 0; i < availablePictureSizes.count; i+=2) {
+            if ((availablePictureSizes.data.i32[i] ==
+                    validatedParams.pictureWidth) &&
+                (availablePictureSizes.data.i32[i+1] ==
+                    validatedParams.pictureHeight)) break;
+        }
+        if (i == availablePictureSizes.count) {
+            ALOGE("%s: Requested picture size %d x %d is not supported",
+                    __FUNCTION__, validatedParams.pictureWidth,
+                    validatedParams.pictureHeight);
+            return BAD_VALUE;
+        }
+    }
+
+    // JPEG_THUMBNAIL_WIDTH/HEIGHT
+    validatedParams.jpegThumbSize[0] =
+            newParams.getInt(CameraParameters::KEY_JPEG_THUMBNAIL_WIDTH);
+    validatedParams.jpegThumbSize[1] =
+            newParams.getInt(CameraParameters::KEY_JPEG_THUMBNAIL_HEIGHT);
+    if (validatedParams.jpegThumbSize[0] != jpegThumbSize[0] ||
+            validatedParams.jpegThumbSize[1] != jpegThumbSize[1]) {
+        camera_metadata_ro_entry_t availableJpegThumbSizes =
+            staticInfo(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES);
+        for (i = 0; i < availableJpegThumbSizes.count; i+=2) {
+            if ((availableJpegThumbSizes.data.i32[i] ==
+                    validatedParams.jpegThumbSize[0]) &&
+                (availableJpegThumbSizes.data.i32[i+1] ==
+                    validatedParams.jpegThumbSize[1])) break;
+        }
+        if (i == availableJpegThumbSizes.count) {
+            ALOGE("%s: Requested JPEG thumbnail size %d x %d is not supported",
+                    __FUNCTION__, validatedParams.jpegThumbSize[0],
+                    validatedParams.jpegThumbSize[1]);
+            return BAD_VALUE;
+        }
+    }
+
+    // JPEG_THUMBNAIL_QUALITY
+    int quality = newParams.getInt(CameraParameters::KEY_JPEG_THUMBNAIL_QUALITY);
+    // also makes sure quality fits in uint8_t
+    if (quality < 0 || quality > 100) {
+        ALOGE("%s: Requested JPEG thumbnail quality %d is not supported",
+                __FUNCTION__, quality);
+        return BAD_VALUE;
+    }
+    validatedParams.jpegThumbQuality = quality;
+
+    // JPEG_QUALITY
+    quality = newParams.getInt(CameraParameters::KEY_JPEG_QUALITY);
+    // also makes sure quality fits in uint8_t
+    if (quality < 0 || quality > 100) {
+        ALOGE("%s: Requested JPEG quality %d is not supported",
+                __FUNCTION__, quality);
+        return BAD_VALUE;
+    }
+    validatedParams.jpegQuality = quality;
+
+    // ROTATION
+    validatedParams.jpegRotation =
+            newParams.getInt(CameraParameters::KEY_ROTATION);
+    if (validatedParams.jpegRotation != 0 &&
+            validatedParams.jpegRotation != 90 &&
+            validatedParams.jpegRotation != 180 &&
+            validatedParams.jpegRotation != 270) {
+        ALOGE("%s: Requested picture rotation angle %d is not supported",
+                __FUNCTION__, validatedParams.jpegRotation);
+        return BAD_VALUE;
+    }
+
+    // GPS
+
+    const char *gpsLatStr =
+            newParams.get(CameraParameters::KEY_GPS_LATITUDE);
+    if (gpsLatStr != NULL) {
+        const char *gpsLongStr =
+                newParams.get(CameraParameters::KEY_GPS_LONGITUDE);
+        const char *gpsAltitudeStr =
+                newParams.get(CameraParameters::KEY_GPS_ALTITUDE);
+        const char *gpsTimeStr =
+                newParams.get(CameraParameters::KEY_GPS_TIMESTAMP);
+        const char *gpsProcMethodStr =
+                newParams.get(CameraParameters::KEY_GPS_PROCESSING_METHOD);
+        if (gpsLongStr == NULL ||
+                gpsAltitudeStr == NULL ||
+                gpsTimeStr == NULL ||
+                gpsProcMethodStr == NULL) {
+            ALOGE("%s: Incomplete set of GPS parameters provided",
+                    __FUNCTION__);
+            return BAD_VALUE;
+        }
+        char *endPtr;
+        errno = 0;
+        validatedParams.gpsCoordinates[0] = strtod(gpsLatStr, &endPtr);
+        if (errno || endPtr == gpsLatStr) {
+            ALOGE("%s: Malformed GPS latitude: %s", __FUNCTION__, gpsLatStr);
+            return BAD_VALUE;
+        }
+        errno = 0;
+        validatedParams.gpsCoordinates[1] = strtod(gpsLongStr, &endPtr);
+        if (errno || endPtr == gpsLongStr) {
+            ALOGE("%s: Malformed GPS longitude: %s", __FUNCTION__, gpsLongStr);
+            return BAD_VALUE;
+        }
+        errno = 0;
+        validatedParams.gpsCoordinates[2] = strtod(gpsAltitudeStr, &endPtr);
+        if (errno || endPtr == gpsAltitudeStr) {
+            ALOGE("%s: Malformed GPS altitude: %s", __FUNCTION__,
+                    gpsAltitudeStr);
+            return BAD_VALUE;
+        }
+        errno = 0;
+        validatedParams.gpsTimestamp = strtoll(gpsTimeStr, &endPtr, 10);
+        if (errno || endPtr == gpsTimeStr) {
+            ALOGE("%s: Malformed GPS timestamp: %s", __FUNCTION__, gpsTimeStr);
+            return BAD_VALUE;
+        }
+        validatedParams.gpsProcessingMethod = gpsProcMethodStr;
+
+        validatedParams.gpsEnabled = true;
+    } else {
+        validatedParams.gpsEnabled = false;
+    }
+
+    // EFFECT
+    validatedParams.effectMode = effectModeStringToEnum(
+        newParams.get(CameraParameters::KEY_EFFECT) );
+    if (validatedParams.effectMode != effectMode) {
+        camera_metadata_ro_entry_t availableEffectModes =
+            staticInfo(ANDROID_CONTROL_AVAILABLE_EFFECTS);
+        for (i = 0; i < availableEffectModes.count; i++) {
+            if (validatedParams.effectMode == availableEffectModes.data.u8[i]) break;
+        }
+        if (i == availableEffectModes.count) {
+            ALOGE("%s: Requested effect mode \"%s\" is not supported",
+                    __FUNCTION__,
+                    newParams.get(CameraParameters::KEY_EFFECT) );
+            return BAD_VALUE;
+        }
+    }
+
+    // ANTIBANDING
+    validatedParams.antibandingMode = abModeStringToEnum(
+        newParams.get(CameraParameters::KEY_ANTIBANDING) );
+    if (validatedParams.antibandingMode != antibandingMode) {
+        camera_metadata_ro_entry_t availableAbModes =
+            staticInfo(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES);
+        for (i = 0; i < availableAbModes.count; i++) {
+            if (validatedParams.antibandingMode == availableAbModes.data.u8[i])
+                break;
+        }
+        if (i == availableAbModes.count) {
+            ALOGE("%s: Requested antibanding mode \"%s\" is not supported",
+                    __FUNCTION__,
+                    newParams.get(CameraParameters::KEY_ANTIBANDING));
+            return BAD_VALUE;
+        }
+    }
+
+    // SCENE_MODE
+    validatedParams.sceneMode = sceneModeStringToEnum(
+        newParams.get(CameraParameters::KEY_SCENE_MODE) );
+    if (validatedParams.sceneMode != sceneMode &&
+            validatedParams.sceneMode !=
+            ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED) {
+        camera_metadata_ro_entry_t availableSceneModes =
+            staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES);
+        for (i = 0; i < availableSceneModes.count; i++) {
+            if (validatedParams.sceneMode == availableSceneModes.data.u8[i])
+                break;
+        }
+        if (i == availableSceneModes.count) {
+            ALOGE("%s: Requested scene mode \"%s\" is not supported",
+                    __FUNCTION__,
+                    newParams.get(CameraParameters::KEY_SCENE_MODE));
+            return BAD_VALUE;
+        }
+    }
+    bool sceneModeSet =
+            validatedParams.sceneMode != ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED;
+
+    // FLASH_MODE
+    if (sceneModeSet) {
+        validatedParams.flashMode =
+                fastInfo.sceneModeOverrides.
+                        valueFor(validatedParams.sceneMode).flashMode;
+    } else {
+        validatedParams.flashMode = FLASH_MODE_INVALID;
+    }
+    if (validatedParams.flashMode == FLASH_MODE_INVALID) {
+        validatedParams.flashMode = flashModeStringToEnum(
+            newParams.get(CameraParameters::KEY_FLASH_MODE) );
+    }
+
+    if (validatedParams.flashMode != flashMode) {
+        camera_metadata_ro_entry_t flashAvailable =
+            staticInfo(ANDROID_FLASH_INFO_AVAILABLE, 1, 1);
+        if (!flashAvailable.data.u8[0] &&
+                validatedParams.flashMode != Parameters::FLASH_MODE_OFF) {
+            ALOGE("%s: Requested flash mode \"%s\" is not supported: "
+                    "No flash on device", __FUNCTION__,
+                    newParams.get(CameraParameters::KEY_FLASH_MODE));
+            return BAD_VALUE;
+        } else if (validatedParams.flashMode == Parameters::FLASH_MODE_RED_EYE) {
+            camera_metadata_ro_entry_t availableAeModes =
+                staticInfo(ANDROID_CONTROL_AE_AVAILABLE_MODES);
+            for (i = 0; i < availableAeModes.count; i++) {
+                if (validatedParams.flashMode == availableAeModes.data.u8[i])
+                    break;
+            }
+            if (i == availableAeModes.count) {
+                ALOGE("%s: Requested flash mode \"%s\" is not supported",
+                        __FUNCTION__,
+                        newParams.get(CameraParameters::KEY_FLASH_MODE));
+                return BAD_VALUE;
+            }
+        } else if (validatedParams.flashMode == -1) {
+            ALOGE("%s: Requested flash mode \"%s\" is unknown",
+                    __FUNCTION__,
+                    newParams.get(CameraParameters::KEY_FLASH_MODE));
+            return BAD_VALUE;
+        }
+        // Update in case of override
+        newParams.set(CameraParameters::KEY_FLASH_MODE,
+                flashModeEnumToString(validatedParams.flashMode));
+    }
+
+    // WHITE_BALANCE
+    if (sceneModeSet) {
+        validatedParams.wbMode =
+                fastInfo.sceneModeOverrides.
+                        valueFor(validatedParams.sceneMode).wbMode;
+    } else {
+        validatedParams.wbMode = ANDROID_CONTROL_AWB_MODE_OFF;
+    }
+    if (validatedParams.wbMode == ANDROID_CONTROL_AWB_MODE_OFF) {
+        validatedParams.wbMode = wbModeStringToEnum(
+            newParams.get(CameraParameters::KEY_WHITE_BALANCE) );
+    }
+    if (validatedParams.wbMode != wbMode) {
+        camera_metadata_ro_entry_t availableWbModes =
+            staticInfo(ANDROID_CONTROL_AWB_AVAILABLE_MODES, 0, 0, false);
+        for (i = 0; i < availableWbModes.count; i++) {
+            if (validatedParams.wbMode == availableWbModes.data.u8[i]) break;
+        }
+        if (i == availableWbModes.count) {
+            ALOGE("%s: Requested white balance mode %s is not supported",
+                    __FUNCTION__,
+                    newParams.get(CameraParameters::KEY_WHITE_BALANCE));
+            return BAD_VALUE;
+        }
+        // Update in case of override
+        newParams.set(CameraParameters::KEY_WHITE_BALANCE,
+                wbModeEnumToString(validatedParams.wbMode));
+    }
+
+    // FOCUS_MODE
+    if (sceneModeSet) {
+        validatedParams.focusMode =
+                fastInfo.sceneModeOverrides.
+                        valueFor(validatedParams.sceneMode).focusMode;
+    } else {
+        validatedParams.focusMode = FOCUS_MODE_INVALID;
+    }
+    if (validatedParams.focusMode == FOCUS_MODE_INVALID) {
+        validatedParams.focusMode = focusModeStringToEnum(
+                newParams.get(CameraParameters::KEY_FOCUS_MODE) );
+    }
+    if (validatedParams.focusMode != focusMode) {
+        validatedParams.currentAfTriggerId = -1;
+        if (validatedParams.focusMode != Parameters::FOCUS_MODE_FIXED) {
+            camera_metadata_ro_entry_t minFocusDistance =
+                staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, 0, 0,
+                        false);
+            if (minFocusDistance.count && minFocusDistance.data.f[0] == 0) {
+                ALOGE("%s: Requested focus mode \"%s\" is not available: "
+                        "fixed focus lens",
+                        __FUNCTION__,
+                        newParams.get(CameraParameters::KEY_FOCUS_MODE));
+                return BAD_VALUE;
+            } else if (validatedParams.focusMode !=
+                    Parameters::FOCUS_MODE_INFINITY) {
+                camera_metadata_ro_entry_t availableFocusModes =
+                    staticInfo(ANDROID_CONTROL_AF_AVAILABLE_MODES);
+                for (i = 0; i < availableFocusModes.count; i++) {
+                    if (validatedParams.focusMode ==
+                            availableFocusModes.data.u8[i]) break;
+                }
+                if (i == availableFocusModes.count) {
+                    ALOGE("%s: Requested focus mode \"%s\" is not supported",
+                            __FUNCTION__,
+                            newParams.get(CameraParameters::KEY_FOCUS_MODE));
+                    return BAD_VALUE;
+                }
+            }
+        }
+        validatedParams.focusState = ANDROID_CONTROL_AF_STATE_INACTIVE;
+        // Always reset shadow focus mode to avoid reverting settings
+        validatedParams.shadowFocusMode = FOCUS_MODE_INVALID;
+        // Update in case of override
+        newParams.set(CameraParameters::KEY_FOCUS_MODE,
+                focusModeEnumToString(validatedParams.focusMode));
+    } else {
+        validatedParams.currentAfTriggerId = currentAfTriggerId;
+    }
+
+    // FOCUS_AREAS
+    res = parseAreas(newParams.get(CameraParameters::KEY_FOCUS_AREAS),
+            &validatedParams.focusingAreas);
+    size_t max3aRegions =
+        (size_t)staticInfo(ANDROID_CONTROL_MAX_REGIONS, 1, 1).data.i32[0];
+    if (res == OK) res = validateAreas(validatedParams.focusingAreas,
+            max3aRegions, AREA_KIND_FOCUS);
+    if (res != OK) {
+        ALOGE("%s: Requested focus areas are malformed: %s",
+                __FUNCTION__, newParams.get(CameraParameters::KEY_FOCUS_AREAS));
+        return BAD_VALUE;
+    }
+
+    // EXPOSURE_COMPENSATION
+    validatedParams.exposureCompensation =
+        newParams.getInt(CameraParameters::KEY_EXPOSURE_COMPENSATION);
+    camera_metadata_ro_entry_t exposureCompensationRange =
+        staticInfo(ANDROID_CONTROL_AE_COMPENSATION_RANGE);
+    if ((validatedParams.exposureCompensation <
+            exposureCompensationRange.data.i32[0]) ||
+        (validatedParams.exposureCompensation >
+            exposureCompensationRange.data.i32[1])) {
+        ALOGE("%s: Requested exposure compensation index is out of bounds: %d",
+                __FUNCTION__, validatedParams.exposureCompensation);
+        return BAD_VALUE;
+    }
+
+    // AUTO_EXPOSURE_LOCK (always supported)
+    validatedParams.autoExposureLock = boolFromString(
+        newParams.get(CameraParameters::KEY_AUTO_EXPOSURE_LOCK));
+
+    // AUTO_WHITEBALANCE_LOCK (always supported)
+    validatedParams.autoWhiteBalanceLock = boolFromString(
+        newParams.get(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK));
+
+    // METERING_AREAS
+    res = parseAreas(newParams.get(CameraParameters::KEY_METERING_AREAS),
+            &validatedParams.meteringAreas);
+    if (res == OK) {
+        res = validateAreas(validatedParams.meteringAreas, max3aRegions,
+                            AREA_KIND_METERING);
+    }
+    if (res != OK) {
+        ALOGE("%s: Requested metering areas are malformed: %s",
+                __FUNCTION__,
+                newParams.get(CameraParameters::KEY_METERING_AREAS));
+        return BAD_VALUE;
+    }
+
+    // ZOOM
+    validatedParams.zoom = newParams.getInt(CameraParameters::KEY_ZOOM);
+    if (validatedParams.zoom < 0
+                || validatedParams.zoom >= (int)NUM_ZOOM_STEPS) {
+        ALOGE("%s: Requested zoom level %d is not supported",
+                __FUNCTION__, validatedParams.zoom);
+        return BAD_VALUE;
+    }
+
+    // VIDEO_SIZE
+    newParams.getVideoSize(&validatedParams.videoWidth,
+            &validatedParams.videoHeight);
+    if (validatedParams.videoWidth != videoWidth ||
+            validatedParams.videoHeight != videoHeight) {
+        if (state == RECORD) {
+            ALOGE("%s: Video size cannot be updated when recording is active!",
+                    __FUNCTION__);
+            return BAD_VALUE;
+        }
+        camera_metadata_ro_entry_t availableVideoSizes =
+            staticInfo(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES);
+        for (i = 0; i < availableVideoSizes.count; i += 2 ) {
+            if ((availableVideoSizes.data.i32[i] ==
+                    validatedParams.videoWidth) &&
+                (availableVideoSizes.data.i32[i+1] ==
+                    validatedParams.videoHeight)) break;
+        }
+        if (i == availableVideoSizes.count) {
+            ALOGE("%s: Requested video size %d x %d is not supported",
+                    __FUNCTION__, validatedParams.videoWidth,
+                    validatedParams.videoHeight);
+            return BAD_VALUE;
+        }
+    }
+
+    // VIDEO_STABILIZATION
+    validatedParams.videoStabilization = boolFromString(
+        newParams.get(CameraParameters::KEY_VIDEO_STABILIZATION) );
+    camera_metadata_ro_entry_t availableVideoStabilizationModes =
+        staticInfo(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES, 0, 0,
+                false);
+    if (validatedParams.videoStabilization &&
+            availableVideoStabilizationModes.count == 1) {
+        ALOGE("%s: Video stabilization not supported", __FUNCTION__);
+    }
+
+    // LIGHTFX
+    validatedParams.lightFx = lightFxStringToEnum(
+        newParams.get(CameraParameters::KEY_LIGHTFX));
+
+    /** Update internal parameters */
+
+    *this = validatedParams;
+
+    /** Update external parameters calculated from the internal ones */
+
+    // HORIZONTAL/VERTICAL FIELD OF VIEW
+    float horizFov, vertFov;
+    res = calculatePictureFovs(&horizFov, &vertFov);
+    if (res != OK) {
+        ALOGE("%s: Can't calculate FOVs", __FUNCTION__);
+        // continue so parameters are at least consistent
+    }
+    newParams.setFloat(CameraParameters::KEY_HORIZONTAL_VIEW_ANGLE,
+            horizFov);
+    newParams.setFloat(CameraParameters::KEY_VERTICAL_VIEW_ANGLE,
+            vertFov);
+    ALOGV("Current still picture FOV: %f x %f deg", horizFov, vertFov);
+
+    // Need to flatten again in case of overrides
+    paramsFlattened = newParams.flatten();
+    params = newParams;
+
+    return OK;
+}
+
+status_t Parameters::updateRequest(CameraMetadata *request) const {
+    ATRACE_CALL();
+    status_t res;
+
+    /**
+     * Mixin default important security values
+     * - android.led.transmit = defaulted ON
+     */
+    camera_metadata_ro_entry_t entry = staticInfo(ANDROID_LED_AVAILABLE_LEDS,
+                                                  /*minimumCount*/0,
+                                                  /*maximumCount*/0,
+                                                  /*required*/false);
+    for(size_t i = 0; i < entry.count; ++i) {
+        uint8_t led = entry.data.u8[i];
+
+        switch(led) {
+            // Transmit LED is unconditionally on when using
+            // the android.hardware.Camera API
+            case ANDROID_LED_AVAILABLE_LEDS_TRANSMIT: {
+                uint8_t transmitDefault = ANDROID_LED_TRANSMIT_ON;
+                res = request->update(ANDROID_LED_TRANSMIT,
+                                      &transmitDefault, 1);
+                if (res != OK) return res;
+                break;
+            }
+        }
+    }
+
+    /**
+     * Construct metadata from parameters
+     */
+
+    uint8_t metadataMode = ANDROID_REQUEST_METADATA_MODE_FULL;
+    res = request->update(ANDROID_REQUEST_METADATA_MODE,
+            &metadataMode, 1);
+    if (res != OK) return res;
+
+    res = request->update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+            previewFpsRange, 2);
+    if (res != OK) return res;
+
+    uint8_t reqWbLock = autoWhiteBalanceLock ?
+            ANDROID_CONTROL_AWB_LOCK_ON : ANDROID_CONTROL_AWB_LOCK_OFF;
+    res = request->update(ANDROID_CONTROL_AWB_LOCK,
+            &reqWbLock, 1);
+
+    res = request->update(ANDROID_CONTROL_EFFECT_MODE,
+            &effectMode, 1);
+    if (res != OK) return res;
+    res = request->update(ANDROID_CONTROL_AE_ANTIBANDING_MODE,
+            &antibandingMode, 1);
+    if (res != OK) return res;
+
+    // android.hardware.Camera requires that when face detect is enabled, the
+    // camera is in a face-priority mode. HAL2 splits this into separate parts
+    // (face detection statistics and face priority scene mode). Map from one
+    // to the other.
+    bool sceneModeActive =
+            sceneMode != (uint8_t)ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED;
+    uint8_t reqControlMode = ANDROID_CONTROL_MODE_AUTO;
+    if (enableFaceDetect || sceneModeActive) {
+        reqControlMode = ANDROID_CONTROL_MODE_USE_SCENE_MODE;
+    }
+    res = request->update(ANDROID_CONTROL_MODE,
+            &reqControlMode, 1);
+    if (res != OK) return res;
+
+    uint8_t reqSceneMode =
+            sceneModeActive ? sceneMode :
+            enableFaceDetect ? (uint8_t)ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY :
+            (uint8_t)ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED;
+    res = request->update(ANDROID_CONTROL_SCENE_MODE,
+            &reqSceneMode, 1);
+    if (res != OK) return res;
+
+    uint8_t reqFlashMode = ANDROID_FLASH_MODE_OFF;
+    uint8_t reqAeMode = ANDROID_CONTROL_AE_MODE_OFF;
+    switch (flashMode) {
+        case Parameters::FLASH_MODE_OFF:
+            reqAeMode = ANDROID_CONTROL_AE_MODE_ON; break;
+        case Parameters::FLASH_MODE_AUTO:
+            reqAeMode = ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH; break;
+        case Parameters::FLASH_MODE_ON:
+            reqAeMode = ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH; break;
+        case Parameters::FLASH_MODE_TORCH:
+            reqAeMode = ANDROID_CONTROL_AE_MODE_ON;
+            reqFlashMode = ANDROID_FLASH_MODE_TORCH;
+            break;
+        case Parameters::FLASH_MODE_RED_EYE:
+            reqAeMode = ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE; break;
+        default:
+            ALOGE("%s: Camera %d: Unknown flash mode %d", __FUNCTION__,
+                    cameraId, flashMode);
+                return BAD_VALUE;
+    }
+    res = request->update(ANDROID_FLASH_MODE,
+            &reqFlashMode, 1);
+    if (res != OK) return res;
+    res = request->update(ANDROID_CONTROL_AE_MODE,
+            &reqAeMode, 1);
+    if (res != OK) return res;
+
+    uint8_t reqAeLock = autoExposureLock ?
+            ANDROID_CONTROL_AE_LOCK_ON : ANDROID_CONTROL_AE_LOCK_OFF;
+    res = request->update(ANDROID_CONTROL_AE_LOCK,
+            &reqAeLock, 1);
+    if (res != OK) return res;
+
+    res = request->update(ANDROID_CONTROL_AWB_MODE,
+            &wbMode, 1);
+    if (res != OK) return res;
+
+    float reqFocusDistance = 0; // infinity focus in diopters
+    uint8_t reqFocusMode = ANDROID_CONTROL_AF_MODE_OFF;
+    switch (focusMode) {
+        case Parameters::FOCUS_MODE_AUTO:
+        case Parameters::FOCUS_MODE_MACRO:
+        case Parameters::FOCUS_MODE_CONTINUOUS_VIDEO:
+        case Parameters::FOCUS_MODE_CONTINUOUS_PICTURE:
+        case Parameters::FOCUS_MODE_EDOF:
+            reqFocusMode = focusMode;
+            break;
+        case Parameters::FOCUS_MODE_INFINITY:
+        case Parameters::FOCUS_MODE_FIXED:
+            reqFocusMode = ANDROID_CONTROL_AF_MODE_OFF;
+            break;
+        default:
+                ALOGE("%s: Camera %d: Unknown focus mode %d", __FUNCTION__,
+                        cameraId, focusMode);
+                return BAD_VALUE;
+    }
+    res = request->update(ANDROID_LENS_FOCUS_DISTANCE,
+            &reqFocusDistance, 1);
+    if (res != OK) return res;
+    res = request->update(ANDROID_CONTROL_AF_MODE,
+            &reqFocusMode, 1);
+    if (res != OK) return res;
+
+    size_t reqFocusingAreasSize = focusingAreas.size() * 5;
+    int32_t *reqFocusingAreas = new int32_t[reqFocusingAreasSize];
+    for (size_t i = 0; i < reqFocusingAreasSize; i += 5) {
+        if (focusingAreas[i].weight != 0) {
+            reqFocusingAreas[i + 0] =
+                    normalizedXToArray(focusingAreas[i].left);
+            reqFocusingAreas[i + 1] =
+                    normalizedYToArray(focusingAreas[i].top);
+            reqFocusingAreas[i + 2] =
+                    normalizedXToArray(focusingAreas[i].right);
+            reqFocusingAreas[i + 3] =
+                    normalizedYToArray(focusingAreas[i].bottom);
+        } else {
+            reqFocusingAreas[i + 0] = 0;
+            reqFocusingAreas[i + 1] = 0;
+            reqFocusingAreas[i + 2] = 0;
+            reqFocusingAreas[i + 3] = 0;
+        }
+        reqFocusingAreas[i + 4] = focusingAreas[i].weight;
+    }
+    res = request->update(ANDROID_CONTROL_AF_REGIONS,
+            reqFocusingAreas, reqFocusingAreasSize);
+    if (res != OK) return res;
+    delete[] reqFocusingAreas;
+
+    res = request->update(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
+            &exposureCompensation, 1);
+    if (res != OK) return res;
+
+    size_t reqMeteringAreasSize = meteringAreas.size() * 5;
+    int32_t *reqMeteringAreas = new int32_t[reqMeteringAreasSize];
+    for (size_t i = 0; i < reqMeteringAreasSize; i += 5) {
+        if (meteringAreas[i].weight != 0) {
+            reqMeteringAreas[i + 0] =
+                normalizedXToArray(meteringAreas[i].left);
+            reqMeteringAreas[i + 1] =
+                normalizedYToArray(meteringAreas[i].top);
+            reqMeteringAreas[i + 2] =
+                normalizedXToArray(meteringAreas[i].right);
+            reqMeteringAreas[i + 3] =
+                normalizedYToArray(meteringAreas[i].bottom);
+        } else {
+            reqMeteringAreas[i + 0] = 0;
+            reqMeteringAreas[i + 1] = 0;
+            reqMeteringAreas[i + 2] = 0;
+            reqMeteringAreas[i + 3] = 0;
+        }
+        reqMeteringAreas[i + 4] = meteringAreas[i].weight;
+    }
+    res = request->update(ANDROID_CONTROL_AE_REGIONS,
+            reqMeteringAreas, reqMeteringAreasSize);
+    if (res != OK) return res;
+
+    delete[] reqMeteringAreas;
+
+    /* don't include jpeg thumbnail size - it's valid for
+       it to be set to (0,0), meaning 'no thumbnail' */
+    CropRegion crop = calculateCropRegion( (CropRegion::Outputs)(
+            CropRegion::OUTPUT_PREVIEW     |
+            CropRegion::OUTPUT_VIDEO       |
+            CropRegion::OUTPUT_PICTURE    ));
+    int32_t reqCropRegion[4] = {
+        static_cast<int32_t>(crop.left),
+        static_cast<int32_t>(crop.top),
+        static_cast<int32_t>(crop.width),
+        static_cast<int32_t>(crop.height)
+    };
+    res = request->update(ANDROID_SCALER_CROP_REGION,
+            reqCropRegion, 4);
+    if (res != OK) return res;
+
+    uint8_t reqVstabMode = videoStabilization ?
+            ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_ON :
+            ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF;
+    res = request->update(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE,
+            &reqVstabMode, 1);
+    if (res != OK) return res;
+
+    uint8_t reqFaceDetectMode = enableFaceDetect ?
+            fastInfo.bestFaceDetectMode :
+            (uint8_t)ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
+    res = request->update(ANDROID_STATISTICS_FACE_DETECT_MODE,
+            &reqFaceDetectMode, 1);
+    if (res != OK) return res;
+
+    return OK;
+}
+
+status_t Parameters::updateRequestJpeg(CameraMetadata *request) const {
+    status_t res;
+
+    res = request->update(ANDROID_JPEG_THUMBNAIL_SIZE,
+            jpegThumbSize, 2);
+    if (res != OK) return res;
+    res = request->update(ANDROID_JPEG_THUMBNAIL_QUALITY,
+            &jpegThumbQuality, 1);
+    if (res != OK) return res;
+    res = request->update(ANDROID_JPEG_QUALITY,
+            &jpegQuality, 1);
+    if (res != OK) return res;
+    res = request->update(
+            ANDROID_JPEG_ORIENTATION,
+            &jpegRotation, 1);
+    if (res != OK) return res;
+
+    if (gpsEnabled) {
+        res = request->update(
+                ANDROID_JPEG_GPS_COORDINATES,
+                gpsCoordinates, 3);
+        if (res != OK) return res;
+        res = request->update(
+                ANDROID_JPEG_GPS_TIMESTAMP,
+                &gpsTimestamp, 1);
+        if (res != OK) return res;
+        res = request->update(
+                ANDROID_JPEG_GPS_PROCESSING_METHOD,
+                gpsProcessingMethod);
+        if (res != OK) return res;
+    } else {
+        res = request->erase(ANDROID_JPEG_GPS_COORDINATES);
+        if (res != OK) return res;
+        res = request->erase(ANDROID_JPEG_GPS_TIMESTAMP);
+        if (res != OK) return res;
+        res = request->erase(ANDROID_JPEG_GPS_PROCESSING_METHOD);
+        if (res != OK) return res;
+    }
+    return OK;
+}
+
+
+const char* Parameters::getStateName(State state) {
+#define CASE_ENUM_TO_CHAR(x) case x: return(#x); break;
+    switch(state) {
+        CASE_ENUM_TO_CHAR(DISCONNECTED)
+        CASE_ENUM_TO_CHAR(STOPPED)
+        CASE_ENUM_TO_CHAR(WAITING_FOR_PREVIEW_WINDOW)
+        CASE_ENUM_TO_CHAR(PREVIEW)
+        CASE_ENUM_TO_CHAR(RECORD)
+        CASE_ENUM_TO_CHAR(STILL_CAPTURE)
+        CASE_ENUM_TO_CHAR(VIDEO_SNAPSHOT)
+        default:
+            return "Unknown state!";
+            break;
+    }
+#undef CASE_ENUM_TO_CHAR
+}
+
+int Parameters::formatStringToEnum(const char *format) {
+    return
+        !format ?
+            HAL_PIXEL_FORMAT_YCrCb_420_SP :
+        !strcmp(format, CameraParameters::PIXEL_FORMAT_YUV422SP) ?
+            HAL_PIXEL_FORMAT_YCbCr_422_SP : // NV16
+        !strcmp(format, CameraParameters::PIXEL_FORMAT_YUV420SP) ?
+            HAL_PIXEL_FORMAT_YCrCb_420_SP : // NV21
+        !strcmp(format, CameraParameters::PIXEL_FORMAT_YUV422I) ?
+            HAL_PIXEL_FORMAT_YCbCr_422_I :  // YUY2
+        !strcmp(format, CameraParameters::PIXEL_FORMAT_YUV420P) ?
+            HAL_PIXEL_FORMAT_YV12 :         // YV12
+        !strcmp(format, CameraParameters::PIXEL_FORMAT_RGB565) ?
+            HAL_PIXEL_FORMAT_RGB_565 :      // RGB565
+        !strcmp(format, CameraParameters::PIXEL_FORMAT_RGBA8888) ?
+            HAL_PIXEL_FORMAT_RGBA_8888 :    // RGB8888
+        !strcmp(format, CameraParameters::PIXEL_FORMAT_BAYER_RGGB) ?
+            HAL_PIXEL_FORMAT_RAW_SENSOR :   // Raw sensor data
+        -1;
+}
+
+const char* Parameters::formatEnumToString(int format) {
+    const char *fmt;
+    switch(format) {
+        case HAL_PIXEL_FORMAT_YCbCr_422_SP: // NV16
+            fmt = CameraParameters::PIXEL_FORMAT_YUV422SP;
+            break;
+        case HAL_PIXEL_FORMAT_YCrCb_420_SP: // NV21
+            fmt = CameraParameters::PIXEL_FORMAT_YUV420SP;
+            break;
+        case HAL_PIXEL_FORMAT_YCbCr_422_I: // YUY2
+            fmt = CameraParameters::PIXEL_FORMAT_YUV422I;
+            break;
+        case HAL_PIXEL_FORMAT_YV12:        // YV12
+            fmt = CameraParameters::PIXEL_FORMAT_YUV420P;
+            break;
+        case HAL_PIXEL_FORMAT_RGB_565:     // RGB565
+            fmt = CameraParameters::PIXEL_FORMAT_RGB565;
+            break;
+        case HAL_PIXEL_FORMAT_RGBA_8888:   // RGBA8888
+            fmt = CameraParameters::PIXEL_FORMAT_RGBA8888;
+            break;
+        case HAL_PIXEL_FORMAT_RAW_SENSOR:
+            ALOGW("Raw sensor preview format requested.");
+            fmt = CameraParameters::PIXEL_FORMAT_BAYER_RGGB;
+            break;
+        default:
+            ALOGE("%s: Unknown preview format: %x",
+                    __FUNCTION__,  format);
+            fmt = NULL;
+            break;
+    }
+    return fmt;
+}
+
+int Parameters::wbModeStringToEnum(const char *wbMode) {
+    return
+        !wbMode ?
+            ANDROID_CONTROL_AWB_MODE_AUTO :
+        !strcmp(wbMode, CameraParameters::WHITE_BALANCE_AUTO) ?
+            ANDROID_CONTROL_AWB_MODE_AUTO :
+        !strcmp(wbMode, CameraParameters::WHITE_BALANCE_INCANDESCENT) ?
+            ANDROID_CONTROL_AWB_MODE_INCANDESCENT :
+        !strcmp(wbMode, CameraParameters::WHITE_BALANCE_FLUORESCENT) ?
+            ANDROID_CONTROL_AWB_MODE_FLUORESCENT :
+        !strcmp(wbMode, CameraParameters::WHITE_BALANCE_WARM_FLUORESCENT) ?
+            ANDROID_CONTROL_AWB_MODE_WARM_FLUORESCENT :
+        !strcmp(wbMode, CameraParameters::WHITE_BALANCE_DAYLIGHT) ?
+            ANDROID_CONTROL_AWB_MODE_DAYLIGHT :
+        !strcmp(wbMode, CameraParameters::WHITE_BALANCE_CLOUDY_DAYLIGHT) ?
+            ANDROID_CONTROL_AWB_MODE_CLOUDY_DAYLIGHT :
+        !strcmp(wbMode, CameraParameters::WHITE_BALANCE_TWILIGHT) ?
+            ANDROID_CONTROL_AWB_MODE_TWILIGHT :
+        !strcmp(wbMode, CameraParameters::WHITE_BALANCE_SHADE) ?
+            ANDROID_CONTROL_AWB_MODE_SHADE :
+        -1;
+}
+
+const char* Parameters::wbModeEnumToString(uint8_t wbMode) {
+    switch (wbMode) {
+        case ANDROID_CONTROL_AWB_MODE_AUTO:
+            return CameraParameters::WHITE_BALANCE_AUTO;
+        case ANDROID_CONTROL_AWB_MODE_INCANDESCENT:
+            return CameraParameters::WHITE_BALANCE_INCANDESCENT;
+        case ANDROID_CONTROL_AWB_MODE_FLUORESCENT:
+            return CameraParameters::WHITE_BALANCE_FLUORESCENT;
+        case ANDROID_CONTROL_AWB_MODE_WARM_FLUORESCENT:
+            return CameraParameters::WHITE_BALANCE_WARM_FLUORESCENT;
+        case ANDROID_CONTROL_AWB_MODE_DAYLIGHT:
+            return CameraParameters::WHITE_BALANCE_DAYLIGHT;
+        case ANDROID_CONTROL_AWB_MODE_CLOUDY_DAYLIGHT:
+            return CameraParameters::WHITE_BALANCE_CLOUDY_DAYLIGHT;
+        case ANDROID_CONTROL_AWB_MODE_TWILIGHT:
+            return CameraParameters::WHITE_BALANCE_TWILIGHT;
+        case ANDROID_CONTROL_AWB_MODE_SHADE:
+            return CameraParameters::WHITE_BALANCE_SHADE;
+        default:
+            ALOGE("%s: Unknown AWB mode enum: %d",
+                    __FUNCTION__, wbMode);
+            return "unknown";
+    }
+}
+
+int Parameters::effectModeStringToEnum(const char *effectMode) {
+    return
+        !effectMode ?
+            ANDROID_CONTROL_EFFECT_MODE_OFF :
+        !strcmp(effectMode, CameraParameters::EFFECT_NONE) ?
+            ANDROID_CONTROL_EFFECT_MODE_OFF :
+        !strcmp(effectMode, CameraParameters::EFFECT_MONO) ?
+            ANDROID_CONTROL_EFFECT_MODE_MONO :
+        !strcmp(effectMode, CameraParameters::EFFECT_NEGATIVE) ?
+            ANDROID_CONTROL_EFFECT_MODE_NEGATIVE :
+        !strcmp(effectMode, CameraParameters::EFFECT_SOLARIZE) ?
+            ANDROID_CONTROL_EFFECT_MODE_SOLARIZE :
+        !strcmp(effectMode, CameraParameters::EFFECT_SEPIA) ?
+            ANDROID_CONTROL_EFFECT_MODE_SEPIA :
+        !strcmp(effectMode, CameraParameters::EFFECT_POSTERIZE) ?
+            ANDROID_CONTROL_EFFECT_MODE_POSTERIZE :
+        !strcmp(effectMode, CameraParameters::EFFECT_WHITEBOARD) ?
+            ANDROID_CONTROL_EFFECT_MODE_WHITEBOARD :
+        !strcmp(effectMode, CameraParameters::EFFECT_BLACKBOARD) ?
+            ANDROID_CONTROL_EFFECT_MODE_BLACKBOARD :
+        !strcmp(effectMode, CameraParameters::EFFECT_AQUA) ?
+            ANDROID_CONTROL_EFFECT_MODE_AQUA :
+        -1;
+}
+
+int Parameters::abModeStringToEnum(const char *abMode) {
+    return
+        !abMode ?
+            ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO :
+        !strcmp(abMode, CameraParameters::ANTIBANDING_AUTO) ?
+            ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO :
+        !strcmp(abMode, CameraParameters::ANTIBANDING_OFF) ?
+            ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF :
+        !strcmp(abMode, CameraParameters::ANTIBANDING_50HZ) ?
+            ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ :
+        !strcmp(abMode, CameraParameters::ANTIBANDING_60HZ) ?
+            ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ :
+        -1;
+}
+
+int Parameters::sceneModeStringToEnum(const char *sceneMode) {
+    return
+        !sceneMode ?
+            ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED :
+        !strcmp(sceneMode, CameraParameters::SCENE_MODE_AUTO) ?
+            ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED :
+        !strcmp(sceneMode, CameraParameters::SCENE_MODE_ACTION) ?
+            ANDROID_CONTROL_SCENE_MODE_ACTION :
+        !strcmp(sceneMode, CameraParameters::SCENE_MODE_PORTRAIT) ?
+            ANDROID_CONTROL_SCENE_MODE_PORTRAIT :
+        !strcmp(sceneMode, CameraParameters::SCENE_MODE_LANDSCAPE) ?
+            ANDROID_CONTROL_SCENE_MODE_LANDSCAPE :
+        !strcmp(sceneMode, CameraParameters::SCENE_MODE_NIGHT) ?
+            ANDROID_CONTROL_SCENE_MODE_NIGHT :
+        !strcmp(sceneMode, CameraParameters::SCENE_MODE_NIGHT_PORTRAIT) ?
+            ANDROID_CONTROL_SCENE_MODE_NIGHT_PORTRAIT :
+        !strcmp(sceneMode, CameraParameters::SCENE_MODE_THEATRE) ?
+            ANDROID_CONTROL_SCENE_MODE_THEATRE :
+        !strcmp(sceneMode, CameraParameters::SCENE_MODE_BEACH) ?
+            ANDROID_CONTROL_SCENE_MODE_BEACH :
+        !strcmp(sceneMode, CameraParameters::SCENE_MODE_SNOW) ?
+            ANDROID_CONTROL_SCENE_MODE_SNOW :
+        !strcmp(sceneMode, CameraParameters::SCENE_MODE_SUNSET) ?
+            ANDROID_CONTROL_SCENE_MODE_SUNSET :
+        !strcmp(sceneMode, CameraParameters::SCENE_MODE_STEADYPHOTO) ?
+            ANDROID_CONTROL_SCENE_MODE_STEADYPHOTO :
+        !strcmp(sceneMode, CameraParameters::SCENE_MODE_FIREWORKS) ?
+            ANDROID_CONTROL_SCENE_MODE_FIREWORKS :
+        !strcmp(sceneMode, CameraParameters::SCENE_MODE_SPORTS) ?
+            ANDROID_CONTROL_SCENE_MODE_SPORTS :
+        !strcmp(sceneMode, CameraParameters::SCENE_MODE_PARTY) ?
+            ANDROID_CONTROL_SCENE_MODE_PARTY :
+        !strcmp(sceneMode, CameraParameters::SCENE_MODE_CANDLELIGHT) ?
+            ANDROID_CONTROL_SCENE_MODE_CANDLELIGHT :
+        !strcmp(sceneMode, CameraParameters::SCENE_MODE_BARCODE) ?
+            ANDROID_CONTROL_SCENE_MODE_BARCODE:
+        -1;
+}
+
+Parameters::Parameters::flashMode_t Parameters::flashModeStringToEnum(
+        const char *flashMode) {
+    return
+        !flashMode ?
+            Parameters::FLASH_MODE_INVALID :
+        !strcmp(flashMode, CameraParameters::FLASH_MODE_OFF) ?
+            Parameters::FLASH_MODE_OFF :
+        !strcmp(flashMode, CameraParameters::FLASH_MODE_AUTO) ?
+            Parameters::FLASH_MODE_AUTO :
+        !strcmp(flashMode, CameraParameters::FLASH_MODE_ON) ?
+            Parameters::FLASH_MODE_ON :
+        !strcmp(flashMode, CameraParameters::FLASH_MODE_RED_EYE) ?
+            Parameters::FLASH_MODE_RED_EYE :
+        !strcmp(flashMode, CameraParameters::FLASH_MODE_TORCH) ?
+            Parameters::FLASH_MODE_TORCH :
+        Parameters::FLASH_MODE_INVALID;
+}
+
+const char *Parameters::flashModeEnumToString(flashMode_t flashMode) {
+    switch (flashMode) {
+        case FLASH_MODE_OFF:
+            return CameraParameters::FLASH_MODE_OFF;
+        case FLASH_MODE_AUTO:
+            return CameraParameters::FLASH_MODE_AUTO;
+        case FLASH_MODE_ON:
+            return CameraParameters::FLASH_MODE_ON;
+        case FLASH_MODE_RED_EYE:
+            return CameraParameters::FLASH_MODE_RED_EYE;
+        case FLASH_MODE_TORCH:
+            return CameraParameters::FLASH_MODE_TORCH;
+        default:
+            ALOGE("%s: Unknown flash mode enum %d",
+                    __FUNCTION__, flashMode);
+            return "unknown";
+    }
+}
+
+Parameters::Parameters::focusMode_t Parameters::focusModeStringToEnum(
+        const char *focusMode) {
+    return
+        !focusMode ?
+            Parameters::FOCUS_MODE_INVALID :
+        !strcmp(focusMode, CameraParameters::FOCUS_MODE_AUTO) ?
+            Parameters::FOCUS_MODE_AUTO :
+        !strcmp(focusMode, CameraParameters::FOCUS_MODE_INFINITY) ?
+            Parameters::FOCUS_MODE_INFINITY :
+        !strcmp(focusMode, CameraParameters::FOCUS_MODE_MACRO) ?
+            Parameters::FOCUS_MODE_MACRO :
+        !strcmp(focusMode, CameraParameters::FOCUS_MODE_FIXED) ?
+            Parameters::FOCUS_MODE_FIXED :
+        !strcmp(focusMode, CameraParameters::FOCUS_MODE_EDOF) ?
+            Parameters::FOCUS_MODE_EDOF :
+        !strcmp(focusMode, CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO) ?
+            Parameters::FOCUS_MODE_CONTINUOUS_VIDEO :
+        !strcmp(focusMode, CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE) ?
+            Parameters::FOCUS_MODE_CONTINUOUS_PICTURE :
+        Parameters::FOCUS_MODE_INVALID;
+}
+
+const char *Parameters::focusModeEnumToString(focusMode_t focusMode) {
+    switch (focusMode) {
+        case FOCUS_MODE_AUTO:
+            return CameraParameters::FOCUS_MODE_AUTO;
+        case FOCUS_MODE_MACRO:
+            return CameraParameters::FOCUS_MODE_MACRO;
+        case FOCUS_MODE_CONTINUOUS_VIDEO:
+            return CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO;
+        case FOCUS_MODE_CONTINUOUS_PICTURE:
+            return CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE;
+        case FOCUS_MODE_EDOF:
+            return CameraParameters::FOCUS_MODE_EDOF;
+        case FOCUS_MODE_INFINITY:
+            return CameraParameters::FOCUS_MODE_INFINITY;
+        case FOCUS_MODE_FIXED:
+            return CameraParameters::FOCUS_MODE_FIXED;
+        default:
+            ALOGE("%s: Unknown focus mode enum: %d",
+                    __FUNCTION__, focusMode);
+            return "unknown";
+    }
+}
+
+Parameters::Parameters::lightFxMode_t Parameters::lightFxStringToEnum(
+        const char *lightFxMode) {
+    return
+        !lightFxMode ?
+            Parameters::LIGHTFX_NONE :
+        !strcmp(lightFxMode, CameraParameters::LIGHTFX_LOWLIGHT) ?
+            Parameters::LIGHTFX_LOWLIGHT :
+        !strcmp(lightFxMode, CameraParameters::LIGHTFX_HDR) ?
+            Parameters::LIGHTFX_HDR :
+        Parameters::LIGHTFX_NONE;
+}
+
+status_t Parameters::parseAreas(const char *areasCStr,
+        Vector<Parameters::Area> *areas) {
+    static const size_t NUM_FIELDS = 5;
+    areas->clear();
+    if (areasCStr == NULL) {
+        // If no key exists, use default (0,0,0,0,0)
+        areas->push();
+        return OK;
+    }
+    String8 areasStr(areasCStr);
+    ssize_t areaStart = areasStr.find("(", 0) + 1;
+    while (areaStart != 0) {
+        const char* area = areasStr.string() + areaStart;
+        char *numEnd;
+        int vals[NUM_FIELDS];
+        for (size_t i = 0; i < NUM_FIELDS; i++) {
+            errno = 0;
+            vals[i] = strtol(area, &numEnd, 10);
+            if (errno || numEnd == area) return BAD_VALUE;
+            area = numEnd + 1;
+        }
+        areas->push(Parameters::Area(
+            vals[0], vals[1], vals[2], vals[3], vals[4]) );
+        areaStart = areasStr.find("(", areaStart) + 1;
+    }
+    return OK;
+}
+
+status_t Parameters::validateAreas(const Vector<Parameters::Area> &areas,
+                                      size_t maxRegions,
+                                      AreaKind areaKind) const {
+    // Definition of valid area can be found in
+    // include/camera/CameraParameters.h
+    if (areas.size() == 0) return BAD_VALUE;
+    if (areas.size() == 1) {
+        if (areas[0].left == 0 &&
+                areas[0].top == 0 &&
+                areas[0].right == 0 &&
+                areas[0].bottom == 0 &&
+                areas[0].weight == 0) {
+            // Single (0,0,0,0,0) entry is always valid (== driver decides)
+            return OK;
+        }
+    }
+
+    // fixed focus can only set (0,0,0,0,0) focus area
+    if (areaKind == AREA_KIND_FOCUS && focusMode == FOCUS_MODE_FIXED) {
+        return BAD_VALUE;
+    }
+
+    if (areas.size() > maxRegions) {
+        ALOGE("%s: Too many areas requested: %d",
+                __FUNCTION__, areas.size());
+        return BAD_VALUE;
+    }
+
+    for (Vector<Parameters::Area>::const_iterator a = areas.begin();
+         a != areas.end(); a++) {
+        if (a->weight < 1 || a->weight > 1000) return BAD_VALUE;
+        if (a->left < -1000 || a->left > 1000) return BAD_VALUE;
+        if (a->top < -1000 || a->top > 1000) return BAD_VALUE;
+        if (a->right < -1000 || a->right > 1000) return BAD_VALUE;
+        if (a->bottom < -1000 || a->bottom > 1000) return BAD_VALUE;
+        if (a->left >= a->right) return BAD_VALUE;
+        if (a->top >= a->bottom) return BAD_VALUE;
+    }
+    return OK;
+}
+
+bool Parameters::boolFromString(const char *boolStr) {
+    return !boolStr ? false :
+        !strcmp(boolStr, CameraParameters::TRUE) ? true :
+        false;
+}
+
+int Parameters::degToTransform(int degrees, bool mirror) {
+    if (!mirror) {
+        if (degrees == 0) return 0;
+        else if (degrees == 90) return HAL_TRANSFORM_ROT_90;
+        else if (degrees == 180) return HAL_TRANSFORM_ROT_180;
+        else if (degrees == 270) return HAL_TRANSFORM_ROT_270;
+    } else {  // Do mirror (horizontal flip)
+        if (degrees == 0) {           // FLIP_H and ROT_0
+            return HAL_TRANSFORM_FLIP_H;
+        } else if (degrees == 90) {   // FLIP_H and ROT_90
+            return HAL_TRANSFORM_FLIP_H | HAL_TRANSFORM_ROT_90;
+        } else if (degrees == 180) {  // FLIP_H and ROT_180
+            return HAL_TRANSFORM_FLIP_V;
+        } else if (degrees == 270) {  // FLIP_H and ROT_270
+            return HAL_TRANSFORM_FLIP_V | HAL_TRANSFORM_ROT_90;
+        }
+    }
+    ALOGE("%s: Bad input: %d", __FUNCTION__, degrees);
+    return -1;
+}
+
+int Parameters::cropXToArray(int x) const {
+    ALOG_ASSERT(x >= 0, "Crop-relative X coordinate = '%d' is out of bounds"
+                         "(lower = 0)", x);
+
+    CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW);
+    ALOG_ASSERT(x < previewCrop.width, "Crop-relative X coordinate = '%d' "
+                    "is out of bounds (upper = %f)", x, previewCrop.width);
+
+    int ret = x + previewCrop.left;
+
+    ALOG_ASSERT( (ret >= 0 && ret < fastInfo.arrayWidth),
+        "Calculated pixel array value X = '%d' is out of bounds (upper = %d)",
+        ret, fastInfo.arrayWidth);
+    return ret;
+}
+
+int Parameters::cropYToArray(int y) const {
+    ALOG_ASSERT(y >= 0, "Crop-relative Y coordinate = '%d' is out of bounds "
+        "(lower = 0)", y);
+
+    CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW);
+    ALOG_ASSERT(y < previewCrop.height, "Crop-relative Y coordinate = '%d' is "
+                "out of bounds (upper = %f)", y, previewCrop.height);
+
+    int ret = y + previewCrop.top;
+
+    ALOG_ASSERT( (ret >= 0 && ret < fastInfo.arrayHeight),
+        "Calculated pixel array value Y = '%d' is out of bounds (upper = %d)",
+        ret, fastInfo.arrayHeight);
+
+    return ret;
+
+}
+
+int Parameters::normalizedXToCrop(int x) const {
+    CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW);
+    return (x + 1000) * (previewCrop.width - 1) / 2000;
+}
+
+int Parameters::normalizedYToCrop(int y) const {
+    CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW);
+    return (y + 1000) * (previewCrop.height - 1) / 2000;
+}
+
+int Parameters::arrayXToCrop(int x) const {
+    CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW);
+    return x - previewCrop.left;
+}
+
+int Parameters::arrayYToCrop(int y) const {
+    CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW);
+    return y - previewCrop.top;
+}
+
+int Parameters::cropXToNormalized(int x) const {
+    CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW);
+    return x * 2000 / (previewCrop.width - 1) - 1000;
+}
+
+int Parameters::cropYToNormalized(int y) const {
+    CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW);
+    return y * 2000 / (previewCrop.height - 1) - 1000;
+}
+
+int Parameters::arrayXToNormalized(int width) const {
+    int ret = cropXToNormalized(arrayXToCrop(width));
+
+    ALOG_ASSERT(ret >= -1000, "Calculated normalized value out of "
+        "lower bounds %d", ret);
+    ALOG_ASSERT(ret <= 1000, "Calculated normalized value out of "
+        "upper bounds %d", ret);
+
+    // Work-around for HAL pre-scaling the coordinates themselves
+    if (quirks.meteringCropRegion) {
+        return width * 2000 / (fastInfo.arrayWidth - 1) - 1000;
+    }
+
+    return ret;
+}
+
+int Parameters::arrayYToNormalized(int height) const {
+    int ret = cropYToNormalized(arrayYToCrop(height));
+
+    ALOG_ASSERT(ret >= -1000, "Calculated normalized value out of lower bounds"
+        " %d", ret);
+    ALOG_ASSERT(ret <= 1000, "Calculated normalized value out of upper bounds"
+        " %d", ret);
+
+    // Work-around for HAL pre-scaling the coordinates themselves
+    if (quirks.meteringCropRegion) {
+        return height * 2000 / (fastInfo.arrayHeight - 1) - 1000;
+    }
+
+    return ret;
+}
+
+int Parameters::normalizedXToArray(int x) const {
+
+    // Work-around for HAL pre-scaling the coordinates themselves
+    if (quirks.meteringCropRegion) {
+        return (x + 1000) * (fastInfo.arrayWidth - 1) / 2000;
+    }
+
+    return cropXToArray(normalizedXToCrop(x));
+}
+
+int Parameters::normalizedYToArray(int y) const {
+    // Work-around for HAL pre-scaling the coordinates themselves
+    if (quirks.meteringCropRegion) {
+        return (y + 1000) * (fastInfo.arrayHeight - 1) / 2000;
+    }
+
+    return cropYToArray(normalizedYToCrop(y));
+}
+
/**
 * Convert the current zoom index into a crop rectangle in pixel-array
 * coordinates (the units of ANDROID_SCALER_CROP_REGION). The rectangle
 * is centered in the active array and sized so that every output stream
 * selected in 'outputs', when it re-crops this region to its own aspect
 * ratio, still fits within the active pixel array.
 */
Parameters::CropRegion Parameters::calculateCropRegion(
                            Parameters::CropRegion::Outputs outputs) const {

    float zoomLeft, zoomTop, zoomWidth, zoomHeight;

    // Need to convert zoom index into a crop rectangle. The rectangle is
    // chosen to maximize its area on the sensor

    camera_metadata_ro_entry_t maxDigitalZoom =
            staticInfo(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM);
    // For each zoom step by how many pixels more do we change the zoom
    float zoomIncrement = (maxDigitalZoom.data.f[0] - 1) /
            (NUM_ZOOM_STEPS-1);
    // The desired activeAreaWidth/cropAreaWidth ratio (or height if h>w)
    // via interpolating zoom step into a zoom ratio
    float zoomRatio = 1 + zoomIncrement * zoom;
    ALOG_ASSERT( (zoomRatio >= 1.f && zoomRatio <= maxDigitalZoom.data.f[0]),
        "Zoom ratio calculated out of bounds. Expected 1 - %f, actual: %f",
        maxDigitalZoom.data.f[0], zoomRatio);

    ALOGV("Zoom maxDigital=%f, increment=%f, ratio=%f, previewWidth=%d, "
          "previewHeight=%d, activeWidth=%d, activeHeight=%d",
          maxDigitalZoom.data.f[0], zoomIncrement, zoomRatio, previewWidth,
          previewHeight, fastInfo.arrayWidth, fastInfo.arrayHeight);

    /*
     * Assumption: On the HAL side each stream buffer calculates its crop
     * rectangle as follows:
     *   cropRect = (zoomLeft, zoomRight,
     *               zoomWidth, zoomHeight * zoomWidth / outputWidth);
     *
     * Note that if zoomWidth > bufferWidth, the new cropHeight > zoomHeight
     *      (we can then get into trouble if the cropHeight > arrayHeight).
     * By selecting the zoomRatio based on the smallest outputRatio, we
     * guarantee this will never happen.
     */

    // Enumerate all possible output sizes, select the one with the smallest
    // aspect ratio
    float minOutputWidth, minOutputHeight, minOutputRatio;
    {
        // Order here must match the CropRegion::Outputs bit assignments,
        // since the loop below masks by (1 << i)
        float outputSizes[][2] = {
            { static_cast<float>(previewWidth),
              static_cast<float>(previewHeight) },
            { static_cast<float>(videoWidth),
              static_cast<float>(videoHeight) },
            { static_cast<float>(jpegThumbSize[0]),
              static_cast<float>(jpegThumbSize[1]) },
            { static_cast<float>(pictureWidth),
              static_cast<float>(pictureHeight) },
        };

        minOutputWidth = outputSizes[0][0];
        minOutputHeight = outputSizes[0][1];
        minOutputRatio = minOutputWidth / minOutputHeight;
        for (unsigned int i = 0;
             i < sizeof(outputSizes) / sizeof(outputSizes[0]);
             ++i) {

            // skip over outputs we don't want to consider for the crop region
            if ( !((1 << i) & outputs) ) {
                continue;
            }

            float outputWidth = outputSizes[i][0];
            float outputHeight = outputSizes[i][1];
            float outputRatio = outputWidth / outputHeight;

            if (minOutputRatio > outputRatio) {
                minOutputRatio = outputRatio;
                minOutputWidth = outputWidth;
                minOutputHeight = outputHeight;
            }

            // and then use this output ratio instead of preview output ratio
            ALOGV("Enumerating output ratio %f = %f / %f, min is %f",
                  outputRatio, outputWidth, outputHeight, minOutputRatio);
        }
    }

    /* Ensure that the width/height never go out of bounds
     * by scaling across a different dimension if an out-of-bounds
     * possibility exists.
     *
     * e.g. if the previewratio < arrayratio and e.g. zoomratio = 1.0, then by
     * calculating the zoomWidth from zoomHeight we'll actually get a
     * zoomheight > arrayheight
     */
    float arrayRatio = 1.f * fastInfo.arrayWidth / fastInfo.arrayHeight;
    if (minOutputRatio >= arrayRatio) {
        // Adjust the height based on the width
        zoomWidth =  fastInfo.arrayWidth / zoomRatio;
        zoomHeight = zoomWidth *
                minOutputHeight / minOutputWidth;

    } else {
        // Adjust the width based on the height
        zoomHeight = fastInfo.arrayHeight / zoomRatio;
        zoomWidth = zoomHeight *
                minOutputWidth / minOutputHeight;
    }
    // centering the zoom area within the active area
    zoomLeft = (fastInfo.arrayWidth - zoomWidth) / 2;
    zoomTop = (fastInfo.arrayHeight - zoomHeight) / 2;

    ALOGV("Crop region calculated (x=%d,y=%d,w=%f,h=%f) for zoom=%d",
        (int32_t)zoomLeft, (int32_t)zoomTop, zoomWidth, zoomHeight, this->zoom);


    CropRegion crop = { zoomLeft, zoomTop, zoomWidth, zoomHeight };
    return crop;
}
+
/**
 * Compute the horizontal and vertical fields of view (in degrees) of
 * the still-capture output, from the sensor's physical size, the
 * minimum focal length, and how the still output is cropped relative
 * to the full pixel array. Either output pointer may be NULL to skip
 * that axis. Returns NO_INIT if the sensor physical size static info
 * is unavailable, OK otherwise.
 */
status_t Parameters::calculatePictureFovs(float *horizFov, float *vertFov)
        const {
    camera_metadata_ro_entry_t sensorSize =
            staticInfo(ANDROID_SENSOR_INFO_PHYSICAL_SIZE, 2, 2);
    if (!sensorSize.count) return NO_INIT;

    float arrayAspect = static_cast<float>(fastInfo.arrayWidth) /
            fastInfo.arrayHeight;
    float stillAspect = static_cast<float>(pictureWidth) / pictureHeight;
    ALOGV("Array aspect: %f, still aspect: %f", arrayAspect, stillAspect);

    // The crop factors from the full sensor array to the still picture crop
    // region
    float horizCropFactor = 1.f;
    float vertCropFactor = 1.f;

    /**
     * Need to calculate the still image field of view based on the total pixel
     * array field of view, and the relative aspect ratios of the pixel array
     * and output streams.
     *
     * Special treatment for quirky definition of crop region and relative
     * stream cropping.
     */
    if (quirks.meteringCropRegion) {
        // Use max of preview and video as first crop
        float previewAspect = static_cast<float>(previewWidth) / previewHeight;
        float videoAspect = static_cast<float>(videoWidth) / videoHeight;
        if (videoAspect > previewAspect) {
            previewAspect = videoAspect;
        }
        // First crop sensor to preview aspect ratio
        if (arrayAspect < previewAspect) {
            vertCropFactor = arrayAspect / previewAspect;
        } else {
            horizCropFactor = previewAspect / arrayAspect;
        }
        // Second crop to still aspect ratio
        if (stillAspect < previewAspect) {
            horizCropFactor *= stillAspect / previewAspect;
        } else {
            vertCropFactor *= previewAspect / stillAspect;
        }
    } else {
        /**
         * The crop factors are just a function of the still/array relative
         * aspect ratios. Since each stream will maximize its area within the
         * crop region, and for FOV we assume a full-sensor crop region, we
         * only ever crop the FOV either vertically or horizontally, never
         * both.
         */
        horizCropFactor = (arrayAspect > stillAspect) ?
                (stillAspect / arrayAspect) : 1.f;
        vertCropFactor = (arrayAspect < stillAspect) ?
                (arrayAspect / stillAspect) : 1.f;
    }
    ALOGV("Horiz crop factor: %f, vert crop fact: %f",
            horizCropFactor, vertCropFactor);
    /**
     * Basic field of view formula is:
     *   angle of view = 2 * arctangent ( d / 2f )
     * where d is the physical sensor dimension of interest, and f is
     * the focal length. This only applies to rectilinear sensors, for focusing
     * at distances >> f, etc.
     */
    if (horizFov != NULL) {
        *horizFov = 180 / M_PI * 2 *
                atanf(horizCropFactor * sensorSize.data.f[0] /
                        (2 * fastInfo.minFocalLength));
    }
    if (vertFov != NULL) {
        *vertFov = 180 / M_PI * 2 *
                atanf(vertCropFactor * sensorSize.data.f[1] /
                        (2 * fastInfo.minFocalLength));
    }
    return OK;
}
+
/**
 * Pick the single FPS value to report for a (min, max) FPS range; the
 * upper bound of the range is used and the lower bound is ignored.
 */
int32_t Parameters::fpsFromRange(int32_t /*min*/, int32_t max) const {
    return max;
}
+
+}; // namespace camera2
+}; // namespace android
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
new file mode 100644
index 0000000..464830c
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -0,0 +1,372 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2PARAMETERS_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2PARAMETERS_H
+
+#include <system/graphics.h>
+
+#include <utils/Errors.h>
+#include <utils/Mutex.h>
+#include <utils/String8.h>
+#include <utils/Vector.h>
+#include <utils/KeyedVector.h>
+#include <camera/CameraParameters.h>
+#include <camera/CameraMetadata.h>
+
+namespace android {
+namespace camera2 {
+
+/**
+ * Current camera state; this is the full state of the Camera under the old
+ * camera API (contents of the CameraParameters object in a more-efficient
+ * format, plus other state). The enum values are mostly based off the
+ * corresponding camera2 enums, not the camera1 strings. A few are defined here
+ * if they don't cleanly map to camera2 values.
+ */
+struct Parameters {
+    /**
+     * Parameters and other state
+     */
+    int cameraId;
+    int cameraFacing;
+
+    int previewWidth, previewHeight;
+    int32_t previewFpsRange[2];
+    int previewFps; // deprecated, here only for tracking changes
+    int previewFormat;
+
+    int previewTransform; // set by CAMERA_CMD_SET_DISPLAY_ORIENTATION
+
+    int pictureWidth, pictureHeight;
+
+    // JPEG still-capture settings (EXIF thumbnail size/quality, rotation)
+    int32_t jpegThumbSize[2];
+    uint8_t jpegQuality, jpegThumbQuality;
+    int32_t jpegRotation;
+
+    // GPS tags for JPEG EXIF metadata, used only when gpsEnabled is true
+    bool gpsEnabled;
+    double gpsCoordinates[3];
+    int64_t gpsTimestamp;
+    String8 gpsProcessingMethod;
+
+    // 3A/effect modes; values follow the camera2 metadata enums
+    uint8_t wbMode;
+    uint8_t effectMode;
+    uint8_t antibandingMode;
+    uint8_t sceneMode;
+
+    enum flashMode_t {
+        FLASH_MODE_OFF = 0,
+        FLASH_MODE_AUTO,
+        FLASH_MODE_ON,
+        FLASH_MODE_TORCH,
+        FLASH_MODE_RED_EYE = ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE,
+        FLASH_MODE_INVALID = -1
+    } flashMode;
+
+    enum focusMode_t {
+        FOCUS_MODE_AUTO = ANDROID_CONTROL_AF_MODE_AUTO,
+        FOCUS_MODE_MACRO = ANDROID_CONTROL_AF_MODE_MACRO,
+        FOCUS_MODE_CONTINUOUS_VIDEO = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO,
+        FOCUS_MODE_CONTINUOUS_PICTURE = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE,
+        FOCUS_MODE_EDOF = ANDROID_CONTROL_AF_MODE_EDOF,
+        FOCUS_MODE_INFINITY,
+        FOCUS_MODE_FIXED,
+        FOCUS_MODE_INVALID = -1
+    } focusMode;
+
+    uint8_t focusState; // Latest focus state from HAL
+
+    // For use with triggerAfWithAuto quirk
+    focusMode_t shadowFocusMode;
+
+    // Weighted rectangle; used for the focus and metering region lists below
+    struct Area {
+        int left, top, right, bottom;
+        int weight;
+        Area() {}
+        Area(int left, int top, int right, int bottom, int weight):
+                left(left), top(top), right(right), bottom(bottom),
+                weight(weight) {}
+        // An all-zero rectangle marks a default/unset area
+        bool isEmpty() const {
+            return (left == 0) && (top == 0) && (right == 0) && (bottom == 0);
+        }
+    };
+    Vector<Area> focusingAreas;
+
+    int32_t exposureCompensation;
+    bool autoExposureLock;
+    bool autoWhiteBalanceLock;
+
+    Vector<Area> meteringAreas;
+
+    // Current zoom step; NUM_ZOOM_STEPS below gives the simulated step count
+    int zoom;
+
+    int videoWidth, videoHeight;
+
+    bool recordingHint;
+    bool videoStabilization;
+
+    enum lightFxMode_t {
+        LIGHTFX_NONE = 0,
+        LIGHTFX_LOWLIGHT,
+        LIGHTFX_HDR
+    } lightFx;
+
+    // Last-applied camera1 parameters, in parsed and flattened string form
+    CameraParameters params;
+    String8 paramsFlattened;
+
+    // These parameters are also part of the camera API-visible state, but not
+    // directly listed in Camera.Parameters
+    bool storeMetadataInBuffers;
+    bool playShutterSound;
+    bool enableFaceDetect;
+
+    bool enableFocusMoveMessages;
+    int afTriggerCounter;
+    int currentAfTriggerId;
+    bool afInMotion;
+
+    int precaptureTriggerCounter;
+
+    // Preview-callback delivery state (flags, one-shot mode, callback surface)
+    uint32_t previewCallbackFlags;
+    bool previewCallbackOneShot;
+    bool previewCallbackSurface;
+
+    bool zslMode;
+
+    // Overall camera state
+    enum State {
+        DISCONNECTED,
+        STOPPED,
+        WAITING_FOR_PREVIEW_WINDOW,
+        PREVIEW,
+        RECORD,
+        STILL_CAPTURE,
+        VIDEO_SNAPSHOT
+    } state;
+
+    // Number of zoom steps to simulate
+    static const unsigned int NUM_ZOOM_STEPS = 100;
+
+    // Full static camera info, object owned by someone else, such as
+    // Camera2Device.
+    const CameraMetadata *info;
+
+    // Fast-access static device information; this is a subset of the
+    // information available through the staticInfo() method, used for
+    // frequently-accessed values or values that have to be calculated from the
+    // static information.
+    struct DeviceInfo {
+        int32_t arrayWidth;
+        int32_t arrayHeight;
+        uint8_t bestFaceDetectMode;
+        int32_t maxFaces;
+        // Per-scene-mode 3A overrides; INVALID/OFF entries mean "no override"
+        struct OverrideModes {
+            flashMode_t flashMode;
+            uint8_t     wbMode;
+            focusMode_t focusMode;
+            OverrideModes():
+                    flashMode(FLASH_MODE_INVALID),
+                    wbMode(ANDROID_CONTROL_AWB_MODE_OFF),
+                    focusMode(FOCUS_MODE_INVALID) {
+            }
+        };
+        DefaultKeyedVector<uint8_t, OverrideModes> sceneModeOverrides;
+        float minFocalLength;
+        bool useFlexibleYuv;
+    } fastInfo;
+
+    // Quirks information; these are short-lived flags to enable workarounds for
+    // incomplete HAL implementations
+    struct Quirks {
+        bool triggerAfWithAuto;
+        bool useZslFormat;
+        bool meteringCropRegion;
+    } quirks;
+
+    /**
+     * Parameter manipulation and setup methods
+     */
+
+    Parameters(int cameraId, int cameraFacing);
+    ~Parameters();
+
+    // Sets up default parameters
+    status_t initialize(const CameraMetadata *info);
+
+    // Build fast-access device static info from static info
+    status_t buildFastInfo();
+    // Query for quirks from static info
+    status_t buildQuirks();
+
+    // Get entry from camera static characteristics information. min/maxCount
+    // are used for error checking the number of values in the entry. 0 for
+    // max/minCount means to do no bounds check in that direction. In case of
+    // error, the entry data pointer is null and the count is 0.
+    camera_metadata_ro_entry_t staticInfo(uint32_t tag,
+            size_t minCount=0, size_t maxCount=0, bool required=true) const;
+
+    // Validate and update camera parameters based on new settings
+    status_t set(const String8 &paramString);
+
+    // Retrieve the current settings
+    String8 get() const;
+
+    // Update passed-in request for common parameters
+    status_t updateRequest(CameraMetadata *request) const;
+
+    // Add/update JPEG entries in metadata
+    status_t updateRequestJpeg(CameraMetadata *request) const;
+
+    // Calculate the crop region rectangle based on current stream sizes
+    struct CropRegion {
+        float left;
+        float top;
+        float width;
+        float height;
+
+        // Bitmask of which output streams the crop region must cover
+        enum Outputs {
+            OUTPUT_PREVIEW         = 0x01,
+            OUTPUT_VIDEO           = 0x02,
+            OUTPUT_JPEG_THUMBNAIL  = 0x04,
+            OUTPUT_PICTURE         = 0x08,
+        };
+    };
+    CropRegion calculateCropRegion(CropRegion::Outputs outputs) const;
+
+    // Calculate the field of view of the high-resolution JPEG capture
+    status_t calculatePictureFovs(float *horizFov, float *vertFov) const;
+
+    // Static methods for debugging and converting between camera1 and camera2
+    // parameters
+
+    static const char *getStateName(State state);
+
+    static int formatStringToEnum(const char *format);
+    static const char *formatEnumToString(int format);
+
+    static int wbModeStringToEnum(const char *wbMode);
+    static const char* wbModeEnumToString(uint8_t wbMode);
+    static int effectModeStringToEnum(const char *effectMode);
+    static int abModeStringToEnum(const char *abMode);
+    static int sceneModeStringToEnum(const char *sceneMode);
+    static flashMode_t flashModeStringToEnum(const char *flashMode);
+    static const char* flashModeEnumToString(flashMode_t flashMode);
+    static focusMode_t focusModeStringToEnum(const char *focusMode);
+    static const char* focusModeEnumToString(focusMode_t focusMode);
+    static lightFxMode_t lightFxStringToEnum(const char *lightFxMode);
+
+    static status_t parseAreas(const char *areasCStr,
+            Vector<Area> *areas);
+
+    enum AreaKind
+    {
+        AREA_KIND_FOCUS,
+        AREA_KIND_METERING
+    };
+    status_t validateAreas(const Vector<Area> &areas,
+                                  size_t maxRegions,
+                                  AreaKind areaKind) const;
+    static bool boolFromString(const char *boolStr);
+
+    // Map from camera orientation + facing to gralloc transform enum
+    static int degToTransform(int degrees, bool mirror);
+
+    // API specifies FPS ranges are done in fixed point integer, with LSB = 0.001.
+    // Note that this doesn't apply to the (deprecated) single FPS value.
+    static const int kFpsToApiScale = 1000;
+
+    // Transform between (-1000,-1000)-(1000,1000) normalized coords from camera
+    // API and HAL2 (0,0)-(activePixelArray.width/height) coordinates
+    int arrayXToNormalized(int width) const;
+    int arrayYToNormalized(int height) const;
+    int normalizedXToArray(int x) const;
+    int normalizedYToArray(int y) const;
+
+    struct Range {
+        int min;
+        int max;
+    };
+
+    // Pick the (deprecated) single FPS value to report for an FPS range
+    int32_t fpsFromRange(int32_t min, int32_t max) const;
+
+private:
+
+    // Convert between HAL2 sensor array coordinates and
+    // viewfinder crop-region relative array coordinates
+    int cropXToArray(int x) const;
+    int cropYToArray(int y) const;
+    int arrayXToCrop(int x) const;
+    int arrayYToCrop(int y) const;
+
+    // Convert between viewfinder crop-region relative array coordinates
+    // and camera API (-1000,1000)-(1000,1000) normalized coords
+    int cropXToNormalized(int x) const;
+    int cropYToNormalized(int y) const;
+    int normalizedXToCrop(int x) const;
+    int normalizedYToCrop(int y) const;
+};
+
+// This class encapsulates the Parameters class so that it can only be accessed
+// by constructing a Lock object, which locks the SharedParameter's mutex.
+class SharedParameters {
+  public:
+    SharedParameters(int cameraId, int cameraFacing):
+            mParameters(cameraId, cameraFacing) {
+    }
+
+    // RAII scoped lock: holds the SharedParameters mutex for its lifetime,
+    // and exposes the protected Parameters through its mParameters member.
+    template<typename S, typename P>
+    class BaseLock {
+      public:
+        BaseLock(S &p):
+                mParameters(p.mParameters),
+                mSharedParameters(p) {
+            mSharedParameters.mLock.lock();
+        }
+
+        ~BaseLock() {
+            mSharedParameters.mLock.unlock();
+        }
+        P &mParameters;
+      private:
+        // Disallow copying, default construction
+        BaseLock();
+        BaseLock(const BaseLock &);
+        BaseLock &operator=(const BaseLock &);
+        S &mSharedParameters;
+    };
+    // Lock grants mutable access to the parameters, ReadLock const access
+    typedef BaseLock<SharedParameters, Parameters> Lock;
+    typedef BaseLock<const SharedParameters, const Parameters> ReadLock;
+
+    // Access static info, read-only and immutable, so no lock needed
+    camera_metadata_ro_entry_t staticInfo(uint32_t tag,
+            size_t minCount=0, size_t maxCount=0) const {
+        return mParameters.staticInfo(tag, minCount, maxCount);
+    }
+
+    // Only use for dumping or other debugging
+    const Parameters &unsafeAccess() {
+        return mParameters;
+    }
+  private:
+    Parameters mParameters;
+    // mutable so ReadLock can lock through a const SharedParameters
+    mutable Mutex mLock;
+};
+
+
+}; // namespace camera2
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
new file mode 100644
index 0000000..7e98016
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
@@ -0,0 +1,880 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera2-StreamingProcessor"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+//#define LOG_NNDEBUG 0 // Per-frame verbose logging
+
+#ifdef LOG_NNDEBUG
+#define ALOGVV(...) ALOGV(__VA_ARGS__)
+#else
+#define ALOGVV(...) ((void)0)
+#endif
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <gui/Surface.h>
+#include <media/hardware/MetadataBufferType.h>
+
+#include "common/CameraDeviceBase.h"
+#include "api1/Camera2Client.h"
+#include "api1/client2/StreamingProcessor.h"
+#include "api1/client2/Camera2Heap.h"
+
+namespace android {
+namespace camera2 {
+
+/**
+ * Set up streaming (preview/recording) state for the given client.
+ * mClient and mDevice are promoted before each use elsewhere in this file,
+ * so holding them here does not keep the client or device alive.
+ */
+StreamingProcessor::StreamingProcessor(sp<Camera2Client> client):
+        mClient(client),
+        mDevice(client->getCameraDevice()),
+        mId(client->getCameraId()),
+        mActiveRequest(NONE),
+        mPaused(false),
+        mPreviewRequestId(Camera2Client::kPreviewRequestIdStart),
+        mPreviewStreamId(NO_STREAM),
+        mRecordingRequestId(Camera2Client::kRecordingRequestIdStart),
+        mRecordingStreamId(NO_STREAM),
+        mRecordingFrameAvailable(false),
+        mRecordingHeapCount(kDefaultRecordingHeapCount),
+        mRecordingHeapFree(kDefaultRecordingHeapCount)
+{
+}
+
+StreamingProcessor::~StreamingProcessor() {
+    // Tear down any streams still registered with the device
+    deletePreviewStream();
+    deleteRecordingStream();
+}
+
+/**
+ * Replace the preview output window. Any existing preview stream is deleted
+ * first (deletePreviewStream takes mMutex itself, so the lock is acquired
+ * only afterwards); the new stream is created lazily by updatePreviewStream.
+ */
+status_t StreamingProcessor::setPreviewWindow(sp<ANativeWindow> window) {
+    ATRACE_CALL();
+    status_t res;
+
+    res = deletePreviewStream();
+    if (res != OK) return res;
+
+    Mutex::Autolock m(mMutex);
+
+    mPreviewWindow = window;
+
+    return OK;
+}
+
+// Whether a preview output window has been set (non-NULL mPreviewWindow)
+bool StreamingProcessor::haveValidPreviewWindow() const {
+    Mutex::Autolock m(mMutex);
+    return mPreviewWindow != 0;
+}
+
+/**
+ * Refresh the repeating preview request from the current parameters.
+ *
+ * Lazily creates the request from the CAMERA2_TEMPLATE_PREVIEW default on
+ * first use, then applies the common parameter entries and stamps the
+ * request with the current preview request ID.
+ */
+status_t StreamingProcessor::updatePreviewRequest(const Parameters &params) {
+    ATRACE_CALL();
+    status_t res;
+    sp<CameraDeviceBase> device = mDevice.promote();
+    if (device == 0) {
+        ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+
+    Mutex::Autolock m(mMutex);
+    // entryCount() == 0 means the request has never been initialized
+    if (mPreviewRequest.entryCount() == 0) {
+        res = device->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
+                &mPreviewRequest);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to create default preview request: "
+                    "%s (%d)", __FUNCTION__, mId, strerror(-res), res);
+            return res;
+        }
+    }
+
+    res = params.updateRequest(&mPreviewRequest);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to update common entries of preview "
+                "request: %s (%d)", __FUNCTION__, mId,
+                strerror(-res), res);
+        return res;
+    }
+
+    res = mPreviewRequest.update(ANDROID_REQUEST_ID,
+            &mPreviewRequestId, 1);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to update request id for preview: %s (%d)",
+                __FUNCTION__, mId, strerror(-res), res);
+        return res;
+    }
+
+    return OK;
+}
+
+/**
+ * Create or resize the device preview stream to match the current parameters.
+ *
+ * If a stream exists at the wrong size, the device is drained and the stream
+ * deleted before a new one is created on mPreviewWindow. Finally the stream
+ * transform (display orientation) is applied.
+ */
+status_t StreamingProcessor::updatePreviewStream(const Parameters &params) {
+    ATRACE_CALL();
+    Mutex::Autolock m(mMutex);
+
+    status_t res;
+    sp<CameraDeviceBase> device = mDevice.promote();
+    if (device == 0) {
+        ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+
+    if (mPreviewStreamId != NO_STREAM) {
+        // Check if stream parameters have to change
+        uint32_t currentWidth, currentHeight;
+        // final 0: format output not requested — TODO confirm against
+        // CameraDeviceBase::getStreamInfo signature
+        res = device->getStreamInfo(mPreviewStreamId,
+                &currentWidth, &currentHeight, 0);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Error querying preview stream info: "
+                    "%s (%d)", __FUNCTION__, mId, strerror(-res), res);
+            return res;
+        }
+        if (currentWidth != (uint32_t)params.previewWidth ||
+                currentHeight != (uint32_t)params.previewHeight) {
+            ALOGV("%s: Camera %d: Preview size switch: %d x %d -> %d x %d",
+                    __FUNCTION__, mId, currentWidth, currentHeight,
+                    params.previewWidth, params.previewHeight);
+            // Must be idle before the old stream can be torn down
+            res = device->waitUntilDrained();
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Error waiting for preview to drain: "
+                        "%s (%d)", __FUNCTION__, mId, strerror(-res), res);
+                return res;
+            }
+            res = device->deleteStream(mPreviewStreamId);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to delete old output stream "
+                        "for preview: %s (%d)", __FUNCTION__, mId,
+                        strerror(-res), res);
+                return res;
+            }
+            mPreviewStreamId = NO_STREAM;
+        }
+    }
+
+    if (mPreviewStreamId == NO_STREAM) {
+        res = device->createStream(mPreviewWindow,
+                params.previewWidth, params.previewHeight,
+                CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, 0,
+                &mPreviewStreamId);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to create preview stream: %s (%d)",
+                    __FUNCTION__, mId, strerror(-res), res);
+            return res;
+        }
+    }
+
+    res = device->setStreamTransform(mPreviewStreamId,
+            params.previewTransform);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to set preview stream transform: "
+                "%s (%d)", __FUNCTION__, mId, strerror(-res), res);
+        return res;
+    }
+
+    return OK;
+}
+
+/**
+ * Delete the device preview stream, if one exists. Drains the device first
+ * so no requests still reference the stream. No-op when no stream is set.
+ */
+status_t StreamingProcessor::deletePreviewStream() {
+    ATRACE_CALL();
+    status_t res;
+
+    Mutex::Autolock m(mMutex);
+
+    if (mPreviewStreamId != NO_STREAM) {
+        sp<CameraDeviceBase> device = mDevice.promote();
+        if (device == 0) {
+            ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+            return INVALID_OPERATION;
+        }
+
+        ALOGV("%s: for cameraId %d on streamId %d",
+            __FUNCTION__, mId, mPreviewStreamId);
+
+        res = device->waitUntilDrained();
+        if (res != OK) {
+            ALOGE("%s: Error waiting for preview to drain: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+            return res;
+        }
+        res = device->deleteStream(mPreviewStreamId);
+        if (res != OK) {
+            ALOGE("%s: Unable to delete old preview stream: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+            return res;
+        }
+        mPreviewStreamId = NO_STREAM;
+    }
+    return OK;
+}
+
+// Current preview stream ID, or NO_STREAM if none has been created
+int StreamingProcessor::getPreviewStreamId() const {
+    Mutex::Autolock m(mMutex);
+    return mPreviewStreamId;
+}
+
+/**
+ * Set the number of recording buffer slots requested by the video encoder.
+ *
+ * Fails with INVALID_OPERATION if the recording stream is currently active,
+ * since the consumer/heap cannot be resized with buffers in flight. When the
+ * count changes, the recording heap and consumer are released here and
+ * reallocated at the new size the next time recording is set up.
+ */
+status_t StreamingProcessor::setRecordingBufferCount(size_t count) {
+    ATRACE_CALL();
+    // Make sure we can support this many buffer slots
+    if (count > BufferQueue::NUM_BUFFER_SLOTS) {
+        // %zu: count is size_t; %d is mismatched varargs on LP64 builds
+        ALOGE("%s: Camera %d: Too many recording buffers requested: %zu, max %d",
+                __FUNCTION__, mId, count, BufferQueue::NUM_BUFFER_SLOTS);
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock m(mMutex);
+
+    ALOGV("%s: Camera %d: New recording buffer count from encoder: %zu",
+            __FUNCTION__, mId, count);
+
+    // Need to re-size consumer and heap
+    if (mRecordingHeapCount != count) {
+        ALOGV("%s: Camera %d: Resetting recording heap and consumer",
+            __FUNCTION__, mId);
+
+        if (isStreamActive(mActiveStreamIds, mRecordingStreamId)) {
+            ALOGE("%s: Camera %d: Setting recording buffer count when "
+                    "recording stream is already active!", __FUNCTION__,
+                    mId);
+            return INVALID_OPERATION;
+        }
+
+        releaseAllRecordingFramesLocked();
+
+        if (mRecordingHeap != 0) {
+            mRecordingHeap.clear();
+        }
+        mRecordingHeapCount = count;
+        mRecordingHeapFree = count;
+
+        mRecordingConsumer.clear();
+    }
+
+    return OK;
+}
+
+/**
+ * Refresh the repeating recording request from the current parameters.
+ *
+ * Lazily creates the request from the CAMERA2_TEMPLATE_VIDEO_RECORD default
+ * on first use, then applies the common parameter entries and stamps the
+ * request with the current recording request ID.
+ */
+status_t StreamingProcessor::updateRecordingRequest(const Parameters &params) {
+    ATRACE_CALL();
+    status_t res;
+    Mutex::Autolock m(mMutex);
+
+    sp<CameraDeviceBase> device = mDevice.promote();
+    if (device == 0) {
+        ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+
+    // entryCount() == 0 means the request has never been initialized
+    if (mRecordingRequest.entryCount() == 0) {
+        res = device->createDefaultRequest(CAMERA2_TEMPLATE_VIDEO_RECORD,
+                &mRecordingRequest);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to create default recording request:"
+                    " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
+            return res;
+        }
+    }
+
+    res = params.updateRequest(&mRecordingRequest);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to update common entries of recording "
+                "request: %s (%d)", __FUNCTION__, mId,
+                strerror(-res), res);
+        return res;
+    }
+
+    res = mRecordingRequest.update(ANDROID_REQUEST_ID,
+            &mRecordingRequestId, 1);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to update request id for request: %s (%d)",
+                __FUNCTION__, mId, strerror(-res), res);
+        return res;
+    }
+
+    return OK;
+}
+
+/**
+ * Create or resize the device recording stream to match current parameters.
+ *
+ * Lazily creates the CPU-side BufferItemConsumer endpoint (sized to the
+ * encoder's requested heap count plus one), then deletes/recreates the
+ * device stream when the video size or consumer has changed. Returns -EBUSY
+ * if the device cannot delete the old stream yet; the caller should retry
+ * once the device is idle.
+ */
+status_t StreamingProcessor::updateRecordingStream(const Parameters &params) {
+    ATRACE_CALL();
+    status_t res;
+    Mutex::Autolock m(mMutex);
+
+    sp<CameraDeviceBase> device = mDevice.promote();
+    if (device == 0) {
+        ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+
+    bool newConsumer = false;
+    if (mRecordingConsumer == 0) {
+        // NOTE(review): %d with mRecordingHeapCount — confirm its declared
+        // type in StreamingProcessor.h matches the format specifier
+        ALOGV("%s: Camera %d: Creating recording consumer with %d + 1 "
+                "consumer-side buffers", __FUNCTION__, mId, mRecordingHeapCount);
+        // Create CPU buffer queue endpoint. We need one more buffer here so that we can
+        // always acquire and free a buffer when the heap is full; otherwise the consumer
+        // will have buffers in flight we'll never clear out.
+        sp<BufferQueue> bq = new BufferQueue();
+        mRecordingConsumer = new BufferItemConsumer(bq,
+                GRALLOC_USAGE_HW_VIDEO_ENCODER,
+                mRecordingHeapCount + 1);
+        mRecordingConsumer->setFrameAvailableListener(this);
+        mRecordingConsumer->setName(String8("Camera2-RecordingConsumer"));
+        mRecordingWindow = new Surface(
+            mRecordingConsumer->getProducerInterface());
+        newConsumer = true;
+        // Allocate memory later, since we don't know buffer size until receipt
+    }
+
+    if (mRecordingStreamId != NO_STREAM) {
+        // Check if stream parameters have to change
+        uint32_t currentWidth, currentHeight;
+        res = device->getStreamInfo(mRecordingStreamId,
+                &currentWidth, &currentHeight, 0);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Error querying recording output stream info: "
+                    "%s (%d)", __FUNCTION__, mId,
+                    strerror(-res), res);
+            return res;
+        }
+        if (currentWidth != (uint32_t)params.videoWidth ||
+                currentHeight != (uint32_t)params.videoHeight || newConsumer) {
+            // TODO: Should wait to be sure previous recording has finished
+            res = device->deleteStream(mRecordingStreamId);
+
+            if (res == -EBUSY) {
+                ALOGV("%s: Camera %d: Device is busy, call "
+                      "updateRecordingStream after it becomes idle",
+                      __FUNCTION__, mId);
+                return res;
+            } else if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to delete old output stream "
+                        "for recording: %s (%d)", __FUNCTION__,
+                        mId, strerror(-res), res);
+                return res;
+            }
+            mRecordingStreamId = NO_STREAM;
+        }
+    }
+
+    if (mRecordingStreamId == NO_STREAM) {
+        mRecordingFrameCount = 0;
+        res = device->createStream(mRecordingWindow,
+                params.videoWidth, params.videoHeight,
+                CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, 0, &mRecordingStreamId);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't create output stream for recording: "
+                    "%s (%d)", __FUNCTION__, mId,
+                    strerror(-res), res);
+            return res;
+        }
+    }
+
+    return OK;
+}
+
+/**
+ * Delete the device recording stream, if one exists. Drains the device first
+ * so no requests still reference the stream. No-op when no stream is set.
+ */
+status_t StreamingProcessor::deleteRecordingStream() {
+    ATRACE_CALL();
+    status_t res;
+
+    Mutex::Autolock m(mMutex);
+
+    if (mRecordingStreamId != NO_STREAM) {
+        sp<CameraDeviceBase> device = mDevice.promote();
+        if (device == 0) {
+            ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+            return INVALID_OPERATION;
+        }
+
+        res = device->waitUntilDrained();
+        if (res != OK) {
+            ALOGE("%s: Error waiting for HAL to drain: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+            return res;
+        }
+        res = device->deleteStream(mRecordingStreamId);
+        if (res != OK) {
+            ALOGE("%s: Unable to delete recording stream: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+            return res;
+        }
+        mRecordingStreamId = NO_STREAM;
+    }
+    return OK;
+}
+
+// Current recording stream ID, or NO_STREAM if none has been created.
+// Takes mMutex for a consistent read, matching getPreviewStreamId() and all
+// other accesses of mRecordingStreamId in this file, which hold this lock.
+int StreamingProcessor::getRecordingStreamId() const {
+    Mutex::Autolock m(mMutex);
+    return mRecordingStreamId;
+}
+
+/**
+ * Start streaming of the given type (PREVIEW or RECORD) to the listed output
+ * streams, by submitting the matching repeating request to the device.
+ * Returns INVALID_OPERATION for type NONE or if the device has gone away.
+ */
+status_t StreamingProcessor::startStream(StreamType type,
+        const Vector<uint8_t> &outputStreams) {
+    ATRACE_CALL();
+    status_t res;
+
+    if (type == NONE) return INVALID_OPERATION;
+
+    sp<CameraDeviceBase> device = mDevice.promote();
+    if (device == 0) {
+        ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+
+    ALOGV("%s: Camera %d: type = %d", __FUNCTION__, mId, type);
+
+    Mutex::Autolock m(mMutex);
+
+    // The error logs below previously said "preview" unconditionally, which
+    // was misleading when starting recording; report the actual type.
+    const char *typeName = (type == PREVIEW) ? "preview" : "recording";
+
+    // If a recording stream is being started up, free up any
+    // outstanding buffers left from the previous recording session.
+    // There should never be any, so if there are, warn about it.
+    if (isStreamActive(outputStreams, mRecordingStreamId)) {
+        releaseAllRecordingFramesLocked();
+    }
+
+    ALOGV("%s: Camera %d: %s started, recording heap has %d free of %d",
+            __FUNCTION__, mId, typeName,
+            mRecordingHeapFree, mRecordingHeapCount);
+
+    CameraMetadata &request = (type == PREVIEW) ?
+            mPreviewRequest : mRecordingRequest;
+
+    res = request.update(
+        ANDROID_REQUEST_OUTPUT_STREAMS,
+        outputStreams);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to set up %s request: %s (%d)",
+                __FUNCTION__, mId, typeName, strerror(-res), res);
+        return res;
+    }
+
+    res = request.sort();
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Error sorting %s request: %s (%d)",
+                __FUNCTION__, mId, typeName, strerror(-res), res);
+        return res;
+    }
+
+    res = device->setStreamingRequest(request);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to set %s request to start streaming: "
+                "%s (%d)",
+                __FUNCTION__, mId, typeName, strerror(-res), res);
+        return res;
+    }
+    mActiveRequest = type;
+    mPaused = false;
+    mActiveStreamIds = outputStreams;
+    return OK;
+}
+
+/**
+ * Pause or resume the active repeating request without losing streaming
+ * state. Pausing clears the device's streaming request; resuming resubmits
+ * the request matching mActiveRequest. No-op if already in the target state;
+ * fails if streaming was never started.
+ */
+status_t StreamingProcessor::togglePauseStream(bool pause) {
+    ATRACE_CALL();
+    status_t res;
+
+    sp<CameraDeviceBase> device = mDevice.promote();
+    if (device == 0) {
+        ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+
+    ALOGV("%s: Camera %d: toggling pause to %d", __FUNCTION__, mId, pause);
+
+    Mutex::Autolock m(mMutex);
+
+    if (mActiveRequest == NONE) {
+        ALOGE("%s: Camera %d: Can't toggle pause, streaming was not started",
+              __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+
+    if (mPaused == pause) {
+        return OK;
+    }
+
+    if (pause) {
+        res = device->clearStreamingRequest();
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't clear stream request: %s (%d)",
+                    __FUNCTION__, mId, strerror(-res), res);
+            return res;
+        }
+    } else {
+        CameraMetadata &request =
+                (mActiveRequest == PREVIEW) ? mPreviewRequest
+                                            : mRecordingRequest;
+        res = device->setStreamingRequest(request);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to set preview request to resume: "
+                    "%s (%d)",
+                    __FUNCTION__, mId, strerror(-res), res);
+            return res;
+        }
+    }
+
+    mPaused = pause;
+    return OK;
+}
+
+/**
+ * Stop streaming entirely: clear the device's repeating request and reset
+ * the active request type, active stream list, and pause state.
+ */
+status_t StreamingProcessor::stopStream() {
+    ATRACE_CALL();
+    status_t res;
+
+    Mutex::Autolock m(mMutex);
+
+    sp<CameraDeviceBase> device = mDevice.promote();
+    if (device == 0) {
+        ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+
+    res = device->clearStreamingRequest();
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Can't clear stream request: %s (%d)",
+                __FUNCTION__, mId, strerror(-res), res);
+        return res;
+    }
+
+    mActiveRequest = NONE;
+    mActiveStreamIds.clear();
+    mPaused = false;
+
+    return OK;
+}
+
+// Request ID of the currently active repeating request, or 0 when nothing
+// is streaming (or on an unexpected internal state).
+int32_t StreamingProcessor::getActiveRequestId() const {
+    Mutex::Autolock m(mMutex);
+    switch (mActiveRequest) {
+        case NONE:
+            return 0;
+        case PREVIEW:
+            return mPreviewRequestId;
+        case RECORD:
+            return mRecordingRequestId;
+        default:
+            ALOGE("%s: Unexpected mode %d", __FUNCTION__, mActiveRequest);
+            return 0;
+    }
+}
+
+/**
+ * Advance the preview and recording request IDs to the next value in their
+ * reserved [kXxxRequestIdStart, kXxxRequestIdEnd) ranges, wrapping back to
+ * the start on overflow, so new requests are distinguishable from old ones.
+ */
+status_t StreamingProcessor::incrementStreamingIds() {
+    ATRACE_CALL();
+    Mutex::Autolock m(mMutex);
+
+    mPreviewRequestId++;
+    if (mPreviewRequestId >= Camera2Client::kPreviewRequestIdEnd) {
+        mPreviewRequestId = Camera2Client::kPreviewRequestIdStart;
+    }
+    mRecordingRequestId++;
+    if (mRecordingRequestId >= Camera2Client::kRecordingRequestIdEnd) {
+        mRecordingRequestId = Camera2Client::kRecordingRequestIdStart;
+    }
+    return OK;
+}
+
+/**
+ * Recording-consumer frame-available callback. Coalesces bursts into a
+ * single flag + signal; threadLoop() consumes the flag and drains all
+ * available frames.
+ */
+void StreamingProcessor::onFrameAvailable() {
+    ATRACE_CALL();
+    Mutex::Autolock l(mMutex);
+    if (!mRecordingFrameAvailable) {
+        mRecordingFrameAvailable = true;
+        mRecordingFrameAvailableSignal.signal();
+    }
+
+}
+
+/**
+ * Recording-frame processing thread body. Waits (with timeout, so the thread
+ * can notice exit requests) for onFrameAvailable()'s signal, then drains all
+ * queued recording frames. Always returns true to keep the thread running.
+ */
+bool StreamingProcessor::threadLoop() {
+    status_t res;
+
+    {
+        Mutex::Autolock l(mMutex);
+        while (!mRecordingFrameAvailable) {
+            res = mRecordingFrameAvailableSignal.waitRelative(
+                mMutex, kWaitDuration);
+            // Timed out with no frames: loop again from the top
+            if (res == TIMED_OUT) return true;
+        }
+        mRecordingFrameAvailable = false;
+    }
+
+    // Drain every available frame; processRecordingFrame returns non-OK
+    // when no more buffers can be acquired
+    do {
+        res = processRecordingFrame();
+    } while (res == OK);
+
+    return true;
+}
+
+/**
+ * Pull one recording buffer from the HAL-facing consumer, wrap its
+ * buffer_handle_t into an entry of the metadata-mode recording heap, and
+ * deliver it to the remote client via dataCallbackTimestamp. Returns non-OK
+ * when there is nothing (more) to process or on error.
+ */
+status_t StreamingProcessor::processRecordingFrame() {
+    ATRACE_CALL();
+    status_t res;
+    sp<Camera2Heap> recordingHeap;
+    size_t heapIdx = 0;
+    nsecs_t timestamp;
+
+    sp<Camera2Client> client = mClient.promote();
+    if (client == 0) {
+        // Discard frames during shutdown
+        BufferItemConsumer::BufferItem imgBuffer;
+        res = mRecordingConsumer->acquireBuffer(&imgBuffer, 0);
+        if (res != OK) {
+            if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
+                ALOGE("%s: Camera %d: Can't acquire recording buffer: %s (%d)",
+                        __FUNCTION__, mId, strerror(-res), res);
+            }
+            return res;
+        }
+        mRecordingConsumer->releaseBuffer(imgBuffer);
+        return OK;
+    }
+
+    {
+        /* acquire SharedParameters before mMutex so we don't dead lock
+            with Camera2Client code calling into StreamingProcessor */
+        SharedParameters::Lock l(client->getParameters());
+        Mutex::Autolock m(mMutex);
+        BufferItemConsumer::BufferItem imgBuffer;
+        res = mRecordingConsumer->acquireBuffer(&imgBuffer, 0);
+        if (res != OK) {
+            if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
+                ALOGE("%s: Camera %d: Can't acquire recording buffer: %s (%d)",
+                        __FUNCTION__, mId, strerror(-res), res);
+            }
+            return res;
+        }
+        timestamp = imgBuffer.mTimestamp;
+
+        mRecordingFrameCount++;
+        ALOGVV("OnRecordingFrame: Frame %d", mRecordingFrameCount);
+
+        if (l.mParameters.state != Parameters::RECORD &&
+                l.mParameters.state != Parameters::VIDEO_SNAPSHOT) {
+            ALOGV("%s: Camera %d: Discarding recording image buffers "
+                    "received after recording done", __FUNCTION__,
+                    mId);
+            mRecordingConsumer->releaseBuffer(imgBuffer);
+            return INVALID_OPERATION;
+        }
+
+        if (mRecordingHeap == 0) {
+            // Each heap entry is a metadata-mode video buffer: a uint32_t
+            // type tag followed by the gralloc buffer_handle_t.
+            const size_t bufferSize = 4 + sizeof(buffer_handle_t);
+            // Heap count and size are size_t; use %zu so the varargs call is
+            // well-defined on 64-bit builds.
+            ALOGV("%s: Camera %d: Creating recording heap with %zu buffers of "
+                    "size %zu bytes", __FUNCTION__, mId,
+                    mRecordingHeapCount, bufferSize);
+
+            mRecordingHeap = new Camera2Heap(bufferSize, mRecordingHeapCount,
+                    "Camera2Client::RecordingHeap");
+            if (mRecordingHeap->mHeap->getSize() == 0) {
+                ALOGE("%s: Camera %d: Unable to allocate memory for recording",
+                        __FUNCTION__, mId);
+                mRecordingConsumer->releaseBuffer(imgBuffer);
+                return NO_MEMORY;
+            }
+            // Any still-valid slot here means a buffer was never returned by
+            // the client from the previous session; flag it loudly.
+            for (size_t i = 0; i < mRecordingBuffers.size(); i++) {
+                if (mRecordingBuffers[i].mBuf !=
+                        BufferItemConsumer::INVALID_BUFFER_SLOT) {
+                    ALOGE("%s: Camera %d: Non-empty recording buffers list!",
+                            __FUNCTION__, mId);
+                }
+            }
+            mRecordingBuffers.clear();
+            mRecordingBuffers.setCapacity(mRecordingHeapCount);
+            mRecordingBuffers.insertAt(0, mRecordingHeapCount);
+
+            mRecordingHeapHead = 0;
+            mRecordingHeapFree = mRecordingHeapCount;
+        }
+
+        if ( mRecordingHeapFree == 0) {
+            ALOGE("%s: Camera %d: No free recording buffers, dropping frame",
+                    __FUNCTION__, mId);
+            mRecordingConsumer->releaseBuffer(imgBuffer);
+            return NO_MEMORY;
+        }
+
+        // Claim the next heap slot in ring-buffer order.
+        heapIdx = mRecordingHeapHead;
+        mRecordingHeapHead = (mRecordingHeapHead + 1) % mRecordingHeapCount;
+        mRecordingHeapFree--;
+
+        ALOGVV("%s: Camera %d: Timestamp %lld",
+                __FUNCTION__, mId, timestamp);
+
+        ssize_t offset;
+        size_t size;
+        sp<IMemoryHeap> heap =
+                mRecordingHeap->mBuffers[heapIdx]->getMemory(&offset,
+                        &size);
+
+        // Fill the slot: 4-byte type tag, then the gralloc handle. The same
+        // layout is parsed back in releaseRecordingFrame().
+        uint8_t *data = (uint8_t*)heap->getBase() + offset;
+        uint32_t type = kMetadataBufferTypeGrallocSource;
+        *((uint32_t*)data) = type;
+        *((buffer_handle_t*)(data + 4)) = imgBuffer.mGraphicBuffer->handle;
+        ALOGVV("%s: Camera %d: Sending out buffer_handle_t %p",
+                __FUNCTION__, mId,
+                imgBuffer.mGraphicBuffer->handle);
+        // Keep the acquired BufferItem so the graphic buffer stays valid
+        // until the client returns the frame.
+        mRecordingBuffers.replaceAt(imgBuffer, heapIdx);
+        recordingHeap = mRecordingHeap;
+    }
+
+    // Call outside locked parameters to allow re-entrancy from notification
+    Camera2Client::SharedCameraCallbacks::Lock l(client->mSharedCameraCallbacks);
+    if (l.mRemoteCallback != 0) {
+        l.mRemoteCallback->dataCallbackTimestamp(timestamp,
+                CAMERA_MSG_VIDEO_FRAME,
+                recordingHeap->mBuffers[heapIdx]);
+    } else {
+        ALOGW("%s: Camera %d: Remote callback gone", __FUNCTION__, mId);
+    }
+
+    return OK;
+}
+
+/**
+ * Return a recording frame previously handed out by processRecordingFrame().
+ * Validates that the IMemory belongs to the current recording heap and holds
+ * a gralloc-source metadata entry, then releases the matching BufferItem back
+ * to the recording consumer.
+ */
+void StreamingProcessor::releaseRecordingFrame(const sp<IMemory>& mem) {
+    ATRACE_CALL();
+    status_t res;
+
+    Mutex::Autolock m(mMutex);
+    // Make sure this is for the current heap
+    ssize_t offset;
+    size_t size;
+    sp<IMemoryHeap> heap = mem->getMemory(&offset, &size);
+    if (heap->getHeapID() != mRecordingHeap->mHeap->getHeapID()) {
+        ALOGW("%s: Camera %d: Mismatched heap ID, ignoring release "
+                "(got %x, expected %x)", __FUNCTION__, mId,
+                heap->getHeapID(), mRecordingHeap->mHeap->getHeapID());
+        return;
+    }
+    // Parse the entry layout written by processRecordingFrame():
+    // uint32_t type tag followed by the buffer_handle_t at offset 4.
+    uint8_t *data = (uint8_t*)heap->getBase() + offset;
+    uint32_t type = *(uint32_t*)data;
+    if (type != kMetadataBufferTypeGrallocSource) {
+        ALOGE("%s: Camera %d: Recording frame type invalid (got %x, expected %x)",
+                __FUNCTION__, mId, type,
+                kMetadataBufferTypeGrallocSource);
+        return;
+    }
+
+    // Release the buffer back to the recording queue
+
+    buffer_handle_t imgHandle = *(buffer_handle_t*)(data + 4);
+
+    // Find the outstanding BufferItem whose gralloc handle matches.
+    size_t itemIndex;
+    for (itemIndex = 0; itemIndex < mRecordingBuffers.size(); itemIndex++) {
+        const BufferItemConsumer::BufferItem item =
+                mRecordingBuffers[itemIndex];
+        if (item.mBuf != BufferItemConsumer::INVALID_BUFFER_SLOT &&
+                item.mGraphicBuffer->handle == imgHandle) {
+            break;
+        }
+    }
+    if (itemIndex == mRecordingBuffers.size()) {
+        ALOGE("%s: Camera %d: Can't find buffer_handle_t %p in list of "
+                "outstanding buffers", __FUNCTION__, mId,
+                imgHandle);
+        return;
+    }
+
+    ALOGVV("%s: Camera %d: Freeing buffer_handle_t %p", __FUNCTION__,
+            mId, imgHandle);
+
+    res = mRecordingConsumer->releaseBuffer(mRecordingBuffers[itemIndex]);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to free recording frame "
+                "(buffer_handle_t: %p): %s (%d)", __FUNCTION__,
+                mId, imgHandle, strerror(-res), res);
+        return;
+    }
+    // Invalidate the slot and return it to the free pool.
+    mRecordingBuffers.replaceAt(itemIndex);
+
+    mRecordingHeapFree++;
+    ALOGV_IF(mRecordingHeapFree == mRecordingHeapCount,
+            "%s: Camera %d: All %d recording buffers returned",
+            __FUNCTION__, mId, mRecordingHeapCount);
+}
+
+/**
+ * Unilaterally release every recording buffer still held out to the client,
+ * resetting the recording heap to a fully-free state. Must be called with
+ * mMutex held (per the Locked suffix).
+ */
+void StreamingProcessor::releaseAllRecordingFramesLocked() {
+    ATRACE_CALL();
+    status_t res;
+
+    if (mRecordingConsumer == 0) {
+        return;
+    }
+
+    ALOGV("%s: Camera %d: Releasing all recording buffers", __FUNCTION__,
+            mId);
+
+    size_t releasedCount = 0;
+    for (size_t itemIndex = 0; itemIndex < mRecordingBuffers.size(); itemIndex++) {
+        const BufferItemConsumer::BufferItem item =
+                mRecordingBuffers[itemIndex];
+        if (item.mBuf != BufferItemConsumer::INVALID_BUFFER_SLOT) {
+            res = mRecordingConsumer->releaseBuffer(mRecordingBuffers[itemIndex]);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to free recording frame "
+                        "(buffer_handle_t: %p): %s (%d)", __FUNCTION__,
+                        mId, item.mGraphicBuffer->handle, strerror(-res), res);
+            }
+            mRecordingBuffers.replaceAt(itemIndex);
+            releasedCount++;
+        }
+    }
+
+    if (releasedCount > 0) {
+        // Counts are size_t; %zu keeps the varargs call well-defined on LP64.
+        ALOGW("%s: Camera %d: Force-freed %zu outstanding buffers "
+                "from previous recording session", __FUNCTION__, mId, releasedCount);
+        ALOGE_IF(releasedCount != mRecordingHeapCount - mRecordingHeapFree,
+            "%s: Camera %d: Force-freed %zu buffers, but expected %zu",
+            __FUNCTION__, mId, releasedCount, mRecordingHeapCount - mRecordingHeapFree);
+    }
+
+    mRecordingHeapHead = 0;
+    mRecordingHeapFree = mRecordingHeapCount;
+}
+
+bool StreamingProcessor::isStreamActive(const Vector<uint8_t> &streams,
+        uint8_t recordingStreamId) {
+    // Linear scan; the active-stream list only ever holds a few ids.
+    size_t idx = 0;
+    while (idx < streams.size() && streams[idx] != recordingStreamId) {
+        idx++;
+    }
+    return idx < streams.size();
+}
+
+
+/**
+ * Dump the current preview/recording request metadata and streaming state to
+ * fd for debugging (e.g. dumpsys output).
+ */
+status_t StreamingProcessor::dump(int fd, const Vector<String16>& /*args*/) {
+    String8 result;
+
+    result.append("  Current requests:\n");
+    if (mPreviewRequest.entryCount() != 0) {
+        result.append("    Preview request:\n");
+        // Flush accumulated text before CameraMetadata writes directly to fd.
+        write(fd, result.string(), result.size());
+        mPreviewRequest.dump(fd, 2, 6);
+        result.clear();
+    } else {
+        result.append("    Preview request: undefined\n");
+    }
+
+    if (mRecordingRequest.entryCount() != 0) {
+        result = "    Recording request:\n";
+        write(fd, result.string(), result.size());
+        mRecordingRequest.dump(fd, 2, 6);
+        result.clear();
+    } else {
+        result = "    Recording request: undefined\n";
+    }
+
+    // Indexed by StreamType; order must match the NONE/PREVIEW/RECORD enum.
+    const char* streamTypeString[] = {
+        "none", "preview", "record"
+    };
+    result.append(String8::format("   Active request: %s (paused: %s)\n",
+                                  streamTypeString[mActiveRequest],
+                                  mPaused ? "yes" : "no"));
+
+    write(fd, result.string(), result.size());
+
+    return OK;
+}
+
+}; // namespace camera2
+}; // namespace android
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.h b/services/camera/libcameraservice/api1/client2/StreamingProcessor.h
new file mode 100644
index 0000000..d879b83
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_STREAMINGPROCESSOR_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_STREAMINGPROCESSOR_H
+
+#include <utils/Mutex.h>
+#include <utils/String16.h>
+#include <gui/BufferItemConsumer.h>
+
+#include "camera/CameraMetadata.h"
+
+namespace android {
+
+class Camera2Client;
+class CameraDeviceBase;
+class IMemory;
+
+namespace camera2 {
+
+class Parameters;
+class Camera2Heap;
+
+/**
+ * Management and processing for preview and recording streams
+ */
+class StreamingProcessor:
+            public Thread, public BufferItemConsumer::FrameAvailableListener {
+  public:
+    StreamingProcessor(sp<Camera2Client> client);
+    ~StreamingProcessor();
+
+    status_t setPreviewWindow(sp<ANativeWindow> window);
+
+    bool haveValidPreviewWindow() const;
+
+    status_t updatePreviewRequest(const Parameters &params);
+    status_t updatePreviewStream(const Parameters &params);
+    status_t deletePreviewStream();
+    int getPreviewStreamId() const;
+
+    status_t setRecordingBufferCount(size_t count);
+    status_t updateRecordingRequest(const Parameters &params);
+    status_t updateRecordingStream(const Parameters &params);
+    status_t deleteRecordingStream();
+    int getRecordingStreamId() const;
+
+    enum StreamType {
+        NONE,
+        PREVIEW,
+        RECORD
+    };
+    status_t startStream(StreamType type,
+            const Vector<uint8_t> &outputStreams);
+
+    // Toggle between paused and unpaused. Stream must be started first.
+    status_t togglePauseStream(bool pause);
+
+    status_t stopStream();
+
+    // Returns the request ID for the currently streaming request
+    // Returns 0 if there is no active request.
+    // NOTE: returns a request id (int32_t), not an error code, matching the
+    // definition in StreamingProcessor.cpp.
+    int32_t getActiveRequestId() const;
+    status_t incrementStreamingIds();
+
+    // Callback for new recording frames from HAL
+    virtual void onFrameAvailable();
+    // Callback from stagefright which returns used recording frames
+    void releaseRecordingFrame(const sp<IMemory>& mem);
+
+    status_t dump(int fd, const Vector<String16>& args);
+
+  private:
+    mutable Mutex mMutex;
+
+    enum {
+        NO_STREAM = -1
+    };
+
+    wp<Camera2Client> mClient;
+    wp<CameraDeviceBase> mDevice;
+    int mId;
+
+    StreamType mActiveRequest;
+    bool mPaused;
+
+    Vector<uint8_t> mActiveStreamIds;
+
+    // Preview-related members
+    int32_t mPreviewRequestId;
+    int mPreviewStreamId;
+    CameraMetadata mPreviewRequest;
+    sp<ANativeWindow> mPreviewWindow;
+
+    // Recording-related members
+    static const nsecs_t kWaitDuration = 50000000; // 50 ms
+
+    int32_t mRecordingRequestId;
+    int mRecordingStreamId;
+    int mRecordingFrameCount;
+    sp<BufferItemConsumer> mRecordingConsumer;
+    sp<ANativeWindow>  mRecordingWindow;
+    CameraMetadata mRecordingRequest;
+    sp<camera2::Camera2Heap> mRecordingHeap;
+
+    bool mRecordingFrameAvailable;
+    Condition mRecordingFrameAvailableSignal;
+
+    static const size_t kDefaultRecordingHeapCount = 8;
+    size_t mRecordingHeapCount;
+    Vector<BufferItemConsumer::BufferItem> mRecordingBuffers;
+    size_t mRecordingHeapHead, mRecordingHeapFree;
+
+    virtual bool threadLoop();
+
+    status_t processRecordingFrame();
+
+    // Unilaterally free any buffers still outstanding to stagefright
+    void releaseAllRecordingFramesLocked();
+
+    // Determine if the specified stream is currently in use
+    static bool isStreamActive(const Vector<uint8_t> &streams,
+            uint8_t recordingStreamId);
+};
+
+
+}; // namespace camera2
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
new file mode 100644
index 0000000..11a2cbb
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
@@ -0,0 +1,556 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera2-ZslProcessor"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+//#define LOG_NNDEBUG 0
+
+#ifdef LOG_NNDEBUG
+#define ALOGVV(...) ALOGV(__VA_ARGS__)
+#else
+#define ALOGVV(...) ((void)0)
+#endif
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <gui/Surface.h>
+
+#include "common/CameraDeviceBase.h"
+#include "api1/Camera2Client.h"
+#include "api1/client2/CaptureSequencer.h"
+#include "api1/client2/ZslProcessor.h"
+
+namespace android {
+namespace camera2 {
+
+ZslProcessor::ZslProcessor(
+    sp<Camera2Client> client,
+    wp<CaptureSequencer> sequencer):
+        Thread(false),
+        mState(RUNNING),
+        mClient(client),
+        mDevice(client->getCameraDevice()),
+        mSequencer(sequencer),
+        mId(client->getCameraId()),
+        mZslBufferAvailable(false),
+        mZslStreamId(NO_STREAM),
+        mZslReprocessStreamId(NO_STREAM),
+        mFrameListHead(0),
+        mZslQueueHead(0),
+        mZslQueueTail(0) {
+    // Pre-size the ZSL buffer queue and the metadata frame ring buffer.
+    mZslQueue.insertAt(0, kZslBufferDepth);
+    mFrameList.insertAt(0, kFrameListDepth);
+    // Register ourselves with the capture sequencer so it can trigger
+    // ZSL reprocessing captures.
+    sp<CaptureSequencer> captureSequencer = mSequencer.promote();
+    if (captureSequencer != 0) captureSequencer->setZslProcessor(this);
+}
+
+ZslProcessor::~ZslProcessor() {
+    ALOGV("%s: Exit", __FUNCTION__);
+    // Tear down the ZSL output/reprocess streams and drain the queue.
+    deleteStream();
+}
+
+void ZslProcessor::onFrameAvailable() {
+    Mutex::Autolock l(mInputMutex);
+    // Coalesce notifications: a single pending signal is enough to wake
+    // threadLoop(), which clears the flag before draining.
+    if (mZslBufferAvailable) {
+        return;
+    }
+    mZslBufferAvailable = true;
+    mZslBufferAvailableSignal.signal();
+}
+
+/**
+ * Store the latest preview result metadata into the frame ring buffer and
+ * try to pair it with a queued ZSL buffer by timestamp.
+ */
+void ZslProcessor::onFrameAvailable(int32_t /*frameId*/,
+        const CameraMetadata &frame) {
+    Mutex::Autolock l(mInputMutex);
+    camera_metadata_ro_entry_t entry;
+    entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
+    // Guard the dereference: entry.data is not valid when the frame has no
+    // ANDROID_SENSOR_TIMESTAMP entry (count == 0).
+    if (entry.count > 0) {
+        nsecs_t timestamp = entry.data.i64[0];
+        (void)timestamp;
+        ALOGVV("Got preview frame for timestamp %lld", timestamp);
+    }
+
+    if (mState != RUNNING) return;
+
+    mFrameList.editItemAt(mFrameListHead) = frame;
+    mFrameListHead = (mFrameListHead + 1) % kFrameListDepth;
+
+    findMatchesLocked();
+}
+
+/**
+ * Called by the device when the reprocess buffer pushed in pushToReprocess()
+ * has been consumed. Clears the ZSL queue and unlocks the processor so new
+ * ZSL buffers can be captured.
+ */
+void ZslProcessor::onBufferReleased(buffer_handle_t *handle) {
+    Mutex::Autolock l(mInputMutex);
+
+    // Verify that the buffer is in our queue
+    size_t i = 0;
+    for (; i < mZslQueue.size(); i++) {
+        if (&(mZslQueue[i].buffer.mGraphicBuffer->handle) == handle) break;
+    }
+    if (i == mZslQueue.size()) {
+        // Warn but continue: the queue is cleared unconditionally below.
+        ALOGW("%s: Released buffer %p not found in queue",
+                __FUNCTION__, handle);
+    }
+
+    // Erase entire ZSL queue since we've now completed the capture and preview
+    // is stopped.
+    clearZslQueueLocked();
+
+    mState = RUNNING;
+}
+
+/**
+ * (Re)configure the ZSL output stream and its paired reprocess stream to
+ * match the current sensor array size, creating the consumer endpoint on
+ * first use. Also registers for preview frame metadata so buffers can be
+ * matched to results.
+ */
+status_t ZslProcessor::updateStream(const Parameters &params) {
+    ATRACE_CALL();
+    ALOGV("%s: Configuring ZSL streams", __FUNCTION__);
+    status_t res;
+
+    Mutex::Autolock l(mInputMutex);
+
+    sp<Camera2Client> client = mClient.promote();
+    if (client == 0) {
+        ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+    sp<CameraDeviceBase> device = mDevice.promote();
+    if (device == 0) {
+        ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+
+    if (mZslConsumer == 0) {
+        // Create CPU buffer queue endpoint
+        sp<BufferQueue> bq = new BufferQueue();
+        mZslConsumer = new BufferItemConsumer(bq,
+            GRALLOC_USAGE_HW_CAMERA_ZSL,
+            kZslBufferDepth);
+        mZslConsumer->setFrameAvailableListener(this);
+        mZslConsumer->setName(String8("Camera2Client::ZslConsumer"));
+        mZslWindow = new Surface(
+            mZslConsumer->getProducerInterface());
+    }
+
+    if (mZslStreamId != NO_STREAM) {
+        // Check if stream parameters have to change
+        uint32_t currentWidth, currentHeight;
+        res = device->getStreamInfo(mZslStreamId,
+                &currentWidth, &currentHeight, 0);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Error querying capture output stream info: "
+                    "%s (%d)", __FUNCTION__,
+                    mId, strerror(-res), res);
+            return res;
+        }
+        if (currentWidth != (uint32_t)params.fastInfo.arrayWidth ||
+                currentHeight != (uint32_t)params.fastInfo.arrayHeight) {
+            // Delete the reprocess stream first; it was created from the
+            // output stream and must go before it.
+            res = device->deleteReprocessStream(mZslReprocessStreamId);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to delete old reprocess stream "
+                        "for ZSL: %s (%d)", __FUNCTION__,
+                        mId, strerror(-res), res);
+                return res;
+            }
+            ALOGV("%s: Camera %d: Deleting stream %d since the buffer dimensions changed",
+                __FUNCTION__, mId, mZslStreamId);
+            res = device->deleteStream(mZslStreamId);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to delete old output stream "
+                        "for ZSL: %s (%d)", __FUNCTION__,
+                        mId, strerror(-res), res);
+                return res;
+            }
+            mZslStreamId = NO_STREAM;
+        }
+    }
+
+    if (mZslStreamId == NO_STREAM) {
+        // Create stream for HAL production
+        // TODO: Sort out better way to select resolution for ZSL
+        int streamType = params.quirks.useZslFormat ?
+                (int)CAMERA2_HAL_PIXEL_FORMAT_ZSL :
+                (int)HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+        res = device->createStream(mZslWindow,
+                params.fastInfo.arrayWidth, params.fastInfo.arrayHeight,
+                streamType, 0,
+                &mZslStreamId);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't create output stream for ZSL: "
+                    "%s (%d)", __FUNCTION__, mId,
+                    strerror(-res), res);
+            return res;
+        }
+        res = device->createReprocessStreamFromStream(mZslStreamId,
+                &mZslReprocessStreamId);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't create reprocess stream for ZSL: "
+                    "%s (%d)", __FUNCTION__, mId,
+                    strerror(-res), res);
+            return res;
+        }
+    }
+    // Listen for preview-range results so ZSL buffers can be matched to
+    // their capture metadata.
+    client->registerFrameListener(Camera2Client::kPreviewRequestIdStart,
+            Camera2Client::kPreviewRequestIdEnd,
+            this);
+
+    return OK;
+}
+
+/**
+ * Delete the ZSL reprocess and output streams (reprocess first, since it was
+ * derived from the output stream) and drop the consumer endpoint. No-op if
+ * no stream was ever created.
+ */
+status_t ZslProcessor::deleteStream() {
+    ATRACE_CALL();
+    status_t res;
+
+    Mutex::Autolock l(mInputMutex);
+
+    if (mZslStreamId != NO_STREAM) {
+        sp<CameraDeviceBase> device = mDevice.promote();
+        if (device == 0) {
+            ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+            return INVALID_OPERATION;
+        }
+
+        // Release all acquired buffers before tearing the streams down.
+        clearZslQueueLocked();
+
+        res = device->deleteReprocessStream(mZslReprocessStreamId);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Cannot delete ZSL reprocessing stream %d: "
+                    "%s (%d)", __FUNCTION__, mId,
+                    mZslReprocessStreamId, strerror(-res), res);
+            return res;
+        }
+
+        mZslReprocessStreamId = NO_STREAM;
+        res = device->deleteStream(mZslStreamId);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Cannot delete ZSL output stream %d: "
+                    "%s (%d)", __FUNCTION__, mId,
+                    mZslStreamId, strerror(-res), res);
+            return res;
+        }
+
+        mZslWindow.clear();
+        mZslConsumer.clear();
+
+        mZslStreamId = NO_STREAM;
+    }
+    return OK;
+}
+
+int ZslProcessor::getStreamId() const {
+    // Lock so a concurrent updateStream()/deleteStream() can't race us.
+    Mutex::Autolock lock(mInputMutex);
+    return mZslStreamId;
+}
+
+/**
+ * Take the oldest valid frame/buffer pair from the ZSL queue, convert its
+ * result metadata into a reprocess capture request (tagged with requestId),
+ * stop preview, push the buffer to the reprocess stream, and submit the
+ * capture. Locks the processor (mState = LOCKED) until onBufferReleased().
+ */
+status_t ZslProcessor::pushToReprocess(int32_t requestId) {
+    ALOGV("%s: Send in reprocess request with id %d",
+            __FUNCTION__, requestId);
+    Mutex::Autolock l(mInputMutex);
+    status_t res;
+    sp<Camera2Client> client = mClient.promote();
+
+    if (client == 0) {
+        ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+
+    IF_ALOGV() {
+        dumpZslQueue(-1);
+    }
+
+    if (mZslQueueTail != mZslQueueHead) {
+        // Scan from the tail for the oldest entry with matched metadata.
+        CameraMetadata request;
+        size_t index = mZslQueueTail;
+        while (index != mZslQueueHead) {
+            if (!mZslQueue[index].frame.isEmpty()) {
+                request = mZslQueue[index].frame;
+                break;
+            }
+            index = (index + 1) % kZslBufferDepth;
+        }
+        if (index == mZslQueueHead) {
+            ALOGV("%s: ZSL queue has no valid frames to send yet.",
+                  __FUNCTION__);
+            return NOT_ENOUGH_DATA;
+        }
+        // Verify that the frame is reasonable for reprocessing
+
+        camera_metadata_entry_t entry;
+        entry = request.find(ANDROID_CONTROL_AE_STATE);
+        if (entry.count == 0) {
+            ALOGE("%s: ZSL queue frame has no AE state field!",
+                    __FUNCTION__);
+            return BAD_VALUE;
+        }
+        if (entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_CONVERGED &&
+                entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_LOCKED) {
+            ALOGV("%s: ZSL queue frame AE state is %d, need full capture",
+                    __FUNCTION__, entry.data.u8[0]);
+            return NOT_ENOUGH_DATA;
+        }
+
+        buffer_handle_t *handle =
+            &(mZslQueue[index].buffer.mGraphicBuffer->handle);
+
+        // Rewrite the result metadata into a reprocess request.
+        // NOTE(review): only the first and last update() results are checked;
+        // the two stream updates' return values are dropped.
+        uint8_t requestType = ANDROID_REQUEST_TYPE_REPROCESS;
+        res = request.update(ANDROID_REQUEST_TYPE,
+                &requestType, 1);
+        uint8_t inputStreams[1] =
+                { static_cast<uint8_t>(mZslReprocessStreamId) };
+        if (res == OK) request.update(ANDROID_REQUEST_INPUT_STREAMS,
+                inputStreams, 1);
+        uint8_t outputStreams[1] =
+                { static_cast<uint8_t>(client->getCaptureStreamId()) };
+        if (res == OK) request.update(ANDROID_REQUEST_OUTPUT_STREAMS,
+                outputStreams, 1);
+        res = request.update(ANDROID_REQUEST_ID,
+                &requestId, 1);
+
+        if (res != OK ) {
+            ALOGE("%s: Unable to update frame to a reprocess request", __FUNCTION__);
+            return INVALID_OPERATION;
+        }
+
+        // Preview must be stopped before the reprocess capture runs.
+        res = client->stopStream();
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to stop preview for ZSL capture: "
+                "%s (%d)",
+                __FUNCTION__, mId, strerror(-res), res);
+            return INVALID_OPERATION;
+        }
+        // TODO: have push-and-clear be atomic
+        res = client->getCameraDevice()->pushReprocessBuffer(mZslReprocessStreamId,
+                handle, this);
+        if (res != OK) {
+            ALOGE("%s: Unable to push buffer for reprocessing: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+            return res;
+        }
+
+        // Update JPEG settings
+        {
+            SharedParameters::Lock l(client->getParameters());
+            res = l.mParameters.updateRequestJpeg(&request);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to update JPEG entries of ZSL "
+                        "capture request: %s (%d)", __FUNCTION__,
+                        mId,
+                        strerror(-res), res);
+                return res;
+            }
+        }
+
+        mLatestCapturedRequest = request;
+        res = client->getCameraDevice()->capture(request);
+        if (res != OK ) {
+            ALOGE("%s: Unable to send ZSL reprocess request to capture: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+            return res;
+        }
+
+        // Stay locked until the reprocess buffer is released back to us.
+        mState = LOCKED;
+    } else {
+        ALOGV("%s: No ZSL buffers yet", __FUNCTION__);
+        return NOT_ENOUGH_DATA;
+    }
+    return OK;
+}
+
+status_t ZslProcessor::clearZslQueue() {
+    Mutex::Autolock l(mInputMutex);
+    // A reprocess capture is in flight; its buffer must stay queued until
+    // onBufferReleased() unlocks us, so leave the queue alone.
+    return (mState == LOCKED) ? OK : clearZslQueueLocked();
+}
+
+status_t ZslProcessor::clearZslQueueLocked() {
+    // Return every acquired buffer to the consumer and reset the ring.
+    for (size_t idx = 0; idx < mZslQueue.size(); idx++) {
+        if (mZslQueue[idx].buffer.mTimestamp != 0) {
+            mZslConsumer->releaseBuffer(mZslQueue[idx].buffer);
+        }
+        mZslQueue.replaceAt(idx);
+    }
+    mZslQueueHead = 0;
+    mZslQueueTail = 0;
+    return OK;
+}
+
+void ZslProcessor::dump(int fd, const Vector<String16>& /*args*/) const {
+    Mutex::Autolock lock(mInputMutex);
+    // Write the header line first, then (if present) the full metadata dump.
+    const bool haveRequest = !mLatestCapturedRequest.isEmpty();
+    String8 header(haveRequest
+            ? "    Latest ZSL capture request:\n"
+            : "    Latest ZSL capture request: none yet\n");
+    write(fd, header.string(), header.size());
+    if (haveRequest) {
+        mLatestCapturedRequest.dump(fd, 2, 6);
+    }
+    dumpZslQueue(fd);
+}
+
+bool ZslProcessor::threadLoop() {
+    status_t res;
+
+    {
+        // Sleep until a new ZSL buffer is signalled; time out periodically
+        // so the Thread machinery can check for an exit request.
+        Mutex::Autolock l(mInputMutex);
+        while (!mZslBufferAvailable) {
+            res = mZslBufferAvailableSignal.waitRelative(mInputMutex,
+                    kWaitDuration);
+            if (res == TIMED_OUT) return true;
+        }
+        mZslBufferAvailable = false;
+    }
+
+    // Drain all pending buffers before waiting again.
+    while (processNewZslBuffer() == OK) {
+    }
+
+    return true;
+}
+
+/**
+ * Acquire one buffer from the ZSL consumer and insert it at the head of the
+ * ZSL ring queue, evicting the oldest entry if the queue is full, then try
+ * to match it with stored frame metadata. Returns non-OK when no buffer is
+ * available (normal drain termination) or on error.
+ */
+status_t ZslProcessor::processNewZslBuffer() {
+    ATRACE_CALL();
+    status_t res;
+    sp<BufferItemConsumer> zslConsumer;
+    {
+        // Grab a local strong reference so acquireBuffer can run unlocked.
+        Mutex::Autolock l(mInputMutex);
+        if (mZslConsumer == 0) return OK;
+        zslConsumer = mZslConsumer;
+    }
+    ALOGVV("Trying to get next buffer");
+    BufferItemConsumer::BufferItem item;
+    res = zslConsumer->acquireBuffer(&item, 0);
+    if (res != OK) {
+        if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
+            ALOGE("%s: Camera %d: Error receiving ZSL image buffer: "
+                    "%s (%d)", __FUNCTION__,
+                    mId, strerror(-res), res);
+        } else {
+            ALOGVV("  No buffer");
+        }
+        return res;
+    }
+
+    Mutex::Autolock l(mInputMutex);
+
+    if (mState == LOCKED) {
+        // A reprocess capture is in flight; don't disturb the queue.
+        ALOGVV("In capture, discarding new ZSL buffers");
+        zslConsumer->releaseBuffer(item);
+        return OK;
+    }
+
+    ALOGVV("Got ZSL buffer: head: %d, tail: %d", mZslQueueHead, mZslQueueTail);
+
+    // Queue full: release the oldest buffer to make room.
+    if ( (mZslQueueHead + 1) % kZslBufferDepth == mZslQueueTail) {
+        ALOGVV("Releasing oldest buffer");
+        zslConsumer->releaseBuffer(mZslQueue[mZslQueueTail].buffer);
+        mZslQueue.replaceAt(mZslQueueTail);
+        mZslQueueTail = (mZslQueueTail + 1) % kZslBufferDepth;
+    }
+
+    ZslPair &queueHead = mZslQueue.editItemAt(mZslQueueHead);
+
+    queueHead.buffer = item;
+    // Drop any stale metadata; findMatchesLocked() pairs it up fresh.
+    queueHead.frame.release();
+
+    mZslQueueHead = (mZslQueueHead + 1) % kZslBufferDepth;
+
+    ALOGVV("  Acquired buffer, timestamp %lld", queueHead.buffer.mTimestamp);
+
+    findMatchesLocked();
+
+    return OK;
+}
+
+/**
+ * Pair queued ZSL buffers with stored result metadata by sensor timestamp.
+ * A frame matches a buffer if the timestamps are equal or within 1 ms.
+ * Must be called with mInputMutex held (per the Locked suffix).
+ */
+void ZslProcessor::findMatchesLocked() {
+    ALOGVV("Scanning");
+    for (size_t i = 0; i < mZslQueue.size(); i++) {
+        ZslPair &queueEntry = mZslQueue.editItemAt(i);
+        nsecs_t bufferTimestamp = queueEntry.buffer.mTimestamp;
+        IF_ALOGV() {
+            camera_metadata_entry_t entry;
+            nsecs_t frameTimestamp = 0;
+            if (!queueEntry.frame.isEmpty()) {
+                entry = queueEntry.frame.find(ANDROID_SENSOR_TIMESTAMP);
+                // entry.data is only valid when the tag is present.
+                if (entry.count > 0) frameTimestamp = entry.data.i64[0];
+            }
+            ALOGVV("   %zu: b: %lld\tf: %lld", i,
+                    bufferTimestamp, frameTimestamp );
+        }
+        if (queueEntry.frame.isEmpty() && bufferTimestamp != 0) {
+            // Have buffer, no matching frame. Look for one
+            for (size_t j = 0; j < mFrameList.size(); j++) {
+                bool match = false;
+                CameraMetadata &frame = mFrameList.editItemAt(j);
+                if (!frame.isEmpty()) {
+                    camera_metadata_entry_t entry;
+                    entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
+                    if (entry.count == 0) {
+                        ALOGE("%s: Can't find timestamp in frame!",
+                                __FUNCTION__);
+                        continue;
+                    }
+                    nsecs_t frameTimestamp = entry.data.i64[0];
+                    if (bufferTimestamp == frameTimestamp) {
+                        ALOGVV("%s: Found match %lld", __FUNCTION__,
+                                frameTimestamp);
+                        match = true;
+                    } else {
+                        // Compute the 64-bit delta explicitly: abs() takes an
+                        // int and would truncate a nanosecond difference.
+                        int64_t delta = bufferTimestamp > frameTimestamp ?
+                                bufferTimestamp - frameTimestamp :
+                                frameTimestamp - bufferTimestamp;
+                        if ( delta < 1000000) {
+                            ALOGVV("%s: Found close match %lld (delta %lld)",
+                                    __FUNCTION__, bufferTimestamp, delta);
+                            match = true;
+                        }
+                    }
+                }
+                if (match) {
+                    queueEntry.frame.acquire(frame);
+                    break;
+                }
+            }
+        }
+    }
+}
+
+/**
+ * Log the ZSL queue contents (buffer/frame timestamps and AE state per
+ * entry); also write the same text to fd when fd != -1.
+ */
+void ZslProcessor::dumpZslQueue(int fd) const {
+    String8 header("ZSL queue contents:");
+    String8 indent("    ");
+    ALOGV("%s", header.string());
+    if (fd != -1) {
+        header = indent + header + "\n";
+        write(fd, header.string(), header.size());
+    }
+    for (size_t i = 0; i < mZslQueue.size(); i++) {
+        const ZslPair &queueEntry = mZslQueue[i];
+        nsecs_t bufferTimestamp = queueEntry.buffer.mTimestamp;
+        camera_metadata_ro_entry_t entry;
+        nsecs_t frameTimestamp = 0;
+        int frameAeState = -1;
+        if (!queueEntry.frame.isEmpty()) {
+            entry = queueEntry.frame.find(ANDROID_SENSOR_TIMESTAMP);
+            if (entry.count > 0) frameTimestamp = entry.data.i64[0];
+            entry = queueEntry.frame.find(ANDROID_CONTROL_AE_STATE);
+            if (entry.count > 0) frameAeState = entry.data.u8[0];
+        }
+        // i is size_t; %zu keeps the varargs call well-defined on LP64.
+        String8 result =
+                String8::format("   %zu: b: %lld\tf: %lld, AE state: %d", i,
+                        bufferTimestamp, frameTimestamp, frameAeState);
+        ALOGV("%s", result.string());
+        if (fd != -1) {
+            result = indent + result + "\n";
+            write(fd, result.string(), result.size());
+        }
+
+    }
+}
+
+}; // namespace camera2
+}; // namespace android
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.h b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
new file mode 100644
index 0000000..5fb178f
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSOR_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSOR_H
+
+#include <utils/Thread.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+#include <utils/Mutex.h>
+#include <utils/Condition.h>
+#include <gui/BufferItemConsumer.h>
+#include <camera/CameraMetadata.h>
+
+#include "common/CameraDeviceBase.h"
+#include "api1/client2/ZslProcessorInterface.h"
+#include "api1/client2/FrameProcessor.h"
+
+namespace android {
+
+class Camera2Client;
+
+namespace camera2 {
+
+class CaptureSequencer;
+class Parameters;
+
/***
 * ZSL queue processing
 *
 * Maintains a queue of ZSL buffers (fed by a BufferItemConsumer) plus a
 * circular list of recent preview result metadata, and matches the two up
 * by sensor timestamp so a candidate buffer can be handed back to the HAL
 * for reprocessing when the user takes a picture.
 */
class ZslProcessor:
            virtual public Thread,
            virtual public BufferItemConsumer::FrameAvailableListener,
            virtual public FrameProcessor::FilteredListener,
            virtual public CameraDeviceBase::BufferReleasedListener,
                    public ZslProcessorInterface {
  public:
    ZslProcessor(sp<Camera2Client> client, wp<CaptureSequencer> sequencer);
    ~ZslProcessor();

    // From mZslConsumer
    virtual void onFrameAvailable();
    // From FrameProcessor
    virtual void onFrameAvailable(int32_t frameId, const CameraMetadata &frame);

    // From CameraDeviceBase::BufferReleasedListener
    virtual void onBufferReleased(buffer_handle_t *handle);

    /**
     ****************************************
     * ZslProcessorInterface implementation *
     ****************************************
     */

    status_t updateStream(const Parameters &params);
    status_t deleteStream();
    int getStreamId() const;

    status_t pushToReprocess(int32_t requestId);
    status_t clearZslQueue();

    void dump(int fd, const Vector<String16>& args) const;
  private:
    static const nsecs_t kWaitDuration = 10000000; // 10 ms

    enum {
        RUNNING,    // Accepting new buffers and metadata
        LOCKED      // A ZSL capture is in flight; queue is frozen
    } mState;

    wp<Camera2Client> mClient;
    wp<CameraDeviceBase> mDevice;
    wp<CaptureSequencer> mSequencer;
    // Camera ID, cached for log messages
    int mId;

    mutable Mutex mInputMutex;
    bool mZslBufferAvailable;
    Condition mZslBufferAvailableSignal;

    enum {
        NO_STREAM = -1
    };

    int mZslStreamId;
    int mZslReprocessStreamId;
    sp<BufferItemConsumer> mZslConsumer;
    sp<ANativeWindow>      mZslWindow;

    // A ZSL buffer paired with its (possibly still empty) result metadata
    struct ZslPair {
        BufferItemConsumer::BufferItem buffer;
        CameraMetadata frame;
    };

    // Frame metadata list is kept twice as deep as the buffer queue
    static const size_t kZslBufferDepth = 4;
    static const size_t kFrameListDepth = kZslBufferDepth * 2;
    // Circular list of recent preview result metadata
    Vector<CameraMetadata> mFrameList;
    size_t mFrameListHead;

    ZslPair mNextPair;

    // Circular queue of buffer/metadata pairs
    Vector<ZslPair> mZslQueue;
    size_t mZslQueueHead;
    size_t mZslQueueTail;

    // Most recent reprocess request, kept for debug dumps
    CameraMetadata mLatestCapturedRequest;

    virtual bool threadLoop();

    status_t processNewZslBuffer();

    // Match up entries from frame list to buffers in ZSL queue
    void findMatchesLocked();

    status_t clearZslQueueLocked();

    // NOTE(review): "id" is actually a file descriptor (-1 = log only);
    // consider renaming to "fd" to match the implementation.
    void dumpZslQueue(int id) const;
};
+
+
+}; //namespace camera2
+}; //namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
new file mode 100644
index 0000000..7c4da50
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
@@ -0,0 +1,482 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera2-ZslProcessor3"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+//#define LOG_NNDEBUG 0
+
+#ifdef LOG_NNDEBUG
+#define ALOGVV(...) ALOGV(__VA_ARGS__)
+#else
+#define ALOGVV(...) ((void)0)
+#endif
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <gui/Surface.h>
+
+#include "common/CameraDeviceBase.h"
+#include "api1/Camera2Client.h"
+#include "api1/client2/CaptureSequencer.h"
+#include "api1/client2/ZslProcessor3.h"
+#include "device3/Camera3Device.h"
+
+namespace android {
+namespace camera2 {
+
/*
 * Construct the HAL3 ZSL processor.
 *
 * Pre-fills the ZSL queue and frame metadata list with empty entries, and
 * registers itself with the capture sequencer (setZslProcessor).
 *
 * NOTE(review): 'client' is dereferenced for the camera ID, so it is
 * assumed non-null -- confirm all callers guarantee this.
 */
ZslProcessor3::ZslProcessor3(
    sp<Camera2Client> client,
    wp<CaptureSequencer> sequencer):
        Thread(false),
        mState(RUNNING),
        mClient(client),
        mSequencer(sequencer),
        mId(client->getCameraId()),
        mZslStreamId(NO_STREAM),
        mFrameListHead(0),
        mZslQueueHead(0),
        mZslQueueTail(0) {
    // Size both circular buffers up front with default-constructed entries
    mZslQueue.insertAt(0, kZslBufferDepth);
    mFrameList.insertAt(0, kFrameListDepth);
    sp<CaptureSequencer> captureSequencer = mSequencer.promote();
    if (captureSequencer != 0) captureSequencer->setZslProcessor(this);
}
+
/*
 * Destructor: removes the ZSL stream from the device.
 * deleteStream() acquires mInputMutex internally, so no lock is held here.
 */
ZslProcessor3::~ZslProcessor3() {
    ALOGV("%s: Exit", __FUNCTION__);
    deleteStream();
}
+
+void ZslProcessor3::onFrameAvailable(int32_t /*frameId*/,
+                                     const CameraMetadata &frame) {
+    Mutex::Autolock l(mInputMutex);
+    camera_metadata_ro_entry_t entry;
+    entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
+    nsecs_t timestamp = entry.data.i64[0];
+    (void)timestamp;
+    ALOGVV("Got preview metadata for timestamp %lld", timestamp);
+
+    if (mState != RUNNING) return;
+
+    mFrameList.editItemAt(mFrameListHead) = frame;
+    mFrameListHead = (mFrameListHead + 1) % kFrameListDepth;
+}
+
/*
 * Create (or re-create) the device's ZSL stream to match the current
 * parameters.
 *
 * If a stream already exists with different dimensions it is deleted
 * first; -EBUSY from that delete is passed through so the caller can
 * retry once the device is idle. On success, (re)registers this object
 * as the frame listener for the preview request ID range.
 *
 * Returns OK, INVALID_OPERATION if client/device are gone, or the error
 * from the underlying stream calls.
 */
status_t ZslProcessor3::updateStream(const Parameters &params) {
    ATRACE_CALL();
    ALOGV("%s: Configuring ZSL streams", __FUNCTION__);
    status_t res;

    Mutex::Autolock l(mInputMutex);

    sp<Camera2Client> client = mClient.promote();
    if (client == 0) {
        ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
        return INVALID_OPERATION;
    }
    // This processor is HAL3-only, so the device is known to be a
    // Camera3Device.
    sp<Camera3Device> device =
        static_cast<Camera3Device*>(client->getCameraDevice().get());
    if (device == 0) {
        ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
        return INVALID_OPERATION;
    }

    if (mZslStreamId != NO_STREAM) {
        // Check if stream parameters have to change
        uint32_t currentWidth, currentHeight;
        res = device->getStreamInfo(mZslStreamId,
                &currentWidth, &currentHeight, 0);
        if (res != OK) {
            ALOGE("%s: Camera %d: Error querying capture output stream info: "
                    "%s (%d)", __FUNCTION__,
                    client->getCameraId(), strerror(-res), res);
            return res;
        }
        if (currentWidth != (uint32_t)params.fastInfo.arrayWidth ||
                currentHeight != (uint32_t)params.fastInfo.arrayHeight) {
            ALOGV("%s: Camera %d: Deleting stream %d since the buffer "
                  "dimensions changed",
                __FUNCTION__, client->getCameraId(), mZslStreamId);
            res = device->deleteStream(mZslStreamId);
            if (res == -EBUSY) {
                // Propagate so the caller retries after the device idles
                ALOGV("%s: Camera %d: Device is busy, call updateStream again "
                      " after it becomes idle", __FUNCTION__, mId);
                return res;
            } else if(res != OK) {
                ALOGE("%s: Camera %d: Unable to delete old output stream "
                        "for ZSL: %s (%d)", __FUNCTION__,
                        client->getCameraId(), strerror(-res), res);
                return res;
            }
            mZslStreamId = NO_STREAM;
        }
    }

    if (mZslStreamId == NO_STREAM) {
        // Create stream for HAL production
        // TODO: Sort out better way to select resolution for ZSL

        // Note that format specified internally in Camera3ZslStream
        res = device->createZslStream(
                params.fastInfo.arrayWidth, params.fastInfo.arrayHeight,
                kZslBufferDepth,
                &mZslStreamId,
                &mZslStream);
        if (res != OK) {
            ALOGE("%s: Camera %d: Can't create ZSL stream: "
                    "%s (%d)", __FUNCTION__, client->getCameraId(),
                    strerror(-res), res);
            return res;
        }
    }
    // Receive preview result metadata for timestamp matching
    client->registerFrameListener(Camera2Client::kPreviewRequestIdStart,
            Camera2Client::kPreviewRequestIdEnd,
            this);

    return OK;
}
+
+status_t ZslProcessor3::deleteStream() {
+    ATRACE_CALL();
+    status_t res;
+
+    Mutex::Autolock l(mInputMutex);
+
+    if (mZslStreamId != NO_STREAM) {
+        sp<Camera2Client> client = mClient.promote();
+        if (client == 0) {
+            ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
+            return INVALID_OPERATION;
+        }
+
+        sp<Camera3Device> device =
+            reinterpret_cast<Camera3Device*>(client->getCameraDevice().get());
+        if (device == 0) {
+            ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+            return INVALID_OPERATION;
+        }
+
+        res = device->deleteStream(mZslStreamId);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Cannot delete ZSL output stream %d: "
+                    "%s (%d)", __FUNCTION__, client->getCameraId(),
+                    mZslStreamId, strerror(-res), res);
+            return res;
+        }
+
+        mZslStreamId = NO_STREAM;
+    }
+    return OK;
+}
+
+int ZslProcessor3::getStreamId() const {
+    Mutex::Autolock l(mInputMutex);
+    return mZslStreamId;
+}
+
/*
 * Select the best ZSL buffer (earliest good timestamp with AE converged
 * or locked), push it into the ZSL stream's input, and submit a
 * reprocess capture request built from the matching preview result
 * metadata.
 *
 * Returns:
 *  NOT_ENOUGH_DATA   - no usable candidate/buffer yet (caller should fall
 *                      back to a regular capture)
 *  BAD_VALUE         - selected frame metadata is missing the AE state
 *  INVALID_OPERATION - client gone, request build failed, or preview
 *                      could not be stopped
 *  other errors propagated from the stream/device calls
 */
status_t ZslProcessor3::pushToReprocess(int32_t requestId) {
    ALOGV("%s: Send in reprocess request with id %d",
            __FUNCTION__, requestId);
    Mutex::Autolock l(mInputMutex);
    status_t res;
    sp<Camera2Client> client = mClient.promote();

    if (client == 0) {
        ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
        return INVALID_OPERATION;
    }

    IF_ALOGV() {
        dumpZslQueue(-1);
    }

    size_t metadataIdx;
    nsecs_t candidateTimestamp = getCandidateTimestampLocked(&metadataIdx);

    if (candidateTimestamp == -1) {
        ALOGE("%s: Could not find good candidate for ZSL reprocessing",
              __FUNCTION__);
        return NOT_ENOUGH_DATA;
    }

    // Hand the buffer with this timestamp back to the HAL as the
    // reprocess input
    res = mZslStream->enqueueInputBufferByTimestamp(candidateTimestamp,
                                                    /*actualTimestamp*/NULL);

    if (res == mZslStream->NO_BUFFER_AVAILABLE) {
        ALOGV("%s: No ZSL buffers yet", __FUNCTION__);
        return NOT_ENOUGH_DATA;
    } else if (res != OK) {
        ALOGE("%s: Unable to push buffer for reprocessing: %s (%d)",
                __FUNCTION__, strerror(-res), res);
        return res;
    }

    {
        // Build the reprocess request from the selected frame's metadata
        CameraMetadata request = mFrameList[metadataIdx];

        // Verify that the frame is reasonable for reprocessing

        camera_metadata_entry_t entry;
        entry = request.find(ANDROID_CONTROL_AE_STATE);
        if (entry.count == 0) {
            ALOGE("%s: ZSL queue frame has no AE state field!",
                    __FUNCTION__);
            return BAD_VALUE;
        }
        if (entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_CONVERGED &&
                entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_LOCKED) {
            ALOGV("%s: ZSL queue frame AE state is %d, need full capture",
                    __FUNCTION__, entry.data.u8[0]);
            return NOT_ENOUGH_DATA;
        }

        // Rewrite the request as a reprocess request sourced from the
        // ZSL stream and targeting the client's capture stream
        uint8_t requestType = ANDROID_REQUEST_TYPE_REPROCESS;
        res = request.update(ANDROID_REQUEST_TYPE,
                &requestType, 1);
        uint8_t inputStreams[1] =
                { static_cast<uint8_t>(mZslStreamId) };
        if (res == OK) request.update(ANDROID_REQUEST_INPUT_STREAMS,
                inputStreams, 1);
        // TODO: Shouldn't we also update the latest preview frame?
        uint8_t outputStreams[1] =
                { static_cast<uint8_t>(client->getCaptureStreamId()) };
        if (res == OK) request.update(ANDROID_REQUEST_OUTPUT_STREAMS,
                outputStreams, 1);
        res = request.update(ANDROID_REQUEST_ID,
                &requestId, 1);

        if (res != OK ) {
            ALOGE("%s: Unable to update frame to a reprocess request",
                  __FUNCTION__);
            return INVALID_OPERATION;
        }

        // Stop preview before issuing the reprocess capture
        res = client->stopStream();
        if (res != OK) {
            ALOGE("%s: Camera %d: Unable to stop preview for ZSL capture: "
                "%s (%d)",
                __FUNCTION__, client->getCameraId(), strerror(-res), res);
            return INVALID_OPERATION;
        }

        // Update JPEG settings
        {
            SharedParameters::Lock l(client->getParameters());
            res = l.mParameters.updateRequestJpeg(&request);
            if (res != OK) {
                ALOGE("%s: Camera %d: Unable to update JPEG entries of ZSL "
                        "capture request: %s (%d)", __FUNCTION__,
                        client->getCameraId(),
                        strerror(-res), res);
                return res;
            }
        }

        // Keep a copy for debug dumps, then fire the capture
        mLatestCapturedRequest = request;
        res = client->getCameraDevice()->capture(request);
        if (res != OK ) {
            ALOGE("%s: Unable to send ZSL reprocess request to capture: %s"
                  " (%d)", __FUNCTION__, strerror(-res), res);
            return res;
        }

        // Freeze the queue until the capture completes
        // (see onBufferReleased)
        mState = LOCKED;
    }

    return OK;
}
+
+status_t ZslProcessor3::clearZslQueue() {
+    Mutex::Autolock l(mInputMutex);
+    // If in middle of capture, can't clear out queue
+    if (mState == LOCKED) return OK;
+
+    return clearZslQueueLocked();
+}
+
+status_t ZslProcessor3::clearZslQueueLocked() {
+    if (mZslStream != 0) {
+        return mZslStream->clearInputRingBuffer();
+    }
+    return OK;
+}
+
+void ZslProcessor3::dump(int fd, const Vector<String16>& /*args*/) const {
+    Mutex::Autolock l(mInputMutex);
+    if (!mLatestCapturedRequest.isEmpty()) {
+        String8 result("    Latest ZSL capture request:\n");
+        write(fd, result.string(), result.size());
+        mLatestCapturedRequest.dump(fd, 2, 6);
+    } else {
+        String8 result("    Latest ZSL capture request: none yet\n");
+        write(fd, result.string(), result.size());
+    }
+    dumpZslQueue(fd);
+}
+
/*
 * Thread entry point. This HAL3 processor has no per-buffer thread work
 * (the ring buffer lives inside Camera3ZslStream), so return false to
 * exit the thread immediately.
 */
bool ZslProcessor3::threadLoop() {
    // TODO: remove dependency on thread. For now, shut thread down right
    // away.
    return false;
}
+
+void ZslProcessor3::dumpZslQueue(int fd) const {
+    String8 header("ZSL queue contents:");
+    String8 indent("    ");
+    ALOGV("%s", header.string());
+    if (fd != -1) {
+        header = indent + header + "\n";
+        write(fd, header.string(), header.size());
+    }
+    for (size_t i = 0; i < mZslQueue.size(); i++) {
+        const ZslPair &queueEntry = mZslQueue[i];
+        nsecs_t bufferTimestamp = queueEntry.buffer.mTimestamp;
+        camera_metadata_ro_entry_t entry;
+        nsecs_t frameTimestamp = 0;
+        int frameAeState = -1;
+        if (!queueEntry.frame.isEmpty()) {
+            entry = queueEntry.frame.find(ANDROID_SENSOR_TIMESTAMP);
+            if (entry.count > 0) frameTimestamp = entry.data.i64[0];
+            entry = queueEntry.frame.find(ANDROID_CONTROL_AE_STATE);
+            if (entry.count > 0) frameAeState = entry.data.u8[0];
+        }
+        String8 result =
+                String8::format("   %d: b: %lld\tf: %lld, AE state: %d", i,
+                        bufferTimestamp, frameTimestamp, frameAeState);
+        ALOGV("%s", result.string());
+        if (fd != -1) {
+            result = indent + result + "\n";
+            write(fd, result.string(), result.size());
+        }
+
+    }
+}
+
+nsecs_t ZslProcessor3::getCandidateTimestampLocked(size_t* metadataIdx) const {
+    /**
+     * Find the smallest timestamp we know about so far
+     * - ensure that aeState is either converged or locked
+     */
+
+    size_t idx = 0;
+    nsecs_t minTimestamp = -1;
+
+    size_t emptyCount = mFrameList.size();
+
+    for (size_t j = 0; j < mFrameList.size(); j++) {
+        const CameraMetadata &frame = mFrameList[j];
+        if (!frame.isEmpty()) {
+
+            emptyCount--;
+
+            camera_metadata_ro_entry_t entry;
+            entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
+            if (entry.count == 0) {
+                ALOGE("%s: Can't find timestamp in frame!",
+                        __FUNCTION__);
+                continue;
+            }
+            nsecs_t frameTimestamp = entry.data.i64[0];
+            if (minTimestamp > frameTimestamp || minTimestamp == -1) {
+
+                entry = frame.find(ANDROID_CONTROL_AE_STATE);
+
+                if (entry.count == 0) {
+                    /**
+                     * This is most likely a HAL bug. The aeState field is
+                     * mandatory, so it should always be in a metadata packet.
+                     */
+                    ALOGW("%s: ZSL queue frame has no AE state field!",
+                            __FUNCTION__);
+                    continue;
+                }
+                if (entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_CONVERGED &&
+                        entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_LOCKED) {
+                    ALOGVV("%s: ZSL queue frame AE state is %d, need "
+                           "full capture",  __FUNCTION__, entry.data.u8[0]);
+                    continue;
+                }
+
+                minTimestamp = frameTimestamp;
+                idx = j;
+            }
+
+            ALOGVV("%s: Saw timestamp %lld", __FUNCTION__, frameTimestamp);
+        }
+    }
+
+    if (emptyCount == mFrameList.size()) {
+        /**
+         * This could be mildly bad and means our ZSL was triggered before
+         * there were any frames yet received by the camera framework.
+         *
+         * This is a fairly corner case which can happen under:
+         * + a user presses the shutter button real fast when the camera starts
+         *     (startPreview followed immediately by takePicture).
+         * + burst capture case (hitting shutter button as fast possible)
+         *
+         * If this happens in steady case (preview running for a while, call
+         *     a single takePicture) then this might be a fwk bug.
+         */
+        ALOGW("%s: ZSL queue has no metadata frames", __FUNCTION__);
+    }
+
+    ALOGV("%s: Candidate timestamp %lld (idx %d), empty frames: %d",
+          __FUNCTION__, minTimestamp, idx, emptyCount);
+
+    if (metadataIdx) {
+        *metadataIdx = idx;
+    }
+
+    return minTimestamp;
+}
+
/*
 * Camera3StreamBufferListener: HAL acquired a buffer from the ZSL stream.
 * No action required here.
 */
void ZslProcessor3::onBufferAcquired(const BufferInfo& /*bufferInfo*/) {
    // Intentionally left empty
    // Although theoretically we could use this to get better dump info
}
+
+void ZslProcessor3::onBufferReleased(const BufferInfo& bufferInfo) {
+    Mutex::Autolock l(mInputMutex);
+
+    // ignore output buffers
+    if (bufferInfo.mOutput) {
+        return;
+    }
+
+    // TODO: Verify that the buffer is in our queue by looking at timestamp
+    // theoretically unnecessary unless we change the following assumptions:
+    // -- only 1 buffer reprocessed at a time (which is the case now)
+
+    // Erase entire ZSL queue since we've now completed the capture and preview
+    // is stopped.
+    //
+    // We need to guarantee that if we do two back-to-back captures,
+    // the second won't use a buffer that's older/the same as the first, which
+    // is theoretically possible if we don't clear out the queue and the
+    // selection criteria is something like 'newest'. Clearing out the queue
+    // on a completed capture ensures we'll only use new data.
+    ALOGV("%s: Memory optimization, clearing ZSL queue",
+          __FUNCTION__);
+    clearZslQueueLocked();
+
+    // Required so we accept more ZSL requests
+    mState = RUNNING;
+}
+
+}; // namespace camera2
+}; // namespace android
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
new file mode 100644
index 0000000..35b85f5
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSOR3_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSOR3_H
+
+#include <utils/Thread.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+#include <utils/Mutex.h>
+#include <utils/Condition.h>
+#include <gui/BufferItemConsumer.h>
+#include <camera/CameraMetadata.h>
+
+#include "api1/client2/FrameProcessor.h"
+#include "api1/client2/ZslProcessorInterface.h"
+#include "device3/Camera3ZslStream.h"
+
+namespace android {
+
+class Camera2Client;
+
+namespace camera2 {
+
+class CaptureSequencer;
+class Parameters;
+
/***
 * ZSL queue processing
 *
 * HAL3 variant: the ZSL buffer ring lives inside the device's
 * Camera3ZslStream, so this class tracks recent preview result metadata
 * and selects a reprocess candidate buffer by sensor timestamp.
 */
class ZslProcessor3 :
                    public ZslProcessorInterface,
                    public camera3::Camera3StreamBufferListener,
            virtual public Thread,
            virtual public FrameProcessor::FilteredListener {
  public:
    ZslProcessor3(sp<Camera2Client> client, wp<CaptureSequencer> sequencer);
    ~ZslProcessor3();

    // From FrameProcessor
    virtual void onFrameAvailable(int32_t frameId, const CameraMetadata &frame);

    /**
     ****************************************
     * ZslProcessorInterface implementation *
     ****************************************
     */

    virtual status_t updateStream(const Parameters &params);
    virtual status_t deleteStream();
    virtual int getStreamId() const;

    virtual status_t pushToReprocess(int32_t requestId);
    virtual status_t clearZslQueue();

    void dump(int fd, const Vector<String16>& args) const;

  protected:
    /**
     **********************************************
     * Camera3StreamBufferListener implementation *
     **********************************************
     */
    typedef camera3::Camera3StreamBufferListener::BufferInfo BufferInfo;
    // Buffer was acquired by the HAL
    virtual void onBufferAcquired(const BufferInfo& bufferInfo);
    // Buffer was released by the HAL
    virtual void onBufferReleased(const BufferInfo& bufferInfo);

  private:
    // NOTE(review): kWaitDuration is not referenced in ZslProcessor3.cpp
    // (threadLoop exits immediately) -- confirm whether it can be removed.
    static const nsecs_t kWaitDuration = 10000000; // 10 ms

    enum {
        RUNNING,    // Accepting new preview metadata / ZSL requests
        LOCKED      // A ZSL reprocess capture is in flight
    } mState;

    wp<Camera2Client> mClient;
    wp<CaptureSequencer> mSequencer;

    // Camera ID, cached for log messages
    const int mId;

    mutable Mutex mInputMutex;

    enum {
        NO_STREAM = -1
    };

    int mZslStreamId;
    // Device-side ZSL stream; owns the buffer ring
    sp<camera3::Camera3ZslStream> mZslStream;

    struct ZslPair {
        BufferItemConsumer::BufferItem buffer;
        CameraMetadata frame;
    };

    // Frame metadata list is kept twice as deep as the buffer queue
    static const size_t kZslBufferDepth = 4;
    static const size_t kFrameListDepth = kZslBufferDepth * 2;
    // Circular list of recent preview result metadata
    Vector<CameraMetadata> mFrameList;
    size_t mFrameListHead;

    // NOTE(review): mNextPair and the mZslQueue members below appear to
    // be carried over from the HAL2 ZslProcessor; here buffers live in
    // Camera3ZslStream and mZslQueue entries are only ever the empty
    // defaults inserted by the constructor -- confirm whether these can
    // be removed.
    ZslPair mNextPair;

    Vector<ZslPair> mZslQueue;
    size_t mZslQueueHead;
    size_t mZslQueueTail;

    // Most recent reprocess request, kept for debug dumps
    CameraMetadata mLatestCapturedRequest;

    virtual bool threadLoop();

    status_t clearZslQueueLocked();

    // NOTE(review): "id" is actually a file descriptor (-1 = log only);
    // consider renaming to "fd" to match the implementation.
    void dumpZslQueue(int id) const;

    nsecs_t getCandidateTimestampLocked(size_t* metadataIdx) const;
};
+
+
+}; //namespace camera2
+}; //namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h b/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h
new file mode 100644
index 0000000..183c0c2
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSORINTERFACE_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSORINTERFACE_H
+
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+namespace android {
+namespace camera2 {
+
+class Parameters;
+
+class ZslProcessorInterface : virtual public RefBase {
+public:
+
+    // Get ID for use with android.request.outputStreams / inputStreams
+    virtual int getStreamId() const = 0;
+
+    // Update the streams by recreating them if the size/format has changed
+    virtual status_t updateStream(const Parameters& params) = 0;
+
+    // Delete the underlying CameraDevice streams
+    virtual status_t deleteStream() = 0;
+
+    /**
+     * Submits a ZSL capture request (id = requestId)
+     *
+     * An appropriate ZSL buffer is selected by the closest timestamp,
+     * then we push that buffer to be reprocessed by the HAL.
+     * A capture request is created and submitted on behalf of the client.
+     */
+    virtual status_t pushToReprocess(int32_t requestId) = 0;
+
+    // Flush the ZSL buffer queue, freeing up all the buffers
+    virtual status_t clearZslQueue() = 0;
+
+    // (Debugging only) Dump the current state to the specified file descriptor
+    virtual void dump(int fd, const Vector<String16>& args) const = 0;
+};
+
+}; //namespace camera2
+}; //namespace android
+
+#endif